bacula-15.0.3/0000755000175000017500000000000014771010173012662 5ustar bsbuildbsbuildbacula-15.0.3/scripts/0000755000175000017500000000000014771010173014351 5ustar bsbuildbsbuildbacula-15.0.3/scripts/mtx-changer.conf0000644000175000017500000000472414771010173017444 0ustar bsbuildbsbuild# # Copyright (C) 2000-2015 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # # # This file is sourced by the mtx-changer script every time it runs. # You can put your site customization here, and when you do an # upgrade, the process should not modify this file. Thus you # preserve your mtx-changer configuration. # # We update the version when an incompatible change # to mtx-changer or this conf file is made, such as # adding a new required variable. version=2 # Set to 1 if you want to do offline before unload offline=0 # Set to amount of time in seconds to wait after an offline offline_sleep=0 # Set to amount of time in seconds to wait after a load load_sleep=0 # Set to 1 to do an inventory before a status. Not normally needed. inventory=0 # If you have a VXA PacketLoader, it might display a different # Storage Element line, so try setting the following to 1 vxa_packetloader=0 # # Debug logging # # If you have multiple SD's, set this differently for each one # so you know which message comes from which one. This can # be any string, and will appear in each debug message just # after the time stamp. chgr_id=0 # Set to 1 if you want debug info written to a log debug_log=0 # Set to debug level you want to see # 0 is off # 10 is important events (load, unload, loaded) # 100 is everything # Note debug_log must be set to 1 for anything to be generated # debug_level=10 # Debug levels by importance # Normally you do not need to change this dbglvl=100 # More important messages idbglvl=10 # # mt status output # SunOS No Additional Sense # FreeBSD Current Driver State: at rest. # Linux ONLINE # Note Debian has a different mt than the standard Linux version. # When no tape is in the drive it waits 2 minutes. # When a tape is in the drive, it prints user unfriendly output. # Note, with Ubuntu Gusty (8.04), there are two versions of mt, # so we attempt to figure out which one. # Note, with mt GNU cpio, we use "drive status" output # OS=`uname` case ${OS} in SunOS) ready="No Additional Sense" ;; FreeBSD) ready="Current Driver State: at rest." ;; Linux) ready="ONLINE" if test -f /etc/debian_version ; then mt --version|grep "mt-st" >/dev/null 2>&1 if test $? -eq 1 ; then ready="drive status" fi else mt --version|grep "GNU cpio" >/dev/null 2>&1 if test $? -eq 0 ; then ready="drive status" fi fi ;; esac bacula-15.0.3/scripts/bat.desktop.xsu.in0000644000175000017500000000054414771010173017740 0ustar bsbuildbsbuild[Desktop Entry] Name=Bacula Administration Tool Comment=Bacula Director Console Icon=/usr/share/pixmaps/bat_icon.png Exec=kdesu -t -c "@sbindir@/bat -c @sysconfdir@/bat.conf" -f @sysconfdir@/bat.kdesu Terminal=false Type=Application Encoding=UTF-8 StartupNotify=true X-Desktop-File-Install-Version=0.3 Categories=System;Application;Utility;X-Red-Hat-Base; bacula-15.0.3/scripts/bacula-ctl-fd.in0000644000175000017500000001333314771010173017302 0ustar bsbuildbsbuild#! /bin/sh # # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2023 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. 
# # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. # # bacula-ctl-fd This shell script takes care of starting and stopping # the bacula File daemon. # # This is pretty much watered down version of the RedHat script # that works on Solaris as well as Linux, but it won't work everywhere. # # description: The Leading Open Source Backup Solution. # PSCMD="@PSCMD@" PS="ps" # # On Solaris, you may need to use nawk, or alternatively, # add the GNU binaries to your path, such as /usr/xpg4/bin # AWK=@AWK@ # All these are not *really* needed but it makes it # easier to "steal" this code for the development # environment where they are different. # BACFDBIN=@sbindir@ BACFDCFG=@sysconfdir@ PIDDIR=@piddir@ SUBSYSDIR=@subsysdir@ FD_PORT=@fd_port@ FD_USER=@fd_user@ FD_GROUP=@fd_group@ Bacula="@BACULA@" PIDOF=@PIDOF@ # A function to stop a program. killproc() { RC=0 # Test syntax. if [ $# = 0 ]; then echo "Usage: killproc {program} {port} [signal]" return 1 fi notset=0 # check for third arg to be kill level if [ "$3" != "" ] ; then killlevel=$3 else notset=1 killlevel="-9" fi # Get base program name base=`basename $1` # Find pid. pid=`pidofproc $base $2` # Kill it. if [ "$pid" != "" ] ; then if [ "$notset" = "1" ] ; then if ${PS} -p "$pid">/dev/null 2>&1; then # TERM first, then KILL if not dead kill -TERM $pid 2>/dev/null sleep 1 if ${PS} -p "$pid" >/dev/null 2>&1 ; then sleep 1 if ${PS} -p "$pid" >/dev/null 2>&1 ; then sleep 3 if ${PS} -p "$pid" >/dev/null 2>&1 ; then kill -KILL $pid 2>/dev/null fi fi fi fi ${PS} -p "$pid" >/dev/null 2>&1 RC=$? [ $RC -eq 0 ] && failure "$base shutdown" || success "$base shutdown" # RC=$((! $RC)) # use specified level only else if ${PS} -p "$pid" >/dev/null 2>&1; then kill $killlevel $pid 2>/dev/null RC=$? [ $RC -eq 0 ] && success "$base $killlevel" || failure "$base $killlevel" fi fi else failure "$base shutdown" fi # Remove pid file if any. if [ "$notset" = "1" ]; then rm -f ${PIDDIR}/$base.$2.pid fi return $RC } # A function to find the pid of a program. pidofproc() { pid="" # Test syntax. if [ $# = 0 ] ; then echo "Usage: pidofproc {program}" return 1 fi # Get base program name base=`basename $1` # First try PID file if [ -f ${PIDDIR}/$base.$2.pid ] ; then pid=`head -n 1 ${PIDDIR}/$base.$2.pid` if [ "$pid" != "" ] ; then echo $pid return 0 fi fi # Next try "pidof" if [ -x ${PIDOF} ] ; then pid=`${PIDOF} $1` fi if [ "$pid" != "" ] ; then echo $pid return 0 fi # Finally try to extract it from ps pid=`${PSCMD} | grep $1 | ${AWK} '{ print $1 }' | tr '\n' ' '` echo $pid return 0 } status() { pid="" # Test syntax. if [ $# = 0 ] ; then echo "Usage: status {program} {port}" return 1 fi # Get base program name base=`basename $1` # First try "pidof" if [ -x ${PIDOF} ] ; then pid=`${PIDOF} $1` fi if [ "$pid" != "" ] ; then echo "$base (pid $pid) is running..." return 0 else pid=`${PSCMD} | ${AWK} 'BEGIN { prog=ARGV[1]; ARGC=1 } { if ((prog == $2) || (("(" prog ")") == $2) || (("[" prog "]") == $2) || ((prog ":") == $2)) { print $1 ; exit 0 } }' $1` if [ "$pid" != "" ] ; then echo "$base (pid $pid) is running..." 
return 0 fi fi # Next try the PID files if [ -f ${PIDDIR}/$base.$2.pid ] ; then pid=`head -n 1 ${PIDDIR}/$base.$2.pid` if [ "$pid" != "" ] ; then echo "$base dead but pid file exists" return 1 fi fi # See if the subsys lock exists if [ -f ${SUBSYSDIR}/$base ] ; then echo "$base dead but subsys locked" return 2 fi echo "$base is stopped" return 3 } success() { return 0 } failure() { rc=$? return $rc } OS=`uname -s` # if /lib/tls exists, force Bacula to use the glibc pthreads instead if [ -d "/lib/tls" -a $OS = "Linux" -a `uname -r | cut -c1-3` = "2.4" ] ; then export LD_ASSUME_KERNEL=2.4.19 fi case "$1" in start) [ -x ${BACFDBIN}/bacula-fd ] && { echo "Starting the $Bacula File daemon" OPTIONS='' if [ "${FD_USER}" != '' ]; then OPTIONS="${OPTIONS} -u ${FD_USER}" fi if [ "${FD_GROUP}" != '' ]; then OPTIONS="${OPTIONS} -g ${FD_GROUP}" fi if [ "x${VALGRIND_FD}" = "x1" ]; then valgrind --leak-check=full ${BACFDBIN}/bacula-fd $2 $3 ${OPTIONS} -v -c ${BACFDCFG}/bacula-fd.conf else ${BACFDBIN}/bacula-fd $2 $3 ${OPTIONS} -v -c ${BACFDCFG}/bacula-fd.conf fi } ;; stop) # Stop the FD first so that SD will fail jobs and update catalog [ -x ${BACFDBIN}/bacula-fd ] && { echo "Stopping the $Bacula File daemon" killproc ${BACFDBIN}/bacula-fd ${FD_PORT} $2 } ;; restart) $0 stop sleep 5 $0 start ;; status) [ -x ${BACFDBIN}/bacula-fd ] && status ${BACFDBIN}/bacula-fd ${FD_PORT} ;; *) echo "Usage: $0 {start|stop|restart|status}" exit 1 ;; esac exit 0 bacula-15.0.3/scripts/install-key-manager.sh.in0000644000175000017500000000703514771010173021163 0ustar bsbuildbsbuild#!/bin/sh # # Copyright (C) 2023 Bacula Systems SA # License: BSD 2-Clause; see file LICENSE-FOSS # # This script setup a "master-key" for the volume encryption on the SD # # tell where GNUPG will work (usually in /opt/bacula/etc/gnupg) GNUPGHOME="@sysconfdir@/gnupg" KEYMAN_CONF="@sysconfdir@/key-manager.conf" ############################################################################# # # usage # ############################################################################# usage() { echo "$0 [ check | install ]" echo "setup a master key for the key-manager" exit 1 } ############################################################################# # # check_gnupg # ############################################################################# check_gnupg() { # Check if GnuPG is installed GNUPGBIN=`which gpg` if [ -z "$GNUPGBIN" ] ; then echo "GnuPG is not found or not installed" >&2 exit 1 fi } ############################################################################# # # check # ############################################################################# check_python_gnupg() { # Check if the python3 API is installed for GnuPG PYTHON3=`which python3` if [ -z "$PYTHON3" ] ; then echo "python3 is not found or not installed" >&2 exit 1 fi OUT=`mktemp` $PYTHON3 -c 'import gnupg;print("OK" if gnupg.GPG else "KO")' >$OUT 2>/dev/null out=`cat $OUT` if [ "$out" != "OK" ] ; then echo "python3 gnupg module is not found" >&2 echo "try: pip3 install gnupg" >&2 exit 1 fi } ############################################################################# # # check # ############################################################################# check() { check_gnupg check_python_gnupg if [ -e $GNUPGHOME ] ; then echo "Directory \"$GNUPGHOME\" exists" else echo "Directory \"$GNUPGHOME\" doesn't exist" fi exit 0 } ############################################################################# # # install # 
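# A short recap of the two sub-commands handled by this script
# (see usage(), check() and install() for the details):
#
#   install-key-manager.sh check     verify that gpg, python3 and the python
#                                    gnupg module are available and report
#                                    whether "$GNUPGHOME" already exists
#   install-key-manager.sh install   create "$GNUPGHOME", generate the master
#                                    key and write its fingerprint to "$KEYMAN_CONF"
#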
############################################################################# install() { check_gnupg # Don't overwrite an existing configuration if [ -e "$GNUPGHOME" ] ; then echo "Directory \"$GNUPGHOME\" already exists, abort" >&2 exit 1 fi mkdir $GNUPGHOME chmod go-rwx $GNUPGHOME export GNUPGHOME PASSPHRASE=`openssl rand -base64 10` GNUPG_SCRIPT=`mktemp` cat > $GNUPG_SCRIPT <$GNUPG_OUT 2>&1 if [ $? != 0 ] ; then cat $GNUPG_OUT rm $GNUPG_OUT echo "Error with gpg" >&2 exit 1 fi rm $GNUPG_SCRIPT # retrieve the fingerprint of the key fpr=`$GNUPGBIN -k --with-colons | awk -F : '$1 ~/fpr/ { print $10;exit }'` cat >$KEYMAN_CONF </dev/null # does dirname exit? if [ $? = 0 ] ; then cwd=`dirname $0` if [ x$cwd = x. ]; then cwd=`pwd` fi if [ x$cwd = x@sbindir@ ] ; then echo "bconsole not properly installed." exit 1 fi fi if [ x@sbindir@ = x@sysconfdir@ ]; then echo "bconsole not properly installed." exit 1 fi if [ $# = 1 ] ; then echo "doing bconsole $1.conf" @sbindir@/bconsole -c $1.conf else @sbindir@/bconsole -c @sysconfdir@/bconsole.conf fi bacula-15.0.3/scripts/breload.in0000644000175000017500000000344314771010173016315 0ustar bsbuildbsbuild#! /bin/sh # # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. # # breload This shell script takes care of reloading the director after # a backup of the configuration and a bacula-dir -t test # # BACDIRBIN=@sbindir@ BACDIRCFG=@sysconfdir@ BACWORKDIR=@working_dir@ BACBKPDIR=$BACWORKDIR/bkp Bacula="@BACULA@" DIR_USER=@dir_user@ RET=0 if [ -x ${BACDIRBIN}/bacula-dir -a -r ${BACDIRCFG}/bacula-dir.conf ]; then echo "Testing the $Bacula Director daemon configuration" if [ $(whoami) != "$DIR_USER" ]; then USER_OPT="-u $DIR_USER" fi ${BACDIRBIN}/bacula-dir -t $USER_OPT -c ${BACDIRCFG}/bacula-dir.conf RET=$? if [ $RET = 0 ]; then if [ ! 
-d $BACBKPDIR ]; then echo "Creating Backup configuration directory" mkdir -p $BACBKPDIR chmod 700 $BACBKPDIR chown $DIR_USER $BACBKPDIR fi if [ -d $BACDIRCFG/conf.d ]; then CONFD=$BACDIRCFG/conf.d fi if [ -d $BACBKPDIR ]; then echo "Backup configuration" tar cfz $BACBKPDIR/bacula-dir-conf.$(date +%s).tgz $BACDIRCFG/*conf $CONFD fi echo reload | ${BACDIRBIN}/bconsole >/dev/null echo "Reloading configuration" else echo "Can't reload configuration, please correct errors first" fi fi exit $RET bacula-15.0.3/scripts/bacula-tray-monitor.desktop.in0000644000175000017500000000046314771010173022245 0ustar bsbuildbsbuild[Desktop Entry] Name=Bacula Monitor Comment=Notification Tray Monitor Icon=bacula-tray-monitor Exec=@sbindir@/bacula-tray-monitor -c @sysconfdir@/bacula-tray-monitor.conf Terminal=false Type=Application Encoding=UTF-8 X-Desktop-File-Install-Version=0.3 Categories=System;Application;Utility;X-Red-Hat-Base; bacula-15.0.3/scripts/MaximumConcurrentJobPerLevel0000755000175000017500000001206214771010173022052 0ustar bsbuildbsbuild#!/usr/bin/perl -w # Copyright (C) 2000-2023 Bacula Systems SA # License: BSD 2-Clause; see file LICENSE-FOSS use strict; my $VERSION = 1.1; ################################################################ # Installation ################################################################ # # - Install the perl extension JSON (perl-JSON or libjson-perl) # # - Copy the script into /opt/bacula/scripts # - Configure the variables at the top of the script (bconsole, limits) # - Use the following runscript # Job { # RunScript { # RunsWhen = Queued # Command = "/opt/bacula/scripts/MaximumConcurrentJobPerLevel '%c' %l" # Abort Job On Error = no # RunsOnClient = no # } # ... # } # # Can be executed manually, and the VERBOSE=1 environnement variable # might help to diagnose problems. # # We use a file per client and level to avoid concurrency issues # the location of the file is controlled by the $working variable. ################################################################ # Arguments my $client = shift or usage(); my $level = shift or usage(); my $verbose = $ENV{VERBOSE} || 0; ################################################################ # Custom my $bconsole = "/opt/bacula/bin/bconsole -u10"; my $working = "/opt/bacula/working/mcjpl"; my $conflict_time = 5; my %MaximumConcurrentJob = ( 'Full' => 1, 'Differential' => 1, 'Incremental' => 1 ); ################################################################ # The Job intend to use a separate file-daemon for each of our clusters. The # schedule calls for Full, Incremental, and Differential backups to # occasionally run simultaneously but I want to make sure that a slot is always # open for one job of each level to run against the cluster. # The behavior might be summarized by: # Maximum Concurrent Full Jobs = 1 # Maximum Concurrent Differential Jobs = 1 # Maximum Concurrent Incremental Jobs = 1 sub usage { print "ERROR: Incorrect usage: $0 client level\n"; exit -1; } use File::Temp; # The JSON package must be installed libjson-perl or perl-JSON eval "use JSON;"; if ($@) { print "ERROR: Perl JSON module not found. Job control disabled.\n$@"; exit -1; } # We store some information in our $working directory if (! 
-d $working) { mkdir($working); } my $l; # Get the list of running jobs for the same level and the same client if ($level =~ /^([FDI])/) { $l = $1; } else { print "Level $level not handled by Job control procedure\n"; exit -1; } # We escape the client name to avoid issues with unexpected characters my $client_esc = $client; $client_esc =~ s/[^a-z0-9.-_]/_/gi; # The file in our working directory is used to avoid concurrent conflicts # If the same level for the given client was authorized few seconds ago, # we can delay our test to the next loop. my @attrs = stat("$working/${client_esc}_${l}"); if (@attrs) { # attrs[9] is the mtime if ($attrs[9] > scalar(time() - $conflict_time)) { print "Job started recently with the same level, testing the next time\n"; exit 1; } } # We put our bconsole commands output into a temp file my ($fh, $filename) = File::Temp::tempfile(); if (!open(FP, "|$bconsole> $filename")) { print "ERROR: Unable to execute bconsole. Job control disabled.\n$!"; unlink($filename); exit -1; } print FP ".api 2 api_opts=j\n"; print FP ".status dir running client=\"$client\"\nquit\n"; close(FP); unlink($filename); # The file is still open via tempfile() my $running; while (my $line = <$fh>) { if ($verbose) { print "DEBUG: $line"; } # {"running":[{"jobid":3,"level":"F","type":"B","status":"a","status_desc":"SD despooling Attributes","comment":"","jobbytes":0,"jobfiles":0,"job":"BackupClient1.2023-03-01_13.46.46_03","name":"BackupClient1","clientname":"zog8-fd","fileset":"Full Set","storage":"File1","rstorage":"","schedtime_epoch":1677674805,"schedtime":"2023-03-01 13:46:45","starttime_epoch":1677674808,"starttime":"2023-03-01 13:46:48","priority":10,"errors":0}],"error":0,"errmsg":""} if ($line =~ /^\{/) { $running = $line; last; } } if (!$running) { print "ERROR: Unable to get running job list. Job control disabled.\n"; exit -1; } # We have a JSON string that we can decode and analyze. All parameters # can be used in our decision to run or not my $json = JSON::decode_json($running); if (!$json || !$json->{running}) { print "ERROR: Unable to decode JSON output from Director. Job control disabled.\n"; exit -1; } # In this example, we filter the job list by level and by status my @jobs = grep { $_->{level} eq $l && $_->{status} eq 'R' } @{ $json->{running} }; # @jobs contains the list of running jobs with the given level my $nb = scalar(@jobs); print "Found $nb Job(s) running at level $level for $client\n"; # We do a simple check on the number of jobs running for the client at a certain level if ($nb < $MaximumConcurrentJob{$level}) { if (open(FP, ">$working/${client_esc}_${l}")) { close(FP); } # OK, let it go! 
exit 0; } else { # Need to wait for the next time exit 1; } bacula-15.0.3/scripts/btraceback.dbx0000644000175000017500000000232614771010173017134 0ustar bsbuildbsbuild# btraceback.dbx # # Copyright (C) 2000-2020 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # dbxenv language_mode c++ echo "fail_time ==> \c"; print -l (char *)fail_time echo "exename ==> \c"; print -l (char *)exename echo "exepath ==> \c"; print -l (char *)exepath echo "assert_msg ==> \c"; print -l (char *)assert_msg echo "db_engine_name ==> \c"; print -l (char *)db_engine_name echo "version ==> \c"; print -l (char *)version echo "host_os ==> \c"; print -l (char *)host_os echo "distname ==> \c"; print -l (char *)distname echo "distver ==> \c"; print -l (char *)distver echo "dist_name ==> \c"; print -l (char *)dist_name echo "******** RUNNING THREADS/LWPS:" echo lwps echo echo echo "******** STACK TRACE OF CURRENT THREAD/LWP:" echo where echo echo echo "******** VARIABLES DUMP OF CURRENT THREAD/LWP:" echo dump for LWP in $(lwps | sh sed -e 's/.*@//' -e 's/ .*//'); do ( if lwp l@$LWP; then echo "******************************************" echo echo "******** STACK TRACE OF THREAD/LWP ${LWP}:" echo where echo echo "******** VARIABLES DUMP OF THREAD/LWP ${LWP}:" echo dump echo "******************************************" fi ) done quit bacula-15.0.3/scripts/bat.desktop.consolehelper.in0000644000175000017500000000047314771010173021764 0ustar bsbuildbsbuild[Desktop Entry] Name=Bacula Administration Tool Comment=Bacula Director Console Icon=/usr/share/pixmaps/bat_icon.png Exec=/usr/bin/bat -c @sysconfdir@/bat.conf Terminal=false Type=Application Encoding=UTF-8 StartupNotify=true X-Desktop-File-Install-Version=0.3 Categories=System;Application;Utility;X-Red-Hat-Base; bacula-15.0.3/scripts/bacula-ctl-sd.in0000644000175000017500000001332614771010173017321 0ustar bsbuildbsbuild#! /bin/sh # # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2023 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. # # bacula-ctl-sd This shell script takes care of starting and stopping # the bacula Storage daemon # # This is pretty much watered down version of the RedHat script # that works on Solaris as well as Linux, but it won't work everywhere. # # description: The Leading Open Source Backup Solution. # PSCMD="@PSCMD@" PS="ps" # # On Solaris, you may need to use nawk, or alternatively, # add the GNU binaries to your path, such as /usr/xpg4/bin # AWK=@AWK@ # All these are not *really* needed but it makes it # easier to "steal" this code for the development # environment where they are different. # BACSDBIN=@sbindir@ BACSDCFG=@sysconfdir@ PIDDIR=@piddir@ SUBSYSDIR=@subsysdir@ SD_PORT=@sd_port@ SD_USER=@sd_user@ SD_GROUP=@sd_group@ Bacula="@BACULA@" PIDOF=@PIDOF@ # A function to stop a program. killproc() { RC=0 # Test syntax. 
if [ $# = 0 ]; then echo "Usage: killproc {program} {port} [signal]" return 1 fi notset=0 # check for third arg to be kill level if [ "$3" != "" ] ; then killlevel=$3 else notset=1 killlevel="-9" fi # Get base program name base=`basename $1` # Find pid. pid=`pidofproc $base $2` # Kill it. if [ "$pid" != "" ] ; then if [ "$notset" = "1" ] ; then if ${PS} -p "$pid">/dev/null 2>&1; then # TERM first, then KILL if not dead kill -TERM $pid 2>/dev/null sleep 1 if ${PS} -p "$pid" >/dev/null 2>&1 ; then sleep 1 if ${PS} -p "$pid" >/dev/null 2>&1 ; then sleep 3 if ${PS} -p "$pid" >/dev/null 2>&1 ; then kill -KILL $pid 2>/dev/null fi fi fi fi ${PS} -p "$pid" >/dev/null 2>&1 RC=$? [ $RC -eq 0 ] && failure "$base shutdown" || success "$base shutdown" # RC=$((! $RC)) # use specified level only else if ${PS} -p "$pid" >/dev/null 2>&1; then kill $killlevel $pid 2>/dev/null RC=$? [ $RC -eq 0 ] && success "$base $killlevel" || failure "$base $killlevel" fi fi else failure "$base shutdown" fi # Remove pid file if any. if [ "$notset" = "1" ]; then rm -f ${PIDDIR}/$base.$2.pid fi return $RC } # A function to find the pid of a program. pidofproc() { pid="" # Test syntax. if [ $# = 0 ] ; then echo "Usage: pidofproc {program}" return 1 fi # Get base program name base=`basename $1` # First try PID file if [ -f ${PIDDIR}/$base.$2.pid ] ; then pid=`head -n 1 ${PIDDIR}/$base.$2.pid` if [ "$pid" != "" ] ; then echo $pid return 0 fi fi # Next try "pidof" if [ -x ${PIDOF} ] ; then pid=`${PIDOF} $1` fi if [ "$pid" != "" ] ; then echo $pid return 0 fi # Finally try to extract it from ps pid=`${PSCMD} | grep $1 | ${AWK} '{ print $1 }' | tr '\n' ' '` echo $pid return 0 } status() { pid="" # Test syntax. if [ $# = 0 ] ; then echo "Usage: status {program} {port}" return 1 fi # Get base program name base=`basename $1` # First try "pidof" if [ -x ${PIDOF} ] ; then pid=`${PIDOF} $1` fi if [ "$pid" != "" ] ; then echo "$base (pid $pid) is running..." return 0 else pid=`${PSCMD} | ${AWK} 'BEGIN { prog=ARGV[1]; ARGC=1 } { if ((prog == $2) || (("(" prog ")") == $2) || (("[" prog "]") == $2) || ((prog ":") == $2)) { print $1 ; exit 0 } }' $1` if [ "$pid" != "" ] ; then echo "$base (pid $pid) is running..." return 0 fi fi # Next try the PID files if [ -f ${PIDDIR}/$base.$2.pid ] ; then pid=`head -n 1 ${PIDDIR}/$base.$2.pid` if [ "$pid" != "" ] ; then echo "$base dead but pid file exists" return 1 fi fi # See if the subsys lock exists if [ -f ${SUBSYSDIR}/$base ] ; then echo "$base dead but subsys locked" return 2 fi echo "$base is stopped" return 3 } success() { return 0 } failure() { rc=$? 
return $rc } OS=`uname -s` # if /lib/tls exists, force Bacula to use the glibc pthreads instead if [ -d "/lib/tls" -a $OS = "Linux" -a `uname -r | cut -c1-3` = "2.4" ] ; then export LD_ASSUME_KERNEL=2.4.19 fi case "$1" in start) [ -x ${BACSDBIN}/bacula-sd ] && { echo "Starting the $Bacula Storage daemon" OPTIONS='' if [ "${SD_USER}" != '' ]; then OPTIONS="${OPTIONS} -u ${SD_USER}" fi if [ "${SD_GROUP}" != '' ]; then OPTIONS="${OPTIONS} -g ${SD_GROUP}" fi ulimit -l unlimited > /dev/null 2> /dev/null || true if [ "x${VALGRIND_SD}" = "x1" ]; then valgrind --leak-check=full ${BACSDBIN}/bacula-sd $2 $3 ${OPTIONS} -v -c ${BACSDCFG}/bacula-sd.conf else ${BACSDBIN}/bacula-sd $2 $3 ${OPTIONS} -v -c ${BACSDCFG}/bacula-sd.conf fi } ;; stop) [ -x ${BACSDBIN}/bacula-sd ] && { echo "Stopping the $Bacula Storage daemon" killproc ${BACSDBIN}/bacula-sd ${SD_PORT} $2 } ;; restart) $0 stop sleep 5 $0 start ;; status) [ -x ${BACSDBIN}/bacula-sd ] && status ${BACSDBIN}/bacula-sd ${SD_PORT} ;; *) echo "Usage: $0 {start|stop|restart|status}" exit 1 ;; esac exit 0 bacula-15.0.3/scripts/getver0000755000175000017500000000023114771010173015567 0ustar bsbuildbsbuild#!/bin/sh versionh=$1 if [ ! -f "$1" ]; then echo "Usage: $0 version.h" >&2 exit 1 fi sed -n -e 's/^#define VERSION.*"\(.*\)"$/\1/p' $versionh bacula-15.0.3/scripts/btraceback.gdb0000644000175000017500000000036314771010173017112 0ustar bsbuildbsbuildprint fail_time print my_name print exename print exepath print assert_msg print db_engine_name print version print host_os print distname print distver print host_name print dist_name show env TestName bt full thread apply all bt detach quit bacula-15.0.3/scripts/disk-changer.in0000644000175000017500000002453114771010173017245 0ustar bsbuildbsbuild#!/bin/sh # # Bacula interface to virtual autoloader using disk storage # # Written by Kern Sibbald # # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. # If you set in your Device resource # # Changer Command = "path-to-this-script/disk-changer %c %o %S %a %d" # you will have the following input to this script: # # So Bacula will always call with all the following arguments, even though # in come cases, not all are used. Note, the Volume name is not always # included. # # disk-changer "changer-device" "command" "slot" "archive-device" "drive-index" "volume" # $1 $2 $3 $4 $5 $6 # # By default the autochanger has 10 Volumes and 1 Drive. # # Note: For this script to work, you *must" specify # Device Type = File # in each of the Devices associated with your AutoChanger resource. # # changer-device is the name of a file that overrides the default # volumes and drives. It may have: # maxslot=n where n is one based (default 10) # maxdrive=m where m is zero based (default 1 -- i.e. 2 drives) # # This code can also simulate barcodes. You simply put # a list of the slots and barcodes in the "base" directory/barcodes. # See below for the base directory definition. 
Example of a # barcodes file: # /var/bacula/barcodes # 1:Vol001 # 2:Vol002 # ... # # archive-device is the name of the base directory where you want the # Volumes stored appended with /drive0 for the first drive; /drive1 # for the second drive, ... For example, you might use # /var/bacula/drive0 Note: you must not have a trailing slash, and # the string (e.g. /drive0) must be unique, and it must not match # any other part of the directory name. These restrictions could be # easily removed by any clever script jockey. # # Full example: disk-changer /var/bacula/conf load 1 /var/bacula/drive0 0 TestVol001 # # The Volumes will be created with names slot1, slot2, slot3, ... maxslot in the # base directory. In the above example the base directory is /var/bacula. # However, as with tapes, their Bacula Volume names will be stored inside the # Volume label. In addition to the Volumes (e.g. /var/bacula/slot1, # /var/bacula/slot3, ...) this script will create a /var/bacula/loadedn # file to keep track of what Slot is loaded. You should not change this file. # # Modified 8 June 2010 to accept Volume names from the calling program as arg 6. # In this case, rather than storing the data in slotn, it is stored in the # Volume name. Note: for this to work, Volume names may not include spaces. # wd=@working_dir@ # # log whats done # # to turn on logging, uncomment the following line #touch $wd/disk-changer.log # dbgfile="$wd/disk-changer.log" debug() { if test -f $dbgfile; then echo "`date +\"%Y%m%d-%H:%M:%S\"` $*" >> $dbgfile fi } # # Create a temporary file # make_temp_file() { TMPFILE=`mktemp -t mtx.XXXXXXXXXX` if test x${TMPFILE} = x; then TMPFILE="$wd/disk-changer.$$" if test -f ${TMPFILE}; then echo "Temp file security problem on: ${TMPFILE}" exit 1 fi fi } # check parameter count on commandline # check_parm_count() { pCount=$1 pCountNeed=$2 if test $pCount -lt $pCountNeed; then echo "usage: disk-changer ctl-device command [slot archive-device drive-index]" echo " Insufficient number of arguments arguments given." if test $pCount -lt 2; then echo " Mimimum usage is first two arguments ..." else echo " Command expected $pCountNeed arguments" fi exit 1 fi } # # Strip off the final name in order to get the Directory ($dir) # that we are dealing with. # get_dir() { bn=`basename $device` dir=`echo "$device" | sed -e s%/$bn%%g` if [ ! -d $dir ]; then echo "ERROR: Autochanger directory \"$dir\" does not exist." echo " You must create it." exit 1 fi } # # Get the Volume name from the call line, or directly from # the volslotn information. # get_vol() { havevol=0 debug "vol=$volume" if test "x$volume" != x && test "x$volume" != "x*NONE*" ; then debug "touching $dir/$volume" touch $dir/$volume echo "$volume" >$dir/volslot${slot} havevol=1 elif [ -f $dir/volslot${slot} ]; then volume=`cat $dir/volslot${slot}` havevol=1 fi } # Setup arguments ctl=$1 cmd="$2" slot=$3 device=$4 drive=$5 volume=$6 # set defaults maxdrive=1 maxslot=10 # Pull in conf file if [ -f $ctl ]; then . 
$ctl fi # Check for special cases where only 2 arguments are needed, # all others are a minimum of 5 # case $2 in list|listall) check_parm_count $# 2 ;; slots) check_parm_count $# 2 ;; transfer) check_parm_count $# 4 if [ $slot -gt $maxslot ]; then echo "Slot ($slot) out of range (1-$maxslot)" debug "Error: Slot ($slot) out of range (1-$maxslot)" exit 1 fi ;; *) check_parm_count $# 5 if [ $drive -gt $maxdrive ]; then echo "Drive ($drive) out of range (0-$maxdrive)" debug "Error: Drive ($drive) out of range (0-$maxdrive)" exit 1 fi if [ $slot -gt $maxslot ]; then echo "Slot ($slot) out of range (1-$maxslot)" debug "Error: Slot ($slot) out of range (1-$maxslot)" exit 1 fi ;; esac debug "Parms: $ctl $cmd $slot $device $drive $volume $havevol" case $cmd in unload) debug "Doing disk -f $ctl unload $slot $device $drive $volume" get_dir if [ -f $dir/loaded${drive} ]; then ld=`cat $dir/loaded${drive}` else echo "Storage Element $slot is Already Full" debug "Unload error: $dir/loaded${drive} is already unloaded" exit 1 fi if [ $slot -eq $ld ]; then echo "0" >$dir/loaded${drive} unlink $device 2>/dev/null >/dev/null unlink ${device}.add 2>/dev/null >/dev/null rm -f ${device} ${device}.add else echo "Storage Element $slot is Already Full" debug "Unload error: $dir/loaded${drive} slot=$ld is already unloaded" exit 1 fi ;; load) debug "Doing disk $ctl load $slot $device $drive $volume" get_dir i=0 # Check if slot already in a drive while [ $i -le $maxdrive ]; do if [ -f $dir/loaded${i} ]; then ld=`cat $dir/loaded${i}` else ld=0 fi if [ $ld -eq $slot ]; then echo "Drive ${i} Full (Storage element ${ld} loaded)" debug "Load error: Cannot load Slot=${ld} in drive=$drive. Already in drive=${i}" exit 1 fi i=`expr $i + 1` done # Check if we have a Volume name get_vol if [ $havevol -eq 0 ]; then # check if slot exists if [ ! -f $dir/slot${slot} ] ; then echo "source Element Address $slot is Empty" debug "Load error: source Element Address $slot is Empty" exit 1 fi fi if [ -f $dir/loaded${drive} ]; then ld=`cat $dir/loaded${drive}` else ld=0 fi if [ $ld -ne 0 ]; then echo "Drive ${drive} Full (Storage element ${ld} loaded)" echo "Load error: Drive ${drive} Full (Storage element ${ld} loaded)" exit 1 fi echo "0" >$dir/loaded${drive} unlink $device 2>/dev/null >/dev/null unlink ${device}.add 2>/dev/null >/dev/null rm -f ${device} ${device}.add if [ $havevol -ne 0 ]; then ln -s $dir/$volume $device ln -s $dir/${volume}.add ${device}.add rtn=$? else ln -s $dir/slot${slot} $device ln -s $dir/slot${slot}.add ${device}.add rtn=$? fi if [ $rtn -eq 0 ]; then echo $slot >$dir/loaded${drive} fi exit $rtn ;; list) debug "Doing disk -f $ctl -- to list volumes" get_dir if [ -f $dir/barcodes ]; then cat $dir/barcodes else i=1 while [ $i -le $maxslot ]; do slot=$i volume= get_vol if [ $havevol -eq 0 ]; then echo "$i:" else echo "$i:$volume" fi i=`expr $i + 1` done fi exit 0 ;; listall) # ***FIXME*** must add new Volume stuff make_temp_file debug "Doing disk -f $ctl -- to list volumes" get_dir if [ ! 
-f $dir/barcodes ]; then exit 0 fi # we print drive content seen by autochanger # and we also remove loaded media from the barcode list i=0 while [ $i -le $maxdrive ]; do if [ -f $dir/loaded${i} ]; then ld=`cat $dir/loaded${i}` v=`awk -F: "/^$ld:/"' { print $2 }' $dir/barcodes` echo "D:$i:F:$ld:$v" echo "^$ld:" >> $TMPFILE fi i=`expr $i + 1` done # Empty slots are not in barcodes file # When we detect a gap, we print missing rows as empty # At the end, we fill the gap between the last entry and maxslot grep -v -f $TMPFILE $dir/barcodes | sort -n | \ perl -ne 'BEGIN { $cur=1 } if (/(\d+):(.+)?/) { if ($cur == $1) { print "S:$1:F:$2\n" } else { while ($cur < $1) { print "S:$cur:E\n"; $cur++; } } $cur++; } END { while ($cur < '"$maxslot"') { print "S:$cur:E\n"; $cur++; } } ' rm -f $TMPFILE exit 0 ;; transfer) # ***FIXME*** must add new Volume stuff get_dir make_temp_file slotdest=$device if [ -f $dir/slot{$slotdest} ]; then echo "destination Element Address $slot is Full" exit 1 fi if [ ! -f $dir/slot${slot} ] ; then echo "source Element Address $slot is Empty" exit 1 fi echo "Transfering $slot to $slotdest" mv $dir/slot${slot} $dir/slot{$slotdest} mv $dir/slot${slot}.add $dir/slot{$slotdest}.add if [ -f $dir/barcodes ]; then sed "s/^$slot:/$slotdest:/" > $TMPFILE sort -n $TMPFILE > $dir/barcodes fi exit 0 ;; loaded) debug "Doing disk -f $ctl $drive -- to find what is loaded" get_dir if [ -f $dir/loaded${drive} ]; then a=`cat $dir/loaded${drive}` else a="0" fi debug "Loaded: drive=$drive is $a" echo $a exit ;; slots) debug "Doing disk -f $ctl -- to get count of slots" echo $maxslot ;; esac bacula-15.0.3/scripts/btraceback.mdb0000644000175000017500000000066114771010173017121 0ustar bsbuildbsbuild# btraceback.mdb # # Copyright (C) 2000-2020 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # $G ::echo "******** RUNNING LWPS/THREADS:" ::echo ::walk thread ::echo ::echo ::echo "******** STACK TRACE OF CURRENT LWP:" ::echo $C ::echo ::echo ::echo "******** VARIABLES DUMP OF CURRENT LWP:" ::echo ::echo "******** STACK TRACE OF LWPS:" ::walk thread | ::findstack ::echo "******** VARIABLES DUMP OF LWPS:" ::quit bacula-15.0.3/scripts/mtx-changer.in0000644000175000017500000002427214771010173017125 0ustar bsbuildbsbuild#!/bin/sh # # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. # # If you set in your Device resource # # Changer Command = "path-to-this-script/mtx-changer %c %o %S %a %d" # you will have the following input to this script: # # So Bacula will always call with all the following arguments, even though # in come cases, not all are used. # # mtx-changer "changer-device" "command" "slot" "archive-device" "drive-index" # $1 $2 $3 $4 $5 # # for example: # # mtx-changer /dev/sg0 load 1 /dev/nst0 0 (on a Linux system) # # will request to load the first cartidge into drive 0, where # the SCSI control channel is /dev/sg0, and the read/write device # is /dev/nst0. 
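#
# For context, a minimal bacula-sd.conf fragment that would issue the call
# shown above.  This is only an illustrative sketch -- resource names,
# Media Type and device paths are placeholders to adapt to your site:
#
#   Autochanger {
#     Name = "Autochanger-1"
#     Device = "Drive-0"
#     Changer Device = /dev/sg0
#     Changer Command = "@scriptdir@/mtx-changer %c %o %S %a %d"
#   }
#   Device {
#     Name = "Drive-0"
#     Media Type = "LTO"
#     Archive Device = /dev/nst0
#     Autochanger = yes
#     Drive Index = 0
#   }
#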
# # The commands are: # Command Function # unload unload a given slot # load load a given slot # loaded which slot is loaded? # list list Volume names (requires barcode reader) # slots how many slots total? # listall list all info # transfer # # Slots are numbered from 1 ... # Drives are numbered from 0 ... # # # If you need to an offline, refer to the drive as $4 # e.g. mt -f $4 offline # # Many changers need an offline after the unload. Also many # changers need a sleep 60 after the mtx load. # # N.B. If you change the script, take care to return either # the mtx exit code or a 0. If the script exits with a non-zero # exit code, Bacula will assume the request failed. # # myversion must be the same as version in mtx-changer.conf myversion=2 # source our conf file if test ! -f @scriptdir@/mtx-changer.conf ; then echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" echo "ERROR: @scriptdir@/mtx-changer.conf file not found!!!!" echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" exit 1 fi . @scriptdir@/mtx-changer.conf if test "${version}" != "${myversion}" ; then echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" echo "ERROR: @scriptdir@/mtx-changer.conf has wrong version. Wanted ${myversion}, got ${version} !!!" echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" exit 1 fi MTX=@MTX@ if test ${debug_log} -ne 0 ; then touch @working_dir@/mtx.log fi dbgfile="@working_dir@/mtx.log" debug() { if test -f $dbgfile -a ${debug_level} -ge $1; then echo "`date +%m%d-%H:%M:%S.%N|cut -c1-16` ${chgr_id} $2" >> $dbgfile fi } # # Create a temporary file # make_temp_file() { TMPFILE=`mktemp @working_dir@/mtx.XXXXXXXXXX` if test x${TMPFILE} = x; then TMPFILE="@working_dir@/mtx.$$" if test -f ${TMPFILE}; then echo "ERROR: Temp file security problem on: ${TMPFILE}" exit 1 fi fi } # # Create a temporary file for stderr # # Note, this file is used because sometime mtx emits # unexpected error messages followed by the output # expected during success. # So we separate STDOUT and STDERR in # certain of the mtx commands. The contents of STDERR # is then printed after the STDOUT produced by mtx # thus we sometimes get better changer results. # make_err_file() { ERRFILE=`mktemp @working_dir@/mtx.err.XXXXXXXXXX` if test x${ERRFILE} = x; then ERRFILE="@working_dir@/mtx.err.$$" if test -f ${ERRFILE}; then echo "ERROR: Temp file security problem on: ${ERRFILE}" exit 1 fi fi } # # The purpose of this function to wait a maximum # time for the drive. It will # return as soon as the drive is ready, or after # waiting a maximum of 300 seconds. # Note, this is very system dependent, so if you are # not running on Linux, you will probably need to # re-write it, or at least change the grep target. # We've attempted to get the appropriate OS grep targets # in the code at the top of this script. # wait_for_drive() { i=0 while [ $i -le 300 ]; do # Wait max 300 seconds if mt -f $1 status 2>&1 | grep "${ready}" >/dev/null 2>&1; then break fi debug $dbglvl "Device $1 - not ready, retrying..." sleep 1 i=`expr $i + 1` done } # check parameter count on commandline # check_parm_count() { pCount=$1 pCountNeed=$2 if test $pCount -lt $pCountNeed; then echo "ERROR: usage: mtx-changer ctl-device command [slot archive-device drive-index]" echo " Insufficient number of arguments given." if test $pCount -lt 2; then echo " Mimimum usage is first two arguments ..." 
else echo " Command expected $pCountNeed arguments" fi exit 1 fi } # Check for special cases where only 2 arguments are needed, # all others are a minimum of 5 # case $2 in list|listall) check_parm_count $# 2 ;; slots) check_parm_count $# 2 ;; transfer) check_parm_count $# 4 ;; *) check_parm_count $# 5 ;; esac # Setup arguments ctl=$1 cmd="$2" slot=$3 device=$4 drive=$5 debug $dbglvl "Parms: $ctl $cmd $slot $device $drive" case $cmd in unload) if test ${offline} -eq 1 ; then mt -f $device offline fi if test ${offline_sleep} -ne 0 ; then sleep ${offline_sleep} fi make_err_file for i in 1 2 3 4 5 ; do debug $idbglvl "Doing mtx -f $ctl unload slot=$slot drv=$drive" ${MTX} -f $ctl unload $slot $drive 2>${ERRFILE} rtn=$? if test $rtn -eq 0 ; then break fi grep "Error Code=" ${ERRFILE} 2>/dev/null 1>/dev/null if test $? -ne 0 ; then break fi sleep $i done cat ${ERRFILE} rm -f ${ERRFILE} >/dev/null 2>&1 if test $rtn -ne 0 ; then debug $idbglvl "FAIL: mtx -f $ctl unload slot=$slot drv=$drive" fi exit $rtn ;; load) make_err_file for i in 1 2 3 4 5 ; do debug $idbglvl "Doing mtx -f $ctl load slot=$slot drv=$drive" ${MTX} -f $ctl load $slot $drive 2>${ERRFILE} rtn=$? if test $rtn -eq 0 ; then break fi grep "Error Code=" ${ERRFILE} 2>/dev/null 1>/dev/null if test $? -ne 0 ; then break fi sleep $i done if test ${load_sleep} -ne 0 ; then sleep ${load_sleep} fi wait_for_drive $device cat ${ERRFILE} rm -f ${ERRFILE} >/dev/null 2>&1 if test $rtn -ne 0 ; then debug $idbglvl "FAIL: mtx -f $ctl load slot=$slot drv=$drive" fi exit $rtn ;; list) make_temp_file if test ${inventory} -ne 0 ; then ${MTX} -f $ctl inventory fi debug $dbglvl "Doing mtx -f $ctl list" ${MTX} -f $ctl status >${TMPFILE} rtn=$? if test ${vxa_packetloader} -ne 0 ; then cat ${TMPFILE} | grep " *Storage Element [0-9]*:.*Full" | sed "s/ *Storage Element //" | sed "s/Full :VolumeTag=//" else cat ${TMPFILE} | grep " Storage Element [0-9]*:.*Full" | awk "{print \$3 \$4}" | sed "s/Full *\(:VolumeTag=\)*//" fi cat ${TMPFILE} | grep "^Data Transfer Element [0-9]*:Full (Storage Element [0-9]" | awk '{printf "%s:%s\n",$7,$10}' rm -f ${TMPFILE} >/dev/null 2>&1 if test $rtn -ne 0 ; then debug $idbglvl "FAIL: mtx -f $ctl list" fi exit $rtn ;; listall) # Drive content: D:Drive num:F:Slot loaded:Volume Name # D:0:F:2:vol2 or D:Drive num:E # D:1:F:42:vol42 # D:3:E # # Slot content: # S:1:F:vol1 S:Slot num:F:Volume Name # S:2:E or S:Slot num:E # S:3:F:vol4 # # Import/Export tray slots: # I:10:F:vol10 I:Slot num:F:Volume Name # I:11:E or I:Slot num:E # I:12:F:vol40 make_temp_file if test ${inventory} -ne 0 ; then ${MTX} -f $ctl inventory fi debug $dbglvl "Doing mtx -f $ctl -- to list all" ${MTX} -f $ctl status >${TMPFILE} rtn=$? 
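   # Typical "mtx status" lines the parser below must recognize (illustrative
   # samples only, matching the output formats documented above):
   #   Data Transfer Element 0:Full (Storage Element 2 Loaded):VolumeTag = vol2
   #   Data Transfer Element 3:Empty
   #   Storage Element 1:Full :VolumeTag=vol1
   #   Storage Element 2:Empty
   #   Storage Element 12 IMPORT/EXPORT:Full :VolumeTag=vol40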
# can be converted to awk+sed+cut, see below perl -ne ' /Data Transfer Element (\d+):Empty/ && print "D:$1:E\n"; /Data Transfer Element (\d+):Full \(Storage Element (\d+) Loaded\)(:VolumeTag =\s*(.+))?/ && print "D:$1:F:$2:$4\n"; /Storage Element (\d+):Empty/ && print "S:$1:E\n"; /Storage Element (\d+):Full( :VolumeTag=(.+))?/ && print "S:$1:F:$3\n"; /Storage Element (\d+) IMPORT.EXPORT:Empty/ && print "I:$1:E\n"; /Storage Element (\d+) IMPORT.EXPORT:Full( :VolumeTag=(.+))?/ && print "I:$1:F:$3\n";' ${TMPFILE} # If perl isn't installed, you can use by those commands #cat ${TMPFILE} | grep "Data Transfer Element" | awk "{print \"D:\"\$4 \$7 \$9 \$10}" | sed "s/=/:/" | sed "s/Full/F:/" | sed "s/Empty/E/" #cat ${TMPFILE} | grep -v "Data Transfer Element" | grep "Storage Element" | grep -v "IMPORT/EXPORT" | awk "{print \"S:\"\$3 \$4 \$5}" | sed "s/IMPORT\/EXPORT//" | sed "s/Full *:VolumeTag=/F:/" | sed "s/Empty/E/" #cat ${TMPFILE} | grep -v "Data Transfer Element" | grep "Storage Element" | grep "IMPORT/EXPORT" | awk "{print \"I:\"\$3 \$4 \$5}" | sed "s/IMPORT\/EXPORT//" | sed "s/Full *:VolumeTag=/F:/" | sed "s/Empty/E/" rm -f ${TMPFILE} >/dev/null 2>&1 exit $rtn ;; transfer) slotdest=$device debug $dbglvl "Doing transfer from $slot to $slotdest" ${MTX} -f $ctl transfer $slot $slotdest rtn=$? if test $rtn -ne 0 ; then debug $idbglvl "FAIL: mtx -f $ctl transfer from=$slot to=$slotdest" fi exit $rtn ;; loaded) make_temp_file debug $idbglvl "Doing mtx -f $ctl $drive -- to find what is loaded" ${MTX} -f $ctl status >${TMPFILE} rtn=$? cat ${TMPFILE} | grep "^Data Transfer Element $drive:Full" | awk "{print \$7}" cat ${TMPFILE} | grep "^Data Transfer Element $drive:Empty" | awk "{print 0}" rm -f ${TMPFILE} >/dev/null 2>&1 if test $rtn -ne 0 ; then debug $idbglvl "FAIL: mtx -f $ctl loaded drv=$drive" fi exit $rtn ;; slots) debug $dbglvl "Doing mtx -f $ctl -- to get count of slots" ${MTX} -f $ctl status | grep " *Storage Changer" | awk "{print \$5}" rtn=$? if test $rtn -ne 0 ; then debug $idbglvl "FAIL: mtx -f $ctl slots" fi ;; esac bacula-15.0.3/scripts/bat.pamd0000644000175000017500000000040614771010173015762 0ustar bsbuildbsbuild#%PAM-1.0 auth sufficient pam_rootok.so auth sufficient pam_timestamp.so auth required pam_stack.so service=system-auth session optional pam_xauth.so session optional pam_timestamp.so account required pam_permit.so bacula-15.0.3/scripts/bat.console_apps.in0000644000175000017500000000007314771010173020133 0ustar bsbuildbsbuildUSER=root PROGRAM=@sbindir@/bat SESSION=true FALLBACK=true bacula-15.0.3/scripts/key-manager.py.in0000644000175000017500000005214114771010173017533 0ustar bsbuildbsbuild#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # Bacula(R) - The Network Backup Solution # # # This script is a simple key-manager for the Volume Encryption done by the # Storage Daemon. # # One key is automatically generated at LABEL time for every VOLUME and # is stored in the "KEY_DIR" directory. # # The keys are a random sequence of bytes as generated by /dev/urandom. # # Two encryption methods are available: AES_128_XTS & AES_256_XTS # # A third encryption method, called "NULL", exists for testing purposes only. # # The main purpose of this script is to provide an example and illustrate the # protocol between the key-manager and the Storage Daemon. It may be used in # a production environment. # # Use --help to get help the command line parameters of this script. # If you modify this script, rename it to avoid the possibility of this script # being overwritten during an upgrade. 
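#
# For reference, a hedged sketch of the per-master-key sections understood by
# get_crypto_ctx_from_config() below when a configuration file is passed with
# --config.  The section name is the GPG key id/fingerprint and every value
# shown here is a placeholder, not a recommendation:
#
#   [0123456789ABCDEF0123456789ABCDEF01234567]
#   volume_regex = ^Vol
#   cipher = AES_256_XTS
#   gnupghome = "/opt/bacula/etc/gnupg"
#   stealth = false
#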
# # The script gets its input from environment variables and returns its # output via STDOUT. # # The Storage Daemon passes the following environment variables: # # - OPERATION: This is can "LABEL" when the volume is labeled. In this case # the script should generate a new key for the volume. This variable can # also be "READ" when the volume has already been labeled and the Storage # Daemon needs the already existing key to read or append data to the volume. # # - VOLUME_NAME: This is the name of the volume. # # Some variables already exist to support a "Master Key" in the future. # This feature is not yet supported, but will come later: # # - ENC_CIPHER_KEY: This is a base64 encoded version of the key encrypted by # the "master key" # # - MASTER_KEYID: This is a base64 encoded version of the Key Id of # the "master key" that was used to encrypt the ENC_CIPHER_KEY value above. # # The Storage Daemon expects some values in return via STDOUT: # # - volumename: This is a repetition of the name of the volume that is # given to the script. This field is optional and ignored by Bacula. # # - cipher: This is the cipher that the Storage Daemon must use. # The Storage Daemon knows the following ciphers: AES_128_XTS and AES_256_XTS. # Of course the key lengths vary with the cipher. # # - cipher_key: This is the symmetric key in base64 format. # # - comment: This is a single line of text that is optional and ignored # by the SD. # # - error: This is a single line error message. # This is optional, but when provided, the SD considers that the script # returned an error and will display this error in the job log. # # The Storage Daemon expects an exit code of 0. If the script exits with a # different error code, any output is ignored and the Storage Daemon will # display a generic message with the exit code in the job log. # # To return an error to the Storage Daemon, the script must set the "error" # variable string and return an error code of 0. # # Here are some input/output samples to illustrate the script's funtion: # # $ OPERATION=LABEL VOLUME_NAME=Volume0001 ./key-manager.py getkey --cipher AES_128_XTS --key-dir tmp/keys # cipher: AES_128_XTS # cipher_key: G6HksAYDnNGr67AAx2Lb/vecTVjZoYAqSLZ7lGMyDVE= # volume_name: Volume0001 # # $ OPERATION=READ VOLUME_NAME=Volume0001 ./key-manager.py getkey --cipher AES_128_XTS --key-dir tmp/keys # cipher: AES_128_XTS # cipher_key: G6HksAYDnNGr67AAx2Lb/vecTVjZoYAqSLZ7lGMyDVE= # volume_name: Volume0001 # # $ cat tmp/keys/Volume0001 # cipher: AES_128_XTS # cipher_key: G6HksAYDnNGr67AAx2Lb/vecTVjZoYAqSLZ7lGMyDVE= # volume_name: Volume0001 # # $ OPERATION=READ VOLUME_NAME=MissingVol ./key-manager.py getkey --cipher AES_128_XTS --key-dir tmp/keys 2>/dev/null # error: no key information for volume "MissingVol" # $ echo $? # 0 # # $ OPERATION=BAD_CMD VOLUME_NAME=Volume0002 ./key-manager.py getkey --cipher AES_128_XTS --key-dir tmp/keys 2>/dev/null # error: environment variable OPERATION invalid "BAD_CMD" for volume "Volume0002" # $ echo $? 
# 0 # # ------------ # BEGIN SCRIPT # ------------ import sys import logging import argparse import re import os import base64 import codecs import random import tempfile if sys.version_info[0] < 3: # python 2.7 import ConfigParser as configparser else: # python >= 3.X import configparser # logging.raiseExceptions=False LOG_FILE="@working_dir@/key-manager.log" KEY_DIR="@sysconfdir@/keydir" CONFIG_FILE="@sysconfdir@/key-manager.conf" GNUPGHOME="@sysconfdir@/gnupg" # trick to use the .in as a python script if LOG_FILE.startswith('@'): LOG_FILE=os.path.join(tempfile.gettempdir(), 'key-manager.log') if KEY_DIR.startswith('@'): KEY_DIR=os.path.join(tempfile.gettempdir(), 'keydir') if CONFIG_FILE.startswith('@'): CONFIG_FILE=os.path.join(tempfile.gettempdir(), 'key-manager.conf') if GNUPGHOME.startswith('@'): GNUPGHOME=os.path.join(tempfile.gettempdir(), 'gnupg') MASTER_KEYID_SIZE=20 want_to_have_all_the_same_keys=False #want_to_have_all_the_same_keys=True CIPHERS=[ 'NULL', 'AES_128_XTS', 'AES_256_XTS' ] DEFAULT_CIPHER=CIPHERS[1] MAX_NAME_LENGTH=128 volume_re=re.compile('[A-Za-z0-9:.-_]{1,128}') # the config from the configuration file if any config=None class CryptoCtx: master_key_id=None cipher=DEFAULT_CIPHER stealth=False passphrase=None # raiseExceptions=False class MyFileHandler(logging.FileHandler): """raise an exception when the format don't match the parameters instead of printing an error on stderr """ def emit(self, record): """dont use try/except and dont call handleError""" try: msg = self.format(record) stream = self.stream stream.write(msg) stream.write(self.terminator) self.flush() except Exception: if False: self.handleError(record) else: raise def escape_volume_name(name): escapechar='=' replace_esc='{}0x{:02x}'.format(escapechar, ord(escapechar)) replace_colon='{}0x{:02x}'.format(escapechar, ord(':')) newname=name.replace(escapechar, replace_esc) newname=newname.replace(':', replace_colon) return newname def add_console_logger(): console=logging.StreamHandler() console.setFormatter(logging.Formatter('%(levelname)-3.3s %(filename)s:%(lineno)d %(message)s', '%H:%M:%S')) console.setLevel(logging.INFO) # must be INFO for prod logging.getLogger().addHandler(console) return console def add_file_logger(filename): filelog=logging.FileHandler(filename) # %(asctime)s '%Y-%m-%d %H:%M:%S' filelog.setFormatter(logging.Formatter('%(asctime)s %(levelname)-3.3s %(filename)s:%(lineno)d %(message)s', '%H:%M:%S')) filelog.setLevel(logging.INFO) logging.getLogger().addHandler(filelog) return filelog def volume_regex_type(arg_value): if not volume_re.match(arg_value): raise argparse.ArgumentTypeError return arg_value def setup_logging(debug, verbose, logfile): level=logging.WARNING if debug: level=logging.DEBUG elif verbose: level=logging.INFO logging.getLogger().setLevel(level) if logfile: filelog=MyFileHandler(logfile) # %(asctime)s '%Y-%m-%d %H:%M:%S' filelog.setFormatter(logging.Formatter('%(asctime)s %(levelname)-3.3s %(filename)s:%(lineno)d %(message)s', '%Y-%m-%d %H:%M:%S')) filelog.setLevel(level) logging.getLogger().addHandler(filelog) else: console=logging.StreamHandler() console.setFormatter(logging.Formatter('%(levelname)-3.3s %(filename)s:%(lineno)d %(message)s', '%H:%M:%S')) console.setLevel(level) logging.getLogger().addHandler(console) def test(args): assert 'hello123'==escape_volume_name('hello123') assert 'vol=0x3dname=0x3a.-_end'==escape_volume_name('vol=name:.-_end') def bytes_xor(data, key): """trivial encode and decode function""" enc=[] for i, ch in enumerate(data): 
enc.append(ch ^ key[i%len(key)]) return bytes(enc) def check_force_cipher_env(cipher): return os.getenv('FORCE_CIPHER', cipher) def get_crypto_ctx_from_config(args, volume_name, master_keyid=None): """ retrieve the master-key defined in the config file or a default CTX return None : for error MasterKey object : the master-key or a default context if no config """ if args.config: config=configparser.ConfigParser() try: config.read(args.config) except configparser.ParsingError as exc: logging.error("parsing configuration file \"%s\": %s", args.config, str(exc)) print('error: parsing configuration file \"{}\"\n'.format(args.config)) return None the_section=None if master_keyid: if config.has_section(master_keyid): the_section=master_keyid else: logging.error("configuration file \"%s\" has no master-key \"%s\"", args.config, master_keyid) print('error: configuration file \"{}\" has no master-key \"{}\"\n'.format(args.config, master_keyid)) return None else: # search for the section matching the volume for section in config.sections(): try: volume_regex=config.get(section, 'volume_regex') except configparser.NoOptionError: logging.debug("ignore section \"%s\"", section) continue try: match=re.match(volume_regex, volume_name) except re.error: logging.error("regular expression error in configuration file \"%s\" in section \"%s\" : %s", args.config, section, str(exc)) print("error: regular expression error in configuration file \"{}\" in section \"{}\" : {}".format(args.config, section, str(exc))) return None if match: the_section=section break if not the_section: logging.debug("no master-key defined for volume \"%s\"", volume_name) crypto_ctx=CryptoCtx() if the_section==None: # no master key crypto_ctx.master_key_id=None crypto_ctx.cipher=args.cipher else: crypto_ctx.master_key_id=the_section try: crypto_ctx.gnupghome=config.get(the_section, 'gnupghome') if crypto_ctx.gnupghome.startswith('"') and crypto_ctx.gnupghome.endswith('"'): crypto_ctx.gnupghome=crypto_ctx.gnupghome[1:-1] except configparser.NoOptionError: crypto_ctx.gnupghome=GNUPGHOME try: crypto_ctx.cipher=config.get(the_section, 'cipher') except configparser.NoOptionError: crypto_ctx.cipher=args.cipher try: crypto_ctx.stealth=config.getboolean(the_section, 'stealth') except configparser.NoOptionError: pass try: crypto_ctx.passphrase=config.get(the_section, 'passphrase') except configparser.NoOptionError: pass logging.info("use masterkey %r and cipher \"%s\" for volume \"%s\"", crypto_ctx.master_key_id, crypto_ctx.cipher, volume_name) else: crypto_ctx=CryptoCtx() crypto_ctx.cipher=args.cipher return crypto_ctx def generate_key(crypto_ctx, volume_name): if crypto_ctx.cipher=='AES_128_XTS': key_size=32 elif crypto_ctx.cipher=='AES_256_XTS': key_size=64 elif crypto_ctx.cipher=='NULL': key_size=16 else: logging.error('unknown cipher %s', crypto_ctx.cipher) print('error: unknown cipher %s'.format(crypto_ctx.cipher)) return None # unknown cipher urandom=open('/dev/urandom', 'rb') key=urandom.read(key_size) if want_to_have_all_the_same_keys: key=b'A'*key_size key_base64=codecs.decode(base64.b64encode(key)) r=dict() r['cipher']=crypto_ctx.cipher r['cipher_key']=key_base64 r['volume_name']=volume_name if crypto_ctx.master_key_id: try: import gnupg gnupg.GPG # check that we have the module and not the GnuPG directory except (ImportError, AttributeError): logging.error('module gnupg is not installed') print('error: python module gnupg is not installed') return None gpg=gnupg.GPG(gnupghome=crypto_ctx.gnupghome) 
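        # wrap the freshly generated volume key with the configured GPG master
        # key; the (base64) master key id and the encrypted key are returned to
        # the caller together with the clear key generated above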
master_keyid_base64=codecs.decode(base64.b64encode(codecs.encode(crypto_ctx.master_key_id))) r['master_keyid']=master_keyid_base64 enc_key=gpg.encrypt(key, crypto_ctx.master_key_id, armor=False) enc_key_base64=codecs.decode(base64.b64encode(enc_key.data)) r['enc_cipher_key']=enc_key_base64 return r def decrypt_key(crypto_ctx, volume_name, enc_cipher_key): try: import gnupg gnupg.GPG # check that we have the module and not the GnuPG directory except (ImportError, AttributeError): logging.error('module gnupg is not installed') print('error: python module gnupg is not installed') return None r=dict() r['cipher']=crypto_ctx.cipher gpg=gnupg.GPG(gnupghome=crypto_ctx.gnupghome) master_keyid_base64=codecs.decode(base64.b64encode(codecs.encode(crypto_ctx.master_key_id))) r['master_keyid']=master_keyid_base64 passphrase=crypto_ctx.passphrase cipher_key=gpg.decrypt(enc_cipher_key, passphrase=passphrase) if cipher_key.ok==False: logging.error('decryption error for volume "{}":'.format(volume_name, cipher_key.status)) print('error: decryption error for volume "{}":'.format(volume_name, cipher_key.status)) return None cipher_key_base64=codecs.decode(base64.b64encode(cipher_key.data)) r['cipher_key']=cipher_key_base64 r['volume_name']=volume_name return r def decode_data(data): d=dict() for line in data.split('\n'): if line: k, v=line.split(':', 1) d[k.strip()]=v.strip() return d def encode_data(dct, exclude=None): lines=[] for key, value in dct.items(): if not exclude or not key in exclude: lines.append('{}: {}'.format(key, value)) lines.append('') return '\n'.join(lines) def getkey0(args): operation=os.getenv('OPERATION') volume_name=os.getenv('VOLUME_NAME') if not volume_name: logging.error("environment variable VOLUME_NAME missing or empty") print('error: environment variable VOLUME_NAME missing or empty\n') return 0 if not operation: logging.error("environment variable OPERATION missing or empty") print('error: environment variable OPERATION missing or empty\n') return 0 if not operation in [ 'LABEL', 'READ']: logging.error("environment variable OPERATION invalid \"%s\" for volume \"%s\"", operation, volume_name) print("error: environment variable OPERATION invalid \"{}\" for volume \"{}\"\n".format(operation, volume_name)) return 0 enc_cipher_key=os.getenv('ENC_CIPHER_KEY') master_keyid=os.getenv('MASTER_KEYID') logging.info('getkey OPERATION="%s" VOLUME_NAME="%s"%s%s', operation, volume_name, ' ENC_CIPHER_KEY="{}"'.format(enc_cipher_key) if enc_cipher_key else "", ' MASTER_KEYID="{}"'.format(master_keyid) if master_keyid else "") key_filename=os.path.join(args.key_dir, escape_volume_name(volume_name)) if operation=='LABEL': crypto_ctx=get_crypto_ctx_from_config(args, volume_name) if crypto_ctx==None: return 0 # error reading the config file crypto_ctx.cipher=check_force_cipher_env(crypto_ctx.cipher) if os.path.isfile(key_filename): logging.info("delete old keyfile for volume \"%s\" : %s", volume_name, key_filename) os.unlink(key_filename) ctx=generate_key(crypto_ctx, volume_name) if ctx==None: return 0 # error while generating the key (wrong cipher or gnupg not installed) logging.info("generate key volume=%s cipher=%s enckey=%s masterkey=%s", ctx['volume_name'], ctx['cipher'], ctx.get('enc_cipher_key', ''), ctx.get('master_keyid', '')) if crypto_ctx.stealth: # don't keep an un-encrypted version of the cipher_key # use the masterkey id to decrypte the enckey exclude=set(['cipher_key']) else: exclude=set() data=encode_data(ctx, exclude=exclude) f=open(key_filename, 'wt') f.write(data) f.close() 
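        # The key file just written uses the simple "name: value" line format
        # produced by encode_data().  Illustrative content for a volume named
        # Vol0001 (placeholder values) when no stealth mode is configured:
        #   cipher: AES_128_XTS
        #   cipher_key: <base64 random key>
        #   volume_name: Vol0001
        # plus master_keyid/enc_cipher_key lines when a GPG master key is in
        # use; with stealth=1 the clear cipher_key line is omitted from disk.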
output=encode_data(ctx) # including the 'cipher_key' elif operation=='READ': ctx=dict() if os.path.isfile(key_filename): # use data in the key file data=open(key_filename, 'rt').read() ctx=decode_data(data) if 'cipher_key' in ctx: logging.info("read key volume=%s cipher=%s", ctx['volume_name'], ctx['cipher']) output=encode_data(ctx) elif not enc_cipher_key: logging.error("no cipher key nor encrypted cipher key for volume \"%s\"", volume_name) print('error: no cipher key nor encrypted cipher key for volume "{}"'.format(volume_name)) return 0 else: enc_cipher_key_raw=base64.b64decode(codecs.encode(enc_cipher_key)) master_keyid_raw=base64.b64decode(codecs.encode(master_keyid)) master_keyid_ascii=codecs.decode(master_keyid_raw) # maybe we can retrieve the passphrase for the master-key crypto_ctx=get_crypto_ctx_from_config(args, volume_name, master_keyid_ascii) if crypto_ctx==None: return 0 # error no master-key # maybe the master_keyid from the volume could have done the job # if gnupg still remember this master-key despit it has been # removed from the key-manager config file crypto_ctx.cipher=check_force_cipher_env(crypto_ctx.cipher) # use the master-key to decrypt the enc_cipher_key ctx=decrypt_key(crypto_ctx, volume_name, enc_cipher_key_raw) if ctx==None: return 0 # error decrypting key logging.info("read key volume=%s cipher=%s cipher_key=%s masterkey=%s", ctx['volume_name'], ctx['cipher'], ctx['cipher_key'], ctx['master_keyid']) output=encode_data(ctx) else: output='error: unknown operation \"%r\"'.format(operation) print(output) return 0 def getkey(args): try: getkey0(args) except: logging.exception("unhandled exception in getkey0") sys.exit(1) mainparser=argparse.ArgumentParser(description='Bacula Storage Daemon key manager ') subparsers=mainparser.add_subparsers(dest='command', metavar='', title='valid commands') common_parser=argparse.ArgumentParser(add_help=False) common_parser.add_argument('--key-dir', '-k', metavar='DIRECTORY', type=str, default=KEY_DIR, help='the directory where to store the keys') common_parser.add_argument('--config', '-C', metavar='CONFIG', type=str, help='the configuration file') common_parser.add_argument('--log', metavar='LOGFILE', type=str, default=LOG_FILE, help='setup the logfile') common_parser.add_argument('--debug', '-d', action='store_true', help='enable debugging') common_parser.add_argument('--verbose', '-v', action='store_true', help='be verbose') parser=subparsers.add_parser('getkey', description="Retrieve a key", parents=[common_parser, ], help="retrieve a key or generate one if don't exist yet") parser.add_argument('--cipher', '-c', metavar='CIPHER', choices=CIPHERS, default=DEFAULT_CIPHER, help='set the default cipher in {}'.format(', '.join(CIPHERS))) parser.set_defaults(func=getkey) parser=subparsers.add_parser('test', description="Run some internal test of the code") parser.set_defaults(func=test) args=mainparser.parse_args() args._parser=mainparser setup_logging(getattr(args, 'debug', None), getattr(args, 'verbose', None), getattr(args, 'log', None)) # check for the key_dir directory if hasattr(args, 'key_dir'): if not os.path.exists(args.key_dir): try: os.makedirs(args.key_dir, 0o700) except: logging.error('Cannot create the "key" directory %s', args.key_dir) else: logging.error('The "key" directory don\'t exists. 
Create directory %s', args.key_dir) if not os.path.isdir(args.key_dir): logging.error('The "key" directory don\'t exists: %s', args.key_dir) mainparser.error('error: path "{}" is not a directory'.format(args.key_dir)) if not os.access(args.key_dir, os.R_OK|os.W_OK): logging.error('The "key" directory is not accessible for READ and WRITE: %s', args.key_dir) mainparser.error('error: need read and write access to "{}"'.format(args.key_dir)) # check for the config file if hasattr(args, 'config'): if args.config==None and os.path.exists(CONFIG_FILE): args.config=CONFIG_FILE # the default file exists, use it logging.debug('Use config file %s', args.config) if args.config!=None and (not os.path.exists(args.config) or not os.access(args.config, os.R_OK)): logging.error('The config file don\'t exists or cannot be read: %s', args.config) mainparser.error('The config file don\'t exists or cannot be read: %s'.format(args.config)) sys.exit(args.func(args)) bacula-15.0.3/scripts/bacula.vim0000644000175000017500000001436314771010173016324 0ustar bsbuildbsbuild" Vim syntax file " Put this file to your $HOME/.vim/syntax/ and use :syntax on " Language: Bacula " Maintainer: Eric Bollengier " URL: " Latest Revision: 2007-02-11 if version < 600 syntax clear elseif exists("b:current_syntax") finish endif " comments syn region BacComment display oneline start="#" end="$" keepend contains=BacTodo syn region BacComment2 start="/\*" end="\*/" syn region BacInclude start=/^@/ end="$" syntax region xCond start=/\w+\s*{/ms=e+1 end=/}/me=s-1 syntax keyword BacName Name syn case ignore syn keyword LevelElt contained Full Incremental Differential " todo syn keyword BacTodo contained TODO FIXME XXX NOTE syn region BacString start=/"/ skip=/\\"/ end=/"/ " Specifique Client { syn region BacClient display start=/Client {/ end="^}" contains=BacString,BacComment,BacC1,BacC2,BacC3,BacC4 syn match BacC1 contained /File\s*Retention/ syn match BacC2 contained /Maximum\s*Concurrent\s*Jobs/ syn match BacC3 contained /Job\s*Retention/ syn keyword BacC4 contained Name Password Address Catalog AutoPrune FDPort " FileSet { syn region BacFileSet display start="FileSet {" end="^}" contains=BacString,BacComment,BacName,BacFSInc,BacFSExc,BacFS2 syn region BacFSInc contained display start="Include {" end="}" contains=BacString,BacComment,BacFSOpt,BacFS1 syn region BacFSExc contained display start="Exclude {" end="}" contains=BacString,BacComment,BacFSOpt,BacFS1 syn region BacFSOpt contained display start="Options {" end="}" contains=BacString,BacComment,BacFSOpt1,BacFSOpt2 syn keyword BacFSOpt1 contained verify signature onefs noatime RegexFile Exclude Wild WildDir WildFile CheckChanges aclsupport syn match BacFSOpt2 contained /ignore case/ syn keyword BacFS1 contained File syn match BacFS2 contained /Enable VSS/ " Storage { syn region BacSto display start="Storage {" end="}" contains=BacName,BacComment,BacString,BacSto1,BacSto2 syn keyword BacSto1 contained Address SDPort Password Device Autochanger syn match BacSto2 contained /Media\s*Type/ " Director { syn region BacDir display start="Director {" end="}" contains=BacName,BacComment,BacString,BacDir,BacDir1,BacDir2 syn keyword BacDir1 contained DIRport QueryFile WorkingDirectory PidDirectory Password Messages syn match BacDir2 contained /Maximum\s*Concurrent\s*Jobs/ " Catalog { syn region BacCat display start="Catalog {" end="}" contains=BacName,BacComment,BacString,BacCat1 syn keyword BacCat1 contained dbname user password dbport " Job { syn region BacJob display start="Job {" end="^}" 
contains=BacJ1,BacJ2,BacString,BacComment,Level,BacC2,BacJ3,BacRun syn region BacJobDefs display start="JobDefs {" end="^}" contains=BacJ1,BacJ2,BacString,BacComment,Level,BacC2,BacJ3 syn region Level display start="Level =" end="$" contains=LevelElt syn keyword BacJ1 contained Schedule Name Priority Client Pool JobDefs FileSet SpoolData Storage where syn keyword BacJ2 contained RunBeforeJob RunAfterJob Type Messages ClientRunAfterJob syn match BacJ3 contained /Write Bootstrap/ " RunScript { syn region BacRun contained display start="RunScript {" end="}" contains=BacR1,BacR2,BacR3,BacR4,BacRW,BacString,BacComment syn match BacR1 contained /Runs\s*When/ syn match BacR2 contained /Runs\s*On\s*Client/ syn match BacR3 contained /Runs\s*On\s*Failure/ syn keyword BacR4 contained Command syn keyword BacRW contained After Before Always " Schedule { syn region BacSched display start="Schedule {" end="^}" contains=BacSR,BacString,BacComment,BacName,BacRun syn keyword BacS1 contained Pool FullPool on at syn keyword BacS2 contained sun mon tue wed thu fri sat sunday monday tuesday wednesday thursday friday saturday syn keyword BacS3 contained jan feb mar apr may jun jul aug sep oct nov dec syn keyword BacS4 contained 1st 2nd 3rd 4th 5th first second third fourth fifth syn region BacSR contained display start="Run = " end="$" contains=BacS1,BacS2,BacS3,BacS4,LevelElt syn keyword BacSpecial false true yes no " Pool syn region BacPool display start="Pool {" end="^}" contains=BacP1,BacP2,BacP3,BacString,BacComment syn match BacP1 contained /Pool\s*Type/ syn match BacP2 contained /Volume\s*Retention/ syn keyword BacP3 contained Name AutoPrune Recycle syn case match if version >= 508 || !exists("did_screen_syn_inits") if version < 508 let did_screen_syn_inits = 1 command -nargs=+ HiLink hi link else command -nargs=+ HiLink hi def link endif " Define the default highlighting. 
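" Note: HiLink (defined just above) expands to ":hi link" on Vim releases
" older than 5.8 and to ":hi def link" on newer ones, so the links below are
" only defaults that a user colour scheme may still override.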
HiLink BacFileSet Function HiLink BacFSInc Function HiLink BacFSExc Function HiLink BacFSOpt Function HiLink BacFSOpt1 Keyword HiLink BacFSOpt2 Keyword HiLink BacFS1 Keyword HiLink BacFS2 Keyword HiLink BacInclude Include HiLink BacComment Comment HiLink BacComment2 Comment HiLink BacTodo Todo HiLink LevelElt String HiLink BacRun Function HiLink BacCat Function HiLink BacCat1 Keyword HiLink BacSto Function HiLink BacSto1 Keyword HiLink BacSto2 Keyword HiLink BacDir Function HiLink BacDir1 keyword HiLink BacDir2 keyword HiLink BacJob Function HiLink BacJobDefs Function HiLink BacJ1 Keyword HiLink BacJ2 Keyword HiLink BacJ3 Keyword HiLink BacClient Function HiLink BacC1 Keyword HiLink BacC2 Keyword HiLink BacC3 Keyword HiLink BacC4 Keyword HiLink Level Keyword HiLink BacSched Function HiLink BacS1 Keyword HiLink BacS2 String HiLink BacS3 String HiLink BacS4 String HiLink BacR1 Keyword HiLink BacR2 Keyword HiLink BacR3 Keyword HiLink BacR4 Keyword HiLink BacRW String HiLink BacPool Function HiLink BacP1 Keyword HiLink BacP2 Keyword HiLink BacP3 Keyword HiLink BacName Keyword HiLink BacString String HiLink BacNumber Number HiLink BacCommand BacCommands HiLink BacCommands Keyword HiLink BacSpecial Boolean HiLink BacKey Function HiLink Equal Comment delcommand HiLink endif bacula-15.0.3/scripts/bacula.png0000644000175000017500000000564714771010173016322 0ustar bsbuildbsbuildPNG  IHDR@@iqbKGD pHYs ,tIME Ц 4IDATx[o׵{IIԅ,KrS8i ;A #$] -yHq8ɲ( >g#BF?Au$]7\7\7\<I`|.AY2Y\^`u)BL"#@PB"%@85q< K!xw'#e"F*@wg,rb D`-r9Ç#a"G%`W0q"HpPV}ƍye`&B@ us Pxō_ekGNj *("A$LʩE(Efq㱟JT(1r ;;/.LD`/yU,ɲ H%+H>ˮ(^eKkR_~aG6jR}!pP *N.D)m0d9:,+q`@uujz!&I &`\>Jv>.A31 \RF)K^@Ųʅmy^G.'nS_(u:@c޽} omlaH9XTتVˣsj7DQ8sx _~II7o$cX_gs9򠎃|re(}N4"J*gH)%Avu:Xrmc nr44FҀB@T:#GC[J GG1pH&}le8 LvTvt XX r9vڿ-xx_s. y6:n]\$L8 C*ՒIɭz{ld1 0N+C7%/-iqX v>e#XD%T|E/[_' f#x2Li` 2yXmaZ3/^0" 5HJn ؚ[ժ<8g#$T,`lMVCpqB:pHӡl ÐZ>OP`GufQx۲h%_|y0"A6%N?QT~R6 6']/t'(!B`^G}$?keaݪ0!`1۶1Mu8Nل*,:kkkO:S! ;0 $z=0<dclMw/]'8?y"Uav z260p]dax ~ 74CS"Ӝ!p~U}qLOŐNR ݊ovQ\Ftv>O8?htj:"PZ Ub~|E`:.7QBLhAy15D°=eloѠ9-M#/K`UΣ]oBwr9BG qUN+EbhJヒﺘu qjAIjWqXg4N+om]Y.cln2_] z>Vs85n?zD7i(bF#1ue/j$P&uޥGP*!pa'ЫPʤ=UeDǬHin udW_>=q9T$8}]B]1}W* 03 |E^WVQ,ln{^5)_`BJ(Q8La0m<o8.^DuWB۲vSL@MJrN8M=ii(q?euiJl\%aS~g_ AJXE: ${WD}/bacula.$2.traceback echo "" >> ${WD}/bacula.$2.traceback case `uname -s` in SunOS) # # See what debuggers are available on this platform. # We need to to some tricks to find out as a which on # a non existing binary gives: # # no in # # So we use the return code which is 0 when it finds # somethings and 1 if not. # which gdb > /dev/null 2>&1 && GDB=`which gdb` || GDB='' which dbx > /dev/null 2>&1 && DBX=`which dbx` || DBX='' which mdb > /dev/null 2>&1 && MDB=`which mdb` || MDB='' gcore -o ${WD}/${PNAME} $2 if [ ! -z "${DBX}" ]; then ${DBX} $1 $2 < @scriptdir@/btraceback.dbx >> ${WD}/bacula.$2.traceback 2>&1 elif [ ! -z "${GDB}" ]; then ${GDB} -quiet -batch -x @scriptdir@/btraceback.gdb $1 $2 >> ${WD}/bacula.$2.traceback 2>&1 elif [ ! -z "${MDB}" ]; then ${MDB} -u -p $2 < @scriptdir@/btraceback.mdb >> ${WD}/bacula.$2.traceback 2>&1 fi ;; *) which gdb > /dev/null 2>&1 && GDB=`which gdb` || GDB='' if [ ! -z "${GDB}" ]; then gdb -quiet -batch -x @scriptdir@/btraceback.gdb $1 $2 >> ${WD}/bacula.$2.traceback 2>&1 else echo "Please install a debugger (gdb) to receive a traceback." >> ${WD}/bacula.$2.traceback 2>&1 fi ;; esac PNAME="${PNAME} on `hostname`" cat ${WD}/bacula.$2.traceback \ | @sbindir@/bsmtp -h @smtp_host@ -f @dump_email@ -s "Bacula traceback. 
Process crashed ${PNAME}" @dump_email@ bacula-15.0.3/scripts/kubernetes-bacula-backup/0000755000175000017500000000000014771010173021210 5ustar bsbuildbsbuildbacula-15.0.3/scripts/kubernetes-bacula-backup/requirements.txt0000644000175000017500000000030714771010173024474 0ustar bsbuildbsbuild# -*- coding: UTF-8 -*- # Copyright (C) 2000-2020 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # # Author: Radoslaw Korzeniewski, radekk@inteos.pl, Inteos Sp. z o.o. # pyinstaller bacula-15.0.3/scripts/kubernetes-bacula-backup/Dockerfile.in0000644000175000017500000000272514771010173023615 0ustar bsbuildbsbuild# # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. # # Author: Radosław Korzeniewski, radekk@inteos.pl, Inteos Sp. z o.o. FROM python:3.8-slim AS build ADD requirements.txt / ADD baculatar.py / RUN apt-get update && apt-get -y install binutils && pip3 install -r requirements.txt RUN pyinstaller --onefile baculatar.py FROM python:3.8-slim ARG VERSION=@KUBERNETES_IMAGE_VERSION@ LABEL maintainer="Radosław Korzeniewski " LABEL org.label-schema.schema-version="1.0" LABEL org.label-schema.description="This is a Bacula container for incluster Kubernetes Plugin backup." LABEL org.label-schema.vendor="Bacula" LABEL org.label-schema.version=$VERSION ENV PLUGINMODE 'backup' ENV PLUGINHOST 'localhost' ENV PLUGINPORT '9104' ENV PLUGINTOKEN '' ENV PLUGINJOB '' ENV PLUGINCONNRETRIES '600' ENV PLUGINCONNTIMEOUT '600' COPY --from=build /dist/baculatar / COPY tar /tar CMD ["/baculatar"] bacula-15.0.3/scripts/kubernetes-bacula-backup/copylibs0000755000175000017500000000133514771010173022764 0ustar bsbuildbsbuild#!/bin/bash # Copyright (C) 2000-2020 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # # copy the libraries needed for a binary into the # local directory mkdir -p $PWD/libs/usr/lib64 function copylib { file=$1 PWD=`pwd` F=`dirname $file` mkdir -p $PWD/libs/$F cp -v $file $PWD/libs/$F } ldd $1 | awk '/=>/ { print $3 }' | while read file do copylib $file done # We need to copy some common libraries used by libc file=`ldd $1 | awk '/ld-linux/ { print $1 }'` copylib $file file=`ldconfig -p | grep -v lib32 | awk '/libresolv\.so\./ { gsub(/.+=> /, ""); print $1 }'` copylib $file file=`ldconfig -p | grep -v lib32 | awk '/libnss_dns\.so\./ { gsub(/.+=> /, ""); print $1 }'` copylib $file bacula-15.0.3/scripts/kubernetes-bacula-backup/baculatar.py0000644000175000017500000002261714771010173023530 0ustar bsbuildbsbuild# -*- coding: UTF-8 -*- # # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2020 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. 
# # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. # # Author: Radoslaw Korzeniewski, radekk@inteos.pl, Inteos Sp. z o.o. # from __future__ import unicode_literals import os import sys import ssl import socket import time import logging import subprocess COPYRIGHT = 'Copyright (C) 2000-2022 Kern Sibbald' VERSION = '1.0-rpk' TARCMD = '/tar' TARPIPE = '/tmp/baculatar.fifo' TARSTDOUT = '/tmp/baculatar.stdout' TARSTDERR = '/tmp/baculatar.stderr' CONNRETRIES = 600 CONNTIMEOUT = 600 DEFAULTPORT = 9104 ERR_NO_DIR_FOUND = "No /{dir}/ directory found. Cannot execute backup!" class BaculaConnection(object): def __init__(self, *args, **kwargs): super(BaculaConnection, self).__init__(*args, **kwargs) self.mode = os.getenv('PLUGINMODE', 'backup') if self.mode not in ('backup', 'restore'): self.mode = 'backup' self.host = os.getenv('PLUGINHOST', 'localhost') self.port = os.getenv('PLUGINPORT', DEFAULTPORT) self.token = os.getenv('PLUGINTOKEN', '') self.jobname = os.getenv('PLUGINJOB', 'undefined') self.connretries = os.getenv('PLUGINCONNRETRIES', CONNRETRIES) if isinstance(self.connretries, str): try: self.connretries = int(self.connretries) except ValueError: logging.error('$PLUGINCONNRETRIES ValueError: {retry} using default.'.format(retry=self.connretries)) self.connretries = CONNRETRIES self.conntimeout = os.getenv('PLUGINCONNTIMEOUT', CONNTIMEOUT) if isinstance(self.conntimeout, str): try: self.conntimeout = int(self.conntimeout) except ValueError: logging.error('$PLUGINCONNTIMEOUT ValueError: {timeout} using default.'.format(timeout=self.conntimeout)) self.connretries = CONNTIMEOUT self.context = ssl.SSLContext(ssl.PROTOCOL_TLS) self.tarproc = None self.conn = None self.tout = None self.terr = None if isinstance(self.port, str): try: self.port = int(self.port) except ValueError: logging.error('$PLUGINPORT ValueError: {port} using default.'.format(port=self.port)) self.port = DEFAULTPORT if isinstance(self.connretries, str): try: self.connretries = int(self.connretries) except ValueError: logging.error('$PLUGINCONNRETRIES ValueError: {retry} using default.'.format(port=self.connretries)) self.port = CONNRETRIES logging.info('BaculaConnection: mode={mode} host={host} port={port} token={token}'.format( mode=self.mode, host=self.host, port=self.port, token=self.token )) def authenticate(self): logging.debug('Authenticate') logging.debug(self.conn) if self.conn is not None: # first prepare the hello string self.conn.send(bytes('{strlen:03}:Hello:{job}'.format(job=self.jobname, strlen=len(self.jobname) + 7), 'ascii')) self.conn.send(bytes('Token: {token:64}\n'.format(token=self.token), 'ascii')) stat = self.conn.recv(2) if stat.decode() == 'OK': logging.info('Authenticated.') else: logging.error('Authentication failure.') self.disconnect() sys.exit(4) def connect(self): if self.conn is None: sock = socket.socket(socket.AF_INET) self.conn = self.context.wrap_socket(sock, server_hostname=self.host) for attempt in range(self.connretries): try: logging.info('Try to connect to: {host}:{port} {att}/{retry}'.format(host=self.host, port=self.port, att=attempt, retry=self.connretries)) self.conn.connect((self.host, self.port)) except ssl.SSLError as e: logging.debug(e) logging.error('SSLError - cannot continue.') sys.exit(2) except OSError as e: logging.debug(e) time.sleep(1) logging.debug('Connected OSError') except Exception as e: logging.debug(e) time.sleep(1) logging.debug('Connected Exception') break else: 
logging.info('Connected.') break else: logging.error('Cannot connect to {host}:{port} max retries exceed.'.format( host=self.host, port=self.port )) sys.exit(1) self.authenticate() def disconnect(self): if self.conn is not None: self.conn.close() logging.info('Disconnected.') self.conn = None def sendfile(self, filename): with open(filename, 'rb') as fd: while True: data = fd.read(65536) if len(data) > 0: self.conn.send(data) else: break def receivefile(self, filename): with open(filename, 'wb') as fd: while True: data = self.conn.recv(65536) logging.info('recv:D' + str(len(data))) if len(data) > 0: fd.write(data) else: break def prepare_execute(self): if not os.path.exists(TARPIPE): os.mkfifo(TARPIPE) self.tout = open(TARSTDOUT, 'w') self.terr = open(TARSTDERR, 'w') def logging_from_file(self, filename): with open(filename, 'r') as fd: while True: log = fd.readline().rstrip() if not log: break logging.info(log) def final_execute(self): # close current execution descriptors self.tout.close() self.terr.close() self.disconnect() # wait for tar to terminate exitcode = -1 try: exitcode = self.tarproc.wait(timeout=self.conntimeout) except subprocess.TimeoutExpired: logging.error('Timeout waiting {} for tar to proceed. Terminate!'.format(self.conntimeout)) self.tarproc.terminate() # send execution logs logging.debug('Final_execute') self.connect() logging.info('tar exit status:{}'.format(exitcode)) self.conn.send("{}\n".format(exitcode).encode()) self.conn.send(b'---- stderr ----\n') self.sendfile(TARSTDERR) self.conn.send(b'---- list ----\n') self.sendfile(TARSTDOUT) self.conn.send(b'---- end ----\n') self.disconnect() logging.info('-- stderr --') self.logging_from_file(TARSTDERR) logging.info('-- list --') self.logging_from_file(TARSTDOUT) logging.info('Finish.') def err_no_dir_found(self): logging.error(ERR_NO_DIR_FOUND.format(dir=self.mode)) self.terr.write('404 ' + ERR_NO_DIR_FOUND.format(dir=self.mode) + '\n') self.final_execute() def execute_backup(self): logging.debug('Execute_backup') self.prepare_execute() logging.debug('Executed_prepare_execute') if os.path.isdir('/backup'): logging.debug('Isdir') self.tarproc = subprocess.Popen([TARCMD, '-cvvf', TARPIPE, '-C', '/backup', '.'], stderr=self.terr, stdout=self.tout) self.sendfile(TARPIPE) self.final_execute() else: logging.debug('Nodir') self.err_no_dir_found() def execute_restore(self): logging.debug('Execute_restore') self.prepare_execute() if os.path.isdir('/restore'): self.tarproc = subprocess.Popen([TARCMD, '-xvvf', TARPIPE, '-C', '/restore'], stderr=self.terr, stdout=self.tout) self.receivefile(TARPIPE) self.final_execute() else: self.err_no_dir_found() def execute(self): method = getattr(self, 'execute_' + self.mode, None) if method is None: logging.error('Invalid mode={mode} of execution!'.format(mode=self.mode)) self.disconnect() sys.exit(3) logging.info('BaculaJob: {}'.format(self.jobname)) self.connect() method() def main(): logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s', level=logging.DEBUG) logging.info('Bacula Kubernetes backup helper version {ver}. 
Copyright {cc}'.format( ver=VERSION, cc=COPYRIGHT )) bacula = BaculaConnection() bacula.execute() if __name__ == '__main__': main() bacula-15.0.3/scripts/kubernetes-bacula-backup/Makefile0000644000175000017500000000227614771010173022657 0ustar bsbuildbsbuild# # Simple Makefile for building plugins for Bacula # # Copyright (C) 2000-2023 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # # The script creates a docker image of the baculatar.py # program. It uses also a static version of tar. # # It requires docker and pyinstaller (installed via pip3) # The reference platform is bionic # D=$(shell date +%d%b%y) all: bacula-backup-$(D).tar.gz bacula-backup-$(D).tar.gz: baculatar.py tar Dockerfile docker build -t bacula-backup . docker tag bacula-backup:latest bacula-backup:$(D) docker save -o bacula-backup-$(D).tar bacula-backup:$(D) gzip -9 bacula-backup-$(D).tar baculatar: baculatar.py pyinstaller baculatar.py ./copylibs dist/baculatar/libpython* rm -rf dist build pyinstaller -F baculatar.py mv -f dist/baculatar . ./copylibs baculatar tar: rm -rf archbuild git clone https://github.com/ebl/tar-static.git archbuild (cd archbuild; ./build.sh) cp archbuild/releases/tar . rm -rf archbuild clean: rm -rf baculatar.c bacula-backup-*.tar.gz __pycache__ baculatar.spec rm -rf dist build baculatar libs distclean: clean rm -f tar upload: bacula-backup-$(D).tar.gz rsync -avz bacula-backup-$(D).tar.gz bacula.org:/home/src/depkgs/ bacula-15.0.3/scripts/freespace0000755000175000017500000000107214771010173016234 0ustar bsbuildbsbuild#!/bin/sh # # Copyright (C) 2000-2020 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # # Bacula interface to get device freespace # # If you set in your Device resource # # Free Space Command = "path-to-this-script/freespace %a" # you will have the following input to this script: # # # freespace "archive-device" # $1 # OS=`uname` case ${OS} in SunOS) cmd="/bin/df -P" ;; FreeBSD) cmd="/bin/df -P" ;; Linux) cmd="/bin/df -P" ;; *) cmd="/bin/df -P" ;; esac $cmd $1 | tail -1 | awk '{ print $4 }' bacula-15.0.3/scripts/magic.bacula.txt0000644000175000017500000000105714771010173017423 0ustar bsbuildbsbuildThe file magic.bacula can be added to your system files so that the file command will recognize Bacula Volumes as such. If I understand this correctly, you add that to the database of "magic" that the command file uses. See 'man 1 file' and 'man 5 magic'. On my system, there are many ways to add to the magic. For testing, you'd add it to /etc/magic, usually. This is an interesting contribution that might also be relevant to the maintainers of magic, IMO. Explanation by: Arno magic.bacula submitted in bug #715 by "shattered". bacula-15.0.3/scripts/defaultconfig0000755000175000017500000000134614771010173017115 0ustar bsbuildbsbuild#!/bin/sh # # Copyright (C) 2000-2020 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # # This is a default configuration file for Bacula that # sets reasonable defaults, and assumes that you do not # have MySQL running. It will "install" Bacula into # bin and etc in the current directory. 
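# Note: all paths below install into $HOME/bacula/bin and expect a private
# MySQL build in $HOME/mysql; adjust them to match your own development
# environment before running this script.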
# CFLAGS="-g -Wall" \ ./configure \ --sbindir=$HOME/bacula/bin \ --sysconfdir=$HOME/bacula/bin \ --with-pid-dir=$HOME/bacula/bin \ --with-subsys-dir=$HOME/bacula/bin \ --enable-smartalloc \ --with-mysql=$HOME/mysql \ --with-working-dir=$HOME/bacula/bin/working \ --with-dump-email=root@localhost \ --with-job-email=root@localhost \ --with-smtp-host=localhost \ --with-baseport=9101 exit 0 bacula-15.0.3/scripts/magic.bacula0000644000175000017500000000045214771010173016603 0ustar bsbuildbsbuild### Old Bacula volume 12 string BB01 old Bacula volume data, >20 bedate x session time %s ### Current Bacula volume 12 string BB02 Bacula volume data, >20 bedate x session time %s, >24 belong -1 PRE_LABEL, >24 belong -2 VOL_LABEL, >>57 belong x version %d, >>93 string x name '%s' bacula-15.0.3/scripts/logwatch/0000755000175000017500000000000014771010173016161 5ustar bsbuildbsbuildbacula-15.0.3/scripts/logwatch/applybaculadate0000755000175000017500000000262414771010173021246 0ustar bsbuildbsbuild#!/usr/bin/perl ######################################################################## ## Copyright (c) 2009 Sigma Consulting Services Limited ## v1.00 2009/06/21 16:54:23 Ian McMichael ## ## This program is free software: you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation, either version 2 of the License, or ## any later version. ## ## This program is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with this program. If not, see . ######################################################################## use Logwatch ':dates'; my $Debug = $ENV{'LOGWATCH_DEBUG'} || 0; $SearchDate = TimeFilter('%d-%b %H:%M'); if ( $Debug > 5 ) { print STDERR "DEBUG: Inside ApplyBaculaDate...\n"; print STDERR "DEBUG: Looking For: " . $SearchDate . "\n"; } my $OutputLine = 0; while (defined($ThisLine = )) { if ($ThisLine =~ m/^$SearchDate /o) { $OutputLine = 1; } elsif ($ThisLine !~ m/^\s+/o) { $OutputLine = 0; } if ($OutputLine) { print $ThisLine; } } # vi: shiftwidth=3 syntax=perl tabstop=3 et bacula-15.0.3/scripts/logwatch/logfile.bacula.conf.in0000644000175000017500000000014114771010173022300 0ustar bsbuildbsbuild# What actual file? Defaults to LogPath if not absolute path.... LogFile = @logdir@/bacula.log bacula-15.0.3/scripts/logwatch/Makefile.in0000644000175000017500000000165614771010173020236 0ustar bsbuildbsbuild# Makefile to install logwatch script # # Copyright (C) 2000-2023 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # # 08 Jan 2005 D. 
Scott Barninger # ifeq ("$(shell test -d /etc/log.d)",0) SYSCONFDIR=/etc/log.d SCRIPTDIR=/etc/log.d else SYSCONFDIR=/etc/logwatch SCRIPTDIR=/usr/share/logwatch endif INSTALL=@INSTALL@ all: install install: $(INSTALL) -m 755 bacula $(DESTDIR)$(SCRIPTDIR)/scripts/services/bacula $(INSTALL) -m 755 applybaculadate $(DESTDIR)$(SCRIPTDIR)/scripts/shared/applybaculadate $(INSTALL) -m 644 logfile.bacula.conf $(DESTDIR)$(SYSCONFDIR)/conf/logfiles/bacula.conf $(INSTALL) -m 644 services.bacula.conf $(DESTDIR)$(SYSCONFDIR)/conf/services/bacula.conf uninstall: rm -f $(DESTDIR)$(SCRIPTDIR)/scripts/services/bacula rm -f $(DESTDIR)$(SCRIPTDIR)/scripts/shared/applybaculadate rm -f $(DESTDIR)$(SYSCONFDIR)/conf/logfiles/bacula.conf rm -f $(DESTDIR)$(SYSCONFDIR)/conf/services/bacula.conf bacula-15.0.3/scripts/logwatch/README0000644000175000017500000000256414771010173017050 0ustar bsbuildbsbuildInstallation instructions for bacula logwatch script 04 Sep 2005 Installation into a standard logwatch configuration is fairly straightforward. Run 'make install' as root from this directory. For manual install copy the files as indicated below: cp -p scripts/logwatch/bacula /etc/log.d/scripts/services/bacula cp -p scripts/logwatch/applybacula /etc/log.d/scripts/shared/applybaculadate cp -p scripts/logwatch/logfile.bacula.conf /etc/log.d/conf/logfiles/bacula.conf cp -p scripts/logwatch/services.bacula.conf /etc/log.d/conf/services/bacula.conf chmod 755 /etc/log.d/scripts/services/bacula chmod 755 /etc/log.d/scripts/shared/applybaculadate chmod 644 /etc/log.d/conf/logfiles/bacula.conf chmod 644 /etc/log.d/conf/services/bacula.conf To test your installation run logwatch (as root): /etc/log.d/logwatch The following is the kind of output you could expect to be added to the standard logwatch listing: --------------------- bacula Begin ------------------------ Jobs Run: 2005-09-02 2012 backupNightlySave.2005-09-02_01.05.00 BackupOK 2005-09-02 2013 scottNightlySave.2005-09-02_01.05.01 BackupOK 2005-09-02 2014 marthaNightlySave.2005-09-02_01.05.02 BackupOK 2005-09-02 2015 lyndaNightlySave.2005-09-02_01.05.03 BackupOK 2005-09-02 2016 backupBackupCatalog.2005-09-02_01.10.00 BackupOK ---------------------- bacula End ------------------------- bacula-15.0.3/scripts/logwatch/bacula0000755000175000017500000000345414771010173017344 0ustar bsbuildbsbuild#!/usr/bin/perl -w # # logwatch filter script for bacula log files # # Mon Jan 03 2005 # D. 
Scott Barninger and Karl Cunningham # # Copyright (C) 2000-2015 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS use strict; use POSIX qw(strftime); my (@JobName,$JobStatus,$ThisName,$ThisStatus,$ThisDate); my ($Job,$JobId,$JobDate,$Debug,$DebugCounter,%data); # set debug level $Debug = $ENV{'LOGWATCH_DEBUG'} || 0; if ( $Debug >= 5 ) { print STDERR "\n\nDEBUG: Inside Bacula Filter \n\n"; $DebugCounter = 1; } while () { chomp; if ( $Debug >= 5 ) { print STDERR "DEBUG($DebugCounter): $_\n"; $DebugCounter++; } # Test the line for a new entry, which is a jobid record if (/^\s*JobId:\s+(\d+)/) { $JobId = $1; next; } (/^\s*Job:\s*(.*)/) and $Job = $1; (/^\s*Termination:\s*(.*)/) and $JobStatus = $1; (/^\s*Job:.*(\d{4}-\d{2}-\d{2})/) and $JobDate = $1; if ($JobId and $Job and $JobStatus and $JobDate) { $data{$JobId} = { "Job" => $Job, "JobStatus" => $JobStatus, "JobDate" => $JobDate, }; $JobId = $Job = $JobStatus = $JobDate = ""; } } # if we have data print it out, otherwise do nothing if (scalar(keys(%data))) { print "\nJobs Run:\n"; foreach my $Id (sort {$a<=>$b} (keys(%data))) { $ThisName = $data{$Id}{Job}; $ThisStatus = $data{$Id}{JobStatus}; $ThisDate = $data{$Id}{JobDate}; $ThisName =~ s/\s//g; $ThisStatus =~ s/\s//g; print "$ThisDate $Id $ThisName\n $ThisStatus\n\n"; } } exit(0); bacula-15.0.3/scripts/logwatch/services.bacula.conf0000644000175000017500000000012014771010173022072 0ustar bsbuildbsbuildTitle = "bacula" # Which logfile group... LogFile = bacula *ApplyBaculaDate = bacula-15.0.3/scripts/check_dmsg0000755000175000017500000000111514771010173016364 0ustar bsbuildbsbuild#!/usr/bin/perl -w use strict; # Copyright (C) 2000-2022 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # Written by Eric Bollengier # Code analysis tool to detect incorrect debug levels # Usage: # g++ -E -c -fno-strict-aliasing -fno-exceptions -fno-rtti -I. -I.. -g -O2 -Wall -fno-strict-aliasing -fno-exceptions -fno-rtti file.c | check_dmsg my $found=0; while (my $line = <>) { if ($line =~ /d_msg\("(.+?)\", (\d+), 0, "(.+?)",/) { print "$1:$2\t[$3]\n"; $found++; } } print "Found $found Dmsg(0) problems\n" if ($found); exit ($found>0); bacula-15.0.3/scripts/filetype.vim0000644000175000017500000000026514771010173016712 0ustar bsbuildbsbuild" put this file to $HOME/.vim if exists("have_load_filetypes") finish endif augroup filetypedetect au! BufRead,BufNewFile bacula-dir.conf setfiletype bacula augroup END bacula-15.0.3/scripts/logrotate.in0000644000175000017500000000061514771010173016703 0ustar bsbuildbsbuild# # Copyright (C) 2000-2022 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # # # If you are appending to a log file (default), to # have your log file compressed, rotated, and after a time # deleted, after possibly editing the values below, # copy this file to: # # /etc/logrotate.d/bacula # @logdir@/bacula.log { monthly rotate 5 notifempty missingok } bacula-15.0.3/scripts/baculabackupreport0000644000175000017500000000024414771010173020145 0ustar bsbuildbsbuildecho Visit https://github.com/waa/baculabackupreport to get the current version echo of this very good reporting tool. A must have for all Bacula sysadmin. 
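# Note: this file is only a placeholder that prints the pointer above; the
# actual reporting script is maintained out of tree in that GitHub repository
# and must be installed separately to produce the backup reports.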
bacula-15.0.3/scripts/aws_cloud_driver.in0000755000175000017500000003450714771010173020250 0ustar bsbuildbsbuild#!/usr/bin/env python2 # -*- coding: utf-8 -*- # Bacula(R) - The Network Backup Solution # Copyright (C) 2000-2023 Kern Sibbald # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # This notice must be preserved when any source code is # conveyed and/or propagated. # Bacula(R) is a registered trademark of Kern Sibbald. # # Routines for aws S3 cloud driver # # Written by Norbert Bizet, August MMXVIII # # import sys, os, json, time from subprocess import Popen, PIPE, call from multiprocessing import Pool, cpu_count from inspect import stack import logging, traceback # RETRY DOWNLOAD RETRY_DOWNLOAD = 0x0D def vol_ls(): try: logging.info("enter vol_ls") cmd = ["aws", "s3", "ls", cloud_path] if endpoint_url: cmd += ["--endpoint-url", endpoint_url] proc = Popen( cmd, stdout=PIPE, stderr=PIPE, universal_newlines=True) output,err = proc.communicate() logging.debug("vol_ls proc communicate output:{0} , err:{1}".format(output,err)) # sort out only exception errors since progress is reported into stderr if err: logging.error("vol_ls got error {0}".format(err)) sys.stderr.write(err) if output: # expected output format will be "VolName1\nVolName2\nVolName3\n" output = "\n".join(list(filter(None, [line.rsplit(' ',2)[2].strip() for line in output.replace(cloud_path, '').replace('/','').splitlines()]))) + "\n" # forward out stds logging.info("vol_ls got ouput") logging.info("vol_ls outputing {0}".format(output)) sys.stdout.write(output) if proc.returncode == 1: # possible to ls unexisting path. In this case, return code will be 1. 
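            # An empty or not-yet-created bucket prefix is not an error for
            # volume listing, so the exit code 1 from "aws s3 ls" is mapped
            # to success here.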
return 0 return proc.returncode except Exception as e: exc = traceback.format_exception_only(type(e), e)[0] sys.stderr.write(exc) logging.error(traceback.format_exc()) return 1 def ls(): try: logging.info("enter ls") cmd = ["aws", "s3", "ls", os.path.join(cloud_path, volume, part)] if endpoint_url: cmd += ["--endpoint-url", endpoint_url] proc = Popen( cmd, stdout=PIPE, stderr=PIPE, universal_newlines=True) output,err = proc.communicate() logging.debug("ls proc communicate output:{0} , err:{1}".format(output,err)) # sort out only exception errors since progress is reported into stderr if err: logging.error("ls got error {0}".format(err)) sys.stderr.write(err) if output: logging.info("ls got ouput") # parse and format output #mtime mtimes = [int(time.mktime(time.strptime(line.rsplit(' ',2)[0].strip(),'%Y-%m-%d %H:%M:%S'))) for line in output.splitlines()] #size sizes = [line.rsplit(' ',2)[1].strip() for line in output.splitlines()] #names names = [line.rsplit(' ',2)[2].strip() for line in output.splitlines()] # expected output format will be "name:Name1,mtime:Time1,size:Size1\nname:Name2,mtime:Time2,size:Size2\nname:Name3,mtime:Time3,size:Size3\n" output = "\n".join(["name:{0},mtime:{1},size:{2}".format(n,t,s) for n, t, s in zip(names, mtimes, sizes)]) + "\n" # forward out stds logging.info("ls outputing {0}".format(output)) sys.stdout.write(output) return proc.returncode except Exception as e: exc = traceback.format_exception_only(type(e), e)[0] sys.stderr.write(exc) logging.error(traceback.format_exc()) return 1 def download(): try: logging.info("enter download") cmd = ["aws", "s3", "cp", os.path.join(cloud_path, volume, part), "-", "--only-show-errors"] if endpoint_url: cmd += ["--endpoint-url", endpoint_url] proc = Popen( cmd, stderr=PIPE, universal_newlines=True) output,err = proc.communicate() logging.debug("download proc communicate output:{0} , err:{1}".format(output,err)) if err and err.find("An error occurred (InvalidObjectState) when calling the GetObject operation") != -1: restore() return RETRY_DOWNLOAD return proc.returncode except Exception as e: exc = traceback.format_exception_only(type(e), e)[0] sys.stderr.write(exc) logging.error(traceback.format_exc()) return 1 def delete(): try: logging.info("enter delete") cmd = ["aws", "s3", "rm", os.path.join(cloud_path, volume, part), "--only-show-errors"] if endpoint_url: cmd += ["--endpoint-url", endpoint_url] proc = Popen( cmd, stdout=PIPE, stderr=PIPE, universal_newlines=True) output,err = proc.communicate() logging.debug("delete proc communicate output:{0} , err:{1}".format(output,err)) # sort out only exception errors since progress is reported into stderr (yuck!) if err: logging.error("delete got error {0}".format(err)) sys.stderr.write(err) if output: logging.info("delete got ouput {0}".format(output)) sys.stdout.write(output) return proc.returncode except Exception as e: exc = traceback.format_exception_only(type(e), e)[0] sys.stderr.write(exc) logging.error(traceback.format_exc()) return 1 def upload(): try: logging.info("enter upload") cmd = ["aws", "s3", "cp", "-", os.path.join(cloud_path, volume, part), "--storage-class", objects_default_tier, "--only-show-errors"] if endpoint_url: cmd += ["--endpoint-url", endpoint_url] proc = Popen( cmd, stdout=PIPE, stderr=PIPE, universal_newlines=True) output,err = proc.communicate() logging.debug("upload proc communicate output:{0} , err:{1}".format(output,err)) # sort out only exception errors since progress is reported into stderr (yuck!) 
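        # Forward any copy errors below, then list the freshly uploaded part
        # back with "aws s3 ls" so the caller gets its stored name, mtime and
        # size in the same "name:...,mtime:...,size:..." format as ls().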
if err: sys.stderr.write(err) ret = proc.returncode cmd = ["aws", "s3", "ls", os.path.join(cloud_path, volume, part)] if endpoint_url: cmd += ["--endpoint-url", endpoint_url] proc = Popen( cmd, stdout=PIPE, stderr=PIPE, universal_newlines=True) output,err = proc.communicate() logging.debug("ls proc communicate output:{0} , err:{1}".format(output,err)) # sort out only exception errors since progress is reported into stderr if err: logging.error("ls got error {0}".format(err)) sys.stderr.write(err) if output: logging.info("ls got ouput") # parse and format output #mtime mtimes = [int(time.mktime(time.strptime(line.rsplit(' ',2)[0].strip(),'%Y-%m-%d %H:%M:%S'))) for line in output.splitlines() if line.endswith(part)] #size sizes = [line.rsplit(' ',2)[1].strip() for line in output.splitlines() if line.endswith(part)] #names names = [line.rsplit(' ',2)[2].strip() for line in output.splitlines() if line.endswith(part)] # expected output format will be "name:Name1,mtime:Time1,size:Size1\nname:Name2,mtime:Time2,size:Size2\nname:Name3,mtime:Time3,size:Size3\n" output = "\n".join(["name:{0},mtime:{1},size:{2}".format(n,t,s) for n, t, s in zip(names, mtimes, sizes)]) + "\n" # forward out stds logging.info("ls outputing {0}".format(output)) sys.stdout.write(output) ret = ret + proc.returncode return ret except Exception as e: exc = traceback.format_exception_only(type(e), e)[0] sys.stderr.write(exc) logging.error(traceback.format_exc()) return 1 def move(): try: logging.info("enter move from {0} to {1}".format(os.path.join(cloud_path, volume, part), os.path.join(cloud_path, volume, local_part))) cmd = ["aws", "s3", "mv", os.path.join(cloud_path, volume, part), os.path.join(cloud_path, volume, local_part), "--only-show-errors"] if endpoint_url: cmd += ["--endpoint-url", endpoint_url] proc = Popen( cmd, stdout=PIPE, stderr=PIPE, universal_newlines=True) output,err = proc.communicate() logging.debug("move proc communicate output:{0} , err:{1}".format(output,err)) if not err: sys.stdout.write("{0}\0".format(local_part)) return 0 else: if err.find("Key \"{0}\" does not exist".format(os.path.join(volume, part))) != -1: logging.debug("move cant find source {0}. 
OK.".format(part)) return 0 logging.error("move got error {0}".format(err)) sys.stderr.write(err) return proc.returncode except Exception as e: exc = traceback.format_exception_only(type(e), e)[0] sys.stderr.write(exc) logging.error(traceback.format_exc()) return 1 def restore(): try: logging.info("enter restore {0}".format(os.path.join(cloud_path, volume, part))) cmd = ["aws", "s3api", "restore-object", "--restore-request", "{{\"Days\": {0}, \"GlacierJobParameters\" : {{\"Tier\" : \"{1}\"}}}}".format(transfer_retention_days,transfer_priority), "--bucket", bucket, "--key", os.path.join(volume, part)] if endpoint_url: cmd += ["--endpoint-url", endpoint_url] proc = Popen( cmd, stdout=PIPE, stderr=PIPE, universal_newlines=True) output,err = proc.communicate() logging.debug("restore proc communicate output:{0} , err:{1}".format(output,err)) if err: logging.error("restore got error {0}".format(err)) sys.stderr.write(err) if output: logging.info("restore got ouput {0}".format(output)) sys.stdout.write(output) return proc.returncode except Exception as e: exc = traceback.format_exception_only(type(e), e)[0] sys.stderr.write(exc) logging.error(traceback.format_exc()) return 1 def wait_on_restore(): try: logging.info("enter wait_on_restore {0}".format(os.path.join(cloud_path, volume, part))) cmd = ["aws", "s3api", "head-object", "--bucket", bucket, "--key", os.path.join(volume, part)] if endpoint_url: cmd += ["--endpoint-url", endpoint_url] proc = Popen( cmd, stdout=PIPE, stderr=PIPE, universal_newlines=True) output,err = proc.communicate() logging.debug("wait_on_restore proc communicate output:{0} , err:{1}".format(output,err)) if err: logging.error("wait_on_restore got error {0}".format(err)) sys.stderr.write(err) return 1 if output: logging.info("wait_on_restore got ouput {0}".format(output)) j = json.loads(output) if "Restore" in j and j["Restore"]: if j["Restore"]=='ongoing-request="true"': logging.info("Ongoing restore detected") sys.stdout.write("WOR-INPROGRESS") elif j["Restore"]=='ongoing-request="false"': logging.info("NO Ongoing restore detected") sys.stdout.write("WOR-DONE") return 0 except Exception as e: exc = traceback.format_exception_only(type(e), e)[0] sys.stderr.write(exc) logging.error(traceback.format_exc()) return 1 if __name__ == '__main__': # initialize the return code with a weird value ret = 10 if ('CLOUD_DEBUG' in os.environ) and os.environ['CLOUD_DEBUG']: logging.basicConfig(filename='@working_dir@/aws_cloud_driver.log', level=logging.DEBUG, filemode='a', format='%(asctime)-15s-%(process)d- %(levelname)s -%(message)s') else: logging.basicConfig(level=logging.CRITICAL) try: if len(sys.argv) < 3: sys.stderr.write("google_driver: invalid number of parameters\0.") ret = 1 fct=sys.argv[1] volume=sys.argv[2] if len(sys.argv) > 3: part=sys.argv[3] else: part='' if len(sys.argv) > 4: local_part=sys.argv[4] else: local_part='*None*' logging.info("--{1} {2} {3} {4} --".format(sys.argv[0], fct, volume, part, local_part)) bucket=os.environ['CLOUD_BUCKET'] cloud_path = "s3://{0}".format(bucket) access_key=os.environ['CLOUD_ACCESS_KEY'] secret_key=os.environ['CLOUD_SECRET_KEY'] region=os.environ['CLOUD_REGION'] protocol=os.environ['CLOUD_PROTOCOL'] uri_type=os.environ['CLOUD_URI_TYPE'] endpoint_url=os.environ['CLOUD_BLOB_ENDPOINT'] transfer_priority_switcher = { "0":"Expedited", "1":"Standard", "2":"Bulk" } t_prio = os.environ['CLOUD_TRANSFER_PRIORITY'] transfer_priority = transfer_priority_switcher[t_prio] transfer_retention_days = os.environ['CLOUD_TRANSFER_RETENTION_DAYS'] 
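    # CLOUD_OBJECTS_DEFAULT_TIER arrives from the Storage Daemon as a numeric
    # index; the table below maps it to the S3 storage-class name that
    # upload() passes to "aws s3 cp --storage-class".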
objects_default_tier_switcher = { "0":"STANDARD", "1":"STANDARD_IA", "2":"INTELLIGENT_TIERING", "3":"ONEZONE_IA", "4":"GLACIER_IR", "5":"GLACIER", "6":"DEEP_ARCHIVE", "7":"REDUCED_REDUNDANCY" } t_tier_idx = os.environ['CLOUD_OBJECTS_DEFAULT_TIER'] objects_default_tier = objects_default_tier_switcher[t_tier_idx] os.environ['AWS_DEFAULT_REGION'] = region os.environ['AWS_ACCESS_KEY_ID'] = access_key os.environ['AWS_SECRET_ACCESS_KEY'] = secret_key logging.info("bucket {0}, cloud_path {1}, access_key {2}, secret_key {3}, region {4}, protocol {5}, uri_type {6}, transfer_prio {7}, tranfer_ret {8}, default_tier {9}, endpoint_url {10}".format(bucket, cloud_path, access_key, "XXX", region, protocol, uri_type, transfer_priority, transfer_retention_days, objects_default_tier, endpoint_url)) switcher = { "vol_ls":vol_ls, "ls":ls, "download":download, "delete":delete, "upload":upload, "move":move, "wait_on_restore":wait_on_restore } if sys.argv[1] in switcher: ret = switcher[sys.argv[1]]() if ret is 0: logging.info("{0} returned {1}".format(fct, ret)) else: logging.error("{0} returned {1}".format(fct, ret)) else: sys.stderr.write('unsupported function {0}\n'.format(sys.argv[1])) ret = 3 except Exception as e: exc = traceback.format_exception_only(type(e), e)[0] sys.stderr.write(exc) logging.error(traceback.format_exc()) ret = 4 #only once outside of the try-catch statement sys.exit(ret) bacula-15.0.3/scripts/Makefile.in0000755000175000017500000001370414771010173016426 0ustar bsbuildbsbuild# # Copyright (C) 2000-2023 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # @MCOMMON@ working_dir=@working_dir@ srcdir = @srcdir@ VPATH = @srcdir@ .PATH: @srcdir@ topdir = .. thisdir = scripts CYTHON=@CYTHON@ CYTHON_LIBS=@CYTHON_LIBS@ CYTHON_INC=@CYTHON_INC@ first_rule: all dummy: EXTRA_INSTALL_SCRIPTS=@EXTRA_INSTALL_SCRIPTS@ MKDIR = $(topdir)/autoconf/mkinstalldirs #------------------------------------------------------------------------- all: Makefile depend: #------------------------------------------------------------------------- installdirs: $(MKDIR) $(DESTDIR)$(sysconfdir) $(MKDIR) $(DESTDIR)$(sbindir) $(MKDIR) $(DESTDIR)$(scriptdir) $(MKDIR) $(DESTDIR)$(mandir) install: installdirs $(INSTALL_SCRIPT) bconsole $(DESTDIR)$(scriptdir)/bconsole $(INSTALL_SCRIPT) bacula $(DESTDIR)$(scriptdir)/bacula $(INSTALL_SCRIPT) bacula_config $(DESTDIR)$(scriptdir)/bacula_config $(INSTALL_SCRIPT) bacula $(DESTDIR)$(sbindir)/bacula $(INSTALL_SCRIPT) tapealert $(DESTDIR)$(scriptdir)/tapealert $(INSTALL_SCRIPT) isworm $(DESTDIR)$(scriptdir)/isworm $(INSTALL_SCRIPT) bacula-ctl-dir $(DESTDIR)$(scriptdir)/bacula-ctl-dir $(INSTALL_SCRIPT) bacula-ctl-fd $(DESTDIR)$(scriptdir)/bacula-ctl-fd $(INSTALL_SCRIPT) bacula-ctl-sd $(DESTDIR)$(scriptdir)/bacula-ctl-sd $(INSTALL_SCRIPT) get_malware_abuse.ch $(DESTDIR)$(sbindir)/get_malware_abuse.ch $(INSTALL_SCRIPT) md5tobase64.py $(DESTDIR)$(sbindir)/md5tobase64.py @if test -f ${DESTDIR}${scriptdir}/mtx-changer; then \ echo " ==> Saving existing mtx-changer to mtx-changer.old"; \ $(MV) -f ${DESTDIR}${scriptdir}/mtx-changer ${DESTDIR}${scriptdir}/mtx-changer.old; \ fi $(INSTALL_SCRIPT) mtx-changer $(DESTDIR)$(scriptdir)/mtx-changer @if test -f ${DESTDIR}${scriptdir}/mtx-changer.conf; then \ echo " ==> Installing mtx-changer.conf to mtx-changer.conf.new"; \ $(INSTALL_DATA) mtx-changer.conf $(DESTDIR)$(scriptdir)/mtx-changer.conf.new; \ else \ $(INSTALL_DATA) mtx-changer.conf $(DESTDIR)$(scriptdir)/mtx-changer.conf; \ fi @if test -f ${DESTDIR}${scriptdir}/disk-changer; then \ echo " 
==> Saving existing disk-changer to disk-changer.old"; \ $(MV) -f ${DESTDIR}${scriptdir}/disk-changer ${DESTDIR}${scriptdir}/disk-changer.old; \ fi $(INSTALL_SCRIPT) disk-changer $(DESTDIR)$(scriptdir)/disk-changer $(INSTALL_DATA) btraceback.gdb $(DESTDIR)$(scriptdir)/btraceback.gdb $(INSTALL_DATA) btraceback.dbx $(DESTDIR)$(scriptdir)/btraceback.dbx $(INSTALL_DATA) btraceback.mdb $(DESTDIR)$(scriptdir)/btraceback.mdb @if test -f ${DESTDIR}${scriptdir}/baculabackupreport; then \ echo " ==> Saving existing baculabackupreport to baculabackupreport.old"; \ $(MV) -f ${DESTDIR}${scriptdir}/baculabackupreport ${DESTDIR}${scriptdir}/baculabackupreport.old; \ fi $(INSTALL_SCRIPT) baculabackupreport $(DESTDIR)$(scriptdir)/baculabackupreport $(INSTALL_SCRIPT) bacula-tray-monitor.desktop $(DESTDIR)$(scriptdir)/bacula-tray-monitor.desktop chmod 0644 $(DESTDIR)$(scriptdir)/btraceback.gdb \ $(DESTDIR)$(scriptdir)/btraceback.dbx \ $(DESTDIR)$(scriptdir)/btraceback.mdb $(INSTALL_SCRIPT) btraceback $(DESTDIR)$(sbindir)/btraceback @if test -f ${DESTDIR}${scriptdir}/key-manager.py; then \ echo " ==> Saving existing key-manager.py to key-manager.py.old"; \ $(MV) -f ${DESTDIR}${scriptdir}/key-manager.py ${DESTDIR}${scriptdir}/key-manager.py.old; \ fi $(INSTALL_SCRIPT) key-manager.py $(DESTDIR)$(scriptdir)/key-manager.py $(INSTALL_SCRIPT) install-key-manager.sh $(DESTDIR)$(scriptdir)/install-key-manager.sh uninstall: (cd $(DESTDIR)$(scriptdir); $(RMF) bconsole) (cd $(DESTDIR)$(scriptdir); $(RMF) bacula) (cd $(DESTDIR)$(scriptdir); $(RMF) bacula_config) (cd $(DESTDIR)$(sbindir); $(RMF) bacula) (cd $(DESTDIR)$(sbindir); $(RMF) tapealert) (cd $(DESTDIR)$(scriptdir); $(RMF) baculabackupreport) (cd $(DESTDIR)$(scriptdir); $(RMF) bacula-ctl-dir) (cd $(DESTDIR)$(scriptdir); $(RMF) bacula-ctl-fd) (cd $(DESTDIR)$(scriptdir); $(RMF) bacula-ctl-sd) (cd $(DESTDIR)$(scriptdir); $(RMF) fd) (cd $(DESTDIR)$(scriptdir); $(RMF) mtx-changer) (cd $(DESTDIR)$(scriptdir); $(RMF) disk-changer) (cd $(DESTDIR)$(scriptdir); $(RMF) dvd-handler) (cd $(DESTDIR)$(scriptdir); $(RMF) btraceback.gdb) (cd $(DESTDIR)$(scriptdir); $(RMF) btraceback.dbx) (cd $(DESTDIR)$(scriptdir); $(RMF) btraceback.mdb) (cd $(DESTDIR)$(scriptdir); $(RMF) breload) (cd $(DESTDIR)$(sbindir); $(RMF) btraceback) (cd $(DESTDIR)$(sbindir); $(RMF) generic_cloud_driver) (cd $(DESTDIR)$(sbindir); $(RMF) aws_cloud_driver) (cd $(DESTDIR)$(scriptdir); $(RMF) key-manager.py) (cd $(DESTDIR)$(scriptdir); $(RMF) install-key-manager.sh) aws_cloud_driver.C: aws_cloud_driver $(CYTHON) --embed -o aws_cloud_driver.C aws_cloud_driver aws_cloud_driver.bin: aws_cloud_driver.C $(CC) -o aws_cloud_driver.bin aws_cloud_driver.C $(CYTHON_INC) $(CYTHON_LIBS) strip aws_cloud_driver.bin install-aws-cloud: aws_cloud_driver.bin $(INSTALL_SCRIPT) aws_cloud_driver.bin $(DESTDIR)$(plugindir)/aws_cloud_driver install-generic-cloud: $(INSTALL_SCRIPT) generic_cloud_driver $(DESTDIR)$(plugindir)/generic_cloud_driver install-regress-drivers: $(INSTALL_SCRIPT) aws_cloud_driver $(DESTDIR)$(plugindir)/aws_cloud_driver Makefile: Makefile.in cd $(topdir) \ && CONFIG_FILES=$(thisdir)/$@ CONFIG_HEADERS= $(SHELL) ./config.status chmod 755 bacula btraceback chmod 755 bacula-ctl-dir bacula-ctl-fd bacula-ctl-sd bacula_config chmod 755 disk-changer mtx-changer bconsole tapealert chmod 755 key-manager.py install-key-manager.sh Makefiles: $(SHELL) config.status chmod 755 bacula btraceback chmod 755 bacula-ctl-dir bacula-ctl-fd bacula-ctl-sd chmod 755 mtx-changer bconsole tapealert key-manager.py clean: @$(RMF) *~ 1 2 3 
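# Note: "clean" above only removes editor backups and scratch files; the
# generated scripts themselves (bacula, bconsole, mtx-changer, ...) are
# removed by "distclean" below.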
# clean for distribution distclean: clean @$(RMF) bacula fd btraceback @$(RMF) bacula-ctl-dir bacula-ctl-fd bacula-ctl-sd bacula_config @$(RMF) bconsole logrotate bacula.desktop @$(RMF) mtx-changer dvd-handler # ------------------------------------------------------------------------ bacula-15.0.3/scripts/bacula.in0000755000175000017500000000431614771010173016137 0ustar bsbuildbsbuild#! /bin/sh # # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2023 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. # # bacula This shell script takes care of starting and stopping # the bacula daemons. # # This is pretty much watered down version of the RedHat script # that works on Solaris as well as Linux, but it won't work everywhere. # # description: The Leading Open Source Backup Solution. # # All these are not *really* needed but it makes it # easier to "steal" this code for the development # environment where they are different. # SCRIPTDIR=@scriptdir@ # # Disable Glibc malloc checks, it doesn't help and it keeps from getting # good dumps MALLOC_CHECK_=0 export MALLOC_CHECK_ case "$1" in start) [ -x ${SCRIPTDIR}/bacula-ctl-sd ] && ${SCRIPTDIR}/bacula-ctl-sd $1 $2 [ -x ${SCRIPTDIR}/bacula-ctl-fd ] && ${SCRIPTDIR}/bacula-ctl-fd $1 $2 [ -x ${SCRIPTDIR}/bacula-ctl-dir ] && ${SCRIPTDIR}/bacula-ctl-dir $1 $2 ;; stop) # Stop the FD first so that SD will fail jobs and update catalog [ -x ${SCRIPTDIR}/bacula-ctl-fd ] && ${SCRIPTDIR}/bacula-ctl-fd $1 $2 [ -x ${SCRIPTDIR}/bacula-ctl-sd ] && ${SCRIPTDIR}/bacula-ctl-sd $1 $2 [ -x ${SCRIPTDIR}/bacula-ctl-dir ] && ${SCRIPTDIR}/bacula-ctl-dir $1 $2 ;; restart) $0 stop sleep 2 $0 start ;; status) [ -x ${SCRIPTDIR}/bacula-ctl-sd ] && ${SCRIPTDIR}/bacula-ctl-sd status [ -x ${SCRIPTDIR}/bacula-ctl-fd ] && ${SCRIPTDIR}/bacula-ctl-fd status [ -x ${SCRIPTDIR}/bacula-ctl-dir ] && ${SCRIPTDIR}/bacula-ctl-dir status ;; *) echo "Usage: $0 {start|stop|restart|status}" exit 1 ;; esac exit 0 bacula-15.0.3/scripts/bat.desktop.in0000644000175000017500000000047414771010173017124 0ustar bsbuildbsbuild[Desktop Entry] Name=Bacula Administration Tool Comment=Bacula Director Console Icon=/usr/share/pixmaps/bat_icon.png Exec=@sbindir@/bat -c @sysconfdir@/bat.conf Terminal=false Type=Application Encoding=UTF-8 StartupNotify=true X-Desktop-File-Install-Version=0.3 Categories=System;Application;Utility;X-Red-Hat-Base; bacula-15.0.3/scripts/manual_prune.pl0000755000175000017500000001527614771010173017412 0ustar bsbuildbsbuild#!/usr/bin/perl -w # # Copyright (C) 2000-2020 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # use strict; =head1 DESCRIPTION manual_prune.pl -- prune volumes =head2 USAGE manual_prune.pl [--bconsole=/path/to/bconsole] [--help] [--doprune] [--expired] [--fixerror] [--fileprune] This program when run will manually prune all Volumes that it finds in your Bacula catalog. It will respect all the Retention periods. manual_prune must have access to bconsole. It will execute bconsole from /opt/bacula/bin. 
If bconsole is in a different location, you must specify the path to it with the --bconsole=... option. If you do not add --doprune, you will see what the script proposes to do, but it will not prune. If you add --fixerror, it will change the status of any Volume that is marked Error to Used so that it will be pruned. If you add --expired, it will attempt to prune only those Volumes where the Volume Retention period has expired. If you use --fileprune, the script will prune the File and PathVisibility records itself, which is useful to avoid blocking Bacula during pruning. Adding --debug will print additional debug information. =head1 LICENSE # # Copyright (C) 2000-2020 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # =head1 VERSION 1.4 =cut use Getopt::Long qw/:config no_ignore_case/; use Pod::Usage; use File::Temp; my $help; my $do_prune; my $expired; my $debug; # set to your bconsole prog my $bconsole = "/opt/bacula/bin/bconsole"; my $do_fix; my $do_file_prune; GetOptions('help' => \$help, 'bconsole=s' => \$bconsole, 'expired' => \$expired, 'debug' => \$debug, 'fixerror' => \$do_fix, 'fileprune' => \$do_file_prune, 'doprune' => \$do_prune) || Pod::Usage::pod2usage(-exitval => 2, -verbose => 2) ; if ($help) { Pod::Usage::pod2usage(-exitval => 2, -verbose => 2) ; } if (! -x $bconsole) { die "Can't exec $bconsole, please specify --bconsole option $!"; } my @vol; my @vol_purged; # This fix can work with File based devices. Don't use it for Tape media if ($do_fix) { my ($fh, $file) = File::Temp::tempfile(); print $fh "sql SELECT VolumeName AS \"?vol?\" FROM Media WHERE VolStatus = 'Error'; "; close($fh); open(FP, "cat $file | $bconsole|") or die "Can't open $bconsole (ERR=$!), adjust your \$PATH"; while (my $l = <FP>) { if ($l =~ /^\s*\|\s*([\w\d:\. \-]+?)\s*\|/) { if ($debug) { print $l; } push @vol, $1; } } close(FP); unlink($file); if (scalar(@vol) > 0) { print "Will try to fix volume in Error: ", join(",", @vol), "\n"; open(FP, "|$bconsole") or die "Can't send commands to $bconsole"; print FP map { "update volume=$_ volstatus=Used\n" } @vol; close(FP); @vol = (); } } if ($do_file_prune) { my ($fh, $file) = File::Temp::tempfile(); if ($do_prune) { print $fh "sql BEGIN; CREATE TEMPORARY TABLE temp AS SELECT DISTINCT JobId FROM Job JOIN JobMedia USING (JobId) JOIN (SELECT Media.MediaId AS MediaId FROM Media WHERE VolStatus IN ('Full', 'Used') AND ( (Media.LastWritten) + interval '1 second' * (Media.VolRetention) ) < NOW()) AS M USING (MediaId) WHERE Job.JobFiles > 50000 AND Job.PurgedFiles=0; SELECT JobId FROM temp; DELETE FROM File WHERE JobId IN (SELECT JobId FROM temp); DELETE FROM PathVisibility WHERE JobId IN (SELECT JobId FROM temp); UPDATE Job SET PurgedFiles=1 WHERE JobId IN (SELECT JobId FROM temp); DROP TABLE temp; COMMIT; quit "; } else { print $fh "sql SELECT DISTINCT JobId FROM Job JOIN JobMedia USING (JobId) JOIN (SELECT Media.MediaId AS MediaId FROM Media WHERE VolStatus IN ('Full', 'Used') AND ( (Media.LastWritten) + interval '1 second' * (Media.VolRetention) ) < NOW()) AS M USING (MediaId) WHERE Job.JobFiles > 50000 AND Job.PurgedFiles=0; quit "; } close($fh); open(FP, "cat $file | $bconsole|") or die "Can't open $bconsole (ERR=$!), adjust your \$PATH"; while (my $l = <FP>) { if ($debug || !$do_prune) { print $l; } } close(FP); unlink($file); exit 0; } # TODO: Fix it for SQLite # works only for postgresql and MySQL at the moment # One of the two queries will fail, but that is not a problem if ($expired) { my ($fh, $file) = File::Temp::tempfile(); print $fh "sql SELECT Media.VolumeName AS volumename,
Media.LastWritten AS lastwritten, ( (Media.LastWritten) + interval '1 second' * (Media.VolRetention) ) AS expire FROM Media WHERE VolStatus IN ('Full', 'Used') AND ( (Media.LastWritten) + interval '1 second' * (Media.VolRetention) ) < NOW(); SELECT Media.VolumeName AS volumename, Media.LastWritten AS lastwritten, ( Media.LastWritten + Media.VolRetention ) AS expire FROM Media WHERE VolStatus IN ('Full', 'Used') AND ( Media.LastWritten + Media.VolRetention ) < NOW(); quit "; close($fh); open(FP, "cat $file | $bconsole|") or die "Can't open $bconsole (ERR=$!), adjust your \$PATH"; while (my $l = <FP>) { # | TestVolume001 | 2011-06-17 14:36:59 | 2011-06-17 14:37:00 if ($l =~ /^\s*\|\s*([\w\d:\. \-]+?)\s*\|\s*\d/) { if ($debug) { print $l; } push @vol, $1; } } close(FP); unlink($file); } else { open(FP, "echo list volumes | $bconsole|") or die "Can't open $bconsole (ERR=$!), adjust your \$PATH"; while (my $l = <FP>) { # | 1 | TestVolume001 | Used if ($l =~ /^\s*\|\s*[\d,]+\s*\|\s*([\w\d-]+)\s*\|\s*Used/) { push @vol, $1; } if ($l =~ /^\s*\|\s*[\d,]+\s*\|\s*([\w\d-]+)\s*\|\s*Full/) { push @vol, $1; } if ($l =~ /^\s*\|\s*[\d,]+\s*\|\s*([\w\d-]+)\s*\|\s*Purged/) { push @vol_purged, $1; } } close(FP); if ($? != 0) { system("echo list volumes | $bconsole"); die "bconsole returns a non zero status, please check that you can execute it"; } } if (!scalar(@vol)) { print "No Volume(s) found to prune.\n"; } else { if ($do_prune) { print "Attempting to prune ", join(",", @vol), "\n"; open(FP, "|$bconsole") or die "Can't send commands to $bconsole"; print FP map { "prune volume=$_ yes\n" } @vol; close(FP); } else { print "Would have attempted to prune ", join(",", @vol), "\n"; print "You can actually prune by specifying the --doprune option.\n" } } bacula-15.0.3/scripts/get_malware_abuse.ch0000755000175000017500000000466514771010173020345 0ustar bsbuildbsbuild#!/usr/bin/perl -w # Copyright (C) 2000-2022 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS use strict; use File::Basename; use Fcntl ':flock'; # Import LOCK_* constants ################################################################ # Script to update the Bacula malware database from abuse.ch # # Output: dest file # Return code: # -1: Error # 0: Should truncate the database and load the data # 1: No changes # 2: Should load the data (incremental changes) my $type = shift or die "Usage: $0 [MD5|SHA256] destfile"; my $dest = shift or die "Usage: $0 [MD5|SHA256] destfile"; my $level = "full"; # full or recent my $url; ################################################################ # We use a lock file to avoid updating the same database in parallel my $lock = "$dest.lk"; my $fh; open($fh, ">", $lock) or die "Unable to create lock file $lock $@"; flock($fh, LOCK_EX) or die "Unable to lock $lock $@"; ################################################################ # We check if the destination database file already exists # If yes, we check the date to see if we can update it or not if (-r $dest && -s $dest > 0 ) { my $last_mod_time = (stat($dest))[9]; # mtime my $now = scalar(time); if (($last_mod_time + 60*60) > $now) { print "$dest\n"; exit 1; # Nothing to do } elsif (($last_mod_time + 24*60*60) > $now) { $level = "recent"; } # We can adapt the level of the full } # Place where to find additional commands my $dir = dirname($0); if ($type eq "MD5") { $url = "https://bazaar.abuse.ch/export/txt/md5/$level/"; } elsif ($type eq "SHA256") { $url = "https://bazaar.abuse.ch/export/txt/sha256/$level/"; } else { die "Unknown algorithm $type.
Expecting MD5 or SHA256"; } if (! -x "$dir/md5tobase64.py") { die "Unable to find $dir/md5tobase64.py"; } my $uncompress = ""; if ($level eq "full") { $uncompress = "|gunzip"; } if ($ENV{REGRESS_MALWARE_URL}) { $url = $ENV{REGRESS_MALWARE_URL}; $uncompress = ""; } open(FP2, ">$dest") or die "Unable to open $dest $!"; open(FP, "curl --silent $url $uncompress | $dir/md5tobase64.py|") or die "Unable to download $url database"; while (my $l = <FP>) { print FP2 $l; } close(FP) or die "Unable to download $url database"; close(FP2); print "$dest\n"; if ($level eq "full") { exit 0; # Truncate + load } else { exit 2; # Load } bacula-15.0.3/scripts/devel_bacula.in0000755000175000017500000001402614771010173017315 0ustar bsbuildbsbuild#! /bin/sh # # Copyright (C) 2000-2022 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # # bacula This shell script takes care of starting and stopping # the bacula daemons. # # It runs on different ports than the production version, # using the current development environment. # # This is pretty much watered down version of the RedHat script # that works on Solaris as well as Linux, but it won't work everywhere. # # description: The Leading Open Source Backup Solution. # PSCMD="@PSCMD@" pwd=`pwd` BACDIRBIN=${pwd}/src/dird BACDIRCFG=${pwd}/src/dird BACFDBIN=${pwd}/src/filed BACFDCFG=${pwd}/src/filed BACSDBIN=${pwd}/src/stored BACSDCFG=${pwd}/src/stored PIDDIR=@piddir@ SUBSYSDIR=@subsysdir@ # Use non-production ports DIR_PORT=8101 FD_PORT=8102 SD_PORT=8103 DIR_USER=@dir_user@ DIR_GROUP=@dir_group@ FD_USER=@fd_user@ FD_GROUP=@fd_group@ SD_USER=@sd_user@ SD_GROUP=@sd_group@ # A function to stop a program. killproc() { RC=0 # Test syntax. if [ $# = 0 ]; then echo "Usage: killproc {program} {port} [signal]" return 1 fi notset=0 # check for third arg to be kill level if [ "$3" != "" ] ; then killlevel=$3 else notset=1 killlevel="-9" fi # Get base program name base=`basename $1` # Find pid. pid=`pidofproc $base $2` # Kill it. if [ "$pid" != "" ] ; then if [ "$notset" = "1" ] ; then if ps -p $pid>/dev/null 2>&1; then # TERM first, then KILL if not dead kill -TERM $pid 2>/dev/null sleep 1 if ps -p $pid >/dev/null 2>&1 ; then sleep 1 if ps -p $pid >/dev/null 2>&1 ; then sleep 3 if ps -p $pid >/dev/null 2>&1 ; then kill -KILL $pid 2>/dev/null fi fi fi fi ps -p $pid >/dev/null 2>&1 RC=$? [ $RC -eq 0 ] && failure "$base shutdown" || success "$base shutdown" # RC=$((! $RC)) # use specified level only else if ps -p $pid >/dev/null 2>&1; then kill $killlevel $pid 2>/dev/null RC=$? [ $RC -eq 0 ] && success "$base $killlevel" || failure "$base $killlevel" fi fi else failure "$base shutdown" fi # Remove pid file if any. if [ "$notset" = "1" ]; then rm -f ${PIDDIR}/$base.$2.pid fi return $RC } # A function to find the pid of a program. pidofproc() { pid="" # Test syntax. if [ $# = 0 ] ; then echo "Usage: pidofproc {program}" return 1 fi # Get base program name base=`basename $1` # First try PID file if [ -f ${PIDDIR}/$base.$2.pid ] ; then pid=`head -1 ${PIDDIR}/$base.$2.pid` if [ "$pid" != "" ] ; then echo $pid return 0 fi fi # Next try "pidof" if [ -x /sbin/pidof ] ; then pid=`/sbin/pidof $1` fi if [ "$pid" != "" ] ; then echo $pid return 0 fi # Finally try to extract it from ps ${PSCMD} | grep $1 | awk '{ print $1 }' | tr '\n' ' ' return 0 } status() { # Test syntax.
if [ $# = 0 ] ; then echo "Usage: status {program}" return 1 fi # Get base program name base=`basename $1` # First try "pidof" if [ -x /sbin/pidof ] ; then pid=`/sbin/pidof $1` fi if [ "$pid" != "" ] ; then echo "$base (pid $pid) is running..." return 0 else pid=`${PSCMD} | awk 'BEGIN { prog=ARGV[1]; ARGC=1 } { if ((prog == $2) || (("(" prog ")") == $2) || (("[" prog "]") == $2) || ((prog ":") == $2)) { print $1 ; exit 0 } }' $1` if [ "$pid" != "" ] ; then echo "$base (pid $pid) is running..." return 0 fi fi # Next try the PID files if [ -f ${PIDDIR}/$base.$2.pid ] ; then pid=`head -1 ${PIDDIR}/$base.$2.pid` if [ "$pid" != "" ] ; then echo "$base dead but pid file exists" return 1 fi fi # See if the subsys lock exists if [ -f ${SUBSYSDIR}/$base ] ; then echo "$base dead but subsys locked" return 2 fi echo "$base is stopped" return 3 } success() { return 0 } failure() { rc=$? return $rc } case "$1" in start) [ -x ${BACSDBIN}/bacula-sd ] && { echo "Starting the Storage daemon" OPTIONS='' if [ "${SD_USER}" != '' ]; then OPTIONS="${OPTIONS} -u ${SD_USER}" fi if [ "${SD_GROUP}" != '' ]; then OPTIONS="${OPTIONS} -g ${SD_GROUP}" fi ${BACSDBIN}/bacula-sd $2 ${OPTIONS} -v -c ${BACSDCFG}/stored.conf } [ -x ${BACFDBIN}/bacula-fd ] && { echo "Starting the File daemon" OPTIONS='' if [ "${FD_USER}" != '' ]; then OPTIONS="${OPTIONS} -u ${FD_USER}" fi if [ "${FD_GROUP}" != '' ]; then OPTIONS="${OPTIONS} -g ${FD_GROUP}" fi ${BACFDBIN}/bacula-fd $2 ${OPTIONS} -v -c ${BACFDCFG}/filed.conf } [ -x ${BACDIRBIN}/bacula-dir ] && { sleep 2 echo "Starting the Director daemon" OPTIONS='' if [ "${DIR_USER}" != '' ]; then OPTIONS="${OPTIONS} -u ${DIR_USER}" fi if [ "${DIR_GROUP}" != '' ]; then OPTIONS="${OPTIONS} -g ${DIR_GROUP}" fi if [ "${VALGRIND}" != '' ]; then valgrind --leak-check=full ${BACDIRBIN}/bacula-dir $2 ${OPTIONS} -v -c ${BACDIRCFG}/dird.conf else ${BACDIRBIN}/bacula-dir $2 ${OPTIONS} -v -c ${BACDIRCFG}/dird.conf fi } ;; stop) # Stop the FD first so that SD will fail jobs and update catalog [ -x ${BACFDBIN}/bacula-fd ] && { echo "Stopping the File daemon" killproc ${BACFDBIN}/bacula-fd ${FD_PORT} } [ -x ${BACSDBIN}/bacula-sd ] && { echo "Stopping the Storage daemon" killproc ${BACSDBIN}/bacula-sd ${SD_PORT} } [ -x ${BACDIRBIN}/bacula-dir ] && { echo "Stopping the Director daemon" killproc ${BACDIRBIN}/bacula-dir ${DIR_PORT} } echo ;; restart) $0 stop sleep 5 $0 start ;; status) [ -x ${BACSDBIN}/bacula-sd ] && status ${BACSDBIN}/bacula-sd ${SD_PORT} [ -x ${BACFDBIN}/bacula-fd ] && status ${BACFDBIN}/bacula-fd ${FD_PORT} [ -x ${BACDIRBIN}/bacula-dir ] && status ${BACDIRBIN}/bacula-dir ${DIR_PORT} ;; *) echo "Usage: $0 {start|stop|restart|status}" exit 1 ;; esac exit 0 bacula-15.0.3/scripts/getgitcommit0000755000175000017500000000031114771010173016766 0ustar bsbuildbsbuild#!/bin/sh GIT=$(which git) if [ "x$GIT" != "x" ] then COMMIT=$(git log --pretty=format:%h -n 1 2>/dev/null) if [ "x$COMMIT" != "x" ] then echo "$COMMIT" fi else echo "Unknown" fi bacula-15.0.3/scripts/bacula-ctl-dir.in0000644000175000017500000001344514771010173017473 0ustar bsbuildbsbuild#! /bin/sh # # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2023 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. 
# # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. # # bacula-ctl-dir This shell script takes care of starting and stopping # the bacula Director daemon # # This is pretty much watered down version of the RedHat script # that works on Solaris as well as Linux, but it won't work everywhere. # # description: The Leading Open Source Backup Solution. # # Copyright (C) 2000-2022 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # PSCMD="@PSCMD@" PS="ps" # # On Solaris, you may need to use nawk, or alternatively, # add the GNU binaries to your path, such as /usr/xpg4/bin # AWK=@AWK@ # All these are not *really* needed but it makes it # easier to "steal" this code for the development # environment where they are different. # BACDIRBIN=@sbindir@ BACDIRCFG=@sysconfdir@ PIDDIR=@piddir@ SUBSYSDIR=@subsysdir@ DIR_PORT=@dir_port@ DIR_USER=@dir_user@ DIR_GROUP=@dir_group@ Bacula="@BACULA@" PIDOF=@PIDOF@ # A function to stop a program. killproc() { RC=0 # Test syntax. if [ $# = 0 ]; then echo "Usage: killproc {program} {port} [signal]" return 1 fi notset=0 # check for third arg to be kill level if [ "$3" != "" ] ; then killlevel=$3 else notset=1 killlevel="-9" fi # Get base program name base=`basename $1` # Find pid. pid=`pidofproc $base $2` # Kill it. if [ "$pid" != "" ] ; then if [ "$notset" = "1" ] ; then if ${PS} -p "$pid">/dev/null 2>&1; then # TERM first, then KILL if not dead kill -TERM $pid 2>/dev/null sleep 1 if ${PS} -p "$pid" >/dev/null 2>&1 ; then sleep 1 if ${PS} -p "$pid" >/dev/null 2>&1 ; then sleep 3 if ${PS} -p "$pid" >/dev/null 2>&1 ; then kill -KILL $pid 2>/dev/null fi fi fi fi ${PS} -p "$pid" >/dev/null 2>&1 RC=$? [ $RC -eq 0 ] && failure "$base shutdown" || success "$base shutdown" # RC=$((! $RC)) # use specified level only else if ${PS} -p "$pid" >/dev/null 2>&1; then kill $killlevel $pid 2>/dev/null RC=$? [ $RC -eq 0 ] && success "$base $killlevel" || failure "$base $killlevel" fi fi else failure "$base shutdown" fi # Remove pid file if any. if [ "$notset" = "1" ]; then rm -f ${PIDDIR}/$base.$2.pid fi return $RC } # A function to find the pid of a program. pidofproc() { pid="" # Test syntax. if [ $# = 0 ] ; then echo "Usage: pidofproc {program}" return 1 fi # Get base program name base=`basename $1` # First try PID file if [ -f ${PIDDIR}/$base.$2.pid ] ; then pid=`head -n 1 ${PIDDIR}/$base.$2.pid` if [ "$pid" != "" ] ; then echo $pid return 0 fi fi # Next try "pidof" if [ -x ${PIDOF} ] ; then pid=`${PIDOF} $1` fi if [ "$pid" != "" ] ; then echo $pid return 0 fi # Finally try to extract it from ps pid=`${PSCMD} | grep $1 | ${AWK} '{ print $1 }' | tr '\n' ' '` echo $pid return 0 } status() { pid="" # Test syntax. if [ $# = 0 ] ; then echo "Usage: status {program} {port}" return 1 fi # Get base program name base=`basename $1` # First try "pidof" if [ -x ${PIDOF} ] ; then pid=`${PIDOF} $1` fi if [ "$pid" != "" ] ; then echo "$base (pid $pid) is running..." return 0 else pid=`${PSCMD} | ${AWK} 'BEGIN { prog=ARGV[1]; ARGC=1 } { if ((prog == $2) || (("(" prog ")") == $2) || (("[" prog "]") == $2) || ((prog ":") == $2)) { print $1 ; exit 0 } }' $1` if [ "$pid" != "" ] ; then echo "$base (pid $pid) is running..." 
return 0 fi fi # Next try the PID files if [ -f ${PIDDIR}/$base.$2.pid ] ; then pid=`head -n 1 ${PIDDIR}/$base.$2.pid` if [ "$pid" != "" ] ; then echo "$base dead but pid file exists" return 1 fi fi # See if the subsys lock exists if [ -f ${SUBSYSDIR}/$base ] ; then echo "$base dead but subsys locked" return 2 fi echo "$base is stopped" return 3 } success() { return 0 } failure() { rc=$? return $rc } OS=`uname -s` # if /lib/tls exists, force Bacula to use the glibc pthreads instead if [ -d "/lib/tls" -a $OS = "Linux" -a `uname -r | cut -c1-3` = "2.4" ] ; then export LD_ASSUME_KERNEL=2.4.19 fi case "$1" in start) [ -x ${BACDIRBIN}/bacula-dir ] && { echo "Starting the $Bacula Director daemon" OPTIONS='' if [ "${DIR_USER}" != '' ]; then OPTIONS="${OPTIONS} -u ${DIR_USER}" fi if [ "${DIR_GROUP}" != '' ]; then OPTIONS="${OPTIONS} -g ${DIR_GROUP}" fi if [ "x${VALGRIND_DIR}" = "x1" ]; then valgrind --leak-check=full ${BACDIRBIN}/bacula-dir $2 $3 ${OPTIONS} -v -c ${BACDIRCFG}/bacula-dir.conf else ${BACDIRBIN}/bacula-dir $2 $3 ${OPTIONS} -v -c ${BACDIRCFG}/bacula-dir.conf fi sleep 1 } ;; stop) [ -x ${BACDIRBIN}/bacula-dir ] && { echo "Stopping the $Bacula Director daemon" killproc ${BACDIRBIN}/bacula-dir ${DIR_PORT} $2 } ;; restart) $0 stop sleep 5 $0 start ;; status) [ -x ${BACDIRBIN}/bacula-dir ] && status ${BACDIRBIN}/bacula-dir ${DIR_PORT} ;; *) echo "Usage: $0 {start|stop|restart|status}" exit 1 ;; esac exit 0 bacula-15.0.3/scripts/md5tobase64.py0000755000175000017500000000256714771010173016775 0ustar bsbuildbsbuild#!/usr/bin/env python3 # Copyright (C) 2000-2022 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # Script used to convert the md5sum output to the md5 bacula format import sys import argparse import re #import codecs import base64 md5_re=re.compile('[0-9A-Fa-f]{32}') parser=argparse.ArgumentParser(description='Convert md5 in Hexa into base64.') parser.add_argument('file', metavar='FILENAME', help="input file. If no file use STDIN", nargs='*') parser.add_argument('--keep-padding', action='store_true', help="keep the '=' at the end if any") args=parser.parse_args() def handle_file(input_file, remove_padding): nb=0 for line in input_file: line=line.rstrip() if md5_re.match(line): # don't use the line below that does mime encoding (split at 76c & add a \n) # b64=codecs.encode(codecs.decode(line, 'hex'), 'base64').decode() b64=base64.b64encode(bytes.fromhex(line)).decode() if remove_padding: b64=b64.rstrip('=') print(b64) nb = nb + 1 return nb nb=0 remove_padding=not args.keep_padding if args.file: for filename in args.file: input_file=open(filename) nb += handle_file(input_file, remove_padding) else: # use stdin nb += handle_file(sys.stdin, remove_padding) # Raise an error if we have converted nothing sys.exit(nb == 0) bacula-15.0.3/configure0000755000175000017500000367054714771010173014617 0ustar bsbuildbsbuild#! /bin/sh # Guess values for system-dependent variables and create Makefiles. # Generated by GNU Autoconf 2.69 for bacula 15.0.0. # # # Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc. # # # This configure script is free software; the Free Software Foundation # gives unlimited permission to copy, distribute and modify it. ## -------------------- ## ## M4sh Initialization. ## ## -------------------- ## # Be more Bourne compatible DUALCASE=1; export DUALCASE # for MKS sh if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which # is contrary to our usage. 
Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in #( *posix*) : set -o posix ;; #( *) : ;; esac fi as_nl=' ' export as_nl # Printing a long string crashes Solaris 7 /usr/bin/printf. as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo # Prefer a ksh shell builtin over an external printf program on Solaris, # but without wasting forks for bash or zsh. if test -z "$BASH_VERSION$ZSH_VERSION" \ && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='print -r --' as_echo_n='print -rn --' elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='printf %s\n' as_echo_n='printf %s' else if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' as_echo_n='/usr/ucb/echo -n' else as_echo_body='eval expr "X$1" : "X\\(.*\\)"' as_echo_n_body='eval arg=$1; case $arg in #( *"$as_nl"*) expr "X$arg" : "X\\(.*\\)$as_nl"; arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; esac; expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" ' export as_echo_n_body as_echo_n='sh -c $as_echo_n_body as_echo' fi export as_echo_body as_echo='sh -c $as_echo_body as_echo' fi # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || PATH_SEPARATOR=';' } fi # IFS # We need space, tab and new line, in precisely that order. Quoting is # there to prevent editors from complaining about space-tab. # (If _AS_PATH_WALK were called with IFS unset, it would disable word # splitting by setting IFS to empty value.) IFS=" "" $as_nl" # Find who we are. Look in the path if we contain no directory separator. as_myself= case $0 in #(( *[\\/]* ) as_myself=$0 ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break done IFS=$as_save_IFS ;; esac # We did not find ourselves, most probably we were run as `sh COMMAND' # in which case we are not to be found in the path. if test "x$as_myself" = x; then as_myself=$0 fi if test ! -f "$as_myself"; then $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 exit 1 fi # Unset variables that we do not need and which cause bugs (e.g. in # pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" # suppresses any "Segmentation fault" message there. '((' could # trigger a bug in pdksh 5.2.14. for as_var in BASH_ENV ENV MAIL MAILPATH do eval test x\${$as_var+set} = xset \ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : done PS1='$ ' PS2='> ' PS4='+ ' # NLS nuisances. LC_ALL=C export LC_ALL LANGUAGE=C export LANGUAGE # CDPATH. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH # Use a proper internal environment variable to ensure we don't fall # into an infinite loop, continuously re-executing ourselves. if test x"${_as_can_reexec}" != xno && test "x$CONFIG_SHELL" != x; then _as_can_reexec=no; export _as_can_reexec; # We cannot yet assume a decent shell, so we have to provide a # neutralization value for shells without unset; and this also # works around shells that cannot unset nonexistent variables. # Preserve -v and -x to the replacement shell. 
BASH_ENV=/dev/null ENV=/dev/null (unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV case $- in # (((( *v*x* | *x*v* ) as_opts=-vx ;; *v* ) as_opts=-v ;; *x* ) as_opts=-x ;; * ) as_opts= ;; esac exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} # Admittedly, this is quite paranoid, since all the known shells bail # out after a failed `exec'. $as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 as_fn_exit 255 fi # We don't want this to propagate to other subprocesses. { _as_can_reexec=; unset _as_can_reexec;} if test "x$CONFIG_SHELL" = x; then as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which # is contrary to our usage. Disable this feature. alias -g '\${1+\"\$@\"}'='\"\$@\"' setopt NO_GLOB_SUBST else case \`(set -o) 2>/dev/null\` in #( *posix*) : set -o posix ;; #( *) : ;; esac fi " as_required="as_fn_return () { (exit \$1); } as_fn_success () { as_fn_return 0; } as_fn_failure () { as_fn_return 1; } as_fn_ret_success () { return 0; } as_fn_ret_failure () { return 1; } exitcode=0 as_fn_success || { exitcode=1; echo as_fn_success failed.; } as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; } as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; } as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; } if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then : else exitcode=1; echo positional parameters were not saved. fi test x\$exitcode = x0 || exit 1 test -x / || exit 1" as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" && test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1 test \$(( 1 + 1 )) = 2 || exit 1 test -n \"\${ZSH_VERSION+set}\${BASH_VERSION+set}\" || ( ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' ECHO=\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO ECHO=\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO PATH=/empty FPATH=/empty; export PATH FPATH test \"X\`printf %s \$ECHO\`\" = \"X\$ECHO\" \\ || test \"X\`print -r -- \$ECHO\`\" = \"X\$ECHO\" ) || exit 1" if (eval "$as_required") 2>/dev/null; then : as_have_required=yes else as_have_required=no fi if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then : else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR as_found=false for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. as_found=: case $as_dir in #( /*) for as_base in sh bash ksh sh5; do # Try only shells that exist, to save several forks. 
as_shell=$as_dir/$as_base if { test -f "$as_shell" || test -f "$as_shell.exe"; } && { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then : CONFIG_SHELL=$as_shell as_have_required=yes if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then : break 2 fi fi done;; esac as_found=false done $as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } && { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then : CONFIG_SHELL=$SHELL as_have_required=yes fi; } IFS=$as_save_IFS if test "x$CONFIG_SHELL" != x; then : export CONFIG_SHELL # We cannot yet assume a decent shell, so we have to provide a # neutralization value for shells without unset; and this also # works around shells that cannot unset nonexistent variables. # Preserve -v and -x to the replacement shell. BASH_ENV=/dev/null ENV=/dev/null (unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV case $- in # (((( *v*x* | *x*v* ) as_opts=-vx ;; *v* ) as_opts=-v ;; *x* ) as_opts=-x ;; * ) as_opts= ;; esac exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} # Admittedly, this is quite paranoid, since all the known shells bail # out after a failed `exec'. $as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 exit 255 fi if test x$as_have_required = xno; then : $as_echo "$0: This script requires a shell more modern than all" $as_echo "$0: the shells that I found on your system." if test x${ZSH_VERSION+set} = xset ; then $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should" $as_echo "$0: be upgraded to zsh 4.3.4 or later." else $as_echo "$0: Please tell bug-autoconf@gnu.org about your system, $0: including any error possibly output before this $0: message. Then install a modern shell, or manually run $0: the script under such a shell if you do have one." fi exit 1 fi fi fi SHELL=${CONFIG_SHELL-/bin/sh} export SHELL # Unset more variables known to interfere with behavior of common tools. CLICOLOR_FORCE= GREP_OPTIONS= unset CLICOLOR_FORCE GREP_OPTIONS ## --------------------- ## ## M4sh Shell Functions. ## ## --------------------- ## # as_fn_unset VAR # --------------- # Portably unset VAR. as_fn_unset () { { eval $1=; unset $1;} } as_unset=as_fn_unset # as_fn_set_status STATUS # ----------------------- # Set $? to STATUS, without forking. as_fn_set_status () { return $1 } # as_fn_set_status # as_fn_exit STATUS # ----------------- # Exit the shell with STATUS, even in a "trap 0" or "set -e" context. as_fn_exit () { set +e as_fn_set_status $1 exit $1 } # as_fn_exit # as_fn_mkdir_p # ------------- # Create "$as_dir" as a directory, including parents if necessary. as_fn_mkdir_p () { case $as_dir in #( -*) as_dir=./$as_dir;; esac test -d "$as_dir" || eval $as_mkdir_p || { as_dirs= while :; do case $as_dir in #( *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( *) as_qdir=$as_dir;; esac as_dirs="'$as_qdir' $as_dirs" as_dir=`$as_dirname -- "$as_dir" || $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_dir" : 'X\(//\)[^/]' \| \ X"$as_dir" : 'X\(//\)$' \| \ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_dir" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` test -d "$as_dir" && break done test -z "$as_dirs" || eval "mkdir $as_dirs" } || test -d "$as_dir" || as_fn_error $? 
"cannot create directory $as_dir" } # as_fn_mkdir_p # as_fn_executable_p FILE # ----------------------- # Test if FILE is an executable regular file. as_fn_executable_p () { test -f "$1" && test -x "$1" } # as_fn_executable_p # as_fn_append VAR VALUE # ---------------------- # Append the text in VALUE to the end of the definition contained in VAR. Take # advantage of any shell optimizations that allow amortized linear growth over # repeated appends, instead of the typical quadratic growth present in naive # implementations. if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : eval 'as_fn_append () { eval $1+=\$2 }' else as_fn_append () { eval $1=\$$1\$2 } fi # as_fn_append # as_fn_arith ARG... # ------------------ # Perform arithmetic evaluation on the ARGs, and store the result in the # global $as_val. Take advantage of shells that can avoid forks. The arguments # must be portable across $(()) and expr. if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : eval 'as_fn_arith () { as_val=$(( $* )) }' else as_fn_arith () { as_val=`expr "$@" || test $? -eq 1` } fi # as_fn_arith # as_fn_error STATUS ERROR [LINENO LOG_FD] # ---------------------------------------- # Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are # provided, also output the error to LOG_FD, referencing LINENO. Then exit the # script with STATUS, using 1 if that was 0. as_fn_error () { as_status=$1; test $as_status -eq 0 && as_status=1 if test "$4"; then as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 fi $as_echo "$as_me: error: $2" >&2 as_fn_exit $as_status } # as_fn_error if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then as_basename=basename else as_basename=false fi if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then as_dirname=dirname else as_dirname=false fi as_me=`$as_basename -- "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| . 2>/dev/null || $as_echo X/"$0" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/ q } /^X\/\(\/\/\)$/{ s//\1/ q } /^X\/\(\/\).*/{ s//\1/ q } s/.*/./; q'` # Avoid depending upon Character Ranges. as_cr_letters='abcdefghijklmnopqrstuvwxyz' as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' as_cr_Letters=$as_cr_letters$as_cr_LETTERS as_cr_digits='0123456789' as_cr_alnum=$as_cr_Letters$as_cr_digits as_lineno_1=$LINENO as_lineno_1a=$LINENO as_lineno_2=$LINENO as_lineno_2a=$LINENO eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" && test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || { # Blame Lee E. McMahon (1931-1989) for sed's syntax. :-) sed -n ' p /[$]LINENO/= ' <$as_myself | sed ' s/[$]LINENO.*/&-/ t lineno b :lineno N :loop s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ t loop s/-\n.*// ' >$as_me.lineno && chmod +x "$as_me.lineno" || { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; } # If we had to re-execute with $CONFIG_SHELL, we're ensured to have # already done that, so ensure we don't try to do so again and fall # in an infinite loop. This has already happened in practice. _as_can_reexec=no; export _as_can_reexec # Don't try to exec as it changes $[0], causing all sort of problems # (the dirname of $[0] is not the place where we might find the # original and so on. 
Autoconf is especially sensitive to this). . "./$as_me.lineno" # Exit status is that of the last command. exit } ECHO_C= ECHO_N= ECHO_T= case `echo -n x` in #((((( -n*) case `echo 'xy\c'` in *c*) ECHO_T=' ';; # ECHO_T is single tab character. xy) ECHO_C='\c';; *) echo `echo ksh88 bug on AIX 6.1` > /dev/null ECHO_T=' ';; esac;; *) ECHO_N='-n';; esac rm -f conf$$ conf$$.exe conf$$.file if test -d conf$$.dir; then rm -f conf$$.dir/conf$$.file else rm -f conf$$.dir mkdir conf$$.dir 2>/dev/null fi if (echo >conf$$.file) 2>/dev/null; then if ln -s conf$$.file conf$$ 2>/dev/null; then as_ln_s='ln -s' # ... but there are two gotchas: # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. # In both cases, we have to default to `cp -pR'. ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || as_ln_s='cp -pR' elif ln conf$$.file conf$$ 2>/dev/null; then as_ln_s=ln else as_ln_s='cp -pR' fi else as_ln_s='cp -pR' fi rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file rmdir conf$$.dir 2>/dev/null if mkdir -p . 2>/dev/null; then as_mkdir_p='mkdir -p "$as_dir"' else test -d ./-p && rmdir ./-p as_mkdir_p=false fi as_test_x='test -x' as_executable_p=as_fn_executable_p # Sed expression to map a string onto a valid CPP name. as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" # Sed expression to map a string onto a valid variable name. as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" SHELL=${CONFIG_SHELL-/bin/sh} test -n "$DJDIR" || exec 7<&0 &1 # Name of the host. # hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status, # so uname gets run too. ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` # # Initializations. # ac_default_prefix=/usr/local ac_clean_files= ac_config_libobj_dir=. LIBOBJS= cross_compiling=no subdirs= MFLAGS= MAKEFLAGS= # Identity of this package. PACKAGE_NAME='bacula' PACKAGE_TARNAME='bacula' PACKAGE_VERSION='15.0.0' PACKAGE_STRING='bacula 15.0.0' PACKAGE_BUGREPORT='' PACKAGE_URL='' ac_unique_file="src/version.h" # Factoring default headers for most tests. 
ac_includes_default="\ #include #ifdef HAVE_SYS_TYPES_H # include #endif #ifdef HAVE_SYS_STAT_H # include #endif #ifdef STDC_HEADERS # include # include #else # ifdef HAVE_STDLIB_H # include # endif #endif #ifdef HAVE_STRING_H # if !defined STDC_HEADERS && defined HAVE_MEMORY_H # include # endif # include #endif #ifdef HAVE_STRINGS_H # include #endif #ifdef HAVE_INTTYPES_H # include #endif #ifdef HAVE_STDINT_H # include #endif #ifdef HAVE_UNISTD_H # include #endif" ac_header_list= ac_subst_vars='LTLIBOBJS COMPRESS_MANPAGES DISTVER DISTNAME MACOSX WIN32 PSCMD TAPEDRIVE SYSTEMD_UNITDIR OBJLIST WRAPLIBS WLDFLAGS WCFLAGS DLIB DINCLUDE DEBUG FDLIBS CAP_LIBS DIR_PLUGIN_DIR DIRPLUG_INSTALL_TARGET BPAM_LDAP_TARGET_INSTALL BPAM_LDAP_TARGET TOOLS_INSTALL FD_PLUGIN_INSTALL FD_PLUGIN_DIR docker_bin EXTRA_INSTALL_SCRIPTS PLUGIN_INSTALL_TARGET LDAP_LIBS LDAP_INC LDAP_LDFLAGS GPFS_CFLAGS XATTROBJS ACLOBJS ACSLS_BUILD_TARGET ACSLS_OS_DEFINE ACSLS_LIBDIR LZO_LIBS LZO_INC ANDROID_API ANDROID_QT_BUILD_LIBS ANDROID_QT_APK_MAKE ANDROID_QT_QMAKE AFS_LIBS AFS_CFLAGS ZLIBS S3_LIBS S3_INC CLOUD_INSTALL_TARGETS CLOUD_DRIVERS LIBOBJS X_EXTRA_LIBS X_LIBS X_PRE_LIBS X_CFLAGS XMKMF GETCONF EXTRA_LIBSD_SRCS EXTRA_FINDLIB_SRCS EXTRA_BLS_SRCS EXTRA_STORED_SRCS EXTRA_DIRD_SRCS EXTRA_LIB_SRCS EXTRA_FILED_SRCS EXTRA_CATS_SRCS SHARED_CATALOG_TARGETS DEFAULT_DB_TYPE DB_LIBS DB_BACKENDS uncomment_dbi SQLITE_BINDIR SQLITE_INCLUDE SQLITE_LIBS MYSQL_BINDIR MYSQL_INCLUDE MYSQL_LIBS POSTGRESQL_BINDIR POSTGRESQL_INCLUDE POSTGRESQL_LIBS CYTHON_INC CYTHON_LIBS CYTHON SBINPERM fd_group fd_user sd_group sd_user dir_group dir_user db_ssl_options db_port db_password db_user db_name mon_sd_password mon_fd_password mon_dir_password sd_password fd_password dir_password sd_port fd_port dir_port baseport subsysdir piddir smtp_host job_email dump_email plugindir logdir bsrdir scriptdir hostname basename archivedir working_dir IOKITLIBS OPENSSL_INC OPENSSL_LIBS TOKYOCABINET_INC TOKYOCABINET_LIBS ZSTD_INC ZSTD_LIBS CURL_INC CURL_LIBS READLINE_SRC CONS_LDFLAGS CONS_LIBS CONS_SRC CONS_OBJ CONS_INC STORED_DIR DIR_TOOLS DIRD_DIR ALL_DIRS STATIC_CONS STATIC_DIR STATIC_SD STATIC_FD TTOOL_LDFLAGS QWT QWT_LIB QWT_LDFLAGS QWT_INC BAT_DIR INTL_LIBTOOL_SUFFIX_PREFIX INTLOBJS GENCAT INSTOBJEXT DATADIRNAME CATOBJEXT USE_INCLUDED_LIBINTL BUILD_INCLUDED_LIBINTL INTLBISON HAVE_WPRINTF HAVE_SNPRINTF HAVE_ASPRINTF HAVE_POSIX_PRINTF GLIBC21 ALLOCA GLIBC2 POSUB LTLIBINTL LIBINTL INTLLIBS LTLIBICONV LIBICONV INTL_MACOSX_LIBS MSGMERGE XGETTEXT GMSGFMT MSGFMT USE_NLS MKINSTALLDIRS SET_MAKE HAVE_KFREEBSD_OS_FALSE HAVE_KFREEBSD_OS_TRUE HAVE_DARWIN_OS_FALSE HAVE_DARWIN_OS_TRUE HAVE_IRIX_OS_FALSE HAVE_IRIX_OS_TRUE HAVE_SGI_OS_FALSE HAVE_SGI_OS_TRUE HAVE_BSDI_OS_FALSE HAVE_BSDI_OS_TRUE HAVE_OPENBSD_OS_FALSE HAVE_OPENBSD_OS_TRUE HAVE_NETBSD_OS_FALSE HAVE_NETBSD_OS_TRUE HAVE_FREEBSD_OS_FALSE HAVE_FREEBSD_OS_TRUE HAVE_LINUX_OS_FALSE HAVE_LINUX_OS_TRUE HAVE_HPUX_OS_FALSE HAVE_HPUX_OS_TRUE HAVE_AIX_OS_FALSE HAVE_AIX_OS_TRUE HAVE_OSF1_OS_FALSE HAVE_OSF1_OS_TRUE HAVE_HURD_OS_FALSE HAVE_HURD_OS_TRUE HAVE_SUN_OS_FALSE HAVE_SUN_OS_TRUE INCLUDE_UNINSTALL_TARGET INCLUDE_INSTALL_TARGET QMAKE_LIBTOOL LIBTOOL_CLEAN_TARGET LIBTOOL_UNINSTALL_TARGET LIBTOOL_INSTALL_TARGET DEFAULT_SHARED_OBJECT_TYPE DEFAULT_ARCHIVE_TYPE DEFAULT_OBJECT_TYPE LIBADD_DL LT_DLPREOPEN LIBADD_DLD_LINK LIBADD_SHL_LOAD LIBADD_DLOPEN LT_DLLOADERS CXXCPP OTOOL64 OTOOL LIPO NMEDIT DSYMUTIL MANIFEST_TOOL RANLIB STRIP ac_ct_AR DLLTOOL OBJDUMP LN_S NM ac_ct_DUMPBIN DUMPBIN LD FGREP host_os host_vendor host_cpu host build_os 
build_vendor build_cpu build LIBTOOL CHECK_DMSG DO_CHECK_DMSG LOCAL_DEFS LOCAL_LDFLAGS LOCAL_CFLAGS LOCAL_LIBS MAKE_SHELL ARFLAGS AWK PIDOF GMAKE QMAKE PKG_CONFIG_LIBDIR PKG_CONFIG_PATH PKG_CONFIG DVDRWFORMAT DVDRWMEDIAINFO GROWISOFS PYTHON MKISOFS DD MTX OPENSSL AR TBL CMP ECHO SED CP REMOVE MV INSTALL_DATA INSTALL_SCRIPT INSTALL_PROGRAM EGREP GREP CPP ac_ct_CXX CXXFLAGS CXX OBJEXT EXEEXT ac_ct_CC CPPFLAGS LDFLAGS CFLAGS CC LIBBACFIND_LT_RELEASE LIBBACCATS_LT_RELEASE LIBBACSQL_LT_RELEASE LIBBACCFG_LT_RELEASE LIBBAC_LT_RELEASE KUBERNETES_IMAGE_VERSION LIBRSYNC_VERSION NDMP_VERSION JAVA_VERSION VIX_VERSION DEPKGS_VERSION DEPKGS_QT_VERSION BDB_VERSION post_host BACULA LSMDATE DATE VERSION FALSEPRG TRUEPRG WIN32TOPDIR WIN32MAINDIR WIN32BUILDDIR TOP_DIR BUILD_DIR target_alias host_alias build_alias LIBS ECHO_T ECHO_N ECHO_C DEFS mandir localedir libdir psdir pdfdir dvidir htmldir infodir docdir oldincludedir includedir runstatedir localstatedir sharedstatedir sysconfdir datadir datarootdir libexecdir sbindir bindir program_transform_name prefix exec_prefix PACKAGE_URL PACKAGE_BUGREPORT PACKAGE_STRING PACKAGE_VERSION PACKAGE_TARNAME PACKAGE_NAME PATH_SEPARATOR SHELL' ac_subst_files='MCOMMON' ac_user_opts=' enable_option_checking enable_check_dmsg enable_libtool enable_shared enable_static with_pic enable_fast_install with_gnu_ld with_sysroot enable_libtool_lock enable_includes enable_nls enable_rpath with_libiconv_prefix with_libintl_prefix with_included_gettext enable_bat enable_smartalloc enable_lockmgr enable_static_tools enable_static_fd enable_static_sd enable_static_dir enable_static_cons enable_client_only enable_build_dird enable_build_stored enable_conio enable_ipv6 enable_readline with_readline with_tcp_wrappers with_curl with_zstd with_openssl with_working_dir with_archivedir with_basename with_hostname with_scriptdir with_bsrdir with_logdir with_plugindir with_dump_email with_job_email with_smtp_host with_pid_dir with_subsys_dir with_baseport with_dir_password with_fd_password with_sd_password with_mon_dir_password with_mon_fd_password with_mon_sd_password with_db_name with_db_user with_db_password with_db_port with_db_ssl_options with_dir_user with_dir_group with_sd_user with_sd_group with_fd_user with_fd_group with_sbin_perm with_cython enable_batch_insert with_postgresql with_mysql with_embedded_mysql with_sqlite3 enable_bee enable_largefile with_x enable_s3 with_s3 with_aws enable_afs with_afsdir enable_lzo with_lzo enable_acsls enable_acl enable_xattr enable_gpfs with_gpfsdir with_ldap enable_antivirus_plugin enable_docker_plugin enable_kubernetes_plugin enable_cdp_plugin enable_totp_bpam enable_ldap_bpam with_systemd ' ac_precious_vars='build_alias host_alias target_alias CC CFLAGS LDFLAGS LIBS CPPFLAGS CXX CXXFLAGS CCC CPP PKG_CONFIG PKG_CONFIG_PATH PKG_CONFIG_LIBDIR CXXCPP XMKMF' # Initialize some variables set by options. ac_init_help= ac_init_version=false ac_unrecognized_opts= ac_unrecognized_sep= # The variables have the same names as the options, with # dashes changed to underlines. cache_file=/dev/null exec_prefix=NONE no_create= no_recursion= prefix=NONE program_prefix=NONE program_suffix=NONE program_transform_name=s,x,x, silent= site= srcdir= verbose= x_includes=NONE x_libraries=NONE # Installation directory options. # These are left unexpanded so users can "make install exec_prefix=/foo" # and all the variables that are supposed to be based on exec_prefix # by default will actually change. # Use braces instead of parens because sh, perl, etc. also accept them. 
# (The list follows the same order as the GNU Coding Standards.) bindir='${exec_prefix}/bin' sbindir='${exec_prefix}/sbin' libexecdir='${exec_prefix}/libexec' datarootdir='${prefix}/share' datadir='${datarootdir}' sysconfdir='${prefix}/etc' sharedstatedir='${prefix}/com' localstatedir='${prefix}/var' runstatedir='${localstatedir}/run' includedir='${prefix}/include' oldincludedir='/usr/include' docdir='${datarootdir}/doc/${PACKAGE_TARNAME}' infodir='${datarootdir}/info' htmldir='${docdir}' dvidir='${docdir}' pdfdir='${docdir}' psdir='${docdir}' libdir='${exec_prefix}/lib' localedir='${datarootdir}/locale' mandir='${datarootdir}/man' ac_prev= ac_dashdash= for ac_option do # If the previous option needs an argument, assign it. if test -n "$ac_prev"; then eval $ac_prev=\$ac_option ac_prev= continue fi case $ac_option in *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;; *=) ac_optarg= ;; *) ac_optarg=yes ;; esac # Accept the important Cygnus configure options, so we can diagnose typos. case $ac_dashdash$ac_option in --) ac_dashdash=yes ;; -bindir | --bindir | --bindi | --bind | --bin | --bi) ac_prev=bindir ;; -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) bindir=$ac_optarg ;; -build | --build | --buil | --bui | --bu) ac_prev=build_alias ;; -build=* | --build=* | --buil=* | --bui=* | --bu=*) build_alias=$ac_optarg ;; -cache-file | --cache-file | --cache-fil | --cache-fi \ | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) ac_prev=cache_file ;; -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) cache_file=$ac_optarg ;; --config-cache | -C) cache_file=config.cache ;; -datadir | --datadir | --datadi | --datad) ac_prev=datadir ;; -datadir=* | --datadir=* | --datadi=* | --datad=*) datadir=$ac_optarg ;; -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \ | --dataroo | --dataro | --datar) ac_prev=datarootdir ;; -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \ | --dataroot=* | --dataroo=* | --dataro=* | --datar=*) datarootdir=$ac_optarg ;; -disable-* | --disable-*) ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid feature name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "enable_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval enable_$ac_useropt=no ;; -docdir | --docdir | --docdi | --doc | --do) ac_prev=docdir ;; -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*) docdir=$ac_optarg ;; -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv) ac_prev=dvidir ;; -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*) dvidir=$ac_optarg ;; -enable-* | --enable-*) ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? 
"invalid feature name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "enable_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval enable_$ac_useropt=\$ac_optarg ;; -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ | --exec | --exe | --ex) ac_prev=exec_prefix ;; -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ | --exec=* | --exe=* | --ex=*) exec_prefix=$ac_optarg ;; -gas | --gas | --ga | --g) # Obsolete; use --with-gas. with_gas=yes ;; -help | --help | --hel | --he | -h) ac_init_help=long ;; -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) ac_init_help=recursive ;; -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) ac_init_help=short ;; -host | --host | --hos | --ho) ac_prev=host_alias ;; -host=* | --host=* | --hos=* | --ho=*) host_alias=$ac_optarg ;; -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht) ac_prev=htmldir ;; -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \ | --ht=*) htmldir=$ac_optarg ;; -includedir | --includedir | --includedi | --included | --include \ | --includ | --inclu | --incl | --inc) ac_prev=includedir ;; -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ | --includ=* | --inclu=* | --incl=* | --inc=*) includedir=$ac_optarg ;; -infodir | --infodir | --infodi | --infod | --info | --inf) ac_prev=infodir ;; -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) infodir=$ac_optarg ;; -libdir | --libdir | --libdi | --libd) ac_prev=libdir ;; -libdir=* | --libdir=* | --libdi=* | --libd=*) libdir=$ac_optarg ;; -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ | --libexe | --libex | --libe) ac_prev=libexecdir ;; -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ | --libexe=* | --libex=* | --libe=*) libexecdir=$ac_optarg ;; -localedir | --localedir | --localedi | --localed | --locale) ac_prev=localedir ;; -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*) localedir=$ac_optarg ;; -localstatedir | --localstatedir | --localstatedi | --localstated \ | --localstate | --localstat | --localsta | --localst | --locals) ac_prev=localstatedir ;; -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*) localstatedir=$ac_optarg ;; -mandir | --mandir | --mandi | --mand | --man | --ma | --m) ac_prev=mandir ;; -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) mandir=$ac_optarg ;; -nfp | --nfp | --nf) # Obsolete; use --without-fp. 
with_fp=no ;; -no-create | --no-create | --no-creat | --no-crea | --no-cre \ | --no-cr | --no-c | -n) no_create=yes ;; -no-recursion | --no-recursion | --no-recursio | --no-recursi \ | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) no_recursion=yes ;; -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ | --oldin | --oldi | --old | --ol | --o) ac_prev=oldincludedir ;; -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) oldincludedir=$ac_optarg ;; -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) ac_prev=prefix ;; -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) prefix=$ac_optarg ;; -program-prefix | --program-prefix | --program-prefi | --program-pref \ | --program-pre | --program-pr | --program-p) ac_prev=program_prefix ;; -program-prefix=* | --program-prefix=* | --program-prefi=* \ | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) program_prefix=$ac_optarg ;; -program-suffix | --program-suffix | --program-suffi | --program-suff \ | --program-suf | --program-su | --program-s) ac_prev=program_suffix ;; -program-suffix=* | --program-suffix=* | --program-suffi=* \ | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) program_suffix=$ac_optarg ;; -program-transform-name | --program-transform-name \ | --program-transform-nam | --program-transform-na \ | --program-transform-n | --program-transform- \ | --program-transform | --program-transfor \ | --program-transfo | --program-transf \ | --program-trans | --program-tran \ | --progr-tra | --program-tr | --program-t) ac_prev=program_transform_name ;; -program-transform-name=* | --program-transform-name=* \ | --program-transform-nam=* | --program-transform-na=* \ | --program-transform-n=* | --program-transform-=* \ | --program-transform=* | --program-transfor=* \ | --program-transfo=* | --program-transf=* \ | --program-trans=* | --program-tran=* \ | --progr-tra=* | --program-tr=* | --program-t=*) program_transform_name=$ac_optarg ;; -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd) ac_prev=pdfdir ;; -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*) pdfdir=$ac_optarg ;; -psdir | --psdir | --psdi | --psd | --ps) ac_prev=psdir ;; -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*) psdir=$ac_optarg ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil) silent=yes ;; -runstatedir | --runstatedir | --runstatedi | --runstated \ | --runstate | --runstat | --runsta | --runst | --runs \ | --run | --ru | --r) ac_prev=runstatedir ;; -runstatedir=* | --runstatedir=* | --runstatedi=* | --runstated=* \ | --runstate=* | --runstat=* | --runsta=* | --runst=* | --runs=* \ | --run=* | --ru=* | --r=*) runstatedir=$ac_optarg ;; -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) ac_prev=sbindir ;; -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ | --sbi=* | --sb=*) sbindir=$ac_optarg ;; -sharedstatedir | --sharedstatedir | --sharedstatedi \ | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ | --sharedst | --shareds | --shared | --share | --shar \ | --sha | --sh) ac_prev=sharedstatedir ;; -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ | --sharedst=* | 
--shareds=* | --shared=* | --share=* | --shar=* \ | --sha=* | --sh=*) sharedstatedir=$ac_optarg ;; -site | --site | --sit) ac_prev=site ;; -site=* | --site=* | --sit=*) site=$ac_optarg ;; -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) ac_prev=srcdir ;; -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) srcdir=$ac_optarg ;; -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ | --syscon | --sysco | --sysc | --sys | --sy) ac_prev=sysconfdir ;; -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) sysconfdir=$ac_optarg ;; -target | --target | --targe | --targ | --tar | --ta | --t) ac_prev=target_alias ;; -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) target_alias=$ac_optarg ;; -v | -verbose | --verbose | --verbos | --verbo | --verb) verbose=yes ;; -version | --version | --versio | --versi | --vers | -V) ac_init_version=: ;; -with-* | --with-*) ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid package name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "with_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval with_$ac_useropt=\$ac_optarg ;; -without-* | --without-*) ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid package name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "with_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval with_$ac_useropt=no ;; --x) # Obsolete; use --with-x. with_x=yes ;; -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ | --x-incl | --x-inc | --x-in | --x-i) ac_prev=x_includes ;; -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) x_includes=$ac_optarg ;; -x-libraries | --x-libraries | --x-librarie | --x-librari \ | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) ac_prev=x_libraries ;; -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) x_libraries=$ac_optarg ;; -*) as_fn_error $? "unrecognized option: \`$ac_option' Try \`$0 --help' for more information" ;; *=*) ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` # Reject names that are not valid shell variable names. case $ac_envvar in #( '' | [0-9]* | *[!_$as_cr_alnum]* ) as_fn_error $? "invalid variable name: \`$ac_envvar'" ;; esac eval $ac_envvar=\$ac_optarg export $ac_envvar ;; *) # FIXME: should be removed in autoconf 3.0. $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2 expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2 : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}" ;; esac done if test -n "$ac_prev"; then ac_option=--`echo $ac_prev | sed 's/_/-/g'` as_fn_error $? 
"missing argument to $ac_option" fi if test -n "$ac_unrecognized_opts"; then case $enable_option_checking in no) ;; fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;; *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;; esac fi # Check all directory arguments for consistency. for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ datadir sysconfdir sharedstatedir localstatedir includedir \ oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ libdir localedir mandir runstatedir do eval ac_val=\$$ac_var # Remove trailing slashes. case $ac_val in */ ) ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'` eval $ac_var=\$ac_val;; esac # Be sure to have absolute directory names. case $ac_val in [\\/$]* | ?:[\\/]* ) continue;; NONE | '' ) case $ac_var in *prefix ) continue;; esac;; esac as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val" done # There might be people who depend on the old broken behavior: `$host' # used to hold the argument of --host etc. # FIXME: To remove some day. build=$build_alias host=$host_alias target=$target_alias # FIXME: To remove some day. if test "x$host_alias" != x; then if test "x$build_alias" = x; then cross_compiling=maybe elif test "x$build_alias" != "x$host_alias"; then cross_compiling=yes fi fi ac_tool_prefix= test -n "$host_alias" && ac_tool_prefix=$host_alias- test "$silent" = yes && exec 6>/dev/null ac_pwd=`pwd` && test -n "$ac_pwd" && ac_ls_di=`ls -di .` && ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || as_fn_error $? "working directory cannot be determined" test "X$ac_ls_di" = "X$ac_pwd_ls_di" || as_fn_error $? "pwd does not report name of working directory" # Find the source files, if location was not specified. if test -z "$srcdir"; then ac_srcdir_defaulted=yes # Try the directory containing this script, then the parent directory. ac_confdir=`$as_dirname -- "$as_myself" || $as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_myself" : 'X\(//\)[^/]' \| \ X"$as_myself" : 'X\(//\)$' \| \ X"$as_myself" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_myself" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` srcdir=$ac_confdir if test ! -r "$srcdir/$ac_unique_file"; then srcdir=.. fi else ac_srcdir_defaulted=no fi if test ! -r "$srcdir/$ac_unique_file"; then test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." as_fn_error $? "cannot find sources ($ac_unique_file) in $srcdir" fi ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" ac_abs_confdir=`( cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg" pwd)` # When building in place, set srcdir=. if test "$ac_abs_confdir" = "$ac_pwd"; then srcdir=. fi # Remove unnecessary trailing slashes from srcdir. # Double slashes in file names in object file debugging info # mess up M-x gdb in Emacs. case $srcdir in */) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;; esac for ac_var in $ac_precious_vars; do eval ac_env_${ac_var}_set=\${${ac_var}+set} eval ac_env_${ac_var}_value=\$${ac_var} eval ac_cv_env_${ac_var}_set=\${${ac_var}+set} eval ac_cv_env_${ac_var}_value=\$${ac_var} done # # Report the --help message. # if test "$ac_init_help" = "long"; then # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. 
cat <<_ACEOF \`configure' configures bacula 15.0.0 to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... To assign environment variables (e.g., CC, CFLAGS...), specify them as VAR=VALUE. See below for descriptions of some of the useful variables. Defaults for the options are specified in brackets. Configuration: -h, --help display this help and exit --help=short display options specific to this package --help=recursive display the short help of all the included packages -V, --version display version information and exit -q, --quiet, --silent do not print \`checking ...' messages --cache-file=FILE cache test results in FILE [disabled] -C, --config-cache alias for \`--cache-file=config.cache' -n, --no-create do not create output files --srcdir=DIR find the sources in DIR [configure dir or \`..'] Installation directories: --prefix=PREFIX install architecture-independent files in PREFIX [$ac_default_prefix] --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX [PREFIX] By default, \`make install' will install all the files in \`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify an installation prefix other than \`$ac_default_prefix' using \`--prefix', for instance \`--prefix=\$HOME'. For better control, use the options below. Fine tuning of the installation directories: --bindir=DIR user executables [EPREFIX/bin] --sbindir=DIR system admin executables [EPREFIX/sbin] --libexecdir=DIR program executables [EPREFIX/libexec] --sysconfdir=DIR read-only single-machine data [PREFIX/etc] --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] --localstatedir=DIR modifiable single-machine data [PREFIX/var] --runstatedir=DIR modifiable per-process data [LOCALSTATEDIR/run] --libdir=DIR object code libraries [EPREFIX/lib] --includedir=DIR C header files [PREFIX/include] --oldincludedir=DIR C header files for non-gcc [/usr/include] --datarootdir=DIR read-only arch.-independent data root [PREFIX/share] --datadir=DIR read-only architecture-independent data [DATAROOTDIR] --infodir=DIR info documentation [DATAROOTDIR/info] --localedir=DIR locale-dependent data [DATAROOTDIR/locale] --mandir=DIR man documentation [DATAROOTDIR/man] --docdir=DIR documentation root [DATAROOTDIR/doc/bacula] --htmldir=DIR html documentation [DOCDIR] --dvidir=DIR dvi documentation [DOCDIR] --pdfdir=DIR pdf documentation [DOCDIR] --psdir=DIR ps documentation [DOCDIR] _ACEOF cat <<\_ACEOF X features: --x-includes=DIR X include files are in DIR --x-libraries=DIR X library files are in DIR System types: --build=BUILD configure for building on BUILD [guessed] --host=HOST cross-compile to build programs to run on HOST [BUILD] _ACEOF fi if test -n "$ac_init_help"; then case $ac_init_help in short | recursive ) echo "Configuration of bacula 15.0.0:";; esac cat <<\_ACEOF Optional Features: --disable-option-checking ignore unrecognized --enable/--with options --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no) --enable-FEATURE[=ARG] include FEATURE [ARG=yes] --enable-check-dmsg enable check for Dmsg(0) [default=no] --enable-libtool enable building using GNU libtool [default=yes] --enable-shared[=PKGS] build shared libraries [default=yes] --enable-static[=PKGS] build static libraries [default=no] --enable-fast-install[=PKGS] optimize for fast installation [default=yes] --disable-libtool-lock avoid locking (might break parallel builds) --enable-includes enable installing of include files [default=no] --disable-nls do not use Native Language Support 
--disable-rpath do not hardcode runtime library paths --enable-bat enable build of bat Qt4/5 GUI [default=no] --enable-smartalloc enable smartalloc debugging support [default=no] --enable-lockmgr enable lock manager support [default=no] --enable-static-tools enable static tape tools [default=no] --enable-static-fd enable static File daemon [default=no] --enable-static-sd enable static Storage daemon [default=no] --enable-static-dir enable static Director [default=no] --enable-static-cons enable static Console [default=no] --enable-client-only build client (File daemon) only [default=no] --enable-build-dird enable building of dird (Director) [default=yes] --enable-build-stored enable building of stored (Storage daemon) [default=yes] --disable-conio disable conio support [default=no] --enable-ipv6 enable ipv6 support [default=yes] --disable-readline disable readline support [default=yes] --enable-batch-insert enable the DB batch insert code [default=yes] --disable-bee disable BEE support [default=auto] --disable-largefile omit support for large files --disable-s3 disable S3 support [default=yes] --disable-afs disable afs support [default=auto] --disable-lzo disable lzo support [default=yes] --disable-acsls disable ACSLS support [default=yes] --disable-acl disable acl support [default=auto] --disable-xattr disable xattr support [default=auto] --disable-gpfs disable gpfs support [default=auto] --disable-antivirus-plugin disable Antivirus support [default=yes] --disable-docker-plugin disable Docker plugin support [default=auto] --disable-kubernetes-plugin disable Kubernetes plugin support [default=auto] --disable-cdp-plugin disable CDP plugin support [default=auto] --disable-totp-bpam disable TOTP BPAM support [default=yes] --disable-ldap-bpam disable LDAP BPAM support [default=auto] Optional Packages: --with-PACKAGE[=ARG] use PACKAGE [ARG=yes] --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no) --with-pic[=PKGS] try to use only PIC/non-PIC objects [default=use both] --with-gnu-ld assume the C compiler uses GNU ld [default=no] --with-sysroot=DIR Search for dependent libraries within DIR (or the compiler's sysroot if not specified). --with-gnu-ld assume the C compiler uses GNU ld default=no --with-libiconv-prefix[=DIR] search for libiconv in DIR/include and DIR/lib --without-libiconv-prefix don't search for libiconv in includedir and libdir --with-libintl-prefix[=DIR] search for libintl in DIR/include and DIR/lib --without-libintl-prefix don't search for libintl in includedir and libdir --with-included-gettext use the GNU gettext library included here --with-readline[=DIR] specify readline library directory --with-tcp-wrappers[=DIR] enable tcpwrappers support --with-curl[=DIR] Include CURL support. DIR is the CURL base --with-zstd[=DIR] Include ZSTD support. DIR is the ZSTD base --with-openssl[=DIR] Include OpenSSL support. 
DIR is the OpenSSL base --with-working-dir=PATH specify path of Bacula working directory --with-archivedir=PATH specify path of SD archive directory --with-basename=RESNAME specify base resource name for daemons --with-hostname=RESNAME specify host name for daemons --with-scriptdir=PATH specify path of Bacula scripts directory --with-bsrdir=PATH specify path of Bacula bsrs directory --with-logdir=PATH specify path of Bacula logs directory --with-plugindir=PATH specify path of Bacula plugins directory --with-dump-email=EMAIL dump email address --with-job-email=EMAIL job output email address --with-smtp-host=HOST SMTP mail host address --with-pid-dir=PATH specify location of Bacula pid files --with-subsys-dir=PATH specify location of Bacula subsys file --with-baseport=PORT specify base port address for daemons --with-dir-password=PASSWORD specify Director's password --with-fd-password=PASSWORD specify Client's password --with-sd-password=PASSWORD specify Storage daemon's password --with-mon-dir-password=PASSWORD specify Director's password used by the monitor --with-mon-fd-password=PASSWORD specify Client's password used by the monitor --with-mon-sd-password=PASSWORD specify Storage daemon's password used by the monitor --with-db-name=DBNAME specify database name [default=bacula] --with-db-user=UNAME specify database user [default=bacula] --with-db-password=PWD specify database password [default=*none*] --with-db-port=DBPORT specify a database port [default=null] --with-db-ssl-options=DBSSLOPTIONS specify SSL options for database user connection [default=null] --with-dir-user=USER specify user for Director daemon --with-dir-group=GROUP specify group for Director daemon --with-sd-user=USER specify user for Storage daemon --with-sd-group=GROUP specify group for Storage daemon --with-fd-user=USER specify user for File daemon --with-fd-group=GROUP specify group for File daemon --with-sbin-perm=MODE specify permissions for sbin binaries [default=0750] --with-cython=cython Cython path --with-postgresql[=DIR] Include PostgreSQL support. DIR is the PostgreSQL base install directory, [default=/usr/local/pgsql] --with-mysql[=DIR] Include MySQL support. DIR is the MySQL base install directory, default is to search through a number of common places for the MySQL files. --with-embedded-mysql[=DIR] Include MySQL support. DIR is the MySQL base install directory, default is to search through a number of common places for the MySQL files. --with-sqlite3[=DIR] Include SQLite3 support. DIR is the SQLite3 base install directory, default is to search through a number of common places for the SQLite3 files. --with-x use the X Window System --with-s3[=DIR] specify s3 library directory --with-aws specify Amazon Storage Cloud --with-afsdir[=DIR] Directory holding AFS includes/libs --with-lzo[=DIR] specify lzo library directory --with-gpfsdir[=DIR] Directory holding GPFS includes/libs --with-ldap[=DIR] enable LDAP support --with-systemd[=UNITDIR] Include systemd support. UNITDIR is where systemd system .service files are located, default is to ask systemctl. Some influential environment variables: CC C compiler command CFLAGS C compiler flags LDFLAGS linker flags, e.g. -L if you have libraries in a nonstandard directory LIBS libraries to pass to the linker, e.g. -l CPPFLAGS (Objective) C/C++ preprocessor flags, e.g. 
-I if you have headers in a nonstandard directory CXX C++ compiler command CXXFLAGS C++ compiler flags CPP C preprocessor PKG_CONFIG path to pkg-config utility PKG_CONFIG_PATH directories to add to pkg-config's search path PKG_CONFIG_LIBDIR path overriding pkg-config's built-in search path CXXCPP C++ preprocessor XMKMF Path to xmkmf, Makefile generator for X Window System Use these variables to override the choices made by `configure' or to help it to find libraries and programs with nonstandard names/locations. Report bugs to the package provider. _ACEOF ac_status=$? fi if test "$ac_init_help" = "recursive"; then # If there are subdirs, report their specific --help. for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue test -d "$ac_dir" || { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } || continue ac_builddir=. case "$ac_dir" in .) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` # A ".." for each directory in $ac_dir_suffix. ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` case $ac_top_builddir_sub in "") ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; esac ;; esac ac_abs_top_builddir=$ac_pwd ac_abs_builddir=$ac_pwd$ac_dir_suffix # for backward compatibility: ac_top_builddir=$ac_top_build_prefix case $srcdir in .) # We are building in place. ac_srcdir=. ac_top_srcdir=$ac_top_builddir_sub ac_abs_top_srcdir=$ac_pwd ;; [\\/]* | ?:[\\/]* ) # Absolute name. ac_srcdir=$srcdir$ac_dir_suffix; ac_top_srcdir=$srcdir ac_abs_top_srcdir=$srcdir ;; *) # Relative name. ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix ac_top_srcdir=$ac_top_build_prefix$srcdir ac_abs_top_srcdir=$ac_pwd/$srcdir ;; esac ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix cd "$ac_dir" || { ac_status=$?; continue; } # Check for guested configure. if test -f "$ac_srcdir/configure.gnu"; then echo && $SHELL "$ac_srcdir/configure.gnu" --help=recursive elif test -f "$ac_srcdir/configure"; then echo && $SHELL "$ac_srcdir/configure" --help=recursive else $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 fi || ac_status=$? cd "$ac_pwd" || { ac_status=$?; break; } done fi test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF bacula configure 15.0.0 generated by GNU Autoconf 2.69 Copyright (C) 2012 Free Software Foundation, Inc. This configure script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it. _ACEOF exit fi ## ------------------------ ## ## Autoconf initialization. ## ## ------------------------ ## # ac_fn_c_try_compile LINENO # -------------------------- # Try to compile conftest.$ac_ext, and return whether this succeeded. ac_fn_c_try_compile () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack rm -f conftest.$ac_objext if { { ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compile") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { test -z "$ac_c_werror_flag" || test ! 
-s conftest.err } && test -s conftest.$ac_objext; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_compile # ac_fn_cxx_try_compile LINENO # ---------------------------- # Try to compile conftest.$ac_ext, and return whether this succeeded. ac_fn_cxx_try_compile () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack rm -f conftest.$ac_objext if { { ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compile") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { test -z "$ac_cxx_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_cxx_try_compile # ac_fn_c_try_cpp LINENO # ---------------------- # Try to preprocess conftest.$ac_ext, and return whether this succeeded. ac_fn_c_try_cpp () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if { { ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } > conftest.i && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! -s conftest.err }; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_cpp # ac_fn_c_try_link LINENO # ----------------------- # Try to link conftest.$ac_ext, and return whether this succeeded. ac_fn_c_try_link () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack rm -f conftest.$ac_objext conftest$ac_exeext if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { test -z "$ac_c_werror_flag" || test ! 
-s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || test -x conftest$ac_exeext }; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would # interfere with the next link command; also delete a directory that is # left behind by Apple's compiler. We do this before executing the actions. rm -rf conftest.dSYM conftest_ipa8_conftest.oo eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_link # ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES # ------------------------------------------------------- # Tests whether HEADER exists and can be compiled using the include files in # INCLUDES, setting the cache variable VAR accordingly. ac_fn_c_check_header_compile () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 #include <$2> _ACEOF if ac_fn_c_try_compile "$LINENO"; then : eval "$3=yes" else eval "$3=no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_header_compile # ac_fn_c_try_run LINENO # ---------------------- # Try to link conftest.$ac_ext, and return whether this succeeded. Assumes # that executables *can* be run. ac_fn_c_try_run () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { ac_try='./conftest$ac_exeext' { { case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; }; then : ac_retval=0 else $as_echo "$as_me: program exited with status $ac_status" >&5 $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=$ac_status fi rm -rf conftest.dSYM conftest_ipa8_conftest.oo eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_run # ac_fn_c_check_func LINENO FUNC VAR # ---------------------------------- # Tests whether FUNC exists, setting the cache variable VAR accordingly ac_fn_c_check_func () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Define $2 to an innocuous variant, in case declares $2. For example, HP-UX 11i declares gettimeofday. 
*/ #define $2 innocuous_$2 /* System header to define __stub macros and hopefully few prototypes, which can conflict with char $2 (); below. Prefer to if __STDC__ is defined, since exists even on freestanding compilers. */ #ifdef __STDC__ # include #else # include #endif #undef $2 /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char $2 (); /* The GNU C library defines this for functions which it implements to always fail with ENOSYS. Some functions are actually named something starting with __ and the normal name is an alias. */ #if defined __stub_$2 || defined __stub___$2 choke me #endif int main () { return $2 (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$3=yes" else eval "$3=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_func # ac_fn_cxx_try_cpp LINENO # ------------------------ # Try to preprocess conftest.$ac_ext, and return whether this succeeded. ac_fn_cxx_try_cpp () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if { { ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } > conftest.i && { test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" || test ! -s conftest.err }; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_cxx_try_cpp # ac_fn_cxx_try_link LINENO # ------------------------- # Try to link conftest.$ac_ext, and return whether this succeeded. ac_fn_cxx_try_link () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack rm -f conftest.$ac_objext conftest$ac_exeext if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { test -z "$ac_cxx_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || test -x conftest$ac_exeext }; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would # interfere with the next link command; also delete a directory that is # left behind by Apple's compiler. We do this before executing the actions. 
rm -rf conftest.dSYM conftest_ipa8_conftest.oo eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_cxx_try_link # ac_fn_c_check_decl LINENO SYMBOL VAR INCLUDES # --------------------------------------------- # Tests whether SYMBOL is declared in INCLUDES, setting cache variable VAR # accordingly. ac_fn_c_check_decl () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack as_decl_name=`echo $2|sed 's/ *(.*//'` as_decl_use=`echo $2|sed -e 's/(/((/' -e 's/)/) 0&/' -e 's/,/) 0& (/g'` { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $as_decl_name is declared" >&5 $as_echo_n "checking whether $as_decl_name is declared... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 int main () { #ifndef $as_decl_name #ifdef __cplusplus (void) $as_decl_use; #else (void) $as_decl_name; #endif #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : eval "$3=yes" else eval "$3=no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_decl # ac_fn_c_check_type LINENO TYPE VAR INCLUDES # ------------------------------------------- # Tests whether TYPE exists after having included INCLUDES, setting cache # variable VAR accordingly. ac_fn_c_check_type () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else eval "$3=no" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 int main () { if (sizeof ($2)) return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 int main () { if (sizeof (($2))) return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : else eval "$3=yes" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_type # ac_fn_c_check_header_mongrel LINENO HEADER VAR INCLUDES # ------------------------------------------------------- # Tests whether HEADER exists, giving a warning if it cannot be compiled using # the include files in INCLUDES and setting the cache variable VAR # accordingly. ac_fn_c_check_header_mongrel () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if eval \${$3+:} false; then : { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } else # Is the header compilable? { $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5 $as_echo_n "checking $2 usability... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ $4 #include <$2> _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_header_compiler=yes else ac_header_compiler=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5 $as_echo "$ac_header_compiler" >&6; } # Is the header present? { $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5 $as_echo_n "checking $2 presence... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <$2> _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : ac_header_preproc=yes else ac_header_preproc=no fi rm -f conftest.err conftest.i conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5 $as_echo "$ac_header_preproc" >&6; } # So? What about this header? case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in #(( yes:no: ) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5 $as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} ;; no:yes:* ) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5 $as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: check for missing prerequisite headers?" >&5 $as_echo "$as_me: WARNING: $2: check for missing prerequisite headers?" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5 $as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&5 $as_echo "$as_me: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else eval "$3=\$ac_header_compiler" fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_header_mongrel # ac_fn_c_compute_int LINENO EXPR VAR INCLUDES # -------------------------------------------- # Tries to find the compile-time value of EXPR in a program that includes # INCLUDES, setting VAR accordingly. Returns whether the value could be # computed ac_fn_c_compute_int () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if test "$cross_compiling" = yes; then # Depending upon the size, compute the lo and hi bounds. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 int main () { static int test_array [1 - 2 * !(($2) >= 0)]; test_array [0] = 0; return test_array [0]; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_lo=0 ac_mid=0 while :; do cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ $4 int main () { static int test_array [1 - 2 * !(($2) <= $ac_mid)]; test_array [0] = 0; return test_array [0]; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_hi=$ac_mid; break else as_fn_arith $ac_mid + 1 && ac_lo=$as_val if test $ac_lo -le $ac_mid; then ac_lo= ac_hi= break fi as_fn_arith 2 '*' $ac_mid + 1 && ac_mid=$as_val fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext done else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 int main () { static int test_array [1 - 2 * !(($2) < 0)]; test_array [0] = 0; return test_array [0]; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_hi=-1 ac_mid=-1 while :; do cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 int main () { static int test_array [1 - 2 * !(($2) >= $ac_mid)]; test_array [0] = 0; return test_array [0]; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_lo=$ac_mid; break else as_fn_arith '(' $ac_mid ')' - 1 && ac_hi=$as_val if test $ac_mid -le $ac_hi; then ac_lo= ac_hi= break fi as_fn_arith 2 '*' $ac_mid && ac_mid=$as_val fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext done else ac_lo= ac_hi= fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext # Binary search between lo and hi bounds. while test "x$ac_lo" != "x$ac_hi"; do as_fn_arith '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo && ac_mid=$as_val cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 int main () { static int test_array [1 - 2 * !(($2) <= $ac_mid)]; test_array [0] = 0; return test_array [0]; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_hi=$ac_mid else as_fn_arith '(' $ac_mid ')' + 1 && ac_lo=$as_val fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext done case $ac_lo in #(( ?*) eval "$3=\$ac_lo"; ac_retval=0 ;; '') ac_retval=1 ;; esac else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 static long int longval () { return $2; } static unsigned long int ulongval () { return $2; } #include #include int main () { FILE *f = fopen ("conftest.val", "w"); if (! f) return 1; if (($2) < 0) { long int i = longval (); if (i != ($2)) return 1; fprintf (f, "%ld", i); } else { unsigned long int i = ulongval (); if (i != ($2)) return 1; fprintf (f, "%lu", i); } /* Do not output a trailing newline, as this causes \r\n confusion on some platforms. */ return ferror (f) || fclose (f) != 0; ; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : echo >>conftest.val; read $3 &5 $as_echo_n "checking for $2.$3... " >&6; } if eval \${$4+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $5 int main () { static $2 ac_aggr; if (ac_aggr.$3) return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : eval "$4=yes" else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ $5 int main () { static $2 ac_aggr; if (sizeof ac_aggr.$3) return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : eval "$4=yes" else eval "$4=no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi eval ac_res=\$$4 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_member # ac_fn_cxx_try_run LINENO # ------------------------ # Try to link conftest.$ac_ext, and return whether this succeeded. Assumes # that executables *can* be run. ac_fn_cxx_try_run () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { ac_try='./conftest$ac_exeext' { { case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; }; then : ac_retval=0 else $as_echo "$as_me: program exited with status $ac_status" >&5 $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=$ac_status fi rm -rf conftest.dSYM conftest_ipa8_conftest.oo eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_cxx_try_run # ac_fn_cxx_check_func LINENO FUNC VAR # ------------------------------------ # Tests whether FUNC exists, setting the cache variable VAR accordingly ac_fn_cxx_check_func () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Define $2 to an innocuous variant, in case declares $2. For example, HP-UX 11i declares gettimeofday. */ #define $2 innocuous_$2 /* System header to define __stub macros and hopefully few prototypes, which can conflict with char $2 (); below. Prefer to if __STDC__ is defined, since exists even on freestanding compilers. */ #ifdef __STDC__ # include #else # include #endif #undef $2 /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char $2 (); /* The GNU C library defines this for functions which it implements to always fail with ENOSYS. Some functions are actually named something starting with __ and the normal name is an alias. 
*/ #if defined __stub_$2 || defined __stub___$2 choke me #endif int main () { return $2 (); ; return 0; } _ACEOF if ac_fn_cxx_try_link "$LINENO"; then : eval "$3=yes" else eval "$3=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_cxx_check_func cat >config.log <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. It was created by bacula $as_me 15.0.0, which was generated by GNU Autoconf 2.69. Invocation command line was $ $0 $@ _ACEOF exec 5>>config.log { cat <<_ASUNAME ## --------- ## ## Platform. ## ## --------- ## hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` uname -m = `(uname -m) 2>/dev/null || echo unknown` uname -r = `(uname -r) 2>/dev/null || echo unknown` uname -s = `(uname -s) 2>/dev/null || echo unknown` uname -v = `(uname -v) 2>/dev/null || echo unknown` /usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` /bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` /bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` /usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` /usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` /usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown` /bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` /usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` /bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` _ASUNAME as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. $as_echo "PATH: $as_dir" done IFS=$as_save_IFS } >&5 cat >&5 <<_ACEOF ## ----------- ## ## Core tests. ## ## ----------- ## _ACEOF # Keep a trace of the command line. # Strip out --no-create and --no-recursion so they do not pile up. # Strip out --silent because we don't want to record it for future runs. # Also quote any args containing shell meta-characters. # Make two passes to allow for proper duplicate-argument suppression. ac_configure_args= ac_configure_args0= ac_configure_args1= ac_must_keep_next=false for ac_pass in 1 2 do for ac_arg do case $ac_arg in -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil) continue ;; *\'*) ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; esac case $ac_pass in 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;; 2) as_fn_append ac_configure_args1 " '$ac_arg'" if test $ac_must_keep_next = true; then ac_must_keep_next=false # Got value, back to normal. else case $ac_arg in *=* | --config-cache | -C | -disable-* | --disable-* \ | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ | -with-* | --with-* | -without-* | --without-* | --x) case "$ac_configure_args0 " in "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; esac ;; -* ) ac_must_keep_next=true ;; esac fi as_fn_append ac_configure_args " '$ac_arg'" ;; esac done done { ac_configure_args0=; unset ac_configure_args0;} { ac_configure_args1=; unset ac_configure_args1;} # When interrupted or exit'd, cleanup temporary files, and complete # config.log. We remove comments because anyway the quotes in there # would cause problems or look ugly. 
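# The ac_fn_* helpers defined above are the low-level probes used by the
# feature tests later in this script.  A sketch of the calling convention
# only -- the probed function, header and cache-variable names below are
# arbitrary examples, not checks this script necessarily performs:
#
#   ac_fn_c_check_func "$LINENO" "gettimeofday" "ac_cv_func_gettimeofday"
#   ac_fn_c_check_header_mongrel "$LINENO" "unistd.h" "ac_cv_header_unistd_h" "$ac_includes_default"
#
# Each probe records its yes/no answer in the named ac_cv_* cache
# variable, which is what config.cache and config.site files can pre-seed.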
# WARNING: Use '\'' to represent an apostrophe within the trap. # WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. trap 'exit_status=$? # Save into config.log some information that might help in debugging. { echo $as_echo "## ---------------- ## ## Cache variables. ## ## ---------------- ##" echo # The following way of writing the cache mishandles newlines in values, ( for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do eval ac_val=\$$ac_var case $ac_val in #( *${as_nl}*) case $ac_var in #( *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 $as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; esac case $ac_var in #( _ | IFS | as_nl) ;; #( BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( *) { eval $ac_var=; unset $ac_var;} ;; esac ;; esac done (set) 2>&1 | case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #( *${as_nl}ac_space=\ *) sed -n \ "s/'\''/'\''\\\\'\'''\''/g; s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p" ;; #( *) sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" ;; esac | sort ) echo $as_echo "## ----------------- ## ## Output variables. ## ## ----------------- ##" echo for ac_var in $ac_subst_vars do eval ac_val=\$$ac_var case $ac_val in *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; esac $as_echo "$ac_var='\''$ac_val'\''" done | sort echo if test -n "$ac_subst_files"; then $as_echo "## ------------------- ## ## File substitutions. ## ## ------------------- ##" echo for ac_var in $ac_subst_files do eval ac_val=\$$ac_var case $ac_val in *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; esac $as_echo "$ac_var='\''$ac_val'\''" done | sort echo fi if test -s confdefs.h; then $as_echo "## ----------- ## ## confdefs.h. ## ## ----------- ##" echo cat confdefs.h echo fi test "$ac_signal" != 0 && $as_echo "$as_me: caught signal $ac_signal" $as_echo "$as_me: exit $exit_status" } >&5 rm -f core *.core core.conftest.* && rm -f -r conftest* confdefs* conf$$* $ac_clean_files && exit $exit_status ' 0 for ac_signal in 1 2 13 15; do trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal done ac_signal=0 # confdefs.h avoids OS command line length limits that DEFS can exceed. rm -f -r conftest* confdefs.h $as_echo "/* confdefs.h */" > confdefs.h # Predefined preprocessor variables. cat >>confdefs.h <<_ACEOF #define PACKAGE_NAME "$PACKAGE_NAME" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_TARNAME "$PACKAGE_TARNAME" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_VERSION "$PACKAGE_VERSION" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_STRING "$PACKAGE_STRING" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_URL "$PACKAGE_URL" _ACEOF # Let the site file select an alternate cache file if it wants to. # Prefer an explicitly selected file to automatically selected ones. ac_site_file1=NONE ac_site_file2=NONE if test -n "$CONFIG_SITE"; then # We do not want a PATH search for config.site. 
case $CONFIG_SITE in #(( -*) ac_site_file1=./$CONFIG_SITE;; */*) ac_site_file1=$CONFIG_SITE;; *) ac_site_file1=./$CONFIG_SITE;; esac elif test "x$prefix" != xNONE; then ac_site_file1=$prefix/share/config.site ac_site_file2=$prefix/etc/config.site else ac_site_file1=$ac_default_prefix/share/config.site ac_site_file2=$ac_default_prefix/etc/config.site fi for ac_site_file in "$ac_site_file1" "$ac_site_file2" do test "x$ac_site_file" = xNONE && continue if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5 $as_echo "$as_me: loading site script $ac_site_file" >&6;} sed 's/^/| /' "$ac_site_file" >&5 . "$ac_site_file" \ || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "failed to load site script $ac_site_file See \`config.log' for more details" "$LINENO" 5; } fi done if test -r "$cache_file"; then # Some versions of bash will fail to source /dev/null (special files # actually), so we avoid doing that. DJGPP emulates it as a regular file. if test /dev/null != "$cache_file" && test -f "$cache_file"; then { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5 $as_echo "$as_me: loading cache $cache_file" >&6;} case $cache_file in [\\/]* | ?:[\\/]* ) . "$cache_file";; *) . "./$cache_file";; esac fi else { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5 $as_echo "$as_me: creating cache $cache_file" >&6;} >$cache_file fi as_fn_append ac_header_list " stdlib.h" as_fn_append ac_header_list " unistd.h" as_fn_append ac_header_list " sys/param.h" # Check that the precious variables saved in the cache have kept the same # value. ac_cache_corrupted=false for ac_var in $ac_precious_vars; do eval ac_old_set=\$ac_cv_env_${ac_var}_set eval ac_new_set=\$ac_env_${ac_var}_set eval ac_old_val=\$ac_cv_env_${ac_var}_value eval ac_new_val=\$ac_env_${ac_var}_value case $ac_old_set,$ac_new_set in set,) { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 $as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} ac_cache_corrupted=: ;; ,set) { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5 $as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} ac_cache_corrupted=: ;; ,);; *) if test "x$ac_old_val" != "x$ac_new_val"; then # differences in whitespace do not lead to failure. ac_old_val_w=`echo x $ac_old_val` ac_new_val_w=`echo x $ac_new_val` if test "$ac_old_val_w" != "$ac_new_val_w"; then { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5 $as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} ac_cache_corrupted=: else { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 $as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} eval $ac_var=\$ac_old_val fi { $as_echo "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5 $as_echo "$as_me: former value: \`$ac_old_val'" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5 $as_echo "$as_me: current value: \`$ac_new_val'" >&2;} fi;; esac # Pass precious variables to config.status. 
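# This loop compares each "precious" variable (CC, CFLAGS, CXX, ...) with
# the value recorded in the cache from a previous run.  Illustrative
# scenario only, assuming a cache file is in use:
#
#   ./configure -C CC=gcc      # first run, caches CC
#   ./configure -C CC=clang    # second run in the same build directory
#
# The second run is rejected with "changes in the environment can
# compromise the build"; the suggested recovery is `make distclean'
# and/or removing the cache file.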
if test "$ac_new_set" = set; then case $ac_new_val in *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; *) ac_arg=$ac_var=$ac_new_val ;; esac case " $ac_configure_args " in *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. *) as_fn_append ac_configure_args " '$ac_arg'" ;; esac fi done if $ac_cache_corrupted; then { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5 $as_echo "$as_me: error: changes in the environment can compromise the build" >&2;} as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5 fi ## -------------------- ## ## Main body of script. ## ## -------------------- ## ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu BUILD_DIR=`pwd` cd .. TOP_DIR=`pwd` cd ${BUILD_DIR} ac_aux_dir= for ac_dir in ${BUILD_DIR}/autoconf "$srcdir"/${BUILD_DIR}/autoconf; do if test -f "$ac_dir/install-sh"; then ac_aux_dir=$ac_dir ac_install_sh="$ac_aux_dir/install-sh -c" break elif test -f "$ac_dir/install.sh"; then ac_aux_dir=$ac_dir ac_install_sh="$ac_aux_dir/install.sh -c" break elif test -f "$ac_dir/shtool"; then ac_aux_dir=$ac_dir ac_install_sh="$ac_aux_dir/shtool install -c" break fi done if test -z "$ac_aux_dir"; then as_fn_error $? "cannot find install-sh, install.sh, or shtool in ${BUILD_DIR}/autoconf \"$srcdir\"/${BUILD_DIR}/autoconf" "$LINENO" 5 fi # These three variables are undocumented and unsupported, # and are intended to be withdrawn in a future Autoconf release. # They can cause serious problems if a builder's source tree is in a directory # whose full name contains unusual characters. ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var. ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var. ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var. ac_config_headers="$ac_config_headers src/config.h:autoconf/config.h.in" WIN32BUILDDIR=${BUILD_DIR}/src/win32 WIN32MAINDIR=${BUILD_DIR} WIN32TOPDIR=${TOP_DIR} for ac_prog in true do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_TRUEPRG+:} false; then : $as_echo_n "(cached) " >&6 else case $TRUEPRG in [\\/]* | ?:[\\/]*) ac_cv_path_TRUEPRG="$TRUEPRG" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_TRUEPRG="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi TRUEPRG=$ac_cv_path_TRUEPRG if test -n "$TRUEPRG"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $TRUEPRG" >&5 $as_echo "$TRUEPRG" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$TRUEPRG" && break done test -n "$TRUEPRG" || TRUEPRG=":" for ac_prog in false do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_FALSEPRG+:} false; then : $as_echo_n "(cached) " >&6 else case $FALSEPRG in [\\/]* | ?:[\\/]*) ac_cv_path_FALSEPRG="$FALSEPRG" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_FALSEPRG="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi FALSEPRG=$ac_cv_path_FALSEPRG if test -n "$FALSEPRG"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $FALSEPRG" >&5 $as_echo "$FALSEPRG" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$FALSEPRG" && break done test -n "$FALSEPRG" || FALSEPRG=":" post_host= if test "x$BACULA" != x; then post_host=`echo -${BACULA} | tr 'A-Z ' 'a-z-'` fi BACULA=${BACULA:-Bacula} VERSION=`sed -n -e 's/^#define VERSION.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` RELEASE=`sed -n -e 's/^#define RELEASE.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` DATE=`sed -n -e 's/^#define BDATE.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` LSMDATE=`sed -n -e 's/^#define LSMDATE.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` BDB_VERSION=`sed -n -e 's/^#define BDB_VERSION \(.*\)$/\1/p' ${srcdir}/src/cats/cats.h` DEPKGS_VERSION=`sed -n -e 's/^#define DEPKGS_VERSION \(.*\)$/\1/p' ${srcdir}/src/cats/cats.h` DEPKGS_QT_VERSION=`sed -n -e 's/^#define DEPKGS_QT_VERSION.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` BQT_VERSION=`sed -n -e 's/^#define BQT_VERSION.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` VIX_VERSION=`sed -n -e 's/^#define VIX_VERSION.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` JAVA_VERSION=`sed -n -e 's/^#define JAVA_VERSION.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` NDMP_VERSION=`sed -n -e 's/^#define NDMP_VERSION.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` LIBRSYNC_VERSION=`sed -n -e 's/^#define LIBRSYNC_VERSION.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` KUBERNETES_IMAGE_VERSION=`sed -n -e 's/^#define KUBERNETES_IMAGE_VERSION.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` LIBBAC_LT_RELEASE=`sed -n -e 's/^#.*LIBBAC_LT_RELEASE.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` LIBBACCFG_LT_RELEASE=`sed -n -e 's/^#.*LIBBACCFG_LT_RELEASE.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` LIBBACPY_LT_RELEASE=`sed -n -e 's/^#.*LIBBACPY_LT_RELEASE.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` LIBBAC_LT_RELEASE=${LIBBAC_LT_RELEASE:-$VERSION} LIBBACCFG_LT_RELEASE=${LIBBACCFG_LT_RELEASE:-$VERSION} LIBBACSQL_LT_RELEASE=`sed -n -e 's/^#.*LIBBACSQL_LT_RELEASE.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` 
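# These assignments scrape version information out of the source headers
# instead of hard-coding it.  Sketch of the sed pattern with an assumed
# input line (the real contents of src/version.h may differ):
#
#   echo '#define VERSION "15.0.0"' \
#     | sed -n -e 's/^#define VERSION.*"\(.*\)"$/\1/p'    # prints 15.0.0
#
# Any *_LT_RELEASE value that comes back empty falls back to $VERSION via
# the ${VAR:-$VERSION} defaults in the surrounding assignments.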
LIBBACCATS_LT_RELEASE=`sed -n -e 's/^#.*LIBBACCATS_LT_RELEASE.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` LIBBACSQL_LT_RELEASE=${LIBBACSQL_LT_RELEASE:-$VERSION} LIBBACCATS_LT_RELEASE=${LIBBACCATS_LT_RELEASE:-$VERSION} LIBBACFIND_LT_RELEASE=`sed -n -e 's/^#.*LIBBACFIND_LT_RELEASE.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h` LIBBACFIND_LT_RELEASE=${LIBBACFIND_LT_RELEASE:-$VERSION} PFILES="platforms/Makefile" echo "configuring for ${BACULA} $VERSION ($DATE)" ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. set dummy ${ac_tool_prefix}gcc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="${ac_tool_prefix}gcc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_CC"; then ac_ct_CC=$CC # Extract the first word of "gcc", so it can be a program name with args. set dummy gcc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CC="gcc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 $as_echo "$ac_ct_CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_CC" = x; then CC="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC fi else CC="$ac_cv_prog_CC" fi if test -z "$CC"; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. set dummy ${ac_tool_prefix}cc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. 
else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="${ac_tool_prefix}cc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi fi if test -z "$CC"; then # Extract the first word of "cc", so it can be a program name with args. set dummy cc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else ac_prog_rejected=no as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then ac_prog_rejected=yes continue fi ac_cv_prog_CC="cc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS if test $ac_prog_rejected = yes; then # We found a bogon in the path, so make sure we never use it. set dummy $ac_cv_prog_CC shift if test $# != 0; then # We chose a different compiler from the bogus one. # However, it has the same basename, so the bogon will be chosen # first if we set CC to just the basename; use the full file name. shift ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" fi fi fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$CC"; then if test -n "$ac_tool_prefix"; then for ac_prog in cl.exe do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="$ac_tool_prefix$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$CC" && break done fi if test -z "$CC"; then ac_ct_CC=$CC for ac_prog in cl.exe do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... 
" >&6; } if ${ac_cv_prog_ac_ct_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CC="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 $as_echo "$ac_ct_CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ac_ct_CC" && break done if test "x$ac_ct_CC" = x; then CC="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC fi fi fi test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "no acceptable C compiler found in \$PATH See \`config.log' for more details" "$LINENO" 5; } # Provide some information about the compiler. $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 set X $ac_compile ac_compiler=$2 for ac_option in --version -v -V -qversion; do { { ac_try="$ac_compiler $ac_option >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compiler $ac_option >&5") 2>conftest.err ac_status=$? if test -s conftest.err; then sed '10a\ ... rest of stderr output deleted ... 10q' conftest.err >conftest.er1 cat conftest.er1 >&5 fi rm -f conftest.er1 conftest.err $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } done cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF ac_clean_files_save=$ac_clean_files ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" # Try to create an executable without -o first, disregard a.out. # It will help us diagnose broken compilers, and finding out an intuition # of exeext. { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5 $as_echo_n "checking whether the C compiler works... " >&6; } ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` # The possible output files: ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" ac_rmfiles= for ac_file in $ac_files do case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; * ) ac_rmfiles="$ac_rmfiles $ac_file";; esac done rm -f $ac_rmfiles if { { ac_try="$ac_link_default" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link_default") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then : # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. 
# So ignore a value of `no', otherwise this would lead to `EXEEXT = no' # in a Makefile. We should not override ac_cv_exeext if it was cached, # so that the user can short-circuit this test for compilers unknown to # Autoconf. for ac_file in $ac_files '' do test -f "$ac_file" || continue case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; [ab].out ) # We found the default executable, but exeext='' is most # certainly right. break;; *.* ) if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; then :; else ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` fi # We set ac_cv_exeext here because the later test for it is not # safe: cross compilers may not add the suffix if given an `-o' # argument, so we may need to know it at that point already. # Even if this section looks crufty: it has the advantage of # actually working. break;; * ) break;; esac done test "$ac_cv_exeext" = no && ac_cv_exeext= else ac_file='' fi if test -z "$ac_file"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error 77 "C compiler cannot create executables See \`config.log' for more details" "$LINENO" 5; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5 $as_echo_n "checking for C compiler default output file name... " >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5 $as_echo "$ac_file" >&6; } ac_exeext=$ac_cv_exeext rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out ac_clean_files=$ac_clean_files_save { $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5 $as_echo_n "checking for suffix of executables... " >&6; } if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then : # If both `conftest.exe' and `conftest' are `present' (well, observable) # catch `conftest.exe'. For instance with Cygwin, `ls conftest' will # work properly (i.e., refer to `conftest.exe'), while it won't with # `rm'. for ac_file in conftest.exe conftest conftest.*; do test -f "$ac_file" || continue case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` break;; * ) break;; esac done else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot compute suffix of executables: cannot compile and link See \`config.log' for more details" "$LINENO" 5; } fi rm -f conftest conftest$ac_cv_exeext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5 $as_echo "$ac_cv_exeext" >&6; } rm -f conftest.$ac_ext EXEEXT=$ac_cv_exeext ac_exeext=$EXEEXT cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include int main () { FILE *f = fopen ("conftest.out", "w"); return ferror (f) || fclose (f) != 0; ; return 0; } _ACEOF ac_clean_files="$ac_clean_files conftest.out" # Check that the compiler produces executables we can run. If not, either # the compiler is broken, or we cross compile. { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5 $as_echo_n "checking whether we are cross compiling... " >&6; } if test "$cross_compiling" != yes; then { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } if { ac_try='./conftest$ac_cv_exeext' { { case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; }; then cross_compiling=no else if test "$cross_compiling" = maybe; then cross_compiling=yes else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot run C compiled programs. If you meant to cross compile, use \`--host'. See \`config.log' for more details" "$LINENO" 5; } fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5 $as_echo "$cross_compiling" >&6; } rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out ac_clean_files=$ac_clean_files_save { $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5 $as_echo_n "checking for suffix of object files... " >&6; } if ${ac_cv_objext+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.o conftest.obj if { { ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compile") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then : for ac_file in conftest.o conftest.obj conftest.*; do test -f "$ac_file" || continue; case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;; *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` break;; esac done else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot compute suffix of object files: cannot compile See \`config.log' for more details" "$LINENO" 5; } fi rm -f conftest.$ac_cv_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5 $as_echo "$ac_cv_objext" >&6; } OBJEXT=$ac_cv_objext ac_objext=$OBJEXT { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 $as_echo_n "checking whether we are using the GNU C compiler... " >&6; } if ${ac_cv_c_compiler_gnu+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { #ifndef __GNUC__ choke me #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_compiler_gnu=yes else ac_compiler_gnu=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_c_compiler_gnu=$ac_compiler_gnu fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 $as_echo "$ac_cv_c_compiler_gnu" >&6; } if test $ac_compiler_gnu = yes; then GCC=yes else GCC= fi ac_test_CFLAGS=${CFLAGS+set} ac_save_CFLAGS=$CFLAGS { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 $as_echo_n "checking whether $CC accepts -g... " >&6; } if ${ac_cv_prog_cc_g+:} false; then : $as_echo_n "(cached) " >&6 else ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes ac_cv_prog_cc_g=no CFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_g=yes else CFLAGS="" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : else ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_g=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 $as_echo "$ac_cv_prog_cc_g" >&6; } if test "$ac_test_CFLAGS" = set; then CFLAGS=$ac_save_CFLAGS elif test $ac_cv_prog_cc_g = yes; then if test "$GCC" = yes; then CFLAGS="-g -O2" else CFLAGS="-g" fi else if test "$GCC" = yes; then CFLAGS="-O2" else CFLAGS= fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5 $as_echo_n "checking for $CC option to accept ISO C89... " >&6; } if ${ac_cv_prog_cc_c89+:} false; then : $as_echo_n "(cached) " >&6 else ac_cv_prog_cc_c89=no ac_save_CC=$CC cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include struct stat; /* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */ struct buf { int x; }; FILE * (*rcsopen) (struct buf *, struct stat *, int); static char *e (p, i) char **p; int i; { return p[i]; } static char *f (char * (*g) (char **, int), char **p, ...) { char *s; va_list v; va_start (v,p); s = g (p, va_arg (v,int)); va_end (v); return s; } /* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has function prototypes and stuff, but not '\xHH' hex character constants. These don't provoke an error unfortunately, instead are silently treated as 'x'. The following induces an error, until -std is added to get proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an array size at least. It's necessary to write '\x00'==0 to get something that's true only with -std. */ int osf4_cc_array ['\x00' == 0 ? 1 : -1]; /* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters inside strings and character constants. */ #define FOO(x) 'x' int xlc6_cc_array[FOO(a) == 'x' ? 
1 : -1]; int test (int i, double x); struct s1 {int (*f) (int a);}; struct s2 {int (*f) (double a);}; int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); int argc; char **argv; int main () { return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; ; return 0; } _ACEOF for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" do CC="$ac_save_CC $ac_arg" if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_c89=$ac_arg fi rm -f core conftest.err conftest.$ac_objext test "x$ac_cv_prog_cc_c89" != "xno" && break done rm -f conftest.$ac_ext CC=$ac_save_CC fi # AC_CACHE_VAL case "x$ac_cv_prog_cc_c89" in x) { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 $as_echo "none needed" >&6; } ;; xno) { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 $as_echo "unsupported" >&6; } ;; *) CC="$CC $ac_cv_prog_cc_c89" { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 $as_echo "$ac_cv_prog_cc_c89" >&6; } ;; esac if test "x$ac_cv_prog_cc_c89" != xno; then : fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu if test -z "$CXX"; then if test -n "$CCC"; then CXX=$CCC else if test -n "$ac_tool_prefix"; then for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CXX+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CXX"; then ac_cv_prog_CXX="$CXX" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CXX="$ac_tool_prefix$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CXX=$ac_cv_prog_CXX if test -n "$CXX"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXX" >&5 $as_echo "$CXX" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$CXX" && break done fi if test -z "$CXX"; then ac_ct_CXX=$CXX for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_CXX+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CXX"; then ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
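# Note: the loop below walks each directory in $PATH looking for one of the
# candidate C++ compiler names listed just above (g++, c++, aCC, xlC, ...).
# As with $CC, a pre-set $CXX short-circuits the search, so a particular
# compiler can be supplied on the configure command line, e.g. (illustrative
# path only):
#   ./configure CXX=/usr/bin/g++-12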
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CXX="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CXX=$ac_cv_prog_ac_ct_CXX if test -n "$ac_ct_CXX"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CXX" >&5 $as_echo "$ac_ct_CXX" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ac_ct_CXX" && break done if test "x$ac_ct_CXX" = x; then CXX="g++" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CXX=$ac_ct_CXX fi fi fi fi # Provide some information about the compiler. $as_echo "$as_me:${as_lineno-$LINENO}: checking for C++ compiler version" >&5 set X $ac_compile ac_compiler=$2 for ac_option in --version -v -V -qversion; do { { ac_try="$ac_compiler $ac_option >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compiler $ac_option >&5") 2>conftest.err ac_status=$? if test -s conftest.err; then sed '10a\ ... rest of stderr output deleted ... 10q' conftest.err >conftest.er1 cat conftest.er1 >&5 fi rm -f conftest.er1 conftest.err $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } done { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C++ compiler" >&5 $as_echo_n "checking whether we are using the GNU C++ compiler... " >&6; } if ${ac_cv_cxx_compiler_gnu+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { #ifndef __GNUC__ choke me #endif ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : ac_compiler_gnu=yes else ac_compiler_gnu=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_cxx_compiler_gnu=$ac_compiler_gnu fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_cxx_compiler_gnu" >&5 $as_echo "$ac_cv_cxx_compiler_gnu" >&6; } if test $ac_compiler_gnu = yes; then GXX=yes else GXX= fi ac_test_CXXFLAGS=${CXXFLAGS+set} ac_save_CXXFLAGS=$CXXFLAGS { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CXX accepts -g" >&5 $as_echo_n "checking whether $CXX accepts -g... " >&6; } if ${ac_cv_prog_cxx_g+:} false; then : $as_echo_n "(cached) " >&6 else ac_save_cxx_werror_flag=$ac_cxx_werror_flag ac_cxx_werror_flag=yes ac_cv_prog_cxx_g=no CXXFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : ac_cv_prog_cxx_g=yes else CXXFLAGS="" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : else ac_cxx_werror_flag=$ac_save_cxx_werror_flag CXXFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : ac_cv_prog_cxx_g=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cxx_werror_flag=$ac_save_cxx_werror_flag fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_g" >&5 $as_echo "$ac_cv_prog_cxx_g" >&6; } if test "$ac_test_CXXFLAGS" = set; then CXXFLAGS=$ac_save_CXXFLAGS elif test $ac_cv_prog_cxx_g = yes; then if test "$GXX" = yes; then CXXFLAGS="-g -O2" else CXXFLAGS="-g" fi else if test "$GXX" = yes; then CXXFLAGS="-O2" else CXXFLAGS= fi fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu if test "x$CC" != xcc; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC and cc understand -c and -o together" >&5 $as_echo_n "checking whether $CC and cc understand -c and -o together... " >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether cc understands -c and -o together" >&5 $as_echo_n "checking whether cc understands -c and -o together... " >&6; } fi set dummy $CC; ac_cc=`$as_echo "$2" | sed 's/[^a-zA-Z0-9_]/_/g;s/^[0-9]/_/'` if eval \${ac_cv_prog_cc_${ac_cc}_c_o+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF # Make sure it works both with $CC and with simple cc. # We do the test twice because some compilers refuse to overwrite an # existing .o file with -o, though they will create one. ac_try='$CC -c conftest.$ac_ext -o conftest2.$ac_objext >&5' rm -f conftest2.* if { { case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && test -f conftest2.$ac_objext && { { case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then eval ac_cv_prog_cc_${ac_cc}_c_o=yes if test "x$CC" != xcc; then # Test first that cc exists at all. if { ac_try='cc -c conftest.$ac_ext >&5' { { case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; }; then ac_try='cc -c conftest.$ac_ext -o conftest2.$ac_objext >&5' rm -f conftest2.* if { { case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 test $ac_status = 0; } && test -f conftest2.$ac_objext && { { case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then # cc works too. : else # cc exists but doesn't like -o. eval ac_cv_prog_cc_${ac_cc}_c_o=no fi fi fi else eval ac_cv_prog_cc_${ac_cc}_c_o=no fi rm -f core conftest* fi if eval test \$ac_cv_prog_cc_${ac_cc}_c_o = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } $as_echo "#define NO_MINUS_C_MINUS_O 1" >>confdefs.h fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5 $as_echo_n "checking how to run the C preprocessor... " >&6; } # On Suns, sometimes $CPP names a directory. if test -n "$CPP" && test -d "$CPP"; then CPP= fi if test -z "$CPP"; then if ${ac_cv_prog_CPP+:} false; then : $as_echo_n "(cached) " >&6 else # Double quotes because CPP needs to be expanded for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp" do ac_preproc_ok=false for ac_c_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer to if __STDC__ is defined, since # exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef __STDC__ # include #else # include #endif Syntax error _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : else # Broken: fails on valid input. continue fi rm -f conftest.err conftest.i conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : # Broken: success on invalid input. continue else # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.i conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.i conftest.err conftest.$ac_ext if $ac_preproc_ok; then : break fi done ac_cv_prog_CPP=$CPP fi CPP=$ac_cv_prog_CPP else ac_cv_prog_CPP=$CPP fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5 $as_echo "$CPP" >&6; } ac_preproc_ok=false for ac_c_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer to if __STDC__ is defined, since # exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef __STDC__ # include #else # include #endif Syntax error _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : else # Broken: fails on valid input. continue fi rm -f conftest.err conftest.i conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. 
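# (The probe that follows feeds the preprocessor an include of a header that
# does not exist; in the stock Autoconf template it is <ac_nonexistent.h>.
# That run is expected to fail: a preprocessor that "succeeds" on it is
# rejected, just as one that chokes on the valid <limits.h>/<assert.h> probe
# above is.)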
cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : # Broken: success on invalid input. continue else # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.i conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.i conftest.err conftest.$ac_ext if $ac_preproc_ok; then : else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "C preprocessor \"$CPP\" fails sanity check See \`config.log' for more details" "$LINENO" 5; } fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu { $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5 $as_echo_n "checking for grep that handles long lines and -e... " >&6; } if ${ac_cv_path_GREP+:} false; then : $as_echo_n "(cached) " >&6 else if test -z "$GREP"; then ac_path_GREP_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in grep ggrep; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext" as_fn_executable_p "$ac_path_GREP" || continue # Check for GNU ac_path_GREP and select it if it is found. # Check for GNU $ac_path_GREP case `"$ac_path_GREP" --version 2>&1` in *GNU*) ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo 'GREP' >> "conftest.nl" "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break as_fn_arith $ac_count + 1 && ac_count=$as_val if test $ac_count -gt ${ac_path_GREP_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_GREP_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_GREP"; then as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 fi else ac_cv_path_GREP=$GREP fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5 $as_echo "$ac_cv_path_GREP" >&6; } GREP="$ac_cv_path_GREP" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5 $as_echo_n "checking for egrep... " >&6; } if ${ac_cv_path_EGREP+:} false; then : $as_echo_n "(cached) " >&6 else if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 then ac_cv_path_EGREP="$GREP -E" else if test -z "$EGREP"; then ac_path_EGREP_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
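# Note: this fallback search for egrep (over $PATH plus /usr/xpg4/bin) only
# runs when "$GREP -E" failed the '(a|b)' test above; on systems with GNU
# grep the loop is normally skipped and EGREP simply becomes "$GREP -E".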
for ac_prog in egrep; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext" as_fn_executable_p "$ac_path_EGREP" || continue # Check for GNU ac_path_EGREP and select it if it is found. # Check for GNU $ac_path_EGREP case `"$ac_path_EGREP" --version 2>&1` in *GNU*) ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo 'EGREP' >> "conftest.nl" "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break as_fn_arith $ac_count + 1 && ac_count=$as_val if test $ac_count -gt ${ac_path_EGREP_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_EGREP_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_EGREP"; then as_fn_error $? "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 fi else ac_cv_path_EGREP=$EGREP fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5 $as_echo "$ac_cv_path_EGREP" >&6; } EGREP="$ac_cv_path_EGREP" if test $ac_cv_c_compiler_gnu = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC needs -traditional" >&5 $as_echo_n "checking whether $CC needs -traditional... " >&6; } if ${ac_cv_prog_gcc_traditional+:} false; then : $as_echo_n "(cached) " >&6 else ac_pattern="Autoconf.*'x'" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include Autoconf TIOCGETP _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "$ac_pattern" >/dev/null 2>&1; then : ac_cv_prog_gcc_traditional=yes else ac_cv_prog_gcc_traditional=no fi rm -f conftest* if test $ac_cv_prog_gcc_traditional = no; then cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include Autoconf TCGETA _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "$ac_pattern" >/dev/null 2>&1; then : ac_cv_prog_gcc_traditional=yes fi rm -f conftest* fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_gcc_traditional" >&5 $as_echo "$ac_cv_prog_gcc_traditional" >&6; } if test $ac_cv_prog_gcc_traditional = yes; then CC="$CC -traditional" fi fi BASECC=`basename $CC` have_gcc=no if test x"$GCC" = "xyes"; then $as_echo "#define HAVE_GCC 1" >>confdefs.h have_gcc=yes fi # Extract the first word of "$CXX", so it can be a program name with args. set dummy $CXX; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_CXX+:} false; then : $as_echo_n "(cached) " >&6 else case $CXX in [\\/]* | ?:[\\/]*) ac_cv_path_CXX="$CXX" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
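# Note: unlike the optional helper programs probed later, the C++ compiler is
# required. Once $CXX has been resolved to a full path below, configure
# aborts with "Unable to find C++ compiler" if that path does not exist,
# since most of Bacula is built with the C++ compiler.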
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_CXX="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_CXX" && ac_cv_path_CXX="$CXX" ;; esac fi CXX=$ac_cv_path_CXX if test -n "$CXX"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXX" >&5 $as_echo "$CXX" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test ! -e $CXX; then as_fn_error $? "Unable to find C++ compiler" "$LINENO" 5 fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for multiarch system" >&5 $as_echo_n "checking for multiarch system... " >&6; } multiarch=`$CC $CFLAGS -print-multiarch 2>/dev/null` { $as_echo "$as_me:${as_lineno-$LINENO}: result: $multiarch" >&5 $as_echo "$multiarch" >&6; } # Find a good install program. We prefer a C program (faster), # so one script is as good as another. But avoid the broken or # incompatible versions: # SysV /etc/install, /usr/sbin/install # SunOS /usr/etc/install # IRIX /sbin/install # AIX /bin/install # AmigaOS /C/install, which installs bootblocks on floppy discs # AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag # AFS /usr/afsws/bin/install, which mishandles nonexistent args # SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff" # OS/2's system install, which has a completely different semantic # ./install, which can be erroneously created by make from ./install.sh. # Reject install programs that cannot install multiple files. { $as_echo "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5 $as_echo_n "checking for a BSD-compatible install... " >&6; } if test -z "$INSTALL"; then if ${ac_cv_path_install+:} false; then : $as_echo_n "(cached) " >&6 else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. # Account for people who put trailing slashes in PATH elements. case $as_dir/ in #(( ./ | .// | /[cC]/* | \ /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \ ?:[\\/]os2[\\/]install[\\/]* | ?:[\\/]OS2[\\/]INSTALL[\\/]* | \ /usr/ucb/* ) ;; *) # OSF1 and SCO ODT 3.0 have their own names for install. # Don't use installbsd from OSF since it installs stuff as root # by default. for ac_prog in ginstall scoinst install; do for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext"; then if test $ac_prog = install && grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then # AIX install. It has an incompatible calling convention. : elif test $ac_prog = install && grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then # program-specific install script used by HP pwplus--don't use. : else rm -rf conftest.one conftest.two conftest.dir echo one > conftest.one echo two > conftest.two mkdir conftest.dir if "$as_dir/$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir" && test -s conftest.one && test -s conftest.two && test -s conftest.dir/conftest.one && test -s conftest.dir/conftest.two then ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c" break 3 fi fi fi done done ;; esac done IFS=$as_save_IFS rm -rf conftest.one conftest.two conftest.dir fi if test "${ac_cv_path_install+set}" = set; then INSTALL=$ac_cv_path_install else # As a last resort, use the slow shell script. 
Don't cache a # value for INSTALL within a source directory, because that will # break other packages using the cache if that directory is # removed, or if the value is a relative name. INSTALL=$ac_install_sh fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $INSTALL" >&5 $as_echo "$INSTALL" >&6; } # Use test -z because SunOS4 sh mishandles braces in ${var-val}. # It thinks the first close brace ends the variable substitution. test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}' test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}' test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' # Extract the first word of "mv", so it can be a program name with args. set dummy mv; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_MV+:} false; then : $as_echo_n "(cached) " >&6 else case $MV in [\\/]* | ?:[\\/]*) ac_cv_path_MV="$MV" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_MV="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_MV" && ac_cv_path_MV="mv" ;; esac fi MV=$ac_cv_path_MV if test -n "$MV"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MV" >&5 $as_echo "$MV" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi # Extract the first word of "rm", so it can be a program name with args. set dummy rm; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_REMOVE+:} false; then : $as_echo_n "(cached) " >&6 else case $REMOVE in [\\/]* | ?:[\\/]*) ac_cv_path_REMOVE="$REMOVE" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_REMOVE="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_REMOVE" && ac_cv_path_REMOVE="rm" ;; esac fi REMOVE=$ac_cv_path_REMOVE if test -n "$REMOVE"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $REMOVE" >&5 $as_echo "$REMOVE" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi # Extract the first word of "cp", so it can be a program name with args. set dummy cp; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_CP+:} false; then : $as_echo_n "(cached) " >&6 else case $CP in [\\/]* | ?:[\\/]*) ac_cv_path_CP="$CP" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
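# Note: each of these helper-tool probes (MV, REMOVE, CP, SED, ECHO, CMP, ...)
# records an absolute path when the command is found on $PATH and otherwise
# falls back to the bare command name; the recorded value is what the rest of
# the configure run and the generated build files use. A specific binary can
# be forced by pre-setting the variable to an absolute path, e.g.
# (illustrative):
#   SED=/usr/local/bin/gsed ./configure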
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_CP="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_CP" && ac_cv_path_CP="cp" ;; esac fi CP=$ac_cv_path_CP if test -n "$CP"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CP" >&5 $as_echo "$CP" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi # Extract the first word of "sed", so it can be a program name with args. set dummy sed; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_SED+:} false; then : $as_echo_n "(cached) " >&6 else case $SED in [\\/]* | ?:[\\/]*) ac_cv_path_SED="$SED" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_SED="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_SED" && ac_cv_path_SED="sed" ;; esac fi SED=$ac_cv_path_SED if test -n "$SED"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $SED" >&5 $as_echo "$SED" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi # Extract the first word of "echo", so it can be a program name with args. set dummy echo; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_ECHO+:} false; then : $as_echo_n "(cached) " >&6 else case $ECHO in [\\/]* | ?:[\\/]*) ac_cv_path_ECHO="$ECHO" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_ECHO="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_ECHO" && ac_cv_path_ECHO="echo" ;; esac fi ECHO=$ac_cv_path_ECHO if test -n "$ECHO"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ECHO" >&5 $as_echo "$ECHO" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi # Extract the first word of "cmp", so it can be a program name with args. set dummy cmp; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_CMP+:} false; then : $as_echo_n "(cached) " >&6 else case $CMP in [\\/]* | ?:[\\/]*) ac_cv_path_CMP="$CMP" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_CMP="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_CMP" && ac_cv_path_CMP="cmp" ;; esac fi CMP=$ac_cv_path_CMP if test -n "$CMP"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CMP" >&5 $as_echo "$CMP" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi # Extract the first word of "tbl", so it can be a program name with args. set dummy tbl; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_TBL+:} false; then : $as_echo_n "(cached) " >&6 else case $TBL in [\\/]* | ?:[\\/]*) ac_cv_path_TBL="$TBL" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_TBL="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_TBL" && ac_cv_path_TBL="tbl" ;; esac fi TBL=$ac_cv_path_TBL if test -n "$TBL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $TBL" >&5 $as_echo "$TBL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi # Extract the first word of "ar", so it can be a program name with args. set dummy ar; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_AR+:} false; then : $as_echo_n "(cached) " >&6 else case $AR in [\\/]* | ?:[\\/]*) ac_cv_path_AR="$AR" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_AR="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_AR" && ac_cv_path_AR="ar" ;; esac fi AR=$ac_cv_path_AR if test -n "$AR"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AR" >&5 $as_echo "$AR" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi # Extract the first word of "openssl", so it can be a program name with args. set dummy openssl; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_OPENSSL+:} false; then : $as_echo_n "(cached) " >&6 else case $OPENSSL in [\\/]* | ?:[\\/]*) ac_cv_path_OPENSSL="$OPENSSL" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
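# Note: unlike most of these probes, openssl has no bare-name fallback; when
# no openssl binary is found on $PATH the variable is set to the literal
# string "none", which later checks can test for.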
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_OPENSSL="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_OPENSSL" && ac_cv_path_OPENSSL="none" ;; esac fi OPENSSL=$ac_cv_path_OPENSSL if test -n "$OPENSSL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OPENSSL" >&5 $as_echo "$OPENSSL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi # Extract the first word of "mtx", so it can be a program name with args. set dummy mtx; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_MTX+:} false; then : $as_echo_n "(cached) " >&6 else case $MTX in [\\/]* | ?:[\\/]*) ac_cv_path_MTX="$MTX" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_MTX="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_MTX" && ac_cv_path_MTX="mtx" ;; esac fi MTX=$ac_cv_path_MTX if test -n "$MTX"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MTX" >&5 $as_echo "$MTX" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi # Extract the first word of "dd", so it can be a program name with args. set dummy dd; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_DD+:} false; then : $as_echo_n "(cached) " >&6 else case $DD in [\\/]* | ?:[\\/]*) ac_cv_path_DD="$DD" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_DD="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_DD" && ac_cv_path_DD="dd" ;; esac fi DD=$ac_cv_path_DD if test -n "$DD"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DD" >&5 $as_echo "$DD" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi # Extract the first word of "mkisofs", so it can be a program name with args. set dummy mkisofs; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_MKISOFS+:} false; then : $as_echo_n "(cached) " >&6 else case $MKISOFS in [\\/]* | ?:[\\/]*) ac_cv_path_MKISOFS="$MKISOFS" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
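# Note: the mtx binary located above is the external medium-changer control
# program driven by the mtx-changer script; only its path is recorded here
# (falling back to the bare name "mtx"), and nothing at configure time
# verifies that an autochanger is actually attached.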
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_MKISOFS="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_MKISOFS" && ac_cv_path_MKISOFS="mkisofs" ;; esac fi MKISOFS=$ac_cv_path_MKISOFS if test -n "$MKISOFS"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MKISOFS" >&5 $as_echo "$MKISOFS" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi # Extract the first word of "python", so it can be a program name with args. set dummy python; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_PYTHON+:} false; then : $as_echo_n "(cached) " >&6 else case $PYTHON in [\\/]* | ?:[\\/]*) ac_cv_path_PYTHON="$PYTHON" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_PYTHON="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_PYTHON" && ac_cv_path_PYTHON="python" ;; esac fi PYTHON=$ac_cv_path_PYTHON if test -n "$PYTHON"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PYTHON" >&5 $as_echo "$PYTHON" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi # Extract the first word of "growisofs", so it can be a program name with args. set dummy growisofs; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_GROWISOFS+:} false; then : $as_echo_n "(cached) " >&6 else case $GROWISOFS in [\\/]* | ?:[\\/]*) ac_cv_path_GROWISOFS="$GROWISOFS" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_GROWISOFS="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_GROWISOFS" && ac_cv_path_GROWISOFS="growisofs" ;; esac fi GROWISOFS=$ac_cv_path_GROWISOFS if test -n "$GROWISOFS"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $GROWISOFS" >&5 $as_echo "$GROWISOFS" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi # Extract the first word of "dvd+rw-mediainfo", so it can be a program name with args. set dummy dvd+rw-mediainfo; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_DVDRWMEDIAINFO+:} false; then : $as_echo_n "(cached) " >&6 else case $DVDRWMEDIAINFO in [\\/]* | ?:[\\/]*) ac_cv_path_DVDRWMEDIAINFO="$DVDRWMEDIAINFO" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
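# Note: mkisofs, growisofs, dvd+rw-mediainfo and dvd+rw-format (probed just
# below) are optional optical-media helpers, and python is likewise optional;
# configure only records their paths, using the bare command name as the
# fallback when a tool is not installed.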
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_DVDRWMEDIAINFO="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_DVDRWMEDIAINFO" && ac_cv_path_DVDRWMEDIAINFO="dvd+rw-mediainfo" ;; esac fi DVDRWMEDIAINFO=$ac_cv_path_DVDRWMEDIAINFO if test -n "$DVDRWMEDIAINFO"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DVDRWMEDIAINFO" >&5 $as_echo "$DVDRWMEDIAINFO" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi # Extract the first word of "dvd+rw-format", so it can be a program name with args. set dummy dvd+rw-format; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_DVDRWFORMAT+:} false; then : $as_echo_n "(cached) " >&6 else case $DVDRWFORMAT in [\\/]* | ?:[\\/]*) ac_cv_path_DVDRWFORMAT="$DVDRWFORMAT" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_DVDRWFORMAT="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_DVDRWFORMAT" && ac_cv_path_DVDRWFORMAT="dvd+rw-format" ;; esac fi DVDRWFORMAT=$ac_cv_path_DVDRWFORMAT if test -n "$DVDRWFORMAT"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DVDRWFORMAT" >&5 $as_echo "$DVDRWFORMAT" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_cv_env_PKG_CONFIG_set" != "xset"; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}pkg-config", so it can be a program name with args. set dummy ${ac_tool_prefix}pkg-config; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_PKG_CONFIG+:} false; then : $as_echo_n "(cached) " >&6 else case $PKG_CONFIG in [\\/]* | ?:[\\/]*) ac_cv_path_PKG_CONFIG="$PKG_CONFIG" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_PKG_CONFIG="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi PKG_CONFIG=$ac_cv_path_PKG_CONFIG if test -n "$PKG_CONFIG"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PKG_CONFIG" >&5 $as_echo "$PKG_CONFIG" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_path_PKG_CONFIG"; then ac_pt_PKG_CONFIG=$PKG_CONFIG # Extract the first word of "pkg-config", so it can be a program name with args. set dummy pkg-config; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_ac_pt_PKG_CONFIG+:} false; then : $as_echo_n "(cached) " >&6 else case $ac_pt_PKG_CONFIG in [\\/]* | ?:[\\/]*) ac_cv_path_ac_pt_PKG_CONFIG="$ac_pt_PKG_CONFIG" # Let the user override the test with a path. 
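# Note: pkg-config is looked up twice, first as ${ac_tool_prefix}pkg-config
# for cross builds and then as plain pkg-config, and it is only kept if it
# reports at least version 0.9.0 via --atleast-pkgconfig-version; otherwise
# PKG_CONFIG is reset to an empty string.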
;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_ac_pt_PKG_CONFIG="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi ac_pt_PKG_CONFIG=$ac_cv_path_ac_pt_PKG_CONFIG if test -n "$ac_pt_PKG_CONFIG"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_pt_PKG_CONFIG" >&5 $as_echo "$ac_pt_PKG_CONFIG" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_pt_PKG_CONFIG" = x; then PKG_CONFIG="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac PKG_CONFIG=$ac_pt_PKG_CONFIG fi else PKG_CONFIG="$ac_cv_path_PKG_CONFIG" fi fi if test -n "$PKG_CONFIG"; then _pkg_min_version=0.9.0 { $as_echo "$as_me:${as_lineno-$LINENO}: checking pkg-config is at least version $_pkg_min_version" >&5 $as_echo_n "checking pkg-config is at least version $_pkg_min_version... " >&6; } if $PKG_CONFIG --atleast-pkgconfig-version $_pkg_min_version; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } PKG_CONFIG="" fi fi # Extract the first word of "qmake", so it can be a program name with args. set dummy qmake; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_QMAKE+:} false; then : $as_echo_n "(cached) " >&6 else case $QMAKE in [\\/]* | ?:[\\/]*) ac_cv_path_QMAKE="$QMAKE" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_QMAKE="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_QMAKE" && ac_cv_path_QMAKE="none" ;; esac fi QMAKE=$ac_cv_path_QMAKE if test -n "$QMAKE"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $QMAKE" >&5 $as_echo "$QMAKE" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi # Extract the first word of "gmake", so it can be a program name with args. set dummy gmake; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_GMAKE+:} false; then : $as_echo_n "(cached) " >&6 else case $GMAKE in [\\/]* | ?:[\\/]*) ac_cv_path_GMAKE="$GMAKE" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
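# Note: qmake (above) and gmake both default to the literal value "none"
# when not found. qmake is only needed when the Qt based bat console is
# being built, and gmake is typically used where the system make is not
# GNU make; neither is required for a minimal build.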
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_GMAKE="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_GMAKE" && ac_cv_path_GMAKE="none" ;; esac fi GMAKE=$ac_cv_path_GMAKE if test -n "$GMAKE"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $GMAKE" >&5 $as_echo "$GMAKE" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi # Extract the first word of "pidof", so it can be a program name with args. set dummy pidof; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_PIDOF+:} false; then : $as_echo_n "(cached) " >&6 else case $PIDOF in [\\/]* | ?:[\\/]*) ac_cv_path_PIDOF="$PIDOF" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_PIDOF="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_PIDOF" && ac_cv_path_PIDOF="pidof" ;; esac fi PIDOF=$ac_cv_path_PIDOF if test -n "$PIDOF"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PIDOF" >&5 $as_echo "$PIDOF" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi for ac_prog in gawk mawk nawk awk do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_AWK+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$AWK"; then ac_cv_prog_AWK="$AWK" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_AWK="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi AWK=$ac_cv_prog_AWK if test -n "$AWK"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5 $as_echo "$AWK" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$AWK" && break done # Some AWK programs fail, so test it and warn the user if echo xfoo | $AWK 'BEGIN { prog=ARGV1; ARGC=1 } { if ((prog == $2) || (("(" prog ")") == $2) || (("" prog "") == $2) || ((prog ":") == $2)) { print $1 ; exit 0 } }' xfoo>/dev/null; then :; else as_fn_error $? "!!!!!!!!! WARNING !!!!!!!!!!!!!! The regex engine of $AWK is too broken to be used you might want to install GNU AWK. !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" "$LINENO" 5 fi THE_AWK=$AWK # Extract the first word of "$THE_AWK", so it can be a program name with args. set dummy $THE_AWK; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_AWK+:} false; then : $as_echo_n "(cached) " >&6 else case $AWK in [\\/]* | ?:[\\/]*) ac_cv_path_AWK="$AWK" # Let the user override the test with a path. 
;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_AWK="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_AWK" && ac_cv_path_AWK="$THE_AWK" ;; esac fi AWK=$ac_cv_path_AWK if test -n "$AWK"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5 $as_echo "$AWK" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ARFLAG" || ARFLAGS="cr" MAKE_SHELL=/bin/sh do_check_dmsg=no # Check whether --enable-check-dmsg was given. if test "${enable_check_dmsg+set}" = set; then : enableval=$enable_check_dmsg; if test x$enableval = xyes; then do_check_dmsg=yes fi fi if test x$do_check_dmsg = xyes; then DO_CHECK_DMSG= CHECK_DMSG=$PWD/scripts/check_dmsg else DO_CHECK_DMSG=true CHECK_DMSG=true fi use_libtool=yes # Check whether --enable-libtool was given. if test "${enable_libtool+set}" = set; then : enableval=$enable_libtool; if test x$enableval = xno; then use_libtool=no fi fi case `pwd` in *\ * | *\ *) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&5 $as_echo "$as_me: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&2;} ;; esac macro_version='2.4.2' macro_revision='1.3337' ltmain="$ac_aux_dir/ltmain.sh" # Make sure we can run config.sub. $SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 || as_fn_error $? "cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5 { $as_echo "$as_me:${as_lineno-$LINENO}: checking build system type" >&5 $as_echo_n "checking build system type... " >&6; } if ${ac_cv_build+:} false; then : $as_echo_n "(cached) " >&6 else ac_build_alias=$build_alias test "x$ac_build_alias" = x && ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"` test "x$ac_build_alias" = x && as_fn_error $? "cannot guess build type; you must specify one" "$LINENO" 5 ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` || as_fn_error $? "$SHELL $ac_aux_dir/config.sub $ac_build_alias failed" "$LINENO" 5 fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5 $as_echo "$ac_cv_build" >&6; } case $ac_cv_build in *-*-*) ;; *) as_fn_error $? "invalid value of canonical build" "$LINENO" 5;; esac build=$ac_cv_build ac_save_IFS=$IFS; IFS='-' set x $ac_cv_build shift build_cpu=$1 build_vendor=$2 shift; shift # Remember, the first character of IFS is used to create $*, # except with old shells: build_os=$* IFS=$ac_save_IFS case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking host system type" >&5 $as_echo_n "checking host system type... " >&6; } if ${ac_cv_host+:} false; then : $as_echo_n "(cached) " >&6 else if test "x$host_alias" = x; then ac_cv_host=$ac_cv_build else ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` || as_fn_error $? "$SHELL $ac_aux_dir/config.sub $host_alias failed" "$LINENO" 5 fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_host" >&5 $as_echo "$ac_cv_host" >&6; } case $ac_cv_host in *-*-*) ;; *) as_fn_error $? 
"invalid value of canonical host" "$LINENO" 5;; esac host=$ac_cv_host ac_save_IFS=$IFS; IFS='-' set x $ac_cv_host shift host_cpu=$1 host_vendor=$2 shift; shift # Remember, the first character of IFS is used to create $*, # except with old shells: host_os=$* IFS=$ac_save_IFS case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac # Backslashify metacharacters that are still active within # double-quoted strings. sed_quote_subst='s/\(["`$\\]\)/\\\1/g' # Same as above, but do not quote variable references. double_quote_subst='s/\(["`\\]\)/\\\1/g' # Sed substitution to delay expansion of an escaped shell variable in a # double_quote_subst'ed string. delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' # Sed substitution to delay expansion of an escaped single quote. delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g' # Sed substitution to avoid accidental globbing in evaled expressions no_glob_subst='s/\*/\\\*/g' ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to print strings" >&5 $as_echo_n "checking how to print strings... " >&6; } # Test print first, because it will be a builtin if present. if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \ test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then ECHO='print -r --' elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then ECHO='printf %s\n' else # Use this function as a fallback that always works. func_fallback_echo () { eval 'cat <<_LTECHO_EOF $1 _LTECHO_EOF' } ECHO='func_fallback_echo' fi # func_echo_all arg... # Invoke $ECHO with all args, space-separated. func_echo_all () { $ECHO "" } case "$ECHO" in printf*) { $as_echo "$as_me:${as_lineno-$LINENO}: result: printf" >&5 $as_echo "printf" >&6; } ;; print*) { $as_echo "$as_me:${as_lineno-$LINENO}: result: print -r" >&5 $as_echo "print -r" >&6; } ;; *) { $as_echo "$as_me:${as_lineno-$LINENO}: result: cat" >&5 $as_echo "cat" >&6; } ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking for a sed that does not truncate output" >&5 $as_echo_n "checking for a sed that does not truncate output... " >&6; } if ${ac_cv_path_SED+:} false; then : $as_echo_n "(cached) " >&6 else ac_script=s/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb/ for ac_i in 1 2 3 4 5 6 7; do ac_script="$ac_script$as_nl$ac_script" done echo "$ac_script" 2>/dev/null | sed 99q >conftest.sed { ac_script=; unset ac_script;} if test -z "$SED"; then ac_path_SED_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in sed gsed; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_SED="$as_dir/$ac_prog$ac_exec_ext" as_fn_executable_p "$ac_path_SED" || continue # Check for GNU ac_path_SED and select it if it is found. 
# Check for GNU $ac_path_SED case `"$ac_path_SED" --version 2>&1` in *GNU*) ac_cv_path_SED="$ac_path_SED" ac_path_SED_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo '' >> "conftest.nl" "$ac_path_SED" -f conftest.sed < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break as_fn_arith $ac_count + 1 && ac_count=$as_val if test $ac_count -gt ${ac_path_SED_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_SED="$ac_path_SED" ac_path_SED_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_SED_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_SED"; then as_fn_error $? "no acceptable sed could be found in \$PATH" "$LINENO" 5 fi else ac_cv_path_SED=$SED fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_SED" >&5 $as_echo "$ac_cv_path_SED" >&6; } SED="$ac_cv_path_SED" rm -f conftest.sed test -z "$SED" && SED=sed Xsed="$SED -e 1s/^X//" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for fgrep" >&5 $as_echo_n "checking for fgrep... " >&6; } if ${ac_cv_path_FGREP+:} false; then : $as_echo_n "(cached) " >&6 else if echo 'ab*c' | $GREP -F 'ab*c' >/dev/null 2>&1 then ac_cv_path_FGREP="$GREP -F" else if test -z "$FGREP"; then ac_path_FGREP_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in fgrep; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_FGREP="$as_dir/$ac_prog$ac_exec_ext" as_fn_executable_p "$ac_path_FGREP" || continue # Check for GNU ac_path_FGREP and select it if it is found. # Check for GNU $ac_path_FGREP case `"$ac_path_FGREP" --version 2>&1` in *GNU*) ac_cv_path_FGREP="$ac_path_FGREP" ac_path_FGREP_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo 'FGREP' >> "conftest.nl" "$ac_path_FGREP" FGREP < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break as_fn_arith $ac_count + 1 && ac_count=$as_val if test $ac_count -gt ${ac_path_FGREP_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_FGREP="$ac_path_FGREP" ac_path_FGREP_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_FGREP_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_FGREP"; then as_fn_error $? "no acceptable fgrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 fi else ac_cv_path_FGREP=$FGREP fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_FGREP" >&5 $as_echo "$ac_cv_path_FGREP" >&6; } FGREP="$ac_cv_path_FGREP" test -z "$GREP" && GREP=grep # Check whether --with-gnu-ld was given. if test "${with_gnu_ld+set}" = set; then : withval=$with_gnu_ld; test "$withval" = no || with_gnu_ld=yes else with_gnu_ld=no fi ac_prog=ld if test "$GCC" = yes; then # Check if gcc -print-prog-name=ld gives a path. 
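# NOTE: a brief sketch of the linker probe that follows.  With a GNU C
# compiler, configure asks the compiler which ld it would run
# ($CC -print-prog-name=ld) and canonicalizes an absolute answer; a relative
# or empty answer falls back to a PATH search that prefers GNU or non-GNU ld
# according to --with-gnu-ld.  The result is cached in lt_cv_path_LD and can
# be pinned up front, e.g. (path illustrative only):
#
#   ./configure --with-gnu-ld LD=/usr/bin/ld.gold
#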
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5 $as_echo_n "checking for ld used by $CC... " >&6; } case $host in *-*-mingw*) # gcc leaves a trailing carriage return which upsets mingw ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; *) ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; esac case $ac_prog in # Accept absolute paths. [\\/]* | ?:[\\/]*) re_direlt='/[^/][^/]*/\.\./' # Canonicalize the pathname of ld ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'` while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"` done test -z "$LD" && LD="$ac_prog" ;; "") # If it fails, then pretend we aren't using GCC. ac_prog=ld ;; *) # If it is relative, then search for the first ld in PATH. with_gnu_ld=unknown ;; esac elif test "$with_gnu_ld" = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5 $as_echo_n "checking for GNU ld... " >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5 $as_echo_n "checking for non-GNU ld... " >&6; } fi if ${lt_cv_path_LD+:} false; then : $as_echo_n "(cached) " >&6 else if test -z "$LD"; then lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR for ac_dir in $PATH; do IFS="$lt_save_ifs" test -z "$ac_dir" && ac_dir=. if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then lt_cv_path_LD="$ac_dir/$ac_prog" # Check to see if the program is GNU ld. I'd rather use --version, # but apparently some variants of GNU ld only accept -v. # Break only if it was the GNU/non-GNU ld that we prefer. case `"$lt_cv_path_LD" -v 2>&1 &5 $as_echo "$LD" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -z "$LD" && as_fn_error $? "no acceptable ld found in \$PATH" "$LINENO" 5 { $as_echo "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5 $as_echo_n "checking if the linker ($LD) is GNU ld... " >&6; } if ${lt_cv_prog_gnu_ld+:} false; then : $as_echo_n "(cached) " >&6 else # I'd rather use --version here, but apparently some GNU lds only accept -v. case `$LD -v 2>&1 &5 $as_echo "$lt_cv_prog_gnu_ld" >&6; } with_gnu_ld=$lt_cv_prog_gnu_ld { $as_echo "$as_me:${as_lineno-$LINENO}: checking for BSD- or MS-compatible name lister (nm)" >&5 $as_echo_n "checking for BSD- or MS-compatible name lister (nm)... " >&6; } if ${lt_cv_path_NM+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$NM"; then # Let the user override the test. lt_cv_path_NM="$NM" else lt_nm_to_check="${ac_tool_prefix}nm" if test -n "$ac_tool_prefix" && test "$build" = "$host"; then lt_nm_to_check="$lt_nm_to_check nm" fi for lt_tmp_nm in $lt_nm_to_check; do lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do IFS="$lt_save_ifs" test -z "$ac_dir" && ac_dir=. tmp_nm="$ac_dir/$lt_tmp_nm" if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext" ; then # Check to see if the nm accepts a BSD-compat flag. 
# Adding the `sed 1q' prevents false positives on HP-UX, which says: # nm: unknown option "B" ignored # Tru64's nm complains that /dev/null is an invalid object file case `"$tmp_nm" -B /dev/null 2>&1 | sed '1q'` in */dev/null* | *'Invalid file or object type'*) lt_cv_path_NM="$tmp_nm -B" break ;; *) case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in */dev/null*) lt_cv_path_NM="$tmp_nm -p" break ;; *) lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but continue # so that we can try to find one that supports BSD flags ;; esac ;; esac fi done IFS="$lt_save_ifs" done : ${lt_cv_path_NM=no} fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_NM" >&5 $as_echo "$lt_cv_path_NM" >&6; } if test "$lt_cv_path_NM" != "no"; then NM="$lt_cv_path_NM" else # Didn't find any BSD compatible name lister, look for dumpbin. if test -n "$DUMPBIN"; then : # Let the user override the test. else if test -n "$ac_tool_prefix"; then for ac_prog in dumpbin "link -dump" do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_DUMPBIN+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$DUMPBIN"; then ac_cv_prog_DUMPBIN="$DUMPBIN" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_DUMPBIN="$ac_tool_prefix$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi DUMPBIN=$ac_cv_prog_DUMPBIN if test -n "$DUMPBIN"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DUMPBIN" >&5 $as_echo "$DUMPBIN" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$DUMPBIN" && break done fi if test -z "$DUMPBIN"; then ac_ct_DUMPBIN=$DUMPBIN for ac_prog in dumpbin "link -dump" do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_DUMPBIN+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_DUMPBIN"; then ac_cv_prog_ac_ct_DUMPBIN="$ac_ct_DUMPBIN" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_DUMPBIN="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_DUMPBIN=$ac_cv_prog_ac_ct_DUMPBIN if test -n "$ac_ct_DUMPBIN"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DUMPBIN" >&5 $as_echo "$ac_ct_DUMPBIN" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ac_ct_DUMPBIN" && break done if test "x$ac_ct_DUMPBIN" = x; then DUMPBIN=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac DUMPBIN=$ac_ct_DUMPBIN fi fi case `$DUMPBIN -symbols /dev/null 2>&1 | sed '1q'` in *COFF*) DUMPBIN="$DUMPBIN -symbols" ;; *) DUMPBIN=: ;; esac fi if test "$DUMPBIN" != ":"; then NM="$DUMPBIN" fi fi test -z "$NM" && NM=nm { $as_echo "$as_me:${as_lineno-$LINENO}: checking the name lister ($NM) interface" >&5 $as_echo_n "checking the name lister ($NM) interface... " >&6; } if ${lt_cv_nm_interface+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_nm_interface="BSD nm" echo "int some_variable = 0;" > conftest.$ac_ext (eval echo "\"\$as_me:$LINENO: $ac_compile\"" >&5) (eval "$ac_compile" 2>conftest.err) cat conftest.err >&5 (eval echo "\"\$as_me:$LINENO: $NM \\\"conftest.$ac_objext\\\"\"" >&5) (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out) cat conftest.err >&5 (eval echo "\"\$as_me:$LINENO: output\"" >&5) cat conftest.out >&5 if $GREP 'External.*some_variable' conftest.out > /dev/null; then lt_cv_nm_interface="MS dumpbin" fi rm -f conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_nm_interface" >&5 $as_echo "$lt_cv_nm_interface" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ln -s works" >&5 $as_echo_n "checking whether ln -s works... " >&6; } LN_S=$as_ln_s if test "$LN_S" = "ln -s"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no, using $LN_S" >&5 $as_echo "no, using $LN_S" >&6; } fi # find the maximum length of command line arguments { $as_echo "$as_me:${as_lineno-$LINENO}: checking the maximum length of command line arguments" >&5 $as_echo_n "checking the maximum length of command line arguments... " >&6; } if ${lt_cv_sys_max_cmd_len+:} false; then : $as_echo_n "(cached) " >&6 else i=0 teststring="ABCD" case $build_os in msdosdjgpp*) # On DJGPP, this test can blow up pretty badly due to problems in libc # (any single argument exceeding 2000 bytes causes a buffer overrun # during glob expansion). Even if it were fixed, the result of this # check would be larger than it should be. lt_cv_sys_max_cmd_len=12288; # 12K is about right ;; gnu*) # Under GNU Hurd, this test is not required because there is # no limit to the length of command line arguments. # Libtool will interpret -1 as no limit whatsoever lt_cv_sys_max_cmd_len=-1; ;; cygwin* | mingw* | cegcc*) # On Win9x/ME, this test blows up -- it succeeds, but takes # about 5 minutes as the teststring grows exponentially. # Worse, since 9x/ME are not pre-emptively multitasking, # you end up with a "frozen" computer, even though with patience # the test eventually succeeds (with a max line length of 256k). 
# Instead, let's just punt: use the minimum linelength reported by # all of the supported platforms: 8192 (on NT/2K/XP). lt_cv_sys_max_cmd_len=8192; ;; mint*) # On MiNT this can take a long time and run out of memory. lt_cv_sys_max_cmd_len=8192; ;; amigaos*) # On AmigaOS with pdksh, this test takes hours, literally. # So we just punt and use a minimum line length of 8192. lt_cv_sys_max_cmd_len=8192; ;; netbsd* | freebsd* | openbsd* | darwin* | dragonfly*) # This has been around since 386BSD, at least. Likely further. if test -x /sbin/sysctl; then lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax` elif test -x /usr/sbin/sysctl; then lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax` else lt_cv_sys_max_cmd_len=65536 # usable default for all BSDs fi # And add a safety zone lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` ;; interix*) # We know the value 262144 and hardcode it with a safety zone (like BSD) lt_cv_sys_max_cmd_len=196608 ;; os2*) # The test takes a long time on OS/2. lt_cv_sys_max_cmd_len=8192 ;; osf*) # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not # nice to cause kernel panics so lets avoid the loop below. # First set a reasonable default. lt_cv_sys_max_cmd_len=16384 # if test -x /sbin/sysconfig; then case `/sbin/sysconfig -q proc exec_disable_arg_limit` in *1*) lt_cv_sys_max_cmd_len=-1 ;; esac fi ;; sco3.2v5*) lt_cv_sys_max_cmd_len=102400 ;; sysv5* | sco5v6* | sysv4.2uw2*) kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null` if test -n "$kargmax"; then lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[ ]//'` else lt_cv_sys_max_cmd_len=32768 fi ;; *) lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null` if test -n "$lt_cv_sys_max_cmd_len" && \ test undefined != "$lt_cv_sys_max_cmd_len"; then lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` else # Make teststring a little bigger before we do anything with it. # a 1K string should be a reasonable start. for i in 1 2 3 4 5 6 7 8 ; do teststring=$teststring$teststring done SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}} # If test is not a shell built-in, we'll probably end up computing a # maximum length that is only half of the actual maximum length, but # we can't tell. while { test "X"`env echo "$teststring$teststring" 2>/dev/null` \ = "X$teststring$teststring"; } >/dev/null 2>&1 && test $i != 17 # 1/2 MB should be enough do i=`expr $i + 1` teststring=$teststring$teststring done # Only check the string length outside the loop. lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1` teststring= # Add a significant safety factor because C++ compilers can tack on # massive amounts of additional arguments before passing them to the # linker. It appears as though 1/2 is a usable value. lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2` fi ;; esac fi if test -n $lt_cv_sys_max_cmd_len ; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sys_max_cmd_len" >&5 $as_echo "$lt_cv_sys_max_cmd_len" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: none" >&5 $as_echo "none" >&6; } fi max_cmd_len=$lt_cv_sys_max_cmd_len : ${CP="cp -f"} : ${MV="mv -f"} : ${RM="rm -f"} { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the shell understands some XSI constructs" >&5 $as_echo_n "checking whether the shell understands some XSI constructs... 
" >&6; } # Try some XSI features xsi_shell=no ( _lt_dummy="a/b/c" test "${_lt_dummy##*/},${_lt_dummy%/*},${_lt_dummy#??}"${_lt_dummy%"$_lt_dummy"}, \ = c,a/b,b/c, \ && eval 'test $(( 1 + 1 )) -eq 2 \ && test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \ && xsi_shell=yes { $as_echo "$as_me:${as_lineno-$LINENO}: result: $xsi_shell" >&5 $as_echo "$xsi_shell" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the shell understands \"+=\"" >&5 $as_echo_n "checking whether the shell understands \"+=\"... " >&6; } lt_shell_append=no ( foo=bar; set foo baz; eval "$1+=\$2" && test "$foo" = barbaz ) \ >/dev/null 2>&1 \ && lt_shell_append=yes { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_shell_append" >&5 $as_echo "$lt_shell_append" >&6; } if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then lt_unset=unset else lt_unset=false fi # test EBCDIC or ASCII case `echo X|tr X '\101'` in A) # ASCII based system # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr lt_SP2NL='tr \040 \012' lt_NL2SP='tr \015\012 \040\040' ;; *) # EBCDIC based system lt_SP2NL='tr \100 \n' lt_NL2SP='tr \r\n \100\100' ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to $host format" >&5 $as_echo_n "checking how to convert $build file names to $host format... " >&6; } if ${lt_cv_to_host_file_cmd+:} false; then : $as_echo_n "(cached) " >&6 else case $host in *-*-mingw* ) case $build in *-*-mingw* ) # actually msys lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32 ;; *-*-cygwin* ) lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32 ;; * ) # otherwise, assume *nix lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32 ;; esac ;; *-*-cygwin* ) case $build in *-*-mingw* ) # actually msys lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin ;; *-*-cygwin* ) lt_cv_to_host_file_cmd=func_convert_file_noop ;; * ) # otherwise, assume *nix lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin ;; esac ;; * ) # unhandled hosts (and "normal" native builds) lt_cv_to_host_file_cmd=func_convert_file_noop ;; esac fi to_host_file_cmd=$lt_cv_to_host_file_cmd { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_host_file_cmd" >&5 $as_echo "$lt_cv_to_host_file_cmd" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to toolchain format" >&5 $as_echo_n "checking how to convert $build file names to toolchain format... " >&6; } if ${lt_cv_to_tool_file_cmd+:} false; then : $as_echo_n "(cached) " >&6 else #assume ordinary cross tools, or native build. lt_cv_to_tool_file_cmd=func_convert_file_noop case $host in *-*-mingw* ) case $build in *-*-mingw* ) # actually msys lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32 ;; esac ;; esac fi to_tool_file_cmd=$lt_cv_to_tool_file_cmd { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_tool_file_cmd" >&5 $as_echo "$lt_cv_to_tool_file_cmd" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $LD option to reload object files" >&5 $as_echo_n "checking for $LD option to reload object files... 
" >&6; } if ${lt_cv_ld_reload_flag+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_ld_reload_flag='-r' fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_reload_flag" >&5 $as_echo "$lt_cv_ld_reload_flag" >&6; } reload_flag=$lt_cv_ld_reload_flag case $reload_flag in "" | " "*) ;; *) reload_flag=" $reload_flag" ;; esac reload_cmds='$LD$reload_flag -o $output$reload_objs' case $host_os in cygwin* | mingw* | pw32* | cegcc*) if test "$GCC" != yes; then reload_cmds=false fi ;; darwin*) if test "$GCC" = yes; then reload_cmds='$LTCC $LTCFLAGS -nostdlib ${wl}-r -o $output$reload_objs' else reload_cmds='$LD$reload_flag -o $output$reload_objs' fi ;; esac if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}objdump", so it can be a program name with args. set dummy ${ac_tool_prefix}objdump; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_OBJDUMP+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$OBJDUMP"; then ac_cv_prog_OBJDUMP="$OBJDUMP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_OBJDUMP="${ac_tool_prefix}objdump" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi OBJDUMP=$ac_cv_prog_OBJDUMP if test -n "$OBJDUMP"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OBJDUMP" >&5 $as_echo "$OBJDUMP" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_OBJDUMP"; then ac_ct_OBJDUMP=$OBJDUMP # Extract the first word of "objdump", so it can be a program name with args. set dummy objdump; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_OBJDUMP+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_OBJDUMP"; then ac_cv_prog_ac_ct_OBJDUMP="$ac_ct_OBJDUMP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_OBJDUMP="objdump" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_OBJDUMP=$ac_cv_prog_ac_ct_OBJDUMP if test -n "$ac_ct_OBJDUMP"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OBJDUMP" >&5 $as_echo "$ac_ct_OBJDUMP" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_OBJDUMP" = x; then OBJDUMP="false" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac OBJDUMP=$ac_ct_OBJDUMP fi else OBJDUMP="$ac_cv_prog_OBJDUMP" fi test -z "$OBJDUMP" && OBJDUMP=objdump { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to recognize dependent libraries" >&5 $as_echo_n "checking how to recognize dependent libraries... 
" >&6; } if ${lt_cv_deplibs_check_method+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_file_magic_cmd='$MAGIC_CMD' lt_cv_file_magic_test_file= lt_cv_deplibs_check_method='unknown' # Need to set the preceding variable on all platforms that support # interlibrary dependencies. # 'none' -- dependencies not supported. # `unknown' -- same as none, but documents that we really don't know. # 'pass_all' -- all dependencies passed with no checks. # 'test_compile' -- check by making test program. # 'file_magic [[regex]]' -- check by looking for files in library path # which responds to the $file_magic_cmd with a given extended regex. # If you have `file' or equivalent on your system and you're not sure # whether `pass_all' will *always* work, you probably want this one. case $host_os in aix[4-9]*) lt_cv_deplibs_check_method=pass_all ;; beos*) lt_cv_deplibs_check_method=pass_all ;; bsdi[45]*) lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib)' lt_cv_file_magic_cmd='/usr/bin/file -L' lt_cv_file_magic_test_file=/shlib/libc.so ;; cygwin*) # func_win32_libid is a shell function defined in ltmain.sh lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' lt_cv_file_magic_cmd='func_win32_libid' ;; mingw* | pw32*) # Base MSYS/MinGW do not provide the 'file' command needed by # func_win32_libid shell function, so use a weaker test based on 'objdump', # unless we find 'file', for example because we are cross-compiling. # func_win32_libid assumes BSD nm, so disallow it if using MS dumpbin. if ( test "$lt_cv_nm_interface" = "BSD nm" && file / ) >/dev/null 2>&1; then lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' lt_cv_file_magic_cmd='func_win32_libid' else # Keep this pattern in sync with the one in func_win32_libid. lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' lt_cv_file_magic_cmd='$OBJDUMP -f' fi ;; cegcc*) # use the weaker test based on 'objdump'. See mingw*. lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?' lt_cv_file_magic_cmd='$OBJDUMP -f' ;; darwin* | rhapsody*) lt_cv_deplibs_check_method=pass_all ;; freebsd* | dragonfly*) if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then case $host_cpu in i*86 ) # Not sure whether the presence of OpenBSD here was a mistake. # Let's accept both of them until this is cleared up. lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[3-9]86 (compact )?demand paged shared library' lt_cv_file_magic_cmd=/usr/bin/file lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*` ;; esac else lt_cv_deplibs_check_method=pass_all fi ;; haiku*) lt_cv_deplibs_check_method=pass_all ;; hpux10.20* | hpux11*) lt_cv_file_magic_cmd=/usr/bin/file case $host_cpu in ia64*) lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - IA64' lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so ;; hppa*64*) lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF[ -][0-9][0-9])(-bit)?( [LM]SB)? 
shared object( file)?[, -]* PA-RISC [0-9]\.[0-9]' lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl ;; *) lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|PA-RISC[0-9]\.[0-9]) shared library' lt_cv_file_magic_test_file=/usr/lib/libc.sl ;; esac ;; interix[3-9]*) # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|\.a)$' ;; irix5* | irix6* | nonstopux*) case $LD in *-32|*"-32 ") libmagic=32-bit;; *-n32|*"-n32 ") libmagic=N32;; *-64|*"-64 ") libmagic=64-bit;; *) libmagic=never-match;; esac lt_cv_deplibs_check_method=pass_all ;; # This must be glibc/ELF. linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) lt_cv_deplibs_check_method=pass_all ;; netbsd* | netbsdelf*-gnu) if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' else lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|_pic\.a)$' fi ;; newos6*) lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (executable|dynamic lib)' lt_cv_file_magic_cmd=/usr/bin/file lt_cv_file_magic_test_file=/usr/lib/libnls.so ;; *nto* | *qnx*) lt_cv_deplibs_check_method=pass_all ;; openbsd*) if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|\.so|_pic\.a)$' else lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' fi ;; osf3* | osf4* | osf5*) lt_cv_deplibs_check_method=pass_all ;; rdos*) lt_cv_deplibs_check_method=pass_all ;; solaris*) lt_cv_deplibs_check_method=pass_all ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) lt_cv_deplibs_check_method=pass_all ;; sysv4 | sysv4.3*) case $host_vendor in motorola) lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib) M[0-9][0-9]* Version [0-9]' lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*` ;; ncr) lt_cv_deplibs_check_method=pass_all ;; sequent) lt_cv_file_magic_cmd='/bin/file' lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [LM]SB (shared object|dynamic lib )' ;; sni) lt_cv_file_magic_cmd='/bin/file' lt_cv_deplibs_check_method="file_magic ELF [0-9][0-9]*-bit [LM]SB dynamic lib" lt_cv_file_magic_test_file=/lib/libc.so ;; siemens) lt_cv_deplibs_check_method=pass_all ;; pc) lt_cv_deplibs_check_method=pass_all ;; esac ;; tpf*) lt_cv_deplibs_check_method=pass_all ;; esac fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_deplibs_check_method" >&5 $as_echo "$lt_cv_deplibs_check_method" >&6; } file_magic_glob= want_nocaseglob=no if test "$build" = "$host"; then case $host_os in mingw* | pw32*) if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then want_nocaseglob=yes else file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[\1]\/[\1]\/g;/g"` fi ;; esac fi file_magic_cmd=$lt_cv_file_magic_cmd deplibs_check_method=$lt_cv_deplibs_check_method test -z "$deplibs_check_method" && deplibs_check_method=unknown if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}dlltool", so it can be a program name with args. set dummy ${ac_tool_prefix}dlltool; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... 
" >&6; } if ${ac_cv_prog_DLLTOOL+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$DLLTOOL"; then ac_cv_prog_DLLTOOL="$DLLTOOL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_DLLTOOL="${ac_tool_prefix}dlltool" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi DLLTOOL=$ac_cv_prog_DLLTOOL if test -n "$DLLTOOL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DLLTOOL" >&5 $as_echo "$DLLTOOL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_DLLTOOL"; then ac_ct_DLLTOOL=$DLLTOOL # Extract the first word of "dlltool", so it can be a program name with args. set dummy dlltool; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_DLLTOOL+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_DLLTOOL"; then ac_cv_prog_ac_ct_DLLTOOL="$ac_ct_DLLTOOL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_DLLTOOL="dlltool" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_DLLTOOL=$ac_cv_prog_ac_ct_DLLTOOL if test -n "$ac_ct_DLLTOOL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DLLTOOL" >&5 $as_echo "$ac_ct_DLLTOOL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_DLLTOOL" = x; then DLLTOOL="false" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac DLLTOOL=$ac_ct_DLLTOOL fi else DLLTOOL="$ac_cv_prog_DLLTOOL" fi test -z "$DLLTOOL" && DLLTOOL=dlltool { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to associate runtime and link libraries" >&5 $as_echo_n "checking how to associate runtime and link libraries... " >&6; } if ${lt_cv_sharedlib_from_linklib_cmd+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_sharedlib_from_linklib_cmd='unknown' case $host_os in cygwin* | mingw* | pw32* | cegcc*) # two different shell functions defined in ltmain.sh # decide which to use based on capabilities of $DLLTOOL case `$DLLTOOL --help 2>&1` in *--identify-strict*) lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib ;; *) lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback ;; esac ;; *) # fallback: assume linklib IS sharedlib lt_cv_sharedlib_from_linklib_cmd="$ECHO" ;; esac fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sharedlib_from_linklib_cmd" >&5 $as_echo "$lt_cv_sharedlib_from_linklib_cmd" >&6; } sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO if test -n "$ac_tool_prefix"; then for ac_prog in ar do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. 
set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_AR+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$AR"; then ac_cv_prog_AR="$AR" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_AR="$ac_tool_prefix$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi AR=$ac_cv_prog_AR if test -n "$AR"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AR" >&5 $as_echo "$AR" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$AR" && break done fi if test -z "$AR"; then ac_ct_AR=$AR for ac_prog in ar do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_AR+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_AR"; then ac_cv_prog_ac_ct_AR="$ac_ct_AR" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_AR="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_AR=$ac_cv_prog_ac_ct_AR if test -n "$ac_ct_AR"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_AR" >&5 $as_echo "$ac_ct_AR" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ac_ct_AR" && break done if test "x$ac_ct_AR" = x; then AR="false" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac AR=$ac_ct_AR fi fi : ${AR=ar} : ${AR_FLAGS=cru} { $as_echo "$as_me:${as_lineno-$LINENO}: checking for archiver @FILE support" >&5 $as_echo_n "checking for archiver @FILE support... " >&6; } if ${lt_cv_ar_at_file+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_ar_at_file=no cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : echo conftest.$ac_objext > conftest.lst lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&5' { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 (eval $lt_ar_try) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } if test "$ac_status" -eq 0; then # Ensure the archiver fails upon bogus file names. rm -f conftest.$ac_objext libconftest.a { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 (eval $lt_ar_try) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 test $ac_status = 0; } if test "$ac_status" -ne 0; then lt_cv_ar_at_file=@ fi fi rm -f conftest.* libconftest.a fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ar_at_file" >&5 $as_echo "$lt_cv_ar_at_file" >&6; } if test "x$lt_cv_ar_at_file" = xno; then archiver_list_spec= else archiver_list_spec=$lt_cv_ar_at_file fi if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. set dummy ${ac_tool_prefix}strip; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_STRIP+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$STRIP"; then ac_cv_prog_STRIP="$STRIP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_STRIP="${ac_tool_prefix}strip" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi STRIP=$ac_cv_prog_STRIP if test -n "$STRIP"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5 $as_echo "$STRIP" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_STRIP"; then ac_ct_STRIP=$STRIP # Extract the first word of "strip", so it can be a program name with args. set dummy strip; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_STRIP+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_STRIP"; then ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_STRIP="strip" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP if test -n "$ac_ct_STRIP"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5 $as_echo "$ac_ct_STRIP" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_STRIP" = x; then STRIP=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac STRIP=$ac_ct_STRIP fi else STRIP="$ac_cv_prog_STRIP" fi test -z "$STRIP" && STRIP=: if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args. set dummy ${ac_tool_prefix}ranlib; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_RANLIB+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$RANLIB"; then ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi RANLIB=$ac_cv_prog_RANLIB if test -n "$RANLIB"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $RANLIB" >&5 $as_echo "$RANLIB" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_RANLIB"; then ac_ct_RANLIB=$RANLIB # Extract the first word of "ranlib", so it can be a program name with args. set dummy ranlib; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_RANLIB+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_RANLIB"; then ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_RANLIB="ranlib" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB if test -n "$ac_ct_RANLIB"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_RANLIB" >&5 $as_echo "$ac_ct_RANLIB" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_RANLIB" = x; then RANLIB=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac RANLIB=$ac_ct_RANLIB fi else RANLIB="$ac_cv_prog_RANLIB" fi test -z "$RANLIB" && RANLIB=: # Determine commands to create old-style static archives. old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs' old_postinstall_cmds='chmod 644 $oldlib' old_postuninstall_cmds= if test -n "$RANLIB"; then case $host_os in openbsd*) old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$tool_oldlib" ;; *) old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$tool_oldlib" ;; esac old_archive_cmds="$old_archive_cmds~\$RANLIB \$tool_oldlib" fi case $host_os in darwin*) lock_old_archive_extraction=yes ;; *) lock_old_archive_extraction=no ;; esac # If no C compiler was specified, use CC. LTCC=${LTCC-"$CC"} # If no C compiler flags were specified, use CFLAGS. LTCFLAGS=${LTCFLAGS-"$CFLAGS"} # Allow CC to be a program name with arguments. compiler=$CC # Check for command to grab the raw symbol name followed by C symbol from nm. { $as_echo "$as_me:${as_lineno-$LINENO}: checking command to parse $NM output from $compiler object" >&5 $as_echo_n "checking command to parse $NM output from $compiler object... " >&6; } if ${lt_cv_sys_global_symbol_pipe+:} false; then : $as_echo_n "(cached) " >&6 else # These are sane defaults that work on at least a few old systems. # [They come from Ultrix. What could be older than Ultrix?!! ;)] # Character class describing NM global symbol codes. symcode='[BCDEGRST]' # Regexp to match symbols that can be accessed directly from C. sympat='\([_A-Za-z][_A-Za-z0-9]*\)' # Define system-specific variables. 
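# NOTE: an illustrative walk-through of what the symbol pipeline assembled
# below does (the sample nm output is an assumption; exact format varies by
# platform and nm flavor).  A raw nm line such as
#
#   0000000000004018 D nm_test_var
#
# is first rewritten by lt_cv_sys_global_symbol_pipe into
#
#   D nm_test_var nm_test_var
#
# and the to_cdecl / to_c_name_address transforms defined just below then
# emit
#
#   extern char nm_test_var;
#   {"nm_test_var", (void *) &nm_test_var},
#
# which is exactly the material the conftest program further down compiles
# and links in order to verify that the pipe works.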
case $host_os in aix*) symcode='[BCDT]' ;; cygwin* | mingw* | pw32* | cegcc*) symcode='[ABCDGISTW]' ;; hpux*) if test "$host_cpu" = ia64; then symcode='[ABCDEGRST]' fi ;; irix* | nonstopux*) symcode='[BCDEGRST]' ;; osf*) symcode='[BCDEGQRST]' ;; solaris*) symcode='[BDRT]' ;; sco3.2v5*) symcode='[DT]' ;; sysv4.2uw2*) symcode='[DT]' ;; sysv5* | sco5v6* | unixware* | OpenUNIX*) symcode='[ABDT]' ;; sysv4) symcode='[DFNSTU]' ;; esac # If we're using GNU nm, then use its standard symbol codes. case `$NM -V 2>&1` in *GNU* | *'with BFD'*) symcode='[ABCDGIRSTW]' ;; esac # Transform an extracted symbol line into a proper C declaration. # Some systems (esp. on ia64) link data and code symbols differently, # so use this general approach. lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" # Transform an extracted symbol line into symbol name and symbol address lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\)[ ]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (void *) \&\2},/p'" lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([^ ]*\)[ ]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \(lib[^ ]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"lib\2\", (void *) \&\2},/p'" # Handle CRLF in mingw tool chain opt_cr= case $build_os in mingw*) opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp ;; esac # Try without a prefix underscore, then with it. for ac_symprfx in "" "_"; do # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol. symxfrm="\\1 $ac_symprfx\\2 \\2" # Write the raw and C identifiers. if test "$lt_cv_nm_interface" = "MS dumpbin"; then # Fake it for dumpbin and say T for any non-static function # and D for any global variable. # Also find C++ and __fastcall symbols from MSVC++, # which start with @ or ?. lt_cv_sys_global_symbol_pipe="$AWK '"\ " {last_section=section; section=\$ 3};"\ " /^COFF SYMBOL TABLE/{for(i in hide) delete hide[i]};"\ " /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\ " \$ 0!~/External *\|/{next};"\ " / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\ " {if(hide[section]) next};"\ " {f=0}; \$ 0~/\(\).*\|/{f=1}; {printf f ? \"T \" : \"D \"};"\ " {split(\$ 0, a, /\||\r/); split(a[2], s)};"\ " s[1]~/^[@?]/{print s[1], s[1]; next};"\ " s[1]~prfx {split(s[1],t,\"@\"); print t[1], substr(t[1],length(prfx))}"\ " ' prfx=^$ac_symprfx" else lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[ ]\($symcode$symcode*\)[ ][ ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" fi lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'" # Check to see that the pipe works correctly. pipe_works=no rm -f conftest* cat > conftest.$ac_ext <<_LT_EOF #ifdef __cplusplus extern "C" { #endif char nm_test_var; void nm_test_func(void); void nm_test_func(void){} #ifdef __cplusplus } #endif int main(){nm_test_var='a';nm_test_func();return(0);} _LT_EOF if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then # Now try to grab the symbols. nlist=conftest.nm if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist\""; } >&5 (eval $NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 test $ac_status = 0; } && test -s "$nlist"; then # Try sorting and uniquifying the output. if sort "$nlist" | uniq > "$nlist"T; then mv -f "$nlist"T "$nlist" else rm -f "$nlist"T fi # Make sure that we snagged all the symbols we need. if $GREP ' nm_test_var$' "$nlist" >/dev/null; then if $GREP ' nm_test_func$' "$nlist" >/dev/null; then cat <<_LT_EOF > conftest.$ac_ext /* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ #if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE) /* DATA imports from DLLs on WIN32 con't be const, because runtime relocations are performed -- see ld's documentation on pseudo-relocs. */ # define LT_DLSYM_CONST #elif defined(__osf__) /* This system does not cope well with relocations in const data. */ # define LT_DLSYM_CONST #else # define LT_DLSYM_CONST const #endif #ifdef __cplusplus extern "C" { #endif _LT_EOF # Now generate the symbol file. eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext' cat <<_LT_EOF >> conftest.$ac_ext /* The mapping between symbol names and symbols. */ LT_DLSYM_CONST struct { const char *name; void *address; } lt__PROGRAM__LTX_preloaded_symbols[] = { { "@PROGRAM@", (void *) 0 }, _LT_EOF $SED "s/^$symcode$symcode* \(.*\) \(.*\)$/ {\"\2\", (void *) \&\2},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext cat <<\_LT_EOF >> conftest.$ac_ext {0, (void *) 0} }; /* This works around a problem in FreeBSD linker */ #ifdef FREEBSD_WORKAROUND static const void *lt_preloaded_setup() { return lt__PROGRAM__LTX_preloaded_symbols; } #endif #ifdef __cplusplus } #endif _LT_EOF # Now try linking the two files. mv conftest.$ac_objext conftstm.$ac_objext lt_globsym_save_LIBS=$LIBS lt_globsym_save_CFLAGS=$CFLAGS LIBS="conftstm.$ac_objext" CFLAGS="$CFLAGS$lt_prog_compiler_no_builtin_flag" if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 (eval $ac_link) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && test -s conftest${ac_exeext}; then pipe_works=yes fi LIBS=$lt_globsym_save_LIBS CFLAGS=$lt_globsym_save_CFLAGS else echo "cannot find nm_test_func in $nlist" >&5 fi else echo "cannot find nm_test_var in $nlist" >&5 fi else echo "cannot run $lt_cv_sys_global_symbol_pipe" >&5 fi else echo "$progname: failed program was:" >&5 cat conftest.$ac_ext >&5 fi rm -rf conftest* conftst* # Do not use the global_symbol_pipe unless it works. if test "$pipe_works" = yes; then break else lt_cv_sys_global_symbol_pipe= fi done fi if test -z "$lt_cv_sys_global_symbol_pipe"; then lt_cv_sys_global_symbol_to_cdecl= fi if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: failed" >&5 $as_echo "failed" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: ok" >&5 $as_echo "ok" >&6; } fi # Response file support. if test "$lt_cv_nm_interface" = "MS dumpbin"; then nm_file_list_spec='@' elif $NM --help 2>/dev/null | grep '[@]FILE' >/dev/null; then nm_file_list_spec='@' fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for sysroot" >&5 $as_echo_n "checking for sysroot... " >&6; } # Check whether --with-sysroot was given. 
if test "${with_sysroot+set}" = set; then : withval=$with_sysroot; else with_sysroot=no fi lt_sysroot= case ${with_sysroot} in #( yes) if test "$GCC" = yes; then lt_sysroot=`$CC --print-sysroot 2>/dev/null` fi ;; #( /*) lt_sysroot=`echo "$with_sysroot" | sed -e "$sed_quote_subst"` ;; #( no|'') ;; #( *) { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${with_sysroot}" >&5 $as_echo "${with_sysroot}" >&6; } as_fn_error $? "The sysroot must be an absolute path." "$LINENO" 5 ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${lt_sysroot:-no}" >&5 $as_echo "${lt_sysroot:-no}" >&6; } # Check whether --enable-libtool-lock was given. if test "${enable_libtool_lock+set}" = set; then : enableval=$enable_libtool_lock; fi test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes # Some flags need to be propagated to the compiler or linker for good # libtool support. case $host in ia64-*-hpux*) # Find out which ABI we are using. echo 'int i;' > conftest.$ac_ext if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then case `/usr/bin/file conftest.$ac_objext` in *ELF-32*) HPUX_IA64_MODE="32" ;; *ELF-64*) HPUX_IA64_MODE="64" ;; esac fi rm -rf conftest* ;; *-*-irix6*) # Find out which ABI we are using. echo '#line '$LINENO' "configure"' > conftest.$ac_ext if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then if test "$lt_cv_prog_gnu_ld" = yes; then case `/usr/bin/file conftest.$ac_objext` in *32-bit*) LD="${LD-ld} -melf32bsmip" ;; *N32*) LD="${LD-ld} -melf32bmipn32" ;; *64-bit*) LD="${LD-ld} -melf64bmip" ;; esac else case `/usr/bin/file conftest.$ac_objext` in *32-bit*) LD="${LD-ld} -32" ;; *N32*) LD="${LD-ld} -n32" ;; *64-bit*) LD="${LD-ld} -64" ;; esac fi fi rm -rf conftest* ;; x86_64-*kfreebsd*-gnu|x86_64-*linux*|powerpc*-*linux*| \ s390*-*linux*|s390*-*tpf*|sparc*-*linux*) # Find out which ABI we are using. echo 'int i;' > conftest.$ac_ext if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then case `/usr/bin/file conftest.o` in *32-bit*) case $host in x86_64-*kfreebsd*-gnu) LD="${LD-ld} -m elf_i386_fbsd" ;; x86_64-*linux*) case `/usr/bin/file conftest.o` in *x86-64*) LD="${LD-ld} -m elf32_x86_64" ;; *) LD="${LD-ld} -m elf_i386" ;; esac ;; powerpc64le-*) LD="${LD-ld} -m elf32lppclinux" ;; powerpc64-*) LD="${LD-ld} -m elf32ppclinux" ;; s390x-*linux*) LD="${LD-ld} -m elf_s390" ;; sparc64-*linux*) LD="${LD-ld} -m elf32_sparc" ;; esac ;; *64-bit*) case $host in x86_64-*kfreebsd*-gnu) LD="${LD-ld} -m elf_x86_64_fbsd" ;; x86_64-*linux*) LD="${LD-ld} -m elf_x86_64" ;; powerpcle-*) LD="${LD-ld} -m elf64lppc" ;; powerpc-*) LD="${LD-ld} -m elf64ppc" ;; s390*-*linux*|s390*-*tpf*) LD="${LD-ld} -m elf64_s390" ;; sparc*-*linux*) LD="${LD-ld} -m elf64_sparc" ;; esac ;; esac fi rm -rf conftest* ;; *-*-sco3.2v5*) # On SCO OpenServer 5, we need -belf to get full-featured binaries. SAVE_CFLAGS="$CFLAGS" CFLAGS="$CFLAGS -belf" { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler needs -belf" >&5 $as_echo_n "checking whether the C compiler needs -belf... 
" >&6; } if ${lt_cv_cc_needs_belf+:} false; then : $as_echo_n "(cached) " >&6 else ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : lt_cv_cc_needs_belf=yes else lt_cv_cc_needs_belf=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_cc_needs_belf" >&5 $as_echo "$lt_cv_cc_needs_belf" >&6; } if test x"$lt_cv_cc_needs_belf" != x"yes"; then # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf CFLAGS="$SAVE_CFLAGS" fi ;; *-*solaris*) # Find out which ABI we are using. echo 'int i;' > conftest.$ac_ext if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then case `/usr/bin/file conftest.o` in *64-bit*) case $lt_cv_prog_gnu_ld in yes*) case $host in i?86-*-solaris*) LD="${LD-ld} -m elf_x86_64" ;; sparc*-*-solaris*) LD="${LD-ld} -m elf64_sparc" ;; esac # GNU ld 2.21 introduced _sol2 emulations. Use them if available. if ${LD-ld} -V | grep _sol2 >/dev/null 2>&1; then LD="${LD-ld}_sol2" fi ;; *) if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then LD="${LD-ld} -64" fi ;; esac ;; esac fi rm -rf conftest* ;; esac need_locks="$enable_libtool_lock" if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}mt", so it can be a program name with args. set dummy ${ac_tool_prefix}mt; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_MANIFEST_TOOL+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$MANIFEST_TOOL"; then ac_cv_prog_MANIFEST_TOOL="$MANIFEST_TOOL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_MANIFEST_TOOL="${ac_tool_prefix}mt" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi MANIFEST_TOOL=$ac_cv_prog_MANIFEST_TOOL if test -n "$MANIFEST_TOOL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MANIFEST_TOOL" >&5 $as_echo "$MANIFEST_TOOL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_MANIFEST_TOOL"; then ac_ct_MANIFEST_TOOL=$MANIFEST_TOOL # Extract the first word of "mt", so it can be a program name with args. set dummy mt; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_MANIFEST_TOOL+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_MANIFEST_TOOL"; then ac_cv_prog_ac_ct_MANIFEST_TOOL="$ac_ct_MANIFEST_TOOL" # Let the user override the test. 
else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_MANIFEST_TOOL="mt" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_MANIFEST_TOOL=$ac_cv_prog_ac_ct_MANIFEST_TOOL if test -n "$ac_ct_MANIFEST_TOOL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_MANIFEST_TOOL" >&5 $as_echo "$ac_ct_MANIFEST_TOOL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_MANIFEST_TOOL" = x; then MANIFEST_TOOL=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac MANIFEST_TOOL=$ac_ct_MANIFEST_TOOL fi else MANIFEST_TOOL="$ac_cv_prog_MANIFEST_TOOL" fi test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $MANIFEST_TOOL is a manifest tool" >&5 $as_echo_n "checking if $MANIFEST_TOOL is a manifest tool... " >&6; } if ${lt_cv_path_mainfest_tool+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_path_mainfest_tool=no echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&5 $MANIFEST_TOOL '-?' 2>conftest.err > conftest.out cat conftest.err >&5 if $GREP 'Manifest Tool' conftest.out > /dev/null; then lt_cv_path_mainfest_tool=yes fi rm -f conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_mainfest_tool" >&5 $as_echo "$lt_cv_path_mainfest_tool" >&6; } if test "x$lt_cv_path_mainfest_tool" != xyes; then MANIFEST_TOOL=: fi case $host_os in rhapsody* | darwin*) if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}dsymutil", so it can be a program name with args. set dummy ${ac_tool_prefix}dsymutil; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_DSYMUTIL+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$DSYMUTIL"; then ac_cv_prog_DSYMUTIL="$DSYMUTIL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_DSYMUTIL="${ac_tool_prefix}dsymutil" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi DSYMUTIL=$ac_cv_prog_DSYMUTIL if test -n "$DSYMUTIL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DSYMUTIL" >&5 $as_echo "$DSYMUTIL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_DSYMUTIL"; then ac_ct_DSYMUTIL=$DSYMUTIL # Extract the first word of "dsymutil", so it can be a program name with args. set dummy dsymutil; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_DSYMUTIL+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_DSYMUTIL"; then ac_cv_prog_ac_ct_DSYMUTIL="$ac_ct_DSYMUTIL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_DSYMUTIL="dsymutil" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_DSYMUTIL=$ac_cv_prog_ac_ct_DSYMUTIL if test -n "$ac_ct_DSYMUTIL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DSYMUTIL" >&5 $as_echo "$ac_ct_DSYMUTIL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_DSYMUTIL" = x; then DSYMUTIL=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac DSYMUTIL=$ac_ct_DSYMUTIL fi else DSYMUTIL="$ac_cv_prog_DSYMUTIL" fi if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}nmedit", so it can be a program name with args. set dummy ${ac_tool_prefix}nmedit; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_NMEDIT+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$NMEDIT"; then ac_cv_prog_NMEDIT="$NMEDIT" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_NMEDIT="${ac_tool_prefix}nmedit" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi NMEDIT=$ac_cv_prog_NMEDIT if test -n "$NMEDIT"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $NMEDIT" >&5 $as_echo "$NMEDIT" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_NMEDIT"; then ac_ct_NMEDIT=$NMEDIT # Extract the first word of "nmedit", so it can be a program name with args. set dummy nmedit; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_NMEDIT+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_NMEDIT"; then ac_cv_prog_ac_ct_NMEDIT="$ac_ct_NMEDIT" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_NMEDIT="nmedit" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_NMEDIT=$ac_cv_prog_ac_ct_NMEDIT if test -n "$ac_ct_NMEDIT"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_NMEDIT" >&5 $as_echo "$ac_ct_NMEDIT" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_NMEDIT" = x; then NMEDIT=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac NMEDIT=$ac_ct_NMEDIT fi else NMEDIT="$ac_cv_prog_NMEDIT" fi if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}lipo", so it can be a program name with args. set dummy ${ac_tool_prefix}lipo; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_LIPO+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$LIPO"; then ac_cv_prog_LIPO="$LIPO" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_LIPO="${ac_tool_prefix}lipo" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi LIPO=$ac_cv_prog_LIPO if test -n "$LIPO"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LIPO" >&5 $as_echo "$LIPO" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_LIPO"; then ac_ct_LIPO=$LIPO # Extract the first word of "lipo", so it can be a program name with args. set dummy lipo; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_LIPO+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_LIPO"; then ac_cv_prog_ac_ct_LIPO="$ac_ct_LIPO" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_LIPO="lipo" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_LIPO=$ac_cv_prog_ac_ct_LIPO if test -n "$ac_ct_LIPO"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_LIPO" >&5 $as_echo "$ac_ct_LIPO" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_LIPO" = x; then LIPO=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac LIPO=$ac_ct_LIPO fi else LIPO="$ac_cv_prog_LIPO" fi if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}otool", so it can be a program name with args. 
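# ---------------------------------------------------------------------------
# Editor's note (illustrative sketch, not part of the generated configure
# script): the MANIFEST_TOOL, DSYMUTIL, NMEDIT and LIPO probes above, and the
# otool/otool64 probes that follow, all expand the same AC_CHECK_TOOL
# pattern: try "${ac_tool_prefix}tool" first, then plain "tool", walking
# $PATH and caching the first executable hit.  Stripped of the caching and
# config.log chatter, the search is roughly the helper below (hypothetical
# name find_tool; wrapped in `if false` so it never runs here).
if false; then
  find_tool() {
    # $1 = program name; prints the full path of the first executable match.
    save_IFS=$IFS; IFS=${PATH_SEPARATOR-:}
    for dir in $PATH; do
      IFS=$save_IFS
      test -z "$dir" && dir=.
      if test -x "$dir/$1"; then
        echo "$dir/$1"
        return 0
      fi
    done
    IFS=$save_IFS
    return 1
  }
  find_tool dsymutil || echo "dsymutil not found in PATH"
fi
# ---------------------------------------------------------------------------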
set dummy ${ac_tool_prefix}otool; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_OTOOL+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$OTOOL"; then ac_cv_prog_OTOOL="$OTOOL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_OTOOL="${ac_tool_prefix}otool" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi OTOOL=$ac_cv_prog_OTOOL if test -n "$OTOOL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OTOOL" >&5 $as_echo "$OTOOL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_OTOOL"; then ac_ct_OTOOL=$OTOOL # Extract the first word of "otool", so it can be a program name with args. set dummy otool; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_OTOOL+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_OTOOL"; then ac_cv_prog_ac_ct_OTOOL="$ac_ct_OTOOL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_OTOOL="otool" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_OTOOL=$ac_cv_prog_ac_ct_OTOOL if test -n "$ac_ct_OTOOL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL" >&5 $as_echo "$ac_ct_OTOOL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_OTOOL" = x; then OTOOL=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac OTOOL=$ac_ct_OTOOL fi else OTOOL="$ac_cv_prog_OTOOL" fi if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}otool64", so it can be a program name with args. set dummy ${ac_tool_prefix}otool64; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_OTOOL64+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$OTOOL64"; then ac_cv_prog_OTOOL64="$OTOOL64" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_OTOOL64="${ac_tool_prefix}otool64" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi OTOOL64=$ac_cv_prog_OTOOL64 if test -n "$OTOOL64"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OTOOL64" >&5 $as_echo "$OTOOL64" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_OTOOL64"; then ac_ct_OTOOL64=$OTOOL64 # Extract the first word of "otool64", so it can be a program name with args. set dummy otool64; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_OTOOL64+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_OTOOL64"; then ac_cv_prog_ac_ct_OTOOL64="$ac_ct_OTOOL64" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_OTOOL64="otool64" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_OTOOL64=$ac_cv_prog_ac_ct_OTOOL64 if test -n "$ac_ct_OTOOL64"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL64" >&5 $as_echo "$ac_ct_OTOOL64" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_OTOOL64" = x; then OTOOL64=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac OTOOL64=$ac_ct_OTOOL64 fi else OTOOL64="$ac_cv_prog_OTOOL64" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -single_module linker flag" >&5 $as_echo_n "checking for -single_module linker flag... " >&6; } if ${lt_cv_apple_cc_single_mod+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_apple_cc_single_mod=no if test -z "${LT_MULTI_MODULE}"; then # By default we will add the -single_module flag. You can override # by either setting the environment variable LT_MULTI_MODULE # non-empty at configure time, or by adding -multi_module to the # link flags. rm -rf libconftest.dylib* echo "int foo(void){return 1;}" > conftest.c echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ -dynamiclib -Wl,-single_module conftest.c" >&5 $LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ -dynamiclib -Wl,-single_module conftest.c 2>conftest.err _lt_result=$? # If there is a non-empty error log, and "single_module" # appears in it, assume the flag caused a linker warning if test -s conftest.err && $GREP single_module conftest.err; then cat conftest.err >&5 # Otherwise, if the output was created with a 0 exit code from # the compiler, it worked. elif test -f libconftest.dylib && test $_lt_result -eq 0; then lt_cv_apple_cc_single_mod=yes else cat conftest.err >&5 fi rm -rf libconftest.dylib* rm -f conftest.* fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_apple_cc_single_mod" >&5 $as_echo "$lt_cv_apple_cc_single_mod" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -exported_symbols_list linker flag" >&5 $as_echo_n "checking for -exported_symbols_list linker flag... 
" >&6; } if ${lt_cv_ld_exported_symbols_list+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_ld_exported_symbols_list=no save_LDFLAGS=$LDFLAGS echo "_main" > conftest.sym LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : lt_cv_ld_exported_symbols_list=yes else lt_cv_ld_exported_symbols_list=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LDFLAGS="$save_LDFLAGS" fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_exported_symbols_list" >&5 $as_echo "$lt_cv_ld_exported_symbols_list" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -force_load linker flag" >&5 $as_echo_n "checking for -force_load linker flag... " >&6; } if ${lt_cv_ld_force_load+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_ld_force_load=no cat > conftest.c << _LT_EOF int forced_loaded() { return 2;} _LT_EOF echo "$LTCC $LTCFLAGS -c -o conftest.o conftest.c" >&5 $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&5 echo "$AR cru libconftest.a conftest.o" >&5 $AR cru libconftest.a conftest.o 2>&5 echo "$RANLIB libconftest.a" >&5 $RANLIB libconftest.a 2>&5 cat > conftest.c << _LT_EOF int main() { return 0;} _LT_EOF echo "$LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a" >&5 $LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a 2>conftest.err _lt_result=$? if test -s conftest.err && $GREP force_load conftest.err; then cat conftest.err >&5 elif test -f conftest && test $_lt_result -eq 0 && $GREP forced_load conftest >/dev/null 2>&1 ; then lt_cv_ld_force_load=yes else cat conftest.err >&5 fi rm -f conftest.err libconftest.a conftest conftest.c rm -rf conftest.dSYM fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_force_load" >&5 $as_echo "$lt_cv_ld_force_load" >&6; } case $host_os in rhapsody* | darwin1.[012]) _lt_dar_allow_undefined='${wl}-undefined ${wl}suppress' ;; darwin1.*) _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; darwin*) # darwin 5.x on # if running on 10.5 or later, the deployment target defaults # to the OS version, if on x86, and 10.4, the deployment # target defaults to 10.4. Don't you love it? case ${MACOSX_DEPLOYMENT_TARGET-10.0},$host in 10.0,*86*-darwin8*|10.0,*-darwin[91]*) _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;; 10.[012]*) _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; 10.*) _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;; esac ;; esac if test "$lt_cv_apple_cc_single_mod" = "yes"; then _lt_dar_single_mod='$single_module' fi if test "$lt_cv_ld_exported_symbols_list" = "yes"; then _lt_dar_export_syms=' ${wl}-exported_symbols_list,$output_objdir/${libname}-symbols.expsym' else _lt_dar_export_syms='~$NMEDIT -s $output_objdir/${libname}-symbols.expsym ${lib}' fi if test "$DSYMUTIL" != ":" && test "$lt_cv_ld_force_load" = "no"; then _lt_dsymutil='~$DSYMUTIL $lib || :' else _lt_dsymutil= fi ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 $as_echo_n "checking for ANSI C header files... " >&6; } if ${ac_cv_header_stdc+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include #include #include #include int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_header_stdc=yes else ac_cv_header_stdc=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test $ac_cv_header_stdc = yes; then # SunOS 4.x string.h does not declare mem*, contrary to ANSI. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "memchr" >/dev/null 2>&1; then : else ac_cv_header_stdc=no fi rm -f conftest* fi if test $ac_cv_header_stdc = yes; then # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "free" >/dev/null 2>&1; then : else ac_cv_header_stdc=no fi rm -f conftest* fi if test $ac_cv_header_stdc = yes; then # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. if test "$cross_compiling" = yes; then : : else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include #if ((' ' & 0x0FF) == 0x020) # define ISLOWER(c) ('a' <= (c) && (c) <= 'z') # define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) #else # define ISLOWER(c) \ (('a' <= (c) && (c) <= 'i') \ || ('j' <= (c) && (c) <= 'r') \ || ('s' <= (c) && (c) <= 'z')) # define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c)) #endif #define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) int main () { int i; for (i = 0; i < 256; i++) if (XOR (islower (i), ISLOWER (i)) || toupper (i) != TOUPPER (i)) return 2; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : else ac_cv_header_stdc=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 $as_echo "$ac_cv_header_stdc" >&6; } if test $ac_cv_header_stdc = yes; then $as_echo "#define STDC_HEADERS 1" >>confdefs.h fi # On IRIX 5.3, sys/types and inttypes.h are conflicting. for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ inttypes.h stdint.h unistd.h do : as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default " if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done for ac_header in dlfcn.h do : ac_fn_c_check_header_compile "$LINENO" "dlfcn.h" "ac_cv_header_dlfcn_h" "$ac_includes_default " if test "x$ac_cv_header_dlfcn_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_DLFCN_H 1 _ACEOF fi done func_stripname_cnf () { case ${2} in .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;; *) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;; esac } # func_stripname_cnf # Set options # Check whether --enable-shared was given. if test "${enable_shared+set}" = set; then : enableval=$enable_shared; p=${PACKAGE-default} case $enableval in yes) enable_shared=yes ;; no) enable_shared=no ;; *) enable_shared=no # Look at the argument we got. We use all the common list separators. lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," for pkg in $enableval; do IFS="$lt_save_ifs" if test "X$pkg" = "X$p"; then enable_shared=yes fi done IFS="$lt_save_ifs" ;; esac else enable_shared=yes fi # Check whether --enable-static was given. 
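# ---------------------------------------------------------------------------
# Editor's note (illustrative sketch, not part of the generated configure
# script): --enable-shared above, and the --enable-static, --with-pic and
# --enable-fast-install switches handled below, all accept yes, no, or a
# list of package names separated by commas or the path separator; the
# feature is enabled only when the current package name appears in that
# list.  Example invocations, assuming the package name expands to "bacula":
#
#   ./configure --enable-shared                # enable_shared=yes
#   ./configure --enable-shared=no             # enable_shared=no
#   ./configure --enable-shared=bacula,zlib    # enable_shared=yes (name matches)
#   ./configure --enable-shared=zlib           # enable_shared=no  (no match)
# ---------------------------------------------------------------------------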
if test "${enable_static+set}" = set; then : enableval=$enable_static; p=${PACKAGE-default} case $enableval in yes) enable_static=yes ;; no) enable_static=no ;; *) enable_static=no # Look at the argument we got. We use all the common list separators. lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," for pkg in $enableval; do IFS="$lt_save_ifs" if test "X$pkg" = "X$p"; then enable_static=yes fi done IFS="$lt_save_ifs" ;; esac else enable_static=no fi enable_dlopen=no enable_win32_dll=no # Check whether --with-pic was given. if test "${with_pic+set}" = set; then : withval=$with_pic; lt_p=${PACKAGE-default} case $withval in yes|no) pic_mode=$withval ;; *) pic_mode=default # Look at the argument we got. We use all the common list separators. lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," for lt_pkg in $withval; do IFS="$lt_save_ifs" if test "X$lt_pkg" = "X$lt_p"; then pic_mode=yes fi done IFS="$lt_save_ifs" ;; esac else pic_mode=default fi test -z "$pic_mode" && pic_mode=default # Check whether --enable-fast-install was given. if test "${enable_fast_install+set}" = set; then : enableval=$enable_fast_install; p=${PACKAGE-default} case $enableval in yes) enable_fast_install=yes ;; no) enable_fast_install=no ;; *) enable_fast_install=no # Look at the argument we got. We use all the common list separators. lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," for pkg in $enableval; do IFS="$lt_save_ifs" if test "X$pkg" = "X$p"; then enable_fast_install=yes fi done IFS="$lt_save_ifs" ;; esac else enable_fast_install=yes fi # This can be used to rebuild libtool when needed LIBTOOL_DEPS="$ltmain" # Always use our own libtool. LIBTOOL='$(SHELL) $(top_builddir)/libtool' test -z "$LN_S" && LN_S="ln -s" if test -n "${ZSH_VERSION+set}" ; then setopt NO_GLOB_SUBST fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for objdir" >&5 $as_echo_n "checking for objdir... " >&6; } if ${lt_cv_objdir+:} false; then : $as_echo_n "(cached) " >&6 else rm -f .libs 2>/dev/null mkdir .libs 2>/dev/null if test -d .libs; then lt_cv_objdir=.libs else # MS-DOS does not allow filenames that begin with a dot. lt_cv_objdir=_libs fi rmdir .libs 2>/dev/null fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_objdir" >&5 $as_echo "$lt_cv_objdir" >&6; } objdir=$lt_cv_objdir cat >>confdefs.h <<_ACEOF #define LT_OBJDIR "$lt_cv_objdir/" _ACEOF case $host_os in aix3*) # AIX sometimes has problems with the GCC collect2 program. For some # reason, if we set the COLLECT_NAMES environment variable, the problems # vanish in a puff of smoke. if test "X${COLLECT_NAMES+set}" != Xset; then COLLECT_NAMES= export COLLECT_NAMES fi ;; esac # Global variables: ofile=libtool can_build_shared=yes # All known linkers require a `.a' archive for static linking (except MSVC, # which needs '.lib'). 
libext=a with_gnu_ld="$lt_cv_prog_gnu_ld" old_CC="$CC" old_CFLAGS="$CFLAGS" # Set sane defaults for various variables test -z "$CC" && CC=cc test -z "$LTCC" && LTCC=$CC test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS test -z "$LD" && LD=ld test -z "$ac_objext" && ac_objext=o for cc_temp in $compiler""; do case $cc_temp in compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; \-*) ;; *) break;; esac done cc_basename=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` # Only perform the check for file, if the check method requires it test -z "$MAGIC_CMD" && MAGIC_CMD=file case $deplibs_check_method in file_magic*) if test "$file_magic_cmd" = '$MAGIC_CMD'; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ${ac_tool_prefix}file" >&5 $as_echo_n "checking for ${ac_tool_prefix}file... " >&6; } if ${lt_cv_path_MAGIC_CMD+:} false; then : $as_echo_n "(cached) " >&6 else case $MAGIC_CMD in [\\/*] | ?:[\\/]*) lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path. ;; *) lt_save_MAGIC_CMD="$MAGIC_CMD" lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" for ac_dir in $ac_dummy; do IFS="$lt_save_ifs" test -z "$ac_dir" && ac_dir=. if test -f $ac_dir/${ac_tool_prefix}file; then lt_cv_path_MAGIC_CMD="$ac_dir/${ac_tool_prefix}file" if test -n "$file_magic_test_file"; then case $deplibs_check_method in "file_magic "*) file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` MAGIC_CMD="$lt_cv_path_MAGIC_CMD" if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | $EGREP "$file_magic_regex" > /dev/null; then : else cat <<_LT_EOF 1>&2 *** Warning: the command libtool uses to detect shared libraries, *** $file_magic_cmd, produces output that libtool cannot recognize. *** The result is that libtool may fail to recognize shared libraries *** as such. This will affect the creation of libtool libraries that *** depend on shared libraries, but programs linked with such libtool *** libraries will work regardless of this problem. Nevertheless, you *** may want to report the problem to your system manager and/or to *** bug-libtool@gnu.org _LT_EOF fi ;; esac fi break fi done IFS="$lt_save_ifs" MAGIC_CMD="$lt_save_MAGIC_CMD" ;; esac fi MAGIC_CMD="$lt_cv_path_MAGIC_CMD" if test -n "$MAGIC_CMD"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5 $as_echo "$MAGIC_CMD" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test -z "$lt_cv_path_MAGIC_CMD"; then if test -n "$ac_tool_prefix"; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for file" >&5 $as_echo_n "checking for file... " >&6; } if ${lt_cv_path_MAGIC_CMD+:} false; then : $as_echo_n "(cached) " >&6 else case $MAGIC_CMD in [\\/*] | ?:[\\/]*) lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path. ;; *) lt_save_MAGIC_CMD="$MAGIC_CMD" lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" for ac_dir in $ac_dummy; do IFS="$lt_save_ifs" test -z "$ac_dir" && ac_dir=. 
if test -f $ac_dir/file; then lt_cv_path_MAGIC_CMD="$ac_dir/file" if test -n "$file_magic_test_file"; then case $deplibs_check_method in "file_magic "*) file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` MAGIC_CMD="$lt_cv_path_MAGIC_CMD" if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | $EGREP "$file_magic_regex" > /dev/null; then : else cat <<_LT_EOF 1>&2 *** Warning: the command libtool uses to detect shared libraries, *** $file_magic_cmd, produces output that libtool cannot recognize. *** The result is that libtool may fail to recognize shared libraries *** as such. This will affect the creation of libtool libraries that *** depend on shared libraries, but programs linked with such libtool *** libraries will work regardless of this problem. Nevertheless, you *** may want to report the problem to your system manager and/or to *** bug-libtool@gnu.org _LT_EOF fi ;; esac fi break fi done IFS="$lt_save_ifs" MAGIC_CMD="$lt_save_MAGIC_CMD" ;; esac fi MAGIC_CMD="$lt_cv_path_MAGIC_CMD" if test -n "$MAGIC_CMD"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5 $as_echo "$MAGIC_CMD" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi else MAGIC_CMD=: fi fi fi ;; esac # Use C for the default configuration in the libtool script lt_save_CC="$CC" ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu # Source file extension for C test sources. ac_ext=c # Object file extension for compiled C test sources. objext=o objext=$objext # Code to be used in simple compile tests lt_simple_compile_test_code="int some_variable = 0;" # Code to be used in simple link tests lt_simple_link_test_code='int main(){return(0);}' # If no C compiler was specified, use CC. LTCC=${LTCC-"$CC"} # If no C compiler flags were specified, use CFLAGS. LTCFLAGS=${LTCFLAGS-"$CFLAGS"} # Allow CC to be a program name with arguments. compiler=$CC # Save the default compiler, since it gets overwritten when the other # tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP. compiler_DEFAULT=$CC # save warnings/boilerplate of simple test code ac_outfile=conftest.$ac_objext echo "$lt_simple_compile_test_code" >conftest.$ac_ext eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err _lt_compiler_boilerplate=`cat conftest.err` $RM conftest* ac_outfile=conftest.$ac_objext echo "$lt_simple_link_test_code" >conftest.$ac_ext eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err _lt_linker_boilerplate=`cat conftest.err` $RM -r conftest* ## CAVEAT EMPTOR: ## There is no encapsulation within the following macros, do not change ## the running order or otherwise move them around unless you know exactly ## what you are doing... if test -n "$compiler"; then lt_prog_compiler_no_builtin_flag= if test "$GCC" = yes; then case $cc_basename in nvcc*) lt_prog_compiler_no_builtin_flag=' -Xcompiler -fno-builtin' ;; *) lt_prog_compiler_no_builtin_flag=' -fno-builtin' ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -fno-rtti -fno-exceptions" >&5 $as_echo_n "checking if $compiler supports -fno-rtti -fno-exceptions... 
" >&6; } if ${lt_cv_prog_compiler_rtti_exceptions+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_rtti_exceptions=no ac_outfile=conftest.$ac_objext echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="-fno-rtti -fno-exceptions" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. # The option is referenced via a variable to avoid confusing sed. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings other than the usual output. $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then lt_cv_prog_compiler_rtti_exceptions=yes fi fi $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_rtti_exceptions" >&5 $as_echo "$lt_cv_prog_compiler_rtti_exceptions" >&6; } if test x"$lt_cv_prog_compiler_rtti_exceptions" = xyes; then lt_prog_compiler_no_builtin_flag="$lt_prog_compiler_no_builtin_flag -fno-rtti -fno-exceptions" else : fi fi lt_prog_compiler_wl= lt_prog_compiler_pic= lt_prog_compiler_static= if test "$GCC" = yes; then lt_prog_compiler_wl='-Wl,' lt_prog_compiler_static='-static' case $host_os in aix*) # All AIX code is PIC. if test "$host_cpu" = ia64; then # AIX 5 now supports IA64 processor lt_prog_compiler_static='-Bstatic' fi ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support lt_prog_compiler_pic='-fPIC' ;; m68k) # FIXME: we need at least 68020 code to build shared libraries, but # adding the `-m68020' flag to GCC prevents building anything better, # like `-m68040'. lt_prog_compiler_pic='-m68020 -resident32 -malways-restore-a4' ;; esac ;; beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) # PIC is the default for these OSes. ;; mingw* | cygwin* | pw32* | os2* | cegcc*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). # Although the cygwin gcc ignores -fPIC, still need this for old-style # (--disable-auto-import) libraries lt_prog_compiler_pic='-DDLL_EXPORT' ;; darwin* | rhapsody*) # PIC is the default on this platform # Common symbols not allowed in MH_DYLIB files lt_prog_compiler_pic='-fno-common' ;; haiku*) # PIC is the default for Haiku. # The "-static" flag exists, but is broken. lt_prog_compiler_static= ;; hpux*) # PIC is the default for 64-bit PA HP-UX, but not for 32-bit # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag # sets the default TLS model and affects inlining. case $host_cpu in hppa*64*) # +Z the default ;; *) lt_prog_compiler_pic='-fPIC' ;; esac ;; interix[3-9]*) # Interix 3.x gcc -fpic/-fPIC options generate broken code. # Instead, we relocate shared libraries at runtime. 
;; msdosdjgpp*) # Just because we use GCC doesn't mean we suddenly get shared libraries # on systems that don't support them. lt_prog_compiler_can_build_shared=no enable_shared=no ;; *nto* | *qnx*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. lt_prog_compiler_pic='-fPIC -shared' ;; sysv4*MP*) if test -d /usr/nec; then lt_prog_compiler_pic=-Kconform_pic fi ;; *) lt_prog_compiler_pic='-fPIC' ;; esac case $cc_basename in nvcc*) # Cuda Compiler Driver 2.2 lt_prog_compiler_wl='-Xlinker ' if test -n "$lt_prog_compiler_pic"; then lt_prog_compiler_pic="-Xcompiler $lt_prog_compiler_pic" fi ;; esac else # PORTME Check for flag to pass linker flags through the system compiler. case $host_os in aix*) lt_prog_compiler_wl='-Wl,' if test "$host_cpu" = ia64; then # AIX 5 now supports IA64 processor lt_prog_compiler_static='-Bstatic' else lt_prog_compiler_static='-bnso -bI:/lib/syscalls.exp' fi ;; mingw* | cygwin* | pw32* | os2* | cegcc*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). lt_prog_compiler_pic='-DDLL_EXPORT' ;; hpux9* | hpux10* | hpux11*) lt_prog_compiler_wl='-Wl,' # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but # not for PA HP-UX. case $host_cpu in hppa*64*|ia64*) # +Z the default ;; *) lt_prog_compiler_pic='+Z' ;; esac # Is there a better lt_prog_compiler_static that works with the bundled CC? lt_prog_compiler_static='${wl}-a ${wl}archive' ;; irix5* | irix6* | nonstopux*) lt_prog_compiler_wl='-Wl,' # PIC (with -KPIC) is the default. lt_prog_compiler_static='-non_shared' ;; linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) case $cc_basename in # old Intel for x86_64 which still supported -KPIC. ecc*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-static' ;; # icc used to be incompatible with GCC. # ICC 10 doesn't accept -KPIC any more. icc* | ifort*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-fPIC' lt_prog_compiler_static='-static' ;; # Lahey Fortran 8.1. lf95*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='--shared' lt_prog_compiler_static='--static' ;; nagfor*) # NAG Fortran compiler lt_prog_compiler_wl='-Wl,-Wl,,' lt_prog_compiler_pic='-PIC' lt_prog_compiler_static='-Bstatic' ;; pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) # Portland Group compilers (*not* the Pentium gcc compiler, # which looks to be a dead project) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-fpic' lt_prog_compiler_static='-Bstatic' ;; ccc*) lt_prog_compiler_wl='-Wl,' # All Alpha code is PIC. 
lt_prog_compiler_static='-non_shared' ;; xl* | bgxl* | bgf* | mpixl*) # IBM XL C 8.0/Fortran 10.1, 11.1 on PPC and BlueGene lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-qpic' lt_prog_compiler_static='-qstaticlink' ;; *) case `$CC -V 2>&1 | sed 5q` in *Sun\ Ceres\ Fortran* | *Sun*Fortran*\ [1-7].* | *Sun*Fortran*\ 8.[0-3]*) # Sun Fortran 8.3 passes all unrecognized flags to the linker lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-Bstatic' lt_prog_compiler_wl='' ;; *Sun\ F* | *Sun*Fortran*) lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-Bstatic' lt_prog_compiler_wl='-Qoption ld ' ;; *Sun\ C*) # Sun C 5.9 lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-Bstatic' lt_prog_compiler_wl='-Wl,' ;; *Intel*\ [CF]*Compiler*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-fPIC' lt_prog_compiler_static='-static' ;; *Portland\ Group*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-fpic' lt_prog_compiler_static='-Bstatic' ;; esac ;; esac ;; newsos6) lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-Bstatic' ;; *nto* | *qnx*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. lt_prog_compiler_pic='-fPIC -shared' ;; osf3* | osf4* | osf5*) lt_prog_compiler_wl='-Wl,' # All OSF/1 code is PIC. lt_prog_compiler_static='-non_shared' ;; rdos*) lt_prog_compiler_static='-non_shared' ;; solaris*) lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-Bstatic' case $cc_basename in f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) lt_prog_compiler_wl='-Qoption ld ';; *) lt_prog_compiler_wl='-Wl,';; esac ;; sunos4*) lt_prog_compiler_wl='-Qoption ld ' lt_prog_compiler_pic='-PIC' lt_prog_compiler_static='-Bstatic' ;; sysv4 | sysv4.2uw2* | sysv4.3*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-Bstatic' ;; sysv4*MP*) if test -d /usr/nec ;then lt_prog_compiler_pic='-Kconform_pic' lt_prog_compiler_static='-Bstatic' fi ;; sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-Bstatic' ;; unicos*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_can_build_shared=no ;; uts4*) lt_prog_compiler_pic='-pic' lt_prog_compiler_static='-Bstatic' ;; *) lt_prog_compiler_can_build_shared=no ;; esac fi case $host_os in # For platforms which do not support PIC, -DPIC is meaningless: *djgpp*) lt_prog_compiler_pic= ;; *) lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC" ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 $as_echo_n "checking for $compiler option to produce PIC... " >&6; } if ${lt_cv_prog_compiler_pic+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_pic=$lt_prog_compiler_pic fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic" >&5 $as_echo "$lt_cv_prog_compiler_pic" >&6; } lt_prog_compiler_pic=$lt_cv_prog_compiler_pic # # Check to make sure the PIC flag actually works. # if test -n "$lt_prog_compiler_pic"; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic works" >&5 $as_echo_n "checking if $compiler PIC flag $lt_prog_compiler_pic works... 
" >&6; } if ${lt_cv_prog_compiler_pic_works+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_pic_works=no ac_outfile=conftest.$ac_objext echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="$lt_prog_compiler_pic -DPIC" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. # The option is referenced via a variable to avoid confusing sed. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings other than the usual output. $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then lt_cv_prog_compiler_pic_works=yes fi fi $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_works" >&5 $as_echo "$lt_cv_prog_compiler_pic_works" >&6; } if test x"$lt_cv_prog_compiler_pic_works" = xyes; then case $lt_prog_compiler_pic in "" | " "*) ;; *) lt_prog_compiler_pic=" $lt_prog_compiler_pic" ;; esac else lt_prog_compiler_pic= lt_prog_compiler_can_build_shared=no fi fi # # Check to make sure the static flag actually works. # wl=$lt_prog_compiler_wl eval lt_tmp_static_flag=\"$lt_prog_compiler_static\" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5 $as_echo_n "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; } if ${lt_cv_prog_compiler_static_works+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_static_works=no save_LDFLAGS="$LDFLAGS" LDFLAGS="$LDFLAGS $lt_tmp_static_flag" echo "$lt_simple_link_test_code" > conftest.$ac_ext if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then # The linker can only warn and ignore the option if not recognized # So say no if there are warnings if test -s conftest.err; then # Append any errors to the config.log. cat conftest.err 1>&5 $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if diff conftest.exp conftest.er2 >/dev/null; then lt_cv_prog_compiler_static_works=yes fi else lt_cv_prog_compiler_static_works=yes fi fi $RM -r conftest* LDFLAGS="$save_LDFLAGS" fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_static_works" >&5 $as_echo "$lt_cv_prog_compiler_static_works" >&6; } if test x"$lt_cv_prog_compiler_static_works" = xyes; then : else lt_prog_compiler_static= fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 $as_echo_n "checking if $compiler supports -c -o file.$ac_objext... 
" >&6; } if ${lt_cv_prog_compiler_c_o+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_c_o=no $RM -r conftest 2>/dev/null mkdir conftest cd conftest mkdir out echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="-o out/conftest2.$ac_objext" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? cat out/conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then lt_cv_prog_compiler_c_o=yes fi fi chmod u+w . 2>&5 $RM conftest* # SGI C++ compiler will create directory out/ii_files/ for # template instantiation test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files $RM out/* && rmdir out cd .. $RM -r conftest $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5 $as_echo "$lt_cv_prog_compiler_c_o" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 $as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } if ${lt_cv_prog_compiler_c_o+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_c_o=no $RM -r conftest 2>/dev/null mkdir conftest cd conftest mkdir out echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="-o out/conftest2.$ac_objext" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? cat out/conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then lt_cv_prog_compiler_c_o=yes fi fi chmod u+w . 2>&5 $RM conftest* # SGI C++ compiler will create directory out/ii_files/ for # template instantiation test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files $RM out/* && rmdir out cd .. 
$RM -r conftest $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5 $as_echo "$lt_cv_prog_compiler_c_o" >&6; } hard_links="nottested" if test "$lt_cv_prog_compiler_c_o" = no && test "$need_locks" != no; then # do not overwrite the value of need_locks provided by the user { $as_echo "$as_me:${as_lineno-$LINENO}: checking if we can lock with hard links" >&5 $as_echo_n "checking if we can lock with hard links... " >&6; } hard_links=yes $RM conftest* ln conftest.a conftest.b 2>/dev/null && hard_links=no touch conftest.a ln conftest.a conftest.b 2>&5 || hard_links=no ln conftest.a conftest.b 2>/dev/null && hard_links=no { $as_echo "$as_me:${as_lineno-$LINENO}: result: $hard_links" >&5 $as_echo "$hard_links" >&6; } if test "$hard_links" = no; then { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&5 $as_echo "$as_me: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&2;} need_locks=warn fi else need_locks=no fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5 $as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; } runpath_var= allow_undefined_flag= always_export_symbols=no archive_cmds= archive_expsym_cmds= compiler_needs_object=no enable_shared_with_static_runtimes=no export_dynamic_flag_spec= export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' hardcode_automatic=no hardcode_direct=no hardcode_direct_absolute=no hardcode_libdir_flag_spec= hardcode_libdir_separator= hardcode_minus_L=no hardcode_shlibpath_var=unsupported inherit_rpath=no link_all_deplibs=unknown module_cmds= module_expsym_cmds= old_archive_from_new_cmds= old_archive_from_expsyms_cmds= thread_safe_flag_spec= whole_archive_flag_spec= # include_expsyms should be a list of space-separated symbols to be *always* # included in the symbol list include_expsyms= # exclude_expsyms can be an extended regexp of symbols to exclude # it will be wrapped by ` (' and `)$', so one must not match beginning or # end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc', # as well as any symbol that contains `d'. exclude_expsyms='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out # platforms (ab)use it in PIC code, but their linkers get confused if # the symbol is explicitly referenced. Since portable code cannot # rely on this symbol name, it's probably fine to never include it in # preloaded symbol tables. # Exclude shared library initialization/finalization symbols. extract_expsyms_cmds= case $host_os in cygwin* | mingw* | pw32* | cegcc*) # FIXME: the MSVC++ port hasn't been tested in a loooong time # When not using gcc, we currently assume that we are using # Microsoft Visual C++. if test "$GCC" != yes; then with_gnu_ld=no fi ;; interix*) # we just hope/assume this is gcc and not c89 (= MSVC++) with_gnu_ld=yes ;; openbsd*) with_gnu_ld=no ;; linux* | k*bsd*-gnu | gnu*) link_all_deplibs=no ;; esac ld_shlibs=yes # On some targets, GNU ld is compatible enough with the native linker # that we're better off using the native interface for both. lt_use_gnu_ld_interface=no if test "$with_gnu_ld" = yes; then case $host_os in aix*) # The AIX port of GNU ld has always aspired to compatibility # with the native linker. 
However, as the warning in the GNU ld # block says, versions before 2.19.5* couldn't really create working # shared libraries, regardless of the interface used. case `$LD -v 2>&1` in *\ \(GNU\ Binutils\)\ 2.19.5*) ;; *\ \(GNU\ Binutils\)\ 2.[2-9]*) ;; *\ \(GNU\ Binutils\)\ [3-9]*) ;; *) lt_use_gnu_ld_interface=yes ;; esac ;; *) lt_use_gnu_ld_interface=yes ;; esac fi if test "$lt_use_gnu_ld_interface" = yes; then # If archive_cmds runs LD, not CC, wlarc should be empty wlarc='${wl}' # Set some defaults for GNU ld with shared library support. These # are reset later if shared libraries are not supported. Putting them # here allows them to be overridden if necessary. runpath_var=LD_RUN_PATH hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' export_dynamic_flag_spec='${wl}--export-dynamic' # ancient GNU ld didn't support --whole-archive et. al. if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then whole_archive_flag_spec="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' else whole_archive_flag_spec= fi supports_anon_versioning=no case `$LD -v 2>&1` in *GNU\ gold*) supports_anon_versioning=yes ;; *\ [01].* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11 *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... *\ 2.11.*) ;; # other 2.11 versions *) supports_anon_versioning=yes ;; esac # See if GNU ld supports shared libraries. case $host_os in aix[3-9]*) # On AIX/PPC, the GNU linker is very broken if test "$host_cpu" != ia64; then ld_shlibs=no cat <<_LT_EOF 1>&2 *** Warning: the GNU linker, at least up to release 2.19, is reported *** to be unable to reliably create shared libraries on AIX. *** Therefore, libtool is disabling shared libraries support. If you *** really care for shared libraries, you may want to install binutils *** 2.20 or above, or modify your PATH so that a non-GNU linker is found. *** You will then need to restart the configuration process. _LT_EOF fi ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' archive_expsym_cmds='' ;; m68k) archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' hardcode_libdir_flag_spec='-L$libdir' hardcode_minus_L=yes ;; esac ;; beos*) if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then allow_undefined_flag=unsupported # Joseph Beckenbach says some releases of gcc # support --undefined. This deserves some investigation. FIXME archive_cmds='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' else ld_shlibs=no fi ;; cygwin* | mingw* | pw32* | cegcc*) # _LT_TAGVAR(hardcode_libdir_flag_spec, ) is actually meaningless, # as there is no search path for DLLs. 
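      # The export_symbols_cmds pipeline below rewrites data symbols
      # (nm classes B, C, D, G, R, S) so they carry a trailing "DATA"
      # marker in the generated export list; the assumption is that the
      # auto-import machinery needs data entries flagged separately from
      # code symbols when the import library is built.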
hardcode_libdir_flag_spec='-L$libdir' export_dynamic_flag_spec='${wl}--export-all-symbols' allow_undefined_flag=unsupported always_export_symbols=no enable_shared_with_static_runtimes=yes export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' exclude_expsyms='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname' if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' # If the export-symbols file already is a .def file (1st line # is EXPORTS), use it as is; otherwise, prepend... archive_expsym_cmds='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then cp $export_symbols $output_objdir/$soname.def; else echo EXPORTS > $output_objdir/$soname.def; cat $export_symbols >> $output_objdir/$soname.def; fi~ $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' else ld_shlibs=no fi ;; haiku*) archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' link_all_deplibs=yes ;; interix[3-9]*) hardcode_direct=no hardcode_shlibpath_var=no hardcode_libdir_flag_spec='${wl}-rpath,$libdir' export_dynamic_flag_spec='${wl}-E' # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. # Instead, shared libraries are loaded at an image base (0x10000000 by # default) and relocated if they conflict, which is a slow very memory # consuming and fragmenting process. To avoid this, we pick a random, # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link # time. Moving up from 0x10000000 also allows more sbrk(2) space. 
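      # The expr arithmetic in the commands below picks one of 2048 bases:
      # ${RANDOM-$$} % 4096 / 2 yields 0..2047, each step is 262144 bytes
      # (256 KiB), and 1342177280 is 0x50000000, so the chosen image base
      # always falls inside the 0x50000000..0x6FFC0000 window described above.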
archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' archive_expsym_cmds='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' ;; gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu) tmp_diet=no if test "$host_os" = linux-dietlibc; then case $cc_basename in diet\ *) tmp_diet=yes;; # linux-dietlibc with static linking (!diet-dyn) esac fi if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \ && test "$tmp_diet" = no then tmp_addflag=' $pic_flag' tmp_sharedflag='-shared' case $cc_basename,$host_cpu in pgcc*) # Portland Group C compiler whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' tmp_addflag=' $pic_flag' ;; pgf77* | pgf90* | pgf95* | pgfortran*) # Portland Group f77 and f90 compilers whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' tmp_addflag=' $pic_flag -Mnomain' ;; ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 tmp_addflag=' -i_dynamic' ;; efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 tmp_addflag=' -i_dynamic -nofor_main' ;; ifc* | ifort*) # Intel Fortran compiler tmp_addflag=' -nofor_main' ;; lf95*) # Lahey Fortran 8.1 whole_archive_flag_spec= tmp_sharedflag='--shared' ;; xl[cC]* | bgxl[cC]* | mpixl[cC]*) # IBM XL C 8.0 on PPC (deal with xlf below) tmp_sharedflag='-qmkshrobj' tmp_addflag= ;; nvcc*) # Cuda Compiler Driver 2.2 whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' compiler_needs_object=yes ;; esac case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C 5.9 whole_archive_flag_spec='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' compiler_needs_object=yes tmp_sharedflag='-G' ;; *Sun\ F*) # Sun Fortran 8.3 tmp_sharedflag='-G' ;; esac archive_cmds='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' if test "x$supports_anon_versioning" = xyes; then archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ echo "local: *; };" >> $output_objdir/$libname.ver~ $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' fi case $cc_basename in xlf* | bgf* | bgxlf* | mpixlf*) # IBM XL Fortran 10.1 on PPC cannot create shared libs itself whole_archive_flag_spec='--whole-archive$convenience --no-whole-archive' hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib' if test "x$supports_anon_versioning" = xyes; then archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ cat 
$export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ echo "local: *; };" >> $output_objdir/$libname.ver~ $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' fi ;; esac else ld_shlibs=no fi ;; netbsd* | netbsdelf*-gnu) if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' wlarc= else archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' fi ;; solaris*) if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then ld_shlibs=no cat <<_LT_EOF 1>&2 *** Warning: The releases 2.8.* of the GNU linker cannot reliably *** create shared libraries on Solaris systems. Therefore, libtool *** is disabling shared libraries support. We urge you to upgrade GNU *** binutils to release 2.9.1 or newer. Another option is to modify *** your PATH or compiler configuration so that the native linker is *** used, and then restart. _LT_EOF elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' else ld_shlibs=no fi ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) case `$LD -v 2>&1` in *\ [01].* | *\ 2.[0-9].* | *\ 2.1[0-5].*) ld_shlibs=no cat <<_LT_EOF 1>&2 *** Warning: Releases of the GNU linker prior to 2.16.91.0.3 can not *** reliably create shared libraries on SCO systems. Therefore, libtool *** is disabling shared libraries support. We urge you to upgrade GNU *** binutils to release 2.16.91.0.3 or newer. Another option is to modify *** your PATH or compiler configuration so that the native linker is *** used, and then restart. _LT_EOF ;; *) # For security reasons, it is highly recommended that you always # use absolute paths for naming shared libraries, and exclude the # DT_RUNPATH tag from executables and libraries. But doing so # requires that you compile everything twice, which is a pain. 
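	# As in the branches above, the test below looks for "elf" in the
	# linker's ": supported targets:" help line (typically something like
	# "supported targets: elf64-x86-64 elf32-i386 ...") before trusting
	# this GNU ld to build ELF shared libraries here.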
if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' else ld_shlibs=no fi ;; esac ;; sunos4*) archive_cmds='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' wlarc= hardcode_direct=yes hardcode_shlibpath_var=no ;; *) if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' else ld_shlibs=no fi ;; esac if test "$ld_shlibs" = no; then runpath_var= hardcode_libdir_flag_spec= export_dynamic_flag_spec= whole_archive_flag_spec= fi else # PORTME fill in a description of your system's linker (not GNU ld) case $host_os in aix3*) allow_undefined_flag=unsupported always_export_symbols=yes archive_expsym_cmds='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' # Note: this linker hardcodes the directories in LIBPATH if there # are no directories specified by -L. hardcode_minus_L=yes if test "$GCC" = yes && test -z "$lt_prog_compiler_static"; then # Neither direct hardcoding nor static linking is supported with a # broken collect2. hardcode_direct=unsupported fi ;; aix[4-9]*) if test "$host_cpu" = ia64; then # On IA64, the linker does run time linking by default, so we don't # have to do anything special. aix_use_runtimelinking=no exp_sym_flag='-Bexport' no_entry_flag="" else # If we're using GNU nm, then we don't want the "-C" option. # -C means demangle to AIX nm, but means don't demangle with GNU nm # Also, AIX nm treats weak defined symbols like other global # defined symbols, whereas GNU nm marks them as "W". if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then export_symbols_cmds='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' else export_symbols_cmds='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' fi aix_use_runtimelinking=no # Test if we are trying to use run time linking or normal # AIX style linking. If -brtl is somewhere in LDFLAGS, we # need to do runtime linking. case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*) for ld_flag in $LDFLAGS; do if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then aix_use_runtimelinking=yes break fi done ;; esac exp_sym_flag='-bexport' no_entry_flag='-bnoentry' fi # When large executables or shared objects are built, AIX ld can # have problems creating the table of contents. If linking a library # or program results in "error TOC overflow" add -mminimal-toc to # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. 
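      # aix_use_runtimelinking, set above from -brtl / -Wl,-brtl in LDFLAGS,
      # steers the choices below: runtime linking adds ${wl}-G to
      # shared_flag, while the classic style archives the resulting shared
      # object into $libname$release.a as AIX has traditionally done.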
archive_cmds='' hardcode_direct=yes hardcode_direct_absolute=yes hardcode_libdir_separator=':' link_all_deplibs=yes file_list_spec='${wl}-f,' if test "$GCC" = yes; then case $host_os in aix4.[012]|aix4.[012].*) # We only want to do this on AIX 4.2 and lower, the check # below for broken collect2 doesn't work under 4.3+ collect2name=`${CC} -print-prog-name=collect2` if test -f "$collect2name" && strings "$collect2name" | $GREP resolve_lib_name >/dev/null then # We have reworked collect2 : else # We have old collect2 hardcode_direct=unsupported # It fails to find uninstalled libraries when the uninstalled # path is not listed in the libpath. Setting hardcode_minus_L # to unsupported forces relinking hardcode_minus_L=yes hardcode_libdir_flag_spec='-L$libdir' hardcode_libdir_separator= fi ;; esac shared_flag='-shared' if test "$aix_use_runtimelinking" = yes; then shared_flag="$shared_flag "'${wl}-G' fi link_all_deplibs=no else # not using gcc if test "$host_cpu" = ia64; then # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release # chokes on -Wl,-G. The following line is correct: shared_flag='-G' else if test "$aix_use_runtimelinking" = yes; then shared_flag='${wl}-G' else shared_flag='${wl}-bM:SRE' fi fi fi export_dynamic_flag_spec='${wl}-bexpall' # It seems that -bexpall does not export symbols beginning with # underscore (_), so it is better to generate a list of symbols to export. always_export_symbols=yes if test "$aix_use_runtimelinking" = yes; then # Warning - without using the other runtime loading flags (-brtl), # -berok will link without error, but may produce a broken library. allow_undefined_flag='-berok' # Determine the default libpath from the value encoded in an # empty executable. if test "${lt_cv_aix_libpath+set}" = set; then aix_libpath=$lt_cv_aix_libpath else if ${lt_cv_aix_libpath_+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : lt_aix_libpath_sed=' /Import File Strings/,/^$/ { /^0/ { s/^0 *\([^ ]*\) *$/\1/ p } }' lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` # Check for a 64-bit object if we didn't find anything. if test -z "$lt_cv_aix_libpath_"; then lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` fi fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext if test -z "$lt_cv_aix_libpath_"; then lt_cv_aix_libpath_="/usr/lib:/lib" fi fi aix_libpath=$lt_cv_aix_libpath_ fi hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" archive_expsym_cmds='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" else if test "$host_cpu" = ia64; then hardcode_libdir_flag_spec='${wl}-R $libdir:/usr/lib:/lib' allow_undefined_flag="-z nodefs" archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" else # Determine the default libpath from the value encoded in an # empty executable. 
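          # The probe below links an empty program, runs "dump -H" on it and
          # lets sed pull the first entry out of the "Import File Strings"
          # section, i.e. the libpath the linker encoded; a 64-bit object is
          # retried with "dump -HX64", and /usr/lib:/lib is the fallback.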
if test "${lt_cv_aix_libpath+set}" = set; then aix_libpath=$lt_cv_aix_libpath else if ${lt_cv_aix_libpath_+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : lt_aix_libpath_sed=' /Import File Strings/,/^$/ { /^0/ { s/^0 *\([^ ]*\) *$/\1/ p } }' lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` # Check for a 64-bit object if we didn't find anything. if test -z "$lt_cv_aix_libpath_"; then lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` fi fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext if test -z "$lt_cv_aix_libpath_"; then lt_cv_aix_libpath_="/usr/lib:/lib" fi fi aix_libpath=$lt_cv_aix_libpath_ fi hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" # Warning - without using the other run time loading flags, # -berok will link without error, but may produce a broken library. no_undefined_flag=' ${wl}-bernotok' allow_undefined_flag=' ${wl}-berok' if test "$with_gnu_ld" = yes; then # We only use this code for GNU lds that support --whole-archive. whole_archive_flag_spec='${wl}--whole-archive$convenience ${wl}--no-whole-archive' else # Exported symbols can be pulled into shared objects from archives whole_archive_flag_spec='$convenience' fi archive_cmds_need_lc=yes # This is similar to how AIX traditionally builds its shared libraries. archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' fi fi ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' archive_expsym_cmds='' ;; m68k) archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' hardcode_libdir_flag_spec='-L$libdir' hardcode_minus_L=yes ;; esac ;; bsdi[45]*) export_dynamic_flag_spec=-rdynamic ;; cygwin* | mingw* | pw32* | cegcc*) # When not using gcc, we currently assume that we are using # Microsoft Visual C++. # hardcode_libdir_flag_spec is actually meaningless, as there is # no search path for DLLs. case $cc_basename in cl*) # Native MSVC hardcode_libdir_flag_spec=' ' allow_undefined_flag=unsupported always_export_symbols=yes file_list_spec='@' # Tell ltmain to make .lib files, not .a files. libext=lib # Tell ltmain to make .dll files, not .so files. shrext_cmds=".dll" # FIXME: Setting linknames here is a bad hack. 
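	# The commands below drive cl/link directly: each name in
	# $export_symbols is rewritten by sed into a "-link -EXPORT:name"
	# option (a leading EXPORTS line, if present, is skipped), the result
	# goes into $soname.exp and is passed back as a response file, and
	# -Wl,-DLL,-IMPLIB: produces the DLL together with its import library.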
archive_cmds='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames=' archive_expsym_cmds='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then sed -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp; else sed -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp; fi~ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ linknames=' # The linker will not automatically build a static lib if we build a DLL. # _LT_TAGVAR(old_archive_from_new_cmds, )='true' enable_shared_with_static_runtimes=yes exclude_expsyms='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1,DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' # Don't use ranlib old_postinstall_cmds='chmod 644 $oldlib' postlink_cmds='lt_outputfile="@OUTPUT@"~ lt_tool_outputfile="@TOOL_OUTPUT@"~ case $lt_outputfile in *.exe|*.EXE) ;; *) lt_outputfile="$lt_outputfile.exe" lt_tool_outputfile="$lt_tool_outputfile.exe" ;; esac~ if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; $RM "$lt_outputfile.manifest"; fi' ;; *) # Assume MSVC wrapper hardcode_libdir_flag_spec=' ' allow_undefined_flag=unsupported # Tell ltmain to make .lib files, not .a files. libext=lib # Tell ltmain to make .dll files, not .so files. shrext_cmds=".dll" # FIXME: Setting linknames here is a bad hack. archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' # The linker will automatically build a .lib file if we build a DLL. old_archive_from_new_cmds='true' # FIXME: Should let the user specify the lib program. 
old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' enable_shared_with_static_runtimes=yes ;; esac ;; darwin* | rhapsody*) archive_cmds_need_lc=no hardcode_direct=no hardcode_automatic=yes hardcode_shlibpath_var=unsupported if test "$lt_cv_ld_force_load" = "yes"; then whole_archive_flag_spec='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience ${wl}-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' else whole_archive_flag_spec='' fi link_all_deplibs=yes allow_undefined_flag="$_lt_dar_allow_undefined" case $cc_basename in ifort*) _lt_dar_can_shared=yes ;; *) _lt_dar_can_shared=$GCC ;; esac if test "$_lt_dar_can_shared" = "yes"; then output_verbose_link_cmd=func_echo_all archive_cmds="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}" module_cmds="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dsymutil}" archive_expsym_cmds="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}" module_expsym_cmds="sed -e 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dar_export_syms}${_lt_dsymutil}" else ld_shlibs=no fi ;; dgux*) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_libdir_flag_spec='-L$libdir' hardcode_shlibpath_var=no ;; # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor # support. Future versions do this automatically, but an explicit c++rt0.o # does not break anything, and helps significantly (at the cost of a little # extra space). freebsd2.2*) archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' hardcode_libdir_flag_spec='-R$libdir' hardcode_direct=yes hardcode_shlibpath_var=no ;; # Unfortunately, older versions of FreeBSD 2 do not have this feature. freebsd2.*) archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' hardcode_direct=yes hardcode_minus_L=yes hardcode_shlibpath_var=no ;; # FreeBSD 3 and greater uses gcc -shared to do shared libraries. freebsd* | dragonfly*) archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' hardcode_libdir_flag_spec='-R$libdir' hardcode_direct=yes hardcode_shlibpath_var=no ;; hpux9*) if test "$GCC" = yes; then archive_cmds='$RM $output_objdir/$soname~$CC -shared $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' else archive_cmds='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' fi hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' hardcode_libdir_separator=: hardcode_direct=yes # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. 
hardcode_minus_L=yes export_dynamic_flag_spec='${wl}-E' ;; hpux10*) if test "$GCC" = yes && test "$with_gnu_ld" = no; then archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' else archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' fi if test "$with_gnu_ld" = no; then hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' hardcode_libdir_separator=: hardcode_direct=yes hardcode_direct_absolute=yes export_dynamic_flag_spec='${wl}-E' # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. hardcode_minus_L=yes fi ;; hpux11*) if test "$GCC" = yes && test "$with_gnu_ld" = no; then case $host_cpu in hppa*64*) archive_cmds='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' ;; ia64*) archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' ;; *) archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' ;; esac else case $host_cpu in hppa*64*) archive_cmds='$CC -b ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' ;; ia64*) archive_cmds='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' ;; *) # Older versions of the 11.00 compiler do not understand -b yet # (HP92453-01 A.11.01.20 doesn't, HP92453-01 B.11.X.35175-35176.GP does) { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CC understands -b" >&5 $as_echo_n "checking if $CC understands -b... " >&6; } if ${lt_cv_prog_compiler__b+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler__b=no save_LDFLAGS="$LDFLAGS" LDFLAGS="$LDFLAGS -b" echo "$lt_simple_link_test_code" > conftest.$ac_ext if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then # The linker can only warn and ignore the option if not recognized # So say no if there are warnings if test -s conftest.err; then # Append any errors to the config.log. cat conftest.err 1>&5 $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if diff conftest.exp conftest.er2 >/dev/null; then lt_cv_prog_compiler__b=yes fi else lt_cv_prog_compiler__b=yes fi fi $RM -r conftest* LDFLAGS="$save_LDFLAGS" fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler__b" >&5 $as_echo "$lt_cv_prog_compiler__b" >&6; } if test x"$lt_cv_prog_compiler__b" = xyes; then archive_cmds='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' else archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' fi ;; esac fi if test "$with_gnu_ld" = no; then hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' hardcode_libdir_separator=: case $host_cpu in hppa*64*|ia64*) hardcode_direct=no hardcode_shlibpath_var=no ;; *) hardcode_direct=yes hardcode_direct_absolute=yes export_dynamic_flag_spec='${wl}-E' # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. 
hardcode_minus_L=yes ;; esac fi ;; irix5* | irix6* | nonstopux*) if test "$GCC" = yes; then archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' # Try to use the -exported_symbol ld option, if it does not # work, assume that -exports_file does not work either and # implicitly export all symbols. # This should be the same for all languages, so no per-tag cache variable. { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $host_os linker accepts -exported_symbol" >&5 $as_echo_n "checking whether the $host_os linker accepts -exported_symbol... " >&6; } if ${lt_cv_irix_exported_symbol+:} false; then : $as_echo_n "(cached) " >&6 else save_LDFLAGS="$LDFLAGS" LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int foo (void) { return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : lt_cv_irix_exported_symbol=yes else lt_cv_irix_exported_symbol=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LDFLAGS="$save_LDFLAGS" fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_irix_exported_symbol" >&5 $as_echo "$lt_cv_irix_exported_symbol" >&6; } if test "$lt_cv_irix_exported_symbol" = yes; then archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' fi else archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib' fi archive_cmds_need_lc='no' hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' hardcode_libdir_separator=: inherit_rpath=yes link_all_deplibs=yes ;; netbsd* | netbsdelf*-gnu) if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out else archive_cmds='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF fi hardcode_libdir_flag_spec='-R$libdir' hardcode_direct=yes hardcode_shlibpath_var=no ;; newsos6) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_direct=yes hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' hardcode_libdir_separator=: hardcode_shlibpath_var=no ;; *nto* | *qnx*) ;; openbsd*) if test -f /usr/libexec/ld.so; then hardcode_direct=yes hardcode_shlibpath_var=no hardcode_direct_absolute=yes if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols' hardcode_libdir_flag_spec='${wl}-rpath,$libdir' export_dynamic_flag_spec='${wl}-E' else case $host_os in openbsd[01].* | openbsd2.[0-7] | openbsd2.[0-7].*) archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs 
$linker_flags' hardcode_libdir_flag_spec='-R$libdir' ;; *) archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' hardcode_libdir_flag_spec='${wl}-rpath,$libdir' ;; esac fi else ld_shlibs=no fi ;; os2*) hardcode_libdir_flag_spec='-L$libdir' hardcode_minus_L=yes allow_undefined_flag=unsupported archive_cmds='$ECHO "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~echo DATA >> $output_objdir/$libname.def~echo " SINGLE NONSHARED" >> $output_objdir/$libname.def~echo EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def' old_archive_from_new_cmds='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def' ;; osf3*) if test "$GCC" = yes; then allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*' archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' else allow_undefined_flag=' -expect_unresolved \*' archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' fi archive_cmds_need_lc='no' hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' hardcode_libdir_separator=: ;; osf4* | osf5*) # as osf3* with the addition of -msym flag if test "$GCC" = yes; then allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*' archive_cmds='$CC -shared${allow_undefined_flag} $pic_flag $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' else allow_undefined_flag=' -expect_unresolved \*' archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' archive_expsym_cmds='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~ $CC -shared${allow_undefined_flag} ${wl}-input ${wl}$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib~$RM $lib.exp' # Both c and cxx compiler support -rpath directly hardcode_libdir_flag_spec='-rpath $libdir' fi archive_cmds_need_lc='no' hardcode_libdir_separator=: ;; solaris*) no_undefined_flag=' -z defs' if test "$GCC" = yes; then wlarc='${wl}' archive_cmds='$CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' else case `$CC -V 2>&1` in *"Compilers 5.0"*) wlarc='' archive_cmds='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags' archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | 
$SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp' ;; *) wlarc='${wl}' archive_cmds='$CC -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' ;; esac fi hardcode_libdir_flag_spec='-R$libdir' hardcode_shlibpath_var=no case $host_os in solaris2.[0-5] | solaris2.[0-5].*) ;; *) # The compiler driver will combine and reorder linker options, # but understands `-z linker_flag'. GCC discards it without `$wl', # but is careful enough not to reorder. # Supported since Solaris 2.6 (maybe 2.5.1?) if test "$GCC" = yes; then whole_archive_flag_spec='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract' else whole_archive_flag_spec='-z allextract$convenience -z defaultextract' fi ;; esac link_all_deplibs=yes ;; sunos4*) if test "x$host_vendor" = xsequent; then # Use $CC to link under sequent, because it throws in some extra .o # files that make .init and .fini sections work. archive_cmds='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags' else archive_cmds='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' fi hardcode_libdir_flag_spec='-L$libdir' hardcode_direct=yes hardcode_minus_L=yes hardcode_shlibpath_var=no ;; sysv4) case $host_vendor in sni) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_direct=yes # is this really true??? ;; siemens) ## LD is ld it makes a PLAMLIB ## CC just makes a GrossModule. archive_cmds='$LD -G -o $lib $libobjs $deplibs $linker_flags' reload_cmds='$CC -r -o $output$reload_objs' hardcode_direct=no ;; motorola) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_direct=no #Motorola manual says yes, but my tests say they lie ;; esac runpath_var='LD_RUN_PATH' hardcode_shlibpath_var=no ;; sysv4.3*) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_shlibpath_var=no export_dynamic_flag_spec='-Bexport' ;; sysv4*MP*) if test -d /usr/nec; then archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_shlibpath_var=no runpath_var=LD_RUN_PATH hardcode_runpath_var=yes ld_shlibs=yes fi ;; sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*) no_undefined_flag='${wl}-z,text' archive_cmds_need_lc=no hardcode_shlibpath_var=no runpath_var='LD_RUN_PATH' if test "$GCC" = yes; then archive_cmds='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' else archive_cmds='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' fi ;; sysv5* | sco3.2v5* | sco5v6*) # Note: We can NOT use -z defs as we might desire, because we do not # link with -lc, and that would cause any symbols used from libc to # always be unresolved, which means just about no library would # ever link correctly. If we're not using GNU ld we use -z text # though, which does catch some bad symbols but isn't as heavy-handed # as -z defs. 
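	# Consistent with the note above: -z text (below) still rejects
	# non-PIC text relocations, while -z nodefs lets references into
	# libc stay unresolved at link time instead of failing the link.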
no_undefined_flag='${wl}-z,text' allow_undefined_flag='${wl}-z,nodefs' archive_cmds_need_lc=no hardcode_shlibpath_var=no hardcode_libdir_flag_spec='${wl}-R,$libdir' hardcode_libdir_separator=':' link_all_deplibs=yes export_dynamic_flag_spec='${wl}-Bexport' runpath_var='LD_RUN_PATH' if test "$GCC" = yes; then archive_cmds='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' else archive_cmds='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' fi ;; uts4*) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_libdir_flag_spec='-L$libdir' hardcode_shlibpath_var=no ;; *) ld_shlibs=no ;; esac if test x$host_vendor = xsni; then case $host in sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) export_dynamic_flag_spec='${wl}-Blargedynsym' ;; esac fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs" >&5 $as_echo "$ld_shlibs" >&6; } test "$ld_shlibs" = no && can_build_shared=no with_gnu_ld=$with_gnu_ld # # Do we need to explicitly link libc? # case "x$archive_cmds_need_lc" in x|xyes) # Assume -lc should be added archive_cmds_need_lc=yes if test "$enable_shared" = yes && test "$GCC" = yes; then case $archive_cmds in *'~'*) # FIXME: we may have to deal with multi-command sequences. ;; '$CC '*) # Test whether the compiler implicitly links with -lc since on some # systems, -lgcc has to come before -lc. If gcc already passes -lc # to ld, don't add -lc before -lgcc. { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5 $as_echo_n "checking whether -lc should be explicitly linked in... " >&6; } if ${lt_cv_archive_cmds_need_lc+:} false; then : $as_echo_n "(cached) " >&6 else $RM conftest* echo "$lt_simple_compile_test_code" > conftest.$ac_ext if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } 2>conftest.err; then soname=conftest lib=conftest libobjs=conftest.$ac_objext deplibs= wl=$lt_prog_compiler_wl pic_flag=$lt_prog_compiler_pic compiler_flags=-v linker_flags=-v verstring= output_objdir=. libname=conftest lt_save_allow_undefined_flag=$allow_undefined_flag allow_undefined_flag= if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\""; } >&5 (eval $archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } then lt_cv_archive_cmds_need_lc=no else lt_cv_archive_cmds_need_lc=yes fi allow_undefined_flag=$lt_save_allow_undefined_flag else cat conftest.err 1>&5 fi $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_archive_cmds_need_lc" >&5 $as_echo "$lt_cv_archive_cmds_need_lc" >&6; } archive_cmds_need_lc=$lt_cv_archive_cmds_need_lc ;; esac fi ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5 $as_echo_n "checking dynamic linker characteristics... 
" >&6; } if test "$GCC" = yes; then case $host_os in darwin*) lt_awk_arg="/^libraries:/,/LR/" ;; *) lt_awk_arg="/^libraries:/" ;; esac case $host_os in mingw* | cegcc*) lt_sed_strip_eq="s,=\([A-Za-z]:\),\1,g" ;; *) lt_sed_strip_eq="s,=/,/,g" ;; esac lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e $lt_sed_strip_eq` case $lt_search_path_spec in *\;*) # if the path contains ";" then we assume it to be the separator # otherwise default to the standard path separator (i.e. ":") - it is # assumed that no part of a normal pathname contains ";" but that should # okay in the real world where ";" in dirpaths is itself problematic. lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED 's/;/ /g'` ;; *) lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED "s/$PATH_SEPARATOR/ /g"` ;; esac # Ok, now we have the path, separated by spaces, we can step through it # and add multilib dir if necessary. lt_tmp_lt_search_path_spec= lt_multi_os_dir=`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null` for lt_sys_path in $lt_search_path_spec; do if test -d "$lt_sys_path/$lt_multi_os_dir"; then lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path/$lt_multi_os_dir" else test -d "$lt_sys_path" && \ lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path" fi done lt_search_path_spec=`$ECHO "$lt_tmp_lt_search_path_spec" | awk ' BEGIN {RS=" "; FS="/|\n";} { lt_foo=""; lt_count=0; for (lt_i = NF; lt_i > 0; lt_i--) { if ($lt_i != "" && $lt_i != ".") { if ($lt_i == "..") { lt_count++; } else { if (lt_count == 0) { lt_foo="/" $lt_i lt_foo; } else { lt_count--; } } } } if (lt_foo != "") { lt_freq[lt_foo]++; } if (lt_freq[lt_foo] == 1) { print lt_foo; } }'` # AWK program above erroneously prepends '/' to C:/dos/paths # for these hosts. case $host_os in mingw* | cegcc*) lt_search_path_spec=`$ECHO "$lt_search_path_spec" |\ $SED 's,/\([A-Za-z]:\),\1,g'` ;; esac sys_lib_search_path_spec=`$ECHO "$lt_search_path_spec" | $lt_NL2SP` else sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" fi library_names_spec= libname_spec='lib$name' soname_spec= shrext_cmds=".so" postinstall_cmds= postuninstall_cmds= finish_cmds= finish_eval= shlibpath_var= shlibpath_overrides_runpath=unknown version_type=none dynamic_linker="$host_os ld.so" sys_lib_dlsearch_path_spec="/lib /usr/lib" need_lib_prefix=unknown hardcode_into_libs=no # when you set need_version to no, make sure it does not cause -set_version # flags to be left without arguments need_version=unknown case $host_os in aix3*) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' shlibpath_var=LIBPATH # AIX 3 has no versioning support, so we append a major version to the name. soname_spec='${libname}${release}${shared_ext}$major' ;; aix[4-9]*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no hardcode_into_libs=yes if test "$host_cpu" = ia64; then # AIX 5 supports IA64 library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' shlibpath_var=LD_LIBRARY_PATH else # With GCC up to 2.95.x, collect2 would create an import file # for dependence libraries. The import file would start with # the line `#! .'. This would cause the generated library to # depend on `.', always an invalid library. This was fixed in # development snapshots of GCC prior to 3.0. 
case $host_os in aix4 | aix4.[01] | aix4.[01].*) if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' echo ' yes ' echo '#endif'; } | ${CC} -E - | $GREP yes > /dev/null; then : else can_build_shared=no fi ;; esac # AIX (on Power*) has no versioning support, so currently we can not hardcode correct # soname into executable. Probably we can add versioning support to # collect2, so additional links can be useful in future. if test "$aix_use_runtimelinking" = yes; then # If using run time linking (on AIX 4.2 or later) use lib.so # instead of lib.a to let people know that these are not # typical AIX shared libraries. library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' else # We preserve .a as extension for shared libraries through AIX4.2 # and later when we are not doing run time linking. library_names_spec='${libname}${release}.a $libname.a' soname_spec='${libname}${release}${shared_ext}$major' fi shlibpath_var=LIBPATH fi ;; amigaos*) case $host_cpu in powerpc) # Since July 2007 AmigaOS4 officially supports .so libraries. # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' ;; m68k) library_names_spec='$libname.ixlibrary $libname.a' # Create ${libname}_ixlibrary.a entries in /sys/libs. finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' ;; esac ;; beos*) library_names_spec='${libname}${shared_ext}' dynamic_linker="$host_os ld.so" shlibpath_var=LIBRARY_PATH ;; bsdi[45]*) version_type=linux # correct to gnu/linux during the next big refactor need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' shlibpath_var=LD_LIBRARY_PATH sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" # the default ld.so.conf also contains /usr/contrib/lib and # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow # libtool to hard-code these into programs ;; cygwin* | mingw* | pw32* | cegcc*) version_type=windows shrext_cmds=".dll" need_version=no need_lib_prefix=no case $GCC,$cc_basename in yes,*) # gcc library_names_spec='$libname.dll.a' # DLL is installed to $(libdir)/../bin by postinstall_cmds postinstall_cmds='base_file=`basename \${file}`~ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ dldir=$destdir/`dirname \$dlpath`~ test -d \$dldir || mkdir -p \$dldir~ $install_prog $dir/$dlname \$dldir/$dlname~ chmod a+x \$dldir/$dlname~ if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; fi' postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; echo \$dlname'\''`~ dlpath=$dir/\$dldll~ $RM \$dlpath' shlibpath_overrides_runpath=yes case $host_os in cygwin*) # Cygwin DLLs use 'cyg' prefix rather than 'lib' soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/lib/w32api" ;; mingw* | cegcc*) # MinGW DLLs use traditional 'lib' prefix soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' ;; pw32*) # pw32 DLLs use 'pw' prefix rather than 'lib' library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' ;; esac dynamic_linker='Win32 ld.exe' ;; *,cl*) # Native MSVC libname_spec='$name' soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' library_names_spec='${libname}.dll.lib' case $build_os in mingw*) sys_lib_search_path_spec= lt_save_ifs=$IFS IFS=';' for lt_path in $LIB do IFS=$lt_save_ifs # Let DOS variable expansion print the short 8.3 style file name. lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" done IFS=$lt_save_ifs # Convert to MSYS style. sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'` ;; cygwin*) # Convert to unix form, then to dos form, then back to unix form # but this time dos style (no spaces!) so that the unix form looks # like /cygdrive/c/PROGRA~1:/cygdr... sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` ;; *) sys_lib_search_path_spec="$LIB" if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then # It is most probably a Windows format PATH. sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` else sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` fi # FIXME: find the short name or the path components, as spaces are # common. (e.g. "Program Files" -> "PROGRA~1") ;; esac # DLL is installed to $(libdir)/../bin by postinstall_cmds postinstall_cmds='base_file=`basename \${file}`~ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ dldir=$destdir/`dirname \$dlpath`~ test -d \$dldir || mkdir -p \$dldir~ $install_prog $dir/$dlname \$dldir/$dlname' postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ dlpath=$dir/\$dldll~ $RM \$dlpath' shlibpath_overrides_runpath=yes dynamic_linker='Win32 link.exe' ;; *) # Assume MSVC wrapper library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' dynamic_linker='Win32 ld.exe' ;; esac # FIXME: first we should search . 
and the directory the executable is in shlibpath_var=PATH ;; darwin* | rhapsody*) dynamic_linker="$host_os dyld" version_type=darwin need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${major}$shared_ext ${libname}$shared_ext' soname_spec='${libname}${release}${major}$shared_ext' shlibpath_overrides_runpath=yes shlibpath_var=DYLD_LIBRARY_PATH shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib" sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' ;; dgux*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH ;; freebsd* | dragonfly*) # DragonFly does not have aout. When/if they implement a new # versioning mechanism, adjust this. if test -x /usr/bin/objformat; then objformat=`/usr/bin/objformat` else case $host_os in freebsd[23].*) objformat=aout ;; *) objformat=elf ;; esac fi version_type=freebsd-$objformat case $version_type in freebsd-elf*) library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' need_version=no need_lib_prefix=no ;; freebsd-*) library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' need_version=yes ;; esac shlibpath_var=LD_LIBRARY_PATH case $host_os in freebsd2.*) shlibpath_overrides_runpath=yes ;; freebsd3.[01]* | freebsdelf3.[01]*) shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; freebsd3.[2-9]* | freebsdelf3.[2-9]* | \ freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1) shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; *) # from 4.6 on, and DragonFly shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; esac ;; haiku*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no dynamic_linker="$host_os runtime_loader" library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LIBRARY_PATH shlibpath_overrides_runpath=yes sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' hardcode_into_libs=yes ;; hpux9* | hpux10* | hpux11*) # Give a soname corresponding to the major version so that dld.sl refuses to # link against other versions. version_type=sunos need_lib_prefix=no need_version=no case $host_cpu in ia64*) shrext_cmds='.so' hardcode_into_libs=yes dynamic_linker="$host_os dld.so" shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' if test "X$HPUX_IA64_MODE" = X32; then sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" else sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" fi sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; hppa*64*) shrext_cmds='.sl' hardcode_into_libs=yes dynamic_linker="$host_os dld.sl" shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. 
library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; *) shrext_cmds='.sl' dynamic_linker="$host_os dld.sl" shlibpath_var=SHLIB_PATH shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' ;; esac # HP-UX runs *really* slowly unless shared libraries are mode 555, ... postinstall_cmds='chmod 555 $lib' # or fails outright, so override atomically: install_override_mode=555 ;; interix[3-9]*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; irix5* | irix6* | nonstopux*) case $host_os in nonstopux*) version_type=nonstopux ;; *) if test "$lt_cv_prog_gnu_ld" = yes; then version_type=linux # correct to gnu/linux during the next big refactor else version_type=irix fi ;; esac need_lib_prefix=no need_version=no soname_spec='${libname}${release}${shared_ext}$major' library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}' case $host_os in irix5* | nonstopux*) libsuff= shlibsuff= ;; *) case $LD in # libtool.m4 will add one of these switches to LD *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") libsuff= shlibsuff= libmagic=32-bit;; *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") libsuff=32 shlibsuff=N32 libmagic=N32;; *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") libsuff=64 shlibsuff=64 libmagic=64-bit;; *) libsuff= shlibsuff= libmagic=never-match;; esac ;; esac shlibpath_var=LD_LIBRARY${shlibsuff}_PATH shlibpath_overrides_runpath=no sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}" sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}" hardcode_into_libs=yes ;; # No shared lib support for Linux oldld, aout, or coff. linux*oldld* | linux*aout* | linux*coff*) dynamic_linker=no ;; # This must be glibc/ELF. linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no # Some binutils ld are patched to set DT_RUNPATH if ${lt_cv_shlibpath_overrides_runpath+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_shlibpath_overrides_runpath=no save_LDFLAGS=$LDFLAGS save_libdir=$libdir eval "libdir=/foo; wl=\"$lt_prog_compiler_wl\"; \ LDFLAGS=\"\$LDFLAGS $hardcode_libdir_flag_spec\"" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : if ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null; then : lt_cv_shlibpath_overrides_runpath=yes fi fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LDFLAGS=$save_LDFLAGS libdir=$save_libdir fi shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath # This implies no fast_install, which is unacceptable. # Some rework will be needed to allow for fast_install # before this can be enabled. hardcode_into_libs=yes # Append ld.so.conf contents to the search path if test -f /etc/ld.so.conf; then lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '` sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" fi # We used to test for /lib/ld.so.1 and disable shared libraries on # powerpc, because MkLinux only supported shared libraries with the # GNU dynamic linker. Since this was broken with cross compilers, # most powerpc-linux boxes support dynamic linking these days and # people can always --disable-shared, the test was removed, and we # assume the GNU/Linux dynamic linker is in use. dynamic_linker='GNU/Linux ld.so' ;; netbsdelf*-gnu) version_type=linux need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes dynamic_linker='NetBSD ld.elf_so' ;; netbsd*) version_type=sunos need_lib_prefix=no need_version=no if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' dynamic_linker='NetBSD (a.out) ld.so' else library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' dynamic_linker='NetBSD ld.elf_so' fi shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; newsos6) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes ;; *nto* | *qnx*) version_type=qnx need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes dynamic_linker='ldqnx.so' ;; openbsd*) version_type=sunos sys_lib_dlsearch_path_spec="/usr/lib" need_lib_prefix=no # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs. 
case $host_os in openbsd3.3 | openbsd3.3.*) need_version=yes ;; *) need_version=no ;; esac library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' shlibpath_var=LD_LIBRARY_PATH if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then case $host_os in openbsd2.[89] | openbsd2.[89].*) shlibpath_overrides_runpath=no ;; *) shlibpath_overrides_runpath=yes ;; esac else shlibpath_overrides_runpath=yes fi ;; os2*) libname_spec='$name' shrext_cmds=".dll" need_lib_prefix=no library_names_spec='$libname${shared_ext} $libname.a' dynamic_linker='OS/2 ld.exe' shlibpath_var=LIBPATH ;; osf3* | osf4* | osf5*) version_type=osf need_lib_prefix=no need_version=no soname_spec='${libname}${release}${shared_ext}$major' library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' shlibpath_var=LD_LIBRARY_PATH sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec" ;; rdos*) dynamic_linker=no ;; solaris*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes # ldd complains unless libraries are executable postinstall_cmds='chmod +x $lib' ;; sunos4*) version_type=sunos library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes if test "$with_gnu_ld" = yes; then need_lib_prefix=no fi need_version=yes ;; sysv4 | sysv4.3*) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH case $host_vendor in sni) shlibpath_overrides_runpath=no need_lib_prefix=no runpath_var=LD_RUN_PATH ;; siemens) need_lib_prefix=no ;; motorola) need_lib_prefix=no need_version=no shlibpath_overrides_runpath=no sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' ;; esac ;; sysv4*MP*) if test -d /usr/nec ;then version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}' soname_spec='$libname${shared_ext}.$major' shlibpath_var=LD_LIBRARY_PATH fi ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) version_type=freebsd-elf need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes if test "$with_gnu_ld" = yes; then sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' else sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' case $host_os in sco3.2v5*) sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" ;; esac fi sys_lib_dlsearch_path_spec='/usr/lib' 
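# Each arm of this case records two things libtool needs later: whether
# the run-time linker lets $shlibpath_var (LD_LIBRARY_PATH and friends)
# override a RUNPATH embedded in the binary (shlibpath_overrides_runpath),
# and which directories the loader searches by default
# (sys_lib_dlsearch_path_spec).  On glibc systems the answer is probed
# above by linking a test program with effectively -rpath /foo and
# looking for RUNPATH in the objdump output.  A minimal sketch of the
# same idea, defined here only for illustration and never called
# (libfoo.so is just an example name):
lt_example_print_runpath ()
{
  # Show any RUNPATH/RPATH entry recorded in an ELF object, e.g.
  #   lt_example_print_runpath libfoo.so
  objdump -p "$1" 2>/dev/null | grep -E 'RUNPATH|RPATH'
}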
;; tpf*) # TPF is a cross-target only. Preferred cross-host = GNU/Linux. version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; uts4*) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH ;; *) dynamic_linker=no ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: result: $dynamic_linker" >&5 $as_echo "$dynamic_linker" >&6; } test "$dynamic_linker" = no && can_build_shared=no variables_saved_for_relink="PATH $shlibpath_var $runpath_var" if test "$GCC" = yes; then variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" fi if test "${lt_cv_sys_lib_search_path_spec+set}" = set; then sys_lib_search_path_spec="$lt_cv_sys_lib_search_path_spec" fi if test "${lt_cv_sys_lib_dlsearch_path_spec+set}" = set; then sys_lib_dlsearch_path_spec="$lt_cv_sys_lib_dlsearch_path_spec" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5 $as_echo_n "checking how to hardcode library paths into programs... " >&6; } hardcode_action= if test -n "$hardcode_libdir_flag_spec" || test -n "$runpath_var" || test "X$hardcode_automatic" = "Xyes" ; then # We can hardcode non-existent directories. if test "$hardcode_direct" != no && # If the only mechanism to avoid hardcoding is shlibpath_var, we # have to relink, otherwise we might link with an installed library # when we should be linking with a yet-to-be-installed one ## test "$_LT_TAGVAR(hardcode_shlibpath_var, )" != no && test "$hardcode_minus_L" != no; then # Linking always hardcodes the temporary library directory. hardcode_action=relink else # We can link without hardcoding, and we can hardcode nonexisting dirs. hardcode_action=immediate fi else # We cannot hardcode anything, or else we can only hardcode existing # directories. hardcode_action=unsupported fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $hardcode_action" >&5 $as_echo "$hardcode_action" >&6; } if test "$hardcode_action" = relink || test "$inherit_rpath" = yes; then # Fast installation is not supported enable_fast_install=no elif test "$shlibpath_overrides_runpath" = yes || test "$enable_shared" = no; then # Fast installation is not necessary enable_fast_install=needless fi if test "x$enable_dlopen" != xyes; then enable_dlopen=unknown enable_dlopen_self=unknown enable_dlopen_self_static=unknown else lt_cv_dlopen=no lt_cv_dlopen_libs= case $host_os in beos*) lt_cv_dlopen="load_add_on" lt_cv_dlopen_libs= lt_cv_dlopen_self=yes ;; mingw* | pw32* | cegcc*) lt_cv_dlopen="LoadLibrary" lt_cv_dlopen_libs= ;; cygwin*) lt_cv_dlopen="dlopen" lt_cv_dlopen_libs= ;; darwin*) # if libdl is installed we need to link against it { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5 $as_echo_n "checking for dlopen in -ldl... " >&6; } if ${ac_cv_lib_dl_dlopen+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ldl $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. 
Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char dlopen (); int main () { return dlopen (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_dl_dlopen=yes else ac_cv_lib_dl_dlopen=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5 $as_echo "$ac_cv_lib_dl_dlopen" >&6; } if test "x$ac_cv_lib_dl_dlopen" = xyes; then : lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl" else lt_cv_dlopen="dyld" lt_cv_dlopen_libs= lt_cv_dlopen_self=yes fi ;; *) ac_fn_c_check_func "$LINENO" "shl_load" "ac_cv_func_shl_load" if test "x$ac_cv_func_shl_load" = xyes; then : lt_cv_dlopen="shl_load" else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for shl_load in -ldld" >&5 $as_echo_n "checking for shl_load in -ldld... " >&6; } if ${ac_cv_lib_dld_shl_load+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ldld $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char shl_load (); int main () { return shl_load (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_dld_shl_load=yes else ac_cv_lib_dld_shl_load=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_shl_load" >&5 $as_echo "$ac_cv_lib_dld_shl_load" >&6; } if test "x$ac_cv_lib_dld_shl_load" = xyes; then : lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-ldld" else ac_fn_c_check_func "$LINENO" "dlopen" "ac_cv_func_dlopen" if test "x$ac_cv_func_dlopen" = xyes; then : lt_cv_dlopen="dlopen" else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5 $as_echo_n "checking for dlopen in -ldl... " >&6; } if ${ac_cv_lib_dl_dlopen+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ldl $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char dlopen (); int main () { return dlopen (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_dl_dlopen=yes else ac_cv_lib_dl_dlopen=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5 $as_echo "$ac_cv_lib_dl_dlopen" >&6; } if test "x$ac_cv_lib_dl_dlopen" = xyes; then : lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl" else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -lsvld" >&5 $as_echo_n "checking for dlopen in -lsvld... " >&6; } if ${ac_cv_lib_svld_dlopen+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lsvld $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char dlopen (); int main () { return dlopen (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_svld_dlopen=yes else ac_cv_lib_svld_dlopen=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_svld_dlopen" >&5 $as_echo "$ac_cv_lib_svld_dlopen" >&6; } if test "x$ac_cv_lib_svld_dlopen" = xyes; then : lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-lsvld" else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dld_link in -ldld" >&5 $as_echo_n "checking for dld_link in -ldld... " >&6; } if ${ac_cv_lib_dld_dld_link+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ldld $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char dld_link (); int main () { return dld_link (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_dld_dld_link=yes else ac_cv_lib_dld_dld_link=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_dld_link" >&5 $as_echo "$ac_cv_lib_dld_dld_link" >&6; } if test "x$ac_cv_lib_dld_dld_link" = xyes; then : lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-ldld" fi fi fi fi fi fi ;; esac if test "x$lt_cv_dlopen" != xno; then enable_dlopen=yes else enable_dlopen=no fi case $lt_cv_dlopen in dlopen) save_CPPFLAGS="$CPPFLAGS" test "x$ac_cv_header_dlfcn_h" = xyes && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H" save_LDFLAGS="$LDFLAGS" wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" save_LIBS="$LIBS" LIBS="$lt_cv_dlopen_libs $LIBS" { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program can dlopen itself" >&5 $as_echo_n "checking whether a program can dlopen itself... " >&6; } if ${lt_cv_dlopen_self+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : lt_cv_dlopen_self=cross else lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 lt_status=$lt_dlunknown cat > conftest.$ac_ext <<_LT_EOF #line $LINENO "configure" #include "confdefs.h" #if HAVE_DLFCN_H #include #endif #include #ifdef RTLD_GLOBAL # define LT_DLGLOBAL RTLD_GLOBAL #else # ifdef DL_GLOBAL # define LT_DLGLOBAL DL_GLOBAL # else # define LT_DLGLOBAL 0 # endif #endif /* We may have to define LT_DLLAZY_OR_NOW in the command line if we find out it does not work in some platform. */ #ifndef LT_DLLAZY_OR_NOW # ifdef RTLD_LAZY # define LT_DLLAZY_OR_NOW RTLD_LAZY # else # ifdef DL_LAZY # define LT_DLLAZY_OR_NOW DL_LAZY # else # ifdef RTLD_NOW # define LT_DLLAZY_OR_NOW RTLD_NOW # else # ifdef DL_NOW # define LT_DLLAZY_OR_NOW DL_NOW # else # define LT_DLLAZY_OR_NOW 0 # endif # endif # endif # endif #endif /* When -fvisbility=hidden is used, assume the code has been annotated correspondingly for the symbols needed. 
*/ #if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) int fnord () __attribute__((visibility("default"))); #endif int fnord () { return 42; } int main () { void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); int status = $lt_dlunknown; if (self) { if (dlsym (self,"fnord")) status = $lt_dlno_uscore; else { if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; else puts (dlerror ()); } /* dlclose (self); */ } else puts (dlerror ()); return status; } _LT_EOF if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 (eval $ac_link) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && test -s conftest${ac_exeext} 2>/dev/null; then (./conftest; exit; ) >&5 2>/dev/null lt_status=$? case x$lt_status in x$lt_dlno_uscore) lt_cv_dlopen_self=yes ;; x$lt_dlneed_uscore) lt_cv_dlopen_self=yes ;; x$lt_dlunknown|x*) lt_cv_dlopen_self=no ;; esac else : # compilation failed lt_cv_dlopen_self=no fi fi rm -fr conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self" >&5 $as_echo "$lt_cv_dlopen_self" >&6; } if test "x$lt_cv_dlopen_self" = xyes; then wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\" { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a statically linked program can dlopen itself" >&5 $as_echo_n "checking whether a statically linked program can dlopen itself... " >&6; } if ${lt_cv_dlopen_self_static+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : lt_cv_dlopen_self_static=cross else lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 lt_status=$lt_dlunknown cat > conftest.$ac_ext <<_LT_EOF #line $LINENO "configure" #include "confdefs.h" #if HAVE_DLFCN_H #include #endif #include #ifdef RTLD_GLOBAL # define LT_DLGLOBAL RTLD_GLOBAL #else # ifdef DL_GLOBAL # define LT_DLGLOBAL DL_GLOBAL # else # define LT_DLGLOBAL 0 # endif #endif /* We may have to define LT_DLLAZY_OR_NOW in the command line if we find out it does not work in some platform. */ #ifndef LT_DLLAZY_OR_NOW # ifdef RTLD_LAZY # define LT_DLLAZY_OR_NOW RTLD_LAZY # else # ifdef DL_LAZY # define LT_DLLAZY_OR_NOW DL_LAZY # else # ifdef RTLD_NOW # define LT_DLLAZY_OR_NOW RTLD_NOW # else # ifdef DL_NOW # define LT_DLLAZY_OR_NOW DL_NOW # else # define LT_DLLAZY_OR_NOW 0 # endif # endif # endif # endif #endif /* When -fvisbility=hidden is used, assume the code has been annotated correspondingly for the symbols needed. */ #if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) int fnord () __attribute__((visibility("default"))); #endif int fnord () { return 42; } int main () { void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); int status = $lt_dlunknown; if (self) { if (dlsym (self,"fnord")) status = $lt_dlno_uscore; else { if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; else puts (dlerror ()); } /* dlclose (self); */ } else puts (dlerror ()); return status; } _LT_EOF if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 (eval $ac_link) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && test -s conftest${ac_exeext} 2>/dev/null; then (./conftest; exit; ) >&5 2>/dev/null lt_status=$? 
case x$lt_status in x$lt_dlno_uscore) lt_cv_dlopen_self_static=yes ;; x$lt_dlneed_uscore) lt_cv_dlopen_self_static=yes ;; x$lt_dlunknown|x*) lt_cv_dlopen_self_static=no ;; esac else : # compilation failed lt_cv_dlopen_self_static=no fi fi rm -fr conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self_static" >&5 $as_echo "$lt_cv_dlopen_self_static" >&6; } fi CPPFLAGS="$save_CPPFLAGS" LDFLAGS="$save_LDFLAGS" LIBS="$save_LIBS" ;; esac case $lt_cv_dlopen_self in yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;; *) enable_dlopen_self=unknown ;; esac case $lt_cv_dlopen_self_static in yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;; *) enable_dlopen_self_static=unknown ;; esac fi striplib= old_striplib= { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether stripping libraries is possible" >&5 $as_echo_n "checking whether stripping libraries is possible... " >&6; } if test -n "$STRIP" && $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" test -z "$striplib" && striplib="$STRIP --strip-unneeded" { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else # FIXME - insert some real tests, host_os isn't really good enough case $host_os in darwin*) if test -n "$STRIP" ; then striplib="$STRIP -x" old_striplib="$STRIP -S" { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi ;; *) { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } ;; esac fi # Report which library types will actually be built { $as_echo "$as_me:${as_lineno-$LINENO}: checking if libtool supports shared libraries" >&5 $as_echo_n "checking if libtool supports shared libraries... " >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: $can_build_shared" >&5 $as_echo "$can_build_shared" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build shared libraries" >&5 $as_echo_n "checking whether to build shared libraries... " >&6; } test "$can_build_shared" = "no" && enable_shared=no # On AIX, shared libraries and static libraries use the same namespace, and # are all built from PIC. case $host_os in aix3*) test "$enable_shared" = yes && enable_static=no if test -n "$RANLIB"; then archive_cmds="$archive_cmds~\$RANLIB \$lib" postinstall_cmds='$RANLIB $lib' fi ;; aix[4-9]*) if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then test "$enable_shared" = yes && enable_static=no fi ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_shared" >&5 $as_echo "$enable_shared" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build static libraries" >&5 $as_echo_n "checking whether to build static libraries... " >&6; } # Make sure either enable_shared or enable_static is yes. 
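# The AIX special case above exists because on AIX the shared object
# conventionally lives inside the same .a archive as the static members,
# so the two library types really do share one namespace and one set of
# PIC objects.  Whatever was decided per platform, the very next test
# guarantees that at least one of the two switches stays enabled:
#   test "$enable_shared" = yes || enable_static=yes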
test "$enable_shared" = yes || enable_static=yes { $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_static" >&5 $as_echo "$enable_static" >&6; } fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu CC="$lt_save_CC" if test -n "$CXX" && ( test "X$CXX" != "Xno" && ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) || (test "X$CXX" != "Xg++"))) ; then ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C++ preprocessor" >&5 $as_echo_n "checking how to run the C++ preprocessor... " >&6; } if test -z "$CXXCPP"; then if ${ac_cv_prog_CXXCPP+:} false; then : $as_echo_n "(cached) " >&6 else # Double quotes because CXXCPP needs to be expanded for CXXCPP in "$CXX -E" "/lib/cpp" do ac_preproc_ok=false for ac_cxx_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer to if __STDC__ is defined, since # exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef __STDC__ # include #else # include #endif Syntax error _ACEOF if ac_fn_cxx_try_cpp "$LINENO"; then : else # Broken: fails on valid input. continue fi rm -f conftest.err conftest.i conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if ac_fn_cxx_try_cpp "$LINENO"; then : # Broken: success on invalid input. continue else # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.i conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.i conftest.err conftest.$ac_ext if $ac_preproc_ok; then : break fi done ac_cv_prog_CXXCPP=$CXXCPP fi CXXCPP=$ac_cv_prog_CXXCPP else ac_cv_prog_CXXCPP=$CXXCPP fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXXCPP" >&5 $as_echo "$CXXCPP" >&6; } ac_preproc_ok=false for ac_cxx_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer to if __STDC__ is defined, since # exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef __STDC__ # include #else # include #endif Syntax error _ACEOF if ac_fn_cxx_try_cpp "$LINENO"; then : else # Broken: fails on valid input. continue fi rm -f conftest.err conftest.i conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if ac_fn_cxx_try_cpp "$LINENO"; then : # Broken: success on invalid input. continue else # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.i conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. 
rm -f conftest.i conftest.err conftest.$ac_ext if $ac_preproc_ok; then : else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "C++ preprocessor \"$CXXCPP\" fails sanity check See \`config.log' for more details" "$LINENO" 5; } fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu else _lt_caught_CXX_error=yes fi ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu archive_cmds_need_lc_CXX=no allow_undefined_flag_CXX= always_export_symbols_CXX=no archive_expsym_cmds_CXX= compiler_needs_object_CXX=no export_dynamic_flag_spec_CXX= hardcode_direct_CXX=no hardcode_direct_absolute_CXX=no hardcode_libdir_flag_spec_CXX= hardcode_libdir_separator_CXX= hardcode_minus_L_CXX=no hardcode_shlibpath_var_CXX=unsupported hardcode_automatic_CXX=no inherit_rpath_CXX=no module_cmds_CXX= module_expsym_cmds_CXX= link_all_deplibs_CXX=unknown old_archive_cmds_CXX=$old_archive_cmds reload_flag_CXX=$reload_flag reload_cmds_CXX=$reload_cmds no_undefined_flag_CXX= whole_archive_flag_spec_CXX= enable_shared_with_static_runtimes_CXX=no # Source file extension for C++ test sources. ac_ext=cpp # Object file extension for compiled C++ test sources. objext=o objext_CXX=$objext # No sense in running all these tests if we already determined that # the CXX compiler isn't working. Some variables (like enable_shared) # are currently assumed to apply to all compilers on this platform, # and will be corrupted by setting them based on a non-working compiler. if test "$_lt_caught_CXX_error" != yes; then # Code to be used in simple compile tests lt_simple_compile_test_code="int some_variable = 0;" # Code to be used in simple link tests lt_simple_link_test_code='int main(int, char *[]) { return(0); }' # ltmain only uses $CC for tagged configurations so make sure $CC is set. # If no C compiler was specified, use CC. LTCC=${LTCC-"$CC"} # If no C compiler flags were specified, use CFLAGS. LTCFLAGS=${LTCFLAGS-"$CFLAGS"} # Allow CC to be a program name with arguments. compiler=$CC # save warnings/boilerplate of simple test code ac_outfile=conftest.$ac_objext echo "$lt_simple_compile_test_code" >conftest.$ac_ext eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err _lt_compiler_boilerplate=`cat conftest.err` $RM conftest* ac_outfile=conftest.$ac_objext echo "$lt_simple_link_test_code" >conftest.$ac_ext eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err _lt_linker_boilerplate=`cat conftest.err` $RM -r conftest* # Allow CC to be a program name with arguments. 
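# The two conftest runs just above compile and link a trivial C++
# source purely to capture the compiler's and linker's "boilerplate"
# diagnostics; later feature probes subtract that captured noise from
# their own output so a flag is only rejected when it produces a new
# warning.  Roughly, as a sketch (not executed here):
#   echo 'int some_variable = 0;' > conftest.cpp
#   $CXX -c conftest.cpp 2>&1 >/dev/null | sed '/^$/d' > boilerplate.err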
lt_save_CC=$CC lt_save_CFLAGS=$CFLAGS lt_save_LD=$LD lt_save_GCC=$GCC GCC=$GXX lt_save_with_gnu_ld=$with_gnu_ld lt_save_path_LD=$lt_cv_path_LD if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx else $as_unset lt_cv_prog_gnu_ld fi if test -n "${lt_cv_path_LDCXX+set}"; then lt_cv_path_LD=$lt_cv_path_LDCXX else $as_unset lt_cv_path_LD fi test -z "${LDCXX+set}" || LD=$LDCXX CC=${CXX-"c++"} CFLAGS=$CXXFLAGS compiler=$CC compiler_CXX=$CC for cc_temp in $compiler""; do case $cc_temp in compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; \-*) ;; *) break;; esac done cc_basename=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` if test -n "$compiler"; then # We don't want -fno-exception when compiling C++ code, so set the # no_builtin_flag separately if test "$GXX" = yes; then lt_prog_compiler_no_builtin_flag_CXX=' -fno-builtin' else lt_prog_compiler_no_builtin_flag_CXX= fi if test "$GXX" = yes; then # Set up default GNU C++ configuration # Check whether --with-gnu-ld was given. if test "${with_gnu_ld+set}" = set; then : withval=$with_gnu_ld; test "$withval" = no || with_gnu_ld=yes else with_gnu_ld=no fi ac_prog=ld if test "$GCC" = yes; then # Check if gcc -print-prog-name=ld gives a path. { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5 $as_echo_n "checking for ld used by $CC... " >&6; } case $host in *-*-mingw*) # gcc leaves a trailing carriage return which upsets mingw ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; *) ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; esac case $ac_prog in # Accept absolute paths. [\\/]* | ?:[\\/]*) re_direlt='/[^/][^/]*/\.\./' # Canonicalize the pathname of ld ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'` while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"` done test -z "$LD" && LD="$ac_prog" ;; "") # If it fails, then pretend we aren't using GCC. ac_prog=ld ;; *) # If it is relative, then search for the first ld in PATH. with_gnu_ld=unknown ;; esac elif test "$with_gnu_ld" = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5 $as_echo_n "checking for GNU ld... " >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5 $as_echo_n "checking for non-GNU ld... " >&6; } fi if ${lt_cv_path_LD+:} false; then : $as_echo_n "(cached) " >&6 else if test -z "$LD"; then lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR for ac_dir in $PATH; do IFS="$lt_save_ifs" test -z "$ac_dir" && ac_dir=. if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then lt_cv_path_LD="$ac_dir/$ac_prog" # Check to see if the program is GNU ld. I'd rather use --version, # but apparently some variants of GNU ld only accept -v. # Break only if it was the GNU/non-GNU ld that we prefer. case `"$lt_cv_path_LD" -v 2>&1 &5 $as_echo "$LD" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -z "$LD" && as_fn_error $? "no acceptable ld found in \$PATH" "$LINENO" 5 { $as_echo "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5 $as_echo_n "checking if the linker ($LD) is GNU ld... " >&6; } if ${lt_cv_prog_gnu_ld+:} false; then : $as_echo_n "(cached) " >&6 else # I'd rather use --version here, but apparently some GNU lds only accept -v. 
case `$LD -v 2>&1 &5 $as_echo "$lt_cv_prog_gnu_ld" >&6; } with_gnu_ld=$lt_cv_prog_gnu_ld # Check if GNU C++ uses GNU ld as the underlying linker, since the # archiving commands below assume that GNU ld is being used. if test "$with_gnu_ld" = yes; then archive_cmds_CXX='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' archive_expsym_cmds_CXX='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' export_dynamic_flag_spec_CXX='${wl}--export-dynamic' # If archive_cmds runs LD, not CC, wlarc should be empty # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to # investigate it a little bit more. (MM) wlarc='${wl}' # ancient GNU ld didn't support --whole-archive et. al. if eval "`$CC -print-prog-name=ld` --help 2>&1" | $GREP 'no-whole-archive' > /dev/null; then whole_archive_flag_spec_CXX="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' else whole_archive_flag_spec_CXX= fi else with_gnu_ld=no wlarc= # A generic and very simple default shared library creation # command for GNU C++ for the case where it uses the native # linker, instead of GNU ld. If possible, this setting should # overridden to take advantage of the native linker features on # the platform it is being used on. archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' fi # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' else GXX=no with_gnu_ld=no wlarc= fi # PORTME: fill in a description of your system's C++ link characteristics { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5 $as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; } ld_shlibs_CXX=yes case $host_os in aix3*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; aix[4-9]*) if test "$host_cpu" = ia64; then # On IA64, the linker does run time linking by default, so we don't # have to do anything special. aix_use_runtimelinking=no exp_sym_flag='-Bexport' no_entry_flag="" else aix_use_runtimelinking=no # Test if we are trying to use run time linking or normal # AIX style linking. If -brtl is somewhere in LDFLAGS, we # need to do runtime linking. case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*) for ld_flag in $LDFLAGS; do case $ld_flag in *-brtl*) aix_use_runtimelinking=yes break ;; esac done ;; esac exp_sym_flag='-bexport' no_entry_flag='-bnoentry' fi # When large executables or shared objects are built, AIX ld can # have problems creating the table of contents. If linking a library # or program results in "error TOC overflow" add -mminimal-toc to # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. 
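# In practice the remedy described above is applied by the person
# building the package, not by this script; for example (g++/gcc only,
# and only if an actual "TOC overflow" link error shows up):
#   CXXFLAGS="$CXXFLAGS -mminimal-toc"
#   LDFLAGS="$LDFLAGS -Wl,-bbigtoc"     # if -mminimal-toc is not enough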
archive_cmds_CXX='' hardcode_direct_CXX=yes hardcode_direct_absolute_CXX=yes hardcode_libdir_separator_CXX=':' link_all_deplibs_CXX=yes file_list_spec_CXX='${wl}-f,' if test "$GXX" = yes; then case $host_os in aix4.[012]|aix4.[012].*) # We only want to do this on AIX 4.2 and lower, the check # below for broken collect2 doesn't work under 4.3+ collect2name=`${CC} -print-prog-name=collect2` if test -f "$collect2name" && strings "$collect2name" | $GREP resolve_lib_name >/dev/null then # We have reworked collect2 : else # We have old collect2 hardcode_direct_CXX=unsupported # It fails to find uninstalled libraries when the uninstalled # path is not listed in the libpath. Setting hardcode_minus_L # to unsupported forces relinking hardcode_minus_L_CXX=yes hardcode_libdir_flag_spec_CXX='-L$libdir' hardcode_libdir_separator_CXX= fi esac shared_flag='-shared' if test "$aix_use_runtimelinking" = yes; then shared_flag="$shared_flag "'${wl}-G' fi else # not using gcc if test "$host_cpu" = ia64; then # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release # chokes on -Wl,-G. The following line is correct: shared_flag='-G' else if test "$aix_use_runtimelinking" = yes; then shared_flag='${wl}-G' else shared_flag='${wl}-bM:SRE' fi fi fi export_dynamic_flag_spec_CXX='${wl}-bexpall' # It seems that -bexpall does not export symbols beginning with # underscore (_), so it is better to generate a list of symbols to # export. always_export_symbols_CXX=yes if test "$aix_use_runtimelinking" = yes; then # Warning - without using the other runtime loading flags (-brtl), # -berok will link without error, but may produce a broken library. allow_undefined_flag_CXX='-berok' # Determine the default libpath from the value encoded in an empty # executable. if test "${lt_cv_aix_libpath+set}" = set; then aix_libpath=$lt_cv_aix_libpath else if ${lt_cv_aix_libpath__CXX+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_link "$LINENO"; then : lt_aix_libpath_sed=' /Import File Strings/,/^$/ { /^0/ { s/^0 *\([^ ]*\) *$/\1/ p } }' lt_cv_aix_libpath__CXX=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` # Check for a 64-bit object if we didn't find anything. if test -z "$lt_cv_aix_libpath__CXX"; then lt_cv_aix_libpath__CXX=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` fi fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext if test -z "$lt_cv_aix_libpath__CXX"; then lt_cv_aix_libpath__CXX="/usr/lib:/lib" fi fi aix_libpath=$lt_cv_aix_libpath__CXX fi hardcode_libdir_flag_spec_CXX='${wl}-blibpath:$libdir:'"$aix_libpath" archive_expsym_cmds_CXX='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" else if test "$host_cpu" = ia64; then hardcode_libdir_flag_spec_CXX='${wl}-R $libdir:/usr/lib:/lib' allow_undefined_flag_CXX="-z nodefs" archive_expsym_cmds_CXX="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" else # Determine the default libpath from the value encoded in an # empty executable. 
if test "${lt_cv_aix_libpath+set}" = set; then aix_libpath=$lt_cv_aix_libpath else if ${lt_cv_aix_libpath__CXX+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_link "$LINENO"; then : lt_aix_libpath_sed=' /Import File Strings/,/^$/ { /^0/ { s/^0 *\([^ ]*\) *$/\1/ p } }' lt_cv_aix_libpath__CXX=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` # Check for a 64-bit object if we didn't find anything. if test -z "$lt_cv_aix_libpath__CXX"; then lt_cv_aix_libpath__CXX=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` fi fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext if test -z "$lt_cv_aix_libpath__CXX"; then lt_cv_aix_libpath__CXX="/usr/lib:/lib" fi fi aix_libpath=$lt_cv_aix_libpath__CXX fi hardcode_libdir_flag_spec_CXX='${wl}-blibpath:$libdir:'"$aix_libpath" # Warning - without using the other run time loading flags, # -berok will link without error, but may produce a broken library. no_undefined_flag_CXX=' ${wl}-bernotok' allow_undefined_flag_CXX=' ${wl}-berok' if test "$with_gnu_ld" = yes; then # We only use this code for GNU lds that support --whole-archive. whole_archive_flag_spec_CXX='${wl}--whole-archive$convenience ${wl}--no-whole-archive' else # Exported symbols can be pulled into shared objects from archives whole_archive_flag_spec_CXX='$convenience' fi archive_cmds_need_lc_CXX=yes # This is similar to how AIX traditionally builds its shared # libraries. archive_expsym_cmds_CXX="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' fi fi ;; beos*) if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then allow_undefined_flag_CXX=unsupported # Joseph Beckenbach says some releases of gcc # support --undefined. This deserves some investigation. FIXME archive_cmds_CXX='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' else ld_shlibs_CXX=no fi ;; chorus*) case $cc_basename in *) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; esac ;; cygwin* | mingw* | pw32* | cegcc*) case $GXX,$cc_basename in ,cl* | no,cl*) # Native MSVC # hardcode_libdir_flag_spec is actually meaningless, as there is # no search path for DLLs. hardcode_libdir_flag_spec_CXX=' ' allow_undefined_flag_CXX=unsupported always_export_symbols_CXX=yes file_list_spec_CXX='@' # Tell ltmain to make .lib files, not .a files. libext=lib # Tell ltmain to make .dll files, not .so files. shrext_cmds=".dll" # FIXME: Setting linknames here is a bad hack. archive_cmds_CXX='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames=' archive_expsym_cmds_CXX='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then $SED -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp; else $SED -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp; fi~ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ linknames=' # The linker will not automatically build a static lib if we build a DLL. 
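# Condensed, what the native-MSVC branch above arranges (illustrative
# command only; foo.obj, bar_init and bar.dll are example names):
#   cl -nologo foo.obj -link -DLL -EXPORT:bar_init -IMPLIB:bar.dll.lib -OUT:bar.dll
# i.e. the tool chain produces .lib/.dll pairs instead of .a/.so, the
# export list is fed to the linker as -EXPORT: options generated from
# $export_symbols, and the manifest tool is run afterwards to embed any
# generated manifest into the output file.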
# _LT_TAGVAR(old_archive_from_new_cmds, CXX)='true' enable_shared_with_static_runtimes_CXX=yes # Don't use ranlib old_postinstall_cmds_CXX='chmod 644 $oldlib' postlink_cmds_CXX='lt_outputfile="@OUTPUT@"~ lt_tool_outputfile="@TOOL_OUTPUT@"~ case $lt_outputfile in *.exe|*.EXE) ;; *) lt_outputfile="$lt_outputfile.exe" lt_tool_outputfile="$lt_tool_outputfile.exe" ;; esac~ func_to_tool_file "$lt_outputfile"~ if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; $RM "$lt_outputfile.manifest"; fi' ;; *) # g++ # _LT_TAGVAR(hardcode_libdir_flag_spec, CXX) is actually meaningless, # as there is no search path for DLLs. hardcode_libdir_flag_spec_CXX='-L$libdir' export_dynamic_flag_spec_CXX='${wl}--export-all-symbols' allow_undefined_flag_CXX=unsupported always_export_symbols_CXX=no enable_shared_with_static_runtimes_CXX=yes if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' # If the export-symbols file already is a .def file (1st line # is EXPORTS), use it as is; otherwise, prepend... archive_expsym_cmds_CXX='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then cp $export_symbols $output_objdir/$soname.def; else echo EXPORTS > $output_objdir/$soname.def; cat $export_symbols >> $output_objdir/$soname.def; fi~ $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' else ld_shlibs_CXX=no fi ;; esac ;; darwin* | rhapsody*) archive_cmds_need_lc_CXX=no hardcode_direct_CXX=no hardcode_automatic_CXX=yes hardcode_shlibpath_var_CXX=unsupported if test "$lt_cv_ld_force_load" = "yes"; then whole_archive_flag_spec_CXX='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience ${wl}-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' else whole_archive_flag_spec_CXX='' fi link_all_deplibs_CXX=yes allow_undefined_flag_CXX="$_lt_dar_allow_undefined" case $cc_basename in ifort*) _lt_dar_can_shared=yes ;; *) _lt_dar_can_shared=$GCC ;; esac if test "$_lt_dar_can_shared" = "yes"; then output_verbose_link_cmd=func_echo_all archive_cmds_CXX="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}" module_cmds_CXX="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dsymutil}" archive_expsym_cmds_CXX="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}" module_expsym_cmds_CXX="sed -e 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dar_export_syms}${_lt_dsymutil}" if test "$lt_cv_apple_cc_single_mod" != "yes"; then archive_cmds_CXX="\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname 
\$verstring${_lt_dsymutil}" archive_expsym_cmds_CXX="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dar_export_syms}${_lt_dsymutil}" fi else ld_shlibs_CXX=no fi ;; dgux*) case $cc_basename in ec++*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; ghcx*) # Green Hills C++ Compiler # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; *) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; esac ;; freebsd2.*) # C++ shared libraries reported to be fairly broken before # switch to ELF ld_shlibs_CXX=no ;; freebsd-elf*) archive_cmds_need_lc_CXX=no ;; freebsd* | dragonfly*) # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF # conventions ld_shlibs_CXX=yes ;; haiku*) archive_cmds_CXX='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' link_all_deplibs_CXX=yes ;; hpux9*) hardcode_libdir_flag_spec_CXX='${wl}+b ${wl}$libdir' hardcode_libdir_separator_CXX=: export_dynamic_flag_spec_CXX='${wl}-E' hardcode_direct_CXX=yes hardcode_minus_L_CXX=yes # Not in the search PATH, # but as the default # location of the library. case $cc_basename in CC*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; aCC*) archive_cmds_CXX='$RM $output_objdir/$soname~$CC -b ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' ;; *) if test "$GXX" = yes; then archive_cmds_CXX='$RM $output_objdir/$soname~$CC -shared -nostdlib $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' else # FIXME: insert proper C++ library support ld_shlibs_CXX=no fi ;; esac ;; hpux10*|hpux11*) if test $with_gnu_ld = no; then hardcode_libdir_flag_spec_CXX='${wl}+b ${wl}$libdir' hardcode_libdir_separator_CXX=: case $host_cpu in hppa*64*|ia64*) ;; *) export_dynamic_flag_spec_CXX='${wl}-E' ;; esac fi case $host_cpu in hppa*64*|ia64*) hardcode_direct_CXX=no hardcode_shlibpath_var_CXX=no ;; *) hardcode_direct_CXX=yes hardcode_direct_absolute_CXX=yes hardcode_minus_L_CXX=yes # Not in the search PATH, # but as the default # location of the library. 
;; esac case $cc_basename in CC*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; aCC*) case $host_cpu in hppa*64*) archive_cmds_CXX='$CC -b ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; ia64*) archive_cmds_CXX='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; *) archive_cmds_CXX='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; esac # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' ;; *) if test "$GXX" = yes; then if test $with_gnu_ld = no; then case $host_cpu in hppa*64*) archive_cmds_CXX='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; ia64*) archive_cmds_CXX='$CC -shared -nostdlib $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; *) archive_cmds_CXX='$CC -shared -nostdlib $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; esac fi else # FIXME: insert proper C++ library support ld_shlibs_CXX=no fi ;; esac ;; interix[3-9]*) hardcode_direct_CXX=no hardcode_shlibpath_var_CXX=no hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' export_dynamic_flag_spec_CXX='${wl}-E' # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. # Instead, shared libraries are loaded at an image base (0x10000000 by # default) and relocated if they conflict, which is a slow very memory # consuming and fragmenting process. To avoid this, we pick a random, # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link # time. Moving up from 0x10000000 also allows more sbrk(2) space. archive_cmds_CXX='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' archive_expsym_cmds_CXX='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' ;; irix5* | irix6*) case $cc_basename in CC*) # SGI C++ archive_cmds_CXX='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' # Archives containing C++ object files must be created using # "CC -ar", where "CC" is the IRIX C++ compiler. This is # necessary to make sure instantiated templates are included # in the archive. 
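# Aside: the Interix image-base expression used a few lines above,
#   expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280
# picks a slot 0..2047, multiplies it by 262144 (0x40000, i.e. 256 KiB)
# and adds 1342177280 (0x50000000), yielding a 256 KiB-aligned base in
# the range 0x50000000..0x6FFC0000, exactly as the comment there states.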
old_archive_cmds_CXX='$CC -ar -WR,-u -o $oldlib $oldobjs' ;; *) if test "$GXX" = yes; then if test "$with_gnu_ld" = no; then archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' else archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` -o $lib' fi fi link_all_deplibs_CXX=yes ;; esac hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' hardcode_libdir_separator_CXX=: inherit_rpath_CXX=yes ;; linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) case $cc_basename in KCC*) # Kuck and Associates, Inc. (KAI) C++ Compiler # KCC will only create a shared library if the output file # ends with ".so" (or ".sl" for HP-UX), so rename the library # to its proper name (with version) after linking. archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' archive_expsym_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib ${wl}-retain-symbols-file,$export_symbols; mv \$templib $lib' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | $GREP "ld"`; rm -f libconftest$shared_ext; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' export_dynamic_flag_spec_CXX='${wl}--export-dynamic' # Archives containing C++ object files must be created using # "CC -Bstatic", where "CC" is the KAI C++ compiler. old_archive_cmds_CXX='$CC -Bstatic -o $oldlib $oldobjs' ;; icpc* | ecpc* ) # Intel C++ with_gnu_ld=yes # version 8.0 and above of icpc choke on multiply defined symbols # if we add $predep_objects and $postdep_objects, however 7.1 and # earlier do not add the objects themselves. 
case `$CC -V 2>&1` in *"Version 7."*) archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' ;; *) # Version 8.0 or newer tmp_idyn= case $host_cpu in ia64*) tmp_idyn=' -i_dynamic';; esac archive_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' archive_expsym_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' ;; esac archive_cmds_need_lc_CXX=no hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' export_dynamic_flag_spec_CXX='${wl}--export-dynamic' whole_archive_flag_spec_CXX='${wl}--whole-archive$convenience ${wl}--no-whole-archive' ;; pgCC* | pgcpp*) # Portland Group C++ compiler case `$CC -V` in *pgCC\ [1-5].* | *pgcpp\ [1-5].*) prelink_cmds_CXX='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~ compile_command="$compile_command `find $tpldir -name \*.o | sort | $NL2SP`"' old_archive_cmds_CXX='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~ $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | sort | $NL2SP`~ $RANLIB $oldlib' archive_cmds_CXX='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' archive_expsym_cmds_CXX='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' ;; *) # Version 6 and above use weak symbols archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' ;; esac hardcode_libdir_flag_spec_CXX='${wl}--rpath ${wl}$libdir' export_dynamic_flag_spec_CXX='${wl}--export-dynamic' whole_archive_flag_spec_CXX='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' ;; cxx*) # Compaq C++ archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib ${wl}-retain-symbols-file $wl$export_symbols' runpath_var=LD_RUN_PATH hardcode_libdir_flag_spec_CXX='-rpath $libdir' hardcode_libdir_separator_CXX=: # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. 
# # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "X$list" | $Xsed' ;; xl* | mpixl* | bgxl*) # IBM XL 8.0 on PPC, with GNU ld hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' export_dynamic_flag_spec_CXX='${wl}--export-dynamic' archive_cmds_CXX='$CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' if test "x$supports_anon_versioning" = xyes; then archive_expsym_cmds_CXX='echo "{ global:" > $output_objdir/$libname.ver~ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ echo "local: *; };" >> $output_objdir/$libname.ver~ $CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' fi ;; *) case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C++ 5.9 no_undefined_flag_CXX=' -zdefs' archive_cmds_CXX='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' archive_expsym_cmds_CXX='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file ${wl}$export_symbols' hardcode_libdir_flag_spec_CXX='-R$libdir' whole_archive_flag_spec_CXX='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' compiler_needs_object_CXX=yes # Not sure whether something based on # $CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 # would be better. output_verbose_link_cmd='func_echo_all' # Archives containing C++ object files must be created using # "CC -xar", where "CC" is the Sun C++ compiler. This is # necessary to make sure instantiated templates are included # in the archive. 
old_archive_cmds_CXX='$CC -xar -o $oldlib $oldobjs' ;; esac ;; esac ;; lynxos*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; m88k*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; mvs*) case $cc_basename in cxx*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; *) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; esac ;; netbsd*) if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then archive_cmds_CXX='$LD -Bshareable -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags' wlarc= hardcode_libdir_flag_spec_CXX='-R$libdir' hardcode_direct_CXX=yes hardcode_shlibpath_var_CXX=no fi # Workaround some broken pre-1.5 toolchains output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"' ;; *nto* | *qnx*) ld_shlibs_CXX=yes ;; openbsd2*) # C++ shared libraries are fairly broken ld_shlibs_CXX=no ;; openbsd*) if test -f /usr/libexec/ld.so; then hardcode_direct_CXX=yes hardcode_shlibpath_var_CXX=no hardcode_direct_absolute_CXX=yes archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file,$export_symbols -o $lib' export_dynamic_flag_spec_CXX='${wl}-E' whole_archive_flag_spec_CXX="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' fi output_verbose_link_cmd=func_echo_all else ld_shlibs_CXX=no fi ;; osf3* | osf4* | osf5*) case $cc_basename in KCC*) # Kuck and Associates, Inc. (KAI) C++ Compiler # KCC will only create a shared library if the output file # ends with ".so" (or ".sl" for HP-UX), so rename the library # to its proper name (with version) after linking. archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo "$lib" | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' hardcode_libdir_separator_CXX=: # Archives containing C++ object files must be created using # the KAI C++ compiler. 
case $host in osf3*) old_archive_cmds_CXX='$CC -Bstatic -o $oldlib $oldobjs' ;; *) old_archive_cmds_CXX='$CC -o $oldlib $oldobjs' ;; esac ;; RCC*) # Rational C++ 2.4.1 # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; cxx*) case $host in osf3*) allow_undefined_flag_CXX=' ${wl}-expect_unresolved ${wl}\*' archive_cmds_CXX='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $soname `test -n "$verstring" && func_echo_all "${wl}-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' ;; *) allow_undefined_flag_CXX=' -expect_unresolved \*' archive_cmds_CXX='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' archive_expsym_cmds_CXX='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~ echo "-hidden">> $lib.exp~ $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname ${wl}-input ${wl}$lib.exp `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib~ $RM $lib.exp' hardcode_libdir_flag_spec_CXX='-rpath $libdir' ;; esac hardcode_libdir_separator_CXX=: # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld" | $GREP -v "ld:"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' ;; *) if test "$GXX" = yes && test "$with_gnu_ld" = no; then allow_undefined_flag_CXX=' ${wl}-expect_unresolved ${wl}\*' case $host in osf3*) archive_cmds_CXX='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' ;; *) archive_cmds_CXX='$CC -shared $pic_flag -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' ;; esac hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' hardcode_libdir_separator_CXX=: # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. 
output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' else # FIXME: insert proper C++ library support ld_shlibs_CXX=no fi ;; esac ;; psos*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; sunos4*) case $cc_basename in CC*) # Sun C++ 4.x # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; lcc*) # Lucid # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; *) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; esac ;; solaris*) case $cc_basename in CC* | sunCC*) # Sun C++ 4.2, 5.x and Centerline C++ archive_cmds_need_lc_CXX=yes no_undefined_flag_CXX=' -zdefs' archive_cmds_CXX='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -G${allow_undefined_flag} ${wl}-M ${wl}$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' hardcode_libdir_flag_spec_CXX='-R$libdir' hardcode_shlibpath_var_CXX=no case $host_os in solaris2.[0-5] | solaris2.[0-5].*) ;; *) # The compiler driver will combine and reorder linker options, # but understands `-z linker_flag'. # Supported since Solaris 2.6 (maybe 2.5.1?) whole_archive_flag_spec_CXX='-z allextract$convenience -z defaultextract' ;; esac link_all_deplibs_CXX=yes output_verbose_link_cmd='func_echo_all' # Archives containing C++ object files must be created using # "CC -xar", where "CC" is the Sun C++ compiler. This is # necessary to make sure instantiated templates are included # in the archive. old_archive_cmds_CXX='$CC -xar -o $oldlib $oldobjs' ;; gcx*) # Green Hills C++ Compiler archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' # The C++ compiler must be used to create the archive. old_archive_cmds_CXX='$CC $LDFLAGS -archive -o $oldlib $oldobjs' ;; *) # GNU C++ compiler with Solaris linker if test "$GXX" = yes && test "$with_gnu_ld" = no; then no_undefined_flag_CXX=' ${wl}-z ${wl}defs' if $CC --version | $GREP -v '^2\.7' > /dev/null; then archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -shared $pic_flag -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' else # g++ 2.7 appears to require `-G' NOT `-shared' on this # platform. 
archive_cmds_CXX='$CC -G -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -G -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. output_verbose_link_cmd='$CC -G $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' fi hardcode_libdir_flag_spec_CXX='${wl}-R $wl$libdir' case $host_os in solaris2.[0-5] | solaris2.[0-5].*) ;; *) whole_archive_flag_spec_CXX='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract' ;; esac fi ;; esac ;; sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*) no_undefined_flag_CXX='${wl}-z,text' archive_cmds_need_lc_CXX=no hardcode_shlibpath_var_CXX=no runpath_var='LD_RUN_PATH' case $cc_basename in CC*) archive_cmds_CXX='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds_CXX='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ;; *) archive_cmds_CXX='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds_CXX='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ;; esac ;; sysv5* | sco3.2v5* | sco5v6*) # Note: We can NOT use -z defs as we might desire, because we do not # link with -lc, and that would cause any symbols used from libc to # always be unresolved, which means just about no library would # ever link correctly. If we're not using GNU ld we use -z text # though, which does catch some bad symbols but isn't as heavy-handed # as -z defs. 
no_undefined_flag_CXX='${wl}-z,text' allow_undefined_flag_CXX='${wl}-z,nodefs' archive_cmds_need_lc_CXX=no hardcode_shlibpath_var_CXX=no hardcode_libdir_flag_spec_CXX='${wl}-R,$libdir' hardcode_libdir_separator_CXX=':' link_all_deplibs_CXX=yes export_dynamic_flag_spec_CXX='${wl}-Bexport' runpath_var='LD_RUN_PATH' case $cc_basename in CC*) archive_cmds_CXX='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds_CXX='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' old_archive_cmds_CXX='$CC -Tprelink_objects $oldobjs~ '"$old_archive_cmds_CXX" reload_cmds_CXX='$CC -Tprelink_objects $reload_objs~ '"$reload_cmds_CXX" ;; *) archive_cmds_CXX='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds_CXX='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ;; esac ;; tandem*) case $cc_basename in NCC*) # NonStop-UX NCC 3.20 # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; *) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; esac ;; vxworks*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; *) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs_CXX" >&5 $as_echo "$ld_shlibs_CXX" >&6; } test "$ld_shlibs_CXX" = no && can_build_shared=no GCC_CXX="$GXX" LD_CXX="$LD" ## CAVEAT EMPTOR: ## There is no encapsulation within the following macros, do not change ## the running order or otherwise move them around unless you know exactly ## what you are doing... # Dependencies to place before and after the object being linked: predep_objects_CXX= postdep_objects_CXX= predeps_CXX= postdeps_CXX= compiler_lib_search_path_CXX= cat > conftest.$ac_ext <<_LT_EOF class Foo { public: Foo (void) { a = 0; } private: int a; }; _LT_EOF _lt_libdeps_save_CFLAGS=$CFLAGS case "$CC $CFLAGS " in #( *\ -flto*\ *) CFLAGS="$CFLAGS -fno-lto" ;; *\ -fwhopr*\ *) CFLAGS="$CFLAGS -fno-whopr" ;; *\ -fuse-linker-plugin*\ *) CFLAGS="$CFLAGS -fno-use-linker-plugin" ;; esac if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then # Parse the compiler output and extract the necessary # objects, libraries and library flags. # Sentinel used to keep track of whether or not we are before # the conftest object file. pre_test_object_deps_done=no for p in `eval "$output_verbose_link_cmd"`; do case ${prev}${p} in -L* | -R* | -l*) # Some compilers place space between "-{L,R}" and the path. # Remove the space. if test $p = "-L" || test $p = "-R"; then prev=$p continue fi # Expand the sysroot to ease extracting the directories later. if test -z "$prev"; then case $p in -L*) func_stripname_cnf '-L' '' "$p"; prev=-L; p=$func_stripname_result ;; -R*) func_stripname_cnf '-R' '' "$p"; prev=-R; p=$func_stripname_result ;; -l*) func_stripname_cnf '-l' '' "$p"; prev=-l; p=$func_stripname_result ;; esac fi case $p in =*) func_stripname_cnf '=' '' "$p"; p=$lt_sysroot$func_stripname_result ;; esac if test "$pre_test_object_deps_done" = no; then case ${prev} in -L | -R) # Internal compiler library paths should come after those # provided the user. The postdeps already come after the # user supplied libs so there is no need to process them. 
if test -z "$compiler_lib_search_path_CXX"; then compiler_lib_search_path_CXX="${prev}${p}" else compiler_lib_search_path_CXX="${compiler_lib_search_path_CXX} ${prev}${p}" fi ;; # The "-l" case would never come before the object being # linked, so don't bother handling this case. esac else if test -z "$postdeps_CXX"; then postdeps_CXX="${prev}${p}" else postdeps_CXX="${postdeps_CXX} ${prev}${p}" fi fi prev= ;; *.lto.$objext) ;; # Ignore GCC LTO objects *.$objext) # This assumes that the test object file only shows up # once in the compiler output. if test "$p" = "conftest.$objext"; then pre_test_object_deps_done=yes continue fi if test "$pre_test_object_deps_done" = no; then if test -z "$predep_objects_CXX"; then predep_objects_CXX="$p" else predep_objects_CXX="$predep_objects_CXX $p" fi else if test -z "$postdep_objects_CXX"; then postdep_objects_CXX="$p" else postdep_objects_CXX="$postdep_objects_CXX $p" fi fi ;; *) ;; # Ignore the rest. esac done # Clean up. rm -f a.out a.exe else echo "libtool.m4: error: problem compiling CXX test program" fi $RM -f confest.$objext CFLAGS=$_lt_libdeps_save_CFLAGS # PORTME: override above test on systems where it is broken case $host_os in interix[3-9]*) # Interix 3.5 installs completely hosed .la files for C++, so rather than # hack all around it, let's just trust "g++" to DTRT. predep_objects_CXX= postdep_objects_CXX= postdeps_CXX= ;; linux*) case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C++ 5.9 # The more standards-conforming stlport4 library is # incompatible with the Cstd library. Avoid specifying # it if it's in CXXFLAGS. Ignore libCrun as # -library=stlport4 depends on it. case " $CXX $CXXFLAGS " in *" -library=stlport4 "*) solaris_use_stlport4=yes ;; esac if test "$solaris_use_stlport4" != yes; then postdeps_CXX='-library=Cstd -library=Crun' fi ;; esac ;; solaris*) case $cc_basename in CC* | sunCC*) # The more standards-conforming stlport4 library is # incompatible with the Cstd library. Avoid specifying # it if it's in CXXFLAGS. Ignore libCrun as # -library=stlport4 depends on it. case " $CXX $CXXFLAGS " in *" -library=stlport4 "*) solaris_use_stlport4=yes ;; esac # Adding this requires a known-good setup of shared libraries for # Sun compiler versions before 5.6, else PIC objects from an old # archive will be linked into the output, leading to subtle bugs. if test "$solaris_use_stlport4" != yes; then postdeps_CXX='-library=Cstd -library=Crun' fi ;; esac ;; esac case " $postdeps_CXX " in *" -lc "*) archive_cmds_need_lc_CXX=no ;; esac compiler_lib_search_dirs_CXX= if test -n "${compiler_lib_search_path_CXX}"; then compiler_lib_search_dirs_CXX=`echo " ${compiler_lib_search_path_CXX}" | ${SED} -e 's! -L! !g' -e 's!^ !!'` fi lt_prog_compiler_wl_CXX= lt_prog_compiler_pic_CXX= lt_prog_compiler_static_CXX= # C++ specific cases for pic, static, wl, etc. if test "$GXX" = yes; then lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_static_CXX='-static' case $host_os in aix*) # All AIX code is PIC. if test "$host_cpu" = ia64; then # AIX 5 now supports IA64 processor lt_prog_compiler_static_CXX='-Bstatic' fi ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support lt_prog_compiler_pic_CXX='-fPIC' ;; m68k) # FIXME: we need at least 68020 code to build shared libraries, but # adding the `-m68020' flag to GCC prevents building anything better, # like `-m68040'. lt_prog_compiler_pic_CXX='-m68020 -resident32 -malways-restore-a4' ;; esac ;; beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) # PIC is the default for these OSes. 
;; mingw* | cygwin* | os2* | pw32* | cegcc*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). # Although the cygwin gcc ignores -fPIC, still need this for old-style # (--disable-auto-import) libraries lt_prog_compiler_pic_CXX='-DDLL_EXPORT' ;; darwin* | rhapsody*) # PIC is the default on this platform # Common symbols not allowed in MH_DYLIB files lt_prog_compiler_pic_CXX='-fno-common' ;; *djgpp*) # DJGPP does not support shared libraries at all lt_prog_compiler_pic_CXX= ;; haiku*) # PIC is the default for Haiku. # The "-static" flag exists, but is broken. lt_prog_compiler_static_CXX= ;; interix[3-9]*) # Interix 3.x gcc -fpic/-fPIC options generate broken code. # Instead, we relocate shared libraries at runtime. ;; sysv4*MP*) if test -d /usr/nec; then lt_prog_compiler_pic_CXX=-Kconform_pic fi ;; hpux*) # PIC is the default for 64-bit PA HP-UX, but not for 32-bit # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag # sets the default TLS model and affects inlining. case $host_cpu in hppa*64*) ;; *) lt_prog_compiler_pic_CXX='-fPIC' ;; esac ;; *qnx* | *nto*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. lt_prog_compiler_pic_CXX='-fPIC -shared' ;; *) lt_prog_compiler_pic_CXX='-fPIC' ;; esac else case $host_os in aix[4-9]*) # All AIX code is PIC. if test "$host_cpu" = ia64; then # AIX 5 now supports IA64 processor lt_prog_compiler_static_CXX='-Bstatic' else lt_prog_compiler_static_CXX='-bnso -bI:/lib/syscalls.exp' fi ;; chorus*) case $cc_basename in cxch68*) # Green Hills C++ Compiler # _LT_TAGVAR(lt_prog_compiler_static, CXX)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a" ;; esac ;; mingw* | cygwin* | os2* | pw32* | cegcc*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). lt_prog_compiler_pic_CXX='-DDLL_EXPORT' ;; dgux*) case $cc_basename in ec++*) lt_prog_compiler_pic_CXX='-KPIC' ;; ghcx*) # Green Hills C++ Compiler lt_prog_compiler_pic_CXX='-pic' ;; *) ;; esac ;; freebsd* | dragonfly*) # FreeBSD uses GNU C++ ;; hpux9* | hpux10* | hpux11*) case $cc_basename in CC*) lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_static_CXX='${wl}-a ${wl}archive' if test "$host_cpu" != ia64; then lt_prog_compiler_pic_CXX='+Z' fi ;; aCC*) lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_static_CXX='${wl}-a ${wl}archive' case $host_cpu in hppa*64*|ia64*) # +Z the default ;; *) lt_prog_compiler_pic_CXX='+Z' ;; esac ;; *) ;; esac ;; interix*) # This is c89, which is MS Visual C++ (no shared libs) # Anyone wants to do a port? ;; irix5* | irix6* | nonstopux*) case $cc_basename in CC*) lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_static_CXX='-non_shared' # CC pic flag -KPIC is the default. ;; *) ;; esac ;; linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) case $cc_basename in KCC*) # KAI C++ Compiler lt_prog_compiler_wl_CXX='--backend -Wl,' lt_prog_compiler_pic_CXX='-fPIC' ;; ecpc* ) # old Intel C++ for x86_64 which still supported -KPIC. lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_pic_CXX='-KPIC' lt_prog_compiler_static_CXX='-static' ;; icpc* ) # Intel C++, used to be incompatible with GCC. # ICC 10 doesn't accept -KPIC any more. 
lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_pic_CXX='-fPIC' lt_prog_compiler_static_CXX='-static' ;; pgCC* | pgcpp*) # Portland Group C++ compiler lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_pic_CXX='-fpic' lt_prog_compiler_static_CXX='-Bstatic' ;; cxx*) # Compaq C++ # Make sure the PIC flag is empty. It appears that all Alpha # Linux and Compaq Tru64 Unix objects are PIC. lt_prog_compiler_pic_CXX= lt_prog_compiler_static_CXX='-non_shared' ;; xlc* | xlC* | bgxl[cC]* | mpixl[cC]*) # IBM XL 8.0, 9.0 on PPC and BlueGene lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_pic_CXX='-qpic' lt_prog_compiler_static_CXX='-qstaticlink' ;; *) case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C++ 5.9 lt_prog_compiler_pic_CXX='-KPIC' lt_prog_compiler_static_CXX='-Bstatic' lt_prog_compiler_wl_CXX='-Qoption ld ' ;; esac ;; esac ;; lynxos*) ;; m88k*) ;; mvs*) case $cc_basename in cxx*) lt_prog_compiler_pic_CXX='-W c,exportall' ;; *) ;; esac ;; netbsd* | netbsdelf*-gnu) ;; *qnx* | *nto*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. lt_prog_compiler_pic_CXX='-fPIC -shared' ;; osf3* | osf4* | osf5*) case $cc_basename in KCC*) lt_prog_compiler_wl_CXX='--backend -Wl,' ;; RCC*) # Rational C++ 2.4.1 lt_prog_compiler_pic_CXX='-pic' ;; cxx*) # Digital/Compaq C++ lt_prog_compiler_wl_CXX='-Wl,' # Make sure the PIC flag is empty. It appears that all Alpha # Linux and Compaq Tru64 Unix objects are PIC. lt_prog_compiler_pic_CXX= lt_prog_compiler_static_CXX='-non_shared' ;; *) ;; esac ;; psos*) ;; solaris*) case $cc_basename in CC* | sunCC*) # Sun C++ 4.2, 5.x and Centerline C++ lt_prog_compiler_pic_CXX='-KPIC' lt_prog_compiler_static_CXX='-Bstatic' lt_prog_compiler_wl_CXX='-Qoption ld ' ;; gcx*) # Green Hills C++ Compiler lt_prog_compiler_pic_CXX='-PIC' ;; *) ;; esac ;; sunos4*) case $cc_basename in CC*) # Sun C++ 4.x lt_prog_compiler_pic_CXX='-pic' lt_prog_compiler_static_CXX='-Bstatic' ;; lcc*) # Lucid lt_prog_compiler_pic_CXX='-pic' ;; *) ;; esac ;; sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) case $cc_basename in CC*) lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_pic_CXX='-KPIC' lt_prog_compiler_static_CXX='-Bstatic' ;; esac ;; tandem*) case $cc_basename in NCC*) # NonStop-UX NCC 3.20 lt_prog_compiler_pic_CXX='-KPIC' ;; *) ;; esac ;; vxworks*) ;; *) lt_prog_compiler_can_build_shared_CXX=no ;; esac fi case $host_os in # For platforms which do not support PIC, -DPIC is meaningless: *djgpp*) lt_prog_compiler_pic_CXX= ;; *) lt_prog_compiler_pic_CXX="$lt_prog_compiler_pic_CXX -DPIC" ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 $as_echo_n "checking for $compiler option to produce PIC... " >&6; } if ${lt_cv_prog_compiler_pic_CXX+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_pic_CXX=$lt_prog_compiler_pic_CXX fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_CXX" >&5 $as_echo "$lt_cv_prog_compiler_pic_CXX" >&6; } lt_prog_compiler_pic_CXX=$lt_cv_prog_compiler_pic_CXX # # Check to make sure the PIC flag actually works. # if test -n "$lt_prog_compiler_pic_CXX"; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works" >&5 $as_echo_n "checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works... 
" >&6; } if ${lt_cv_prog_compiler_pic_works_CXX+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_pic_works_CXX=no ac_outfile=conftest.$ac_objext echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="$lt_prog_compiler_pic_CXX -DPIC" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. # The option is referenced via a variable to avoid confusing sed. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings other than the usual output. $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then lt_cv_prog_compiler_pic_works_CXX=yes fi fi $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_works_CXX" >&5 $as_echo "$lt_cv_prog_compiler_pic_works_CXX" >&6; } if test x"$lt_cv_prog_compiler_pic_works_CXX" = xyes; then case $lt_prog_compiler_pic_CXX in "" | " "*) ;; *) lt_prog_compiler_pic_CXX=" $lt_prog_compiler_pic_CXX" ;; esac else lt_prog_compiler_pic_CXX= lt_prog_compiler_can_build_shared_CXX=no fi fi # # Check to make sure the static flag actually works. # wl=$lt_prog_compiler_wl_CXX eval lt_tmp_static_flag=\"$lt_prog_compiler_static_CXX\" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5 $as_echo_n "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; } if ${lt_cv_prog_compiler_static_works_CXX+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_static_works_CXX=no save_LDFLAGS="$LDFLAGS" LDFLAGS="$LDFLAGS $lt_tmp_static_flag" echo "$lt_simple_link_test_code" > conftest.$ac_ext if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then # The linker can only warn and ignore the option if not recognized # So say no if there are warnings if test -s conftest.err; then # Append any errors to the config.log. cat conftest.err 1>&5 $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if diff conftest.exp conftest.er2 >/dev/null; then lt_cv_prog_compiler_static_works_CXX=yes fi else lt_cv_prog_compiler_static_works_CXX=yes fi fi $RM -r conftest* LDFLAGS="$save_LDFLAGS" fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_static_works_CXX" >&5 $as_echo "$lt_cv_prog_compiler_static_works_CXX" >&6; } if test x"$lt_cv_prog_compiler_static_works_CXX" = xyes; then : else lt_prog_compiler_static_CXX= fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 $as_echo_n "checking if $compiler supports -c -o file.$ac_objext... 
" >&6; } if ${lt_cv_prog_compiler_c_o_CXX+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_c_o_CXX=no $RM -r conftest 2>/dev/null mkdir conftest cd conftest mkdir out echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="-o out/conftest2.$ac_objext" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? cat out/conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then lt_cv_prog_compiler_c_o_CXX=yes fi fi chmod u+w . 2>&5 $RM conftest* # SGI C++ compiler will create directory out/ii_files/ for # template instantiation test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files $RM out/* && rmdir out cd .. $RM -r conftest $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o_CXX" >&5 $as_echo "$lt_cv_prog_compiler_c_o_CXX" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 $as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } if ${lt_cv_prog_compiler_c_o_CXX+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_c_o_CXX=no $RM -r conftest 2>/dev/null mkdir conftest cd conftest mkdir out echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="-o out/conftest2.$ac_objext" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? cat out/conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then lt_cv_prog_compiler_c_o_CXX=yes fi fi chmod u+w . 2>&5 $RM conftest* # SGI C++ compiler will create directory out/ii_files/ for # template instantiation test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files $RM out/* && rmdir out cd .. 
$RM -r conftest $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o_CXX" >&5 $as_echo "$lt_cv_prog_compiler_c_o_CXX" >&6; } hard_links="nottested" if test "$lt_cv_prog_compiler_c_o_CXX" = no && test "$need_locks" != no; then # do not overwrite the value of need_locks provided by the user { $as_echo "$as_me:${as_lineno-$LINENO}: checking if we can lock with hard links" >&5 $as_echo_n "checking if we can lock with hard links... " >&6; } hard_links=yes $RM conftest* ln conftest.a conftest.b 2>/dev/null && hard_links=no touch conftest.a ln conftest.a conftest.b 2>&5 || hard_links=no ln conftest.a conftest.b 2>/dev/null && hard_links=no { $as_echo "$as_me:${as_lineno-$LINENO}: result: $hard_links" >&5 $as_echo "$hard_links" >&6; } if test "$hard_links" = no; then { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&5 $as_echo "$as_me: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&2;} need_locks=warn fi else need_locks=no fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5 $as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; } export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' exclude_expsyms_CXX='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' case $host_os in aix[4-9]*) # If we're using GNU nm, then we don't want the "-C" option. # -C means demangle to AIX nm, but means don't demangle with GNU nm # Also, AIX nm treats weak defined symbols like other global defined # symbols, whereas GNU nm marks them as "W". if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then export_symbols_cmds_CXX='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' else export_symbols_cmds_CXX='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' fi ;; pw32*) export_symbols_cmds_CXX="$ltdll_cmds" ;; cygwin* | mingw* | cegcc*) case $cc_basename in cl*) exclude_expsyms_CXX='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' ;; *) export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' exclude_expsyms_CXX='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname' ;; esac ;; linux* | k*bsd*-gnu | gnu*) link_all_deplibs_CXX=no ;; *) export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs_CXX" >&5 $as_echo "$ld_shlibs_CXX" >&6; } test "$ld_shlibs_CXX" = no && can_build_shared=no with_gnu_ld_CXX=$with_gnu_ld # # Do we need to explicitly link libc? # case "x$archive_cmds_need_lc_CXX" in x|xyes) # Assume -lc should be added archive_cmds_need_lc_CXX=yes if test "$enable_shared" = yes && test "$GCC" = yes; then case $archive_cmds_CXX in *'~'*) # FIXME: we may have to deal with multi-command sequences. ;; '$CC '*) # Test whether the compiler implicitly links with -lc since on some # systems, -lgcc has to come before -lc. 
If gcc already passes -lc # to ld, don't add -lc before -lgcc. { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5 $as_echo_n "checking whether -lc should be explicitly linked in... " >&6; } if ${lt_cv_archive_cmds_need_lc_CXX+:} false; then : $as_echo_n "(cached) " >&6 else $RM conftest* echo "$lt_simple_compile_test_code" > conftest.$ac_ext if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } 2>conftest.err; then soname=conftest lib=conftest libobjs=conftest.$ac_objext deplibs= wl=$lt_prog_compiler_wl_CXX pic_flag=$lt_prog_compiler_pic_CXX compiler_flags=-v linker_flags=-v verstring= output_objdir=. libname=conftest lt_save_allow_undefined_flag=$allow_undefined_flag_CXX allow_undefined_flag_CXX= if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$archive_cmds_CXX 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\""; } >&5 (eval $archive_cmds_CXX 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } then lt_cv_archive_cmds_need_lc_CXX=no else lt_cv_archive_cmds_need_lc_CXX=yes fi allow_undefined_flag_CXX=$lt_save_allow_undefined_flag else cat conftest.err 1>&5 fi $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_archive_cmds_need_lc_CXX" >&5 $as_echo "$lt_cv_archive_cmds_need_lc_CXX" >&6; } archive_cmds_need_lc_CXX=$lt_cv_archive_cmds_need_lc_CXX ;; esac fi ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5 $as_echo_n "checking dynamic linker characteristics... " >&6; } library_names_spec= libname_spec='lib$name' soname_spec= shrext_cmds=".so" postinstall_cmds= postuninstall_cmds= finish_cmds= finish_eval= shlibpath_var= shlibpath_overrides_runpath=unknown version_type=none dynamic_linker="$host_os ld.so" sys_lib_dlsearch_path_spec="/lib /usr/lib" need_lib_prefix=unknown hardcode_into_libs=no # when you set need_version to no, make sure it does not cause -set_version # flags to be left without arguments need_version=unknown case $host_os in aix3*) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' shlibpath_var=LIBPATH # AIX 3 has no versioning support, so we append a major version to the name. soname_spec='${libname}${release}${shared_ext}$major' ;; aix[4-9]*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no hardcode_into_libs=yes if test "$host_cpu" = ia64; then # AIX 5 supports IA64 library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' shlibpath_var=LD_LIBRARY_PATH else # With GCC up to 2.95.x, collect2 would create an import file # for dependence libraries. The import file would start with # the line `#! .'. This would cause the generated library to # depend on `.', always an invalid library. This was fixed in # development snapshots of GCC prior to 3.0. case $host_os in aix4 | aix4.[01] | aix4.[01].*) if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' echo ' yes ' echo '#endif'; } | ${CC} -E - | $GREP yes > /dev/null; then : else can_build_shared=no fi ;; esac # AIX (on Power*) has no versioning support, so currently we can not hardcode correct # soname into executable. 
Probably we can add versioning support to # collect2, so additional links can be useful in future. if test "$aix_use_runtimelinking" = yes; then # If using run time linking (on AIX 4.2 or later) use lib.so # instead of lib.a to let people know that these are not # typical AIX shared libraries. library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' else # We preserve .a as extension for shared libraries through AIX4.2 # and later when we are not doing run time linking. library_names_spec='${libname}${release}.a $libname.a' soname_spec='${libname}${release}${shared_ext}$major' fi shlibpath_var=LIBPATH fi ;; amigaos*) case $host_cpu in powerpc) # Since July 2007 AmigaOS4 officially supports .so libraries. # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' ;; m68k) library_names_spec='$libname.ixlibrary $libname.a' # Create ${libname}_ixlibrary.a entries in /sys/libs. finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' ;; esac ;; beos*) library_names_spec='${libname}${shared_ext}' dynamic_linker="$host_os ld.so" shlibpath_var=LIBRARY_PATH ;; bsdi[45]*) version_type=linux # correct to gnu/linux during the next big refactor need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' shlibpath_var=LD_LIBRARY_PATH sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" # the default ld.so.conf also contains /usr/contrib/lib and # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow # libtool to hard-code these into programs ;; cygwin* | mingw* | pw32* | cegcc*) version_type=windows shrext_cmds=".dll" need_version=no need_lib_prefix=no case $GCC,$cc_basename in yes,*) # gcc library_names_spec='$libname.dll.a' # DLL is installed to $(libdir)/../bin by postinstall_cmds postinstall_cmds='base_file=`basename \${file}`~ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ dldir=$destdir/`dirname \$dlpath`~ test -d \$dldir || mkdir -p \$dldir~ $install_prog $dir/$dlname \$dldir/$dlname~ chmod a+x \$dldir/$dlname~ if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; fi' postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; echo \$dlname'\''`~ dlpath=$dir/\$dldll~ $RM \$dlpath' shlibpath_overrides_runpath=yes case $host_os in cygwin*) # Cygwin DLLs use 'cyg' prefix rather than 'lib' soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' ;; mingw* | cegcc*) # MinGW DLLs use traditional 'lib' prefix soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' ;; pw32*) # pw32 DLLs use 'pw' prefix rather than 'lib' library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' ;; esac dynamic_linker='Win32 ld.exe' ;; *,cl*) # Native MSVC libname_spec='$name' soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' library_names_spec='${libname}.dll.lib' case $build_os in mingw*) sys_lib_search_path_spec= lt_save_ifs=$IFS IFS=';' for lt_path in $LIB do IFS=$lt_save_ifs # Let DOS variable expansion print the short 8.3 style file name. lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" done IFS=$lt_save_ifs # Convert to MSYS style. sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'` ;; cygwin*) # Convert to unix form, then to dos form, then back to unix form # but this time dos style (no spaces!) so that the unix form looks # like /cygdrive/c/PROGRA~1:/cygdr... sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` ;; *) sys_lib_search_path_spec="$LIB" if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then # It is most probably a Windows format PATH. sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` else sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` fi # FIXME: find the short name or the path components, as spaces are # common. (e.g. "Program Files" -> "PROGRA~1") ;; esac # DLL is installed to $(libdir)/../bin by postinstall_cmds postinstall_cmds='base_file=`basename \${file}`~ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ dldir=$destdir/`dirname \$dlpath`~ test -d \$dldir || mkdir -p \$dldir~ $install_prog $dir/$dlname \$dldir/$dlname' postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ dlpath=$dir/\$dldll~ $RM \$dlpath' shlibpath_overrides_runpath=yes dynamic_linker='Win32 link.exe' ;; *) # Assume MSVC wrapper library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' dynamic_linker='Win32 ld.exe' ;; esac # FIXME: first we should search . 
and the directory the executable is in shlibpath_var=PATH ;; darwin* | rhapsody*) dynamic_linker="$host_os dyld" version_type=darwin need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${major}$shared_ext ${libname}$shared_ext' soname_spec='${libname}${release}${major}$shared_ext' shlibpath_overrides_runpath=yes shlibpath_var=DYLD_LIBRARY_PATH shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' ;; dgux*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH ;; freebsd* | dragonfly*) # DragonFly does not have aout. When/if they implement a new # versioning mechanism, adjust this. if test -x /usr/bin/objformat; then objformat=`/usr/bin/objformat` else case $host_os in freebsd[23].*) objformat=aout ;; *) objformat=elf ;; esac fi version_type=freebsd-$objformat case $version_type in freebsd-elf*) library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' need_version=no need_lib_prefix=no ;; freebsd-*) library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' need_version=yes ;; esac shlibpath_var=LD_LIBRARY_PATH case $host_os in freebsd2.*) shlibpath_overrides_runpath=yes ;; freebsd3.[01]* | freebsdelf3.[01]*) shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; freebsd3.[2-9]* | freebsdelf3.[2-9]* | \ freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1) shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; *) # from 4.6 on, and DragonFly shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; esac ;; haiku*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no dynamic_linker="$host_os runtime_loader" library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LIBRARY_PATH shlibpath_overrides_runpath=yes sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' hardcode_into_libs=yes ;; hpux9* | hpux10* | hpux11*) # Give a soname corresponding to the major version so that dld.sl refuses to # link against other versions. version_type=sunos need_lib_prefix=no need_version=no case $host_cpu in ia64*) shrext_cmds='.so' hardcode_into_libs=yes dynamic_linker="$host_os dld.so" shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' if test "X$HPUX_IA64_MODE" = X32; then sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" else sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" fi sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; hppa*64*) shrext_cmds='.sl' hardcode_into_libs=yes dynamic_linker="$host_os dld.sl" shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. 
library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; *) shrext_cmds='.sl' dynamic_linker="$host_os dld.sl" shlibpath_var=SHLIB_PATH shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' ;; esac # HP-UX runs *really* slowly unless shared libraries are mode 555, ... postinstall_cmds='chmod 555 $lib' # or fails outright, so override atomically: install_override_mode=555 ;; interix[3-9]*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; irix5* | irix6* | nonstopux*) case $host_os in nonstopux*) version_type=nonstopux ;; *) if test "$lt_cv_prog_gnu_ld" = yes; then version_type=linux # correct to gnu/linux during the next big refactor else version_type=irix fi ;; esac need_lib_prefix=no need_version=no soname_spec='${libname}${release}${shared_ext}$major' library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}' case $host_os in irix5* | nonstopux*) libsuff= shlibsuff= ;; *) case $LD in # libtool.m4 will add one of these switches to LD *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") libsuff= shlibsuff= libmagic=32-bit;; *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") libsuff=32 shlibsuff=N32 libmagic=N32;; *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") libsuff=64 shlibsuff=64 libmagic=64-bit;; *) libsuff= shlibsuff= libmagic=never-match;; esac ;; esac shlibpath_var=LD_LIBRARY${shlibsuff}_PATH shlibpath_overrides_runpath=no sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}" sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}" hardcode_into_libs=yes ;; # No shared lib support for Linux oldld, aout, or coff. linux*oldld* | linux*aout* | linux*coff*) dynamic_linker=no ;; # This must be glibc/ELF. linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no # Some binutils ld are patched to set DT_RUNPATH if ${lt_cv_shlibpath_overrides_runpath+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_shlibpath_overrides_runpath=no save_LDFLAGS=$LDFLAGS save_libdir=$libdir eval "libdir=/foo; wl=\"$lt_prog_compiler_wl_CXX\"; \ LDFLAGS=\"\$LDFLAGS $hardcode_libdir_flag_spec_CXX\"" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_link "$LINENO"; then : if ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null; then : lt_cv_shlibpath_overrides_runpath=yes fi fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LDFLAGS=$save_LDFLAGS libdir=$save_libdir fi shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath # This implies no fast_install, which is unacceptable. # Some rework will be needed to allow for fast_install # before this can be enabled. hardcode_into_libs=yes # Append ld.so.conf contents to the search path if test -f /etc/ld.so.conf; then lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '` sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" fi # We used to test for /lib/ld.so.1 and disable shared libraries on # powerpc, because MkLinux only supported shared libraries with the # GNU dynamic linker. Since this was broken with cross compilers, # most powerpc-linux boxes support dynamic linking these days and # people can always --disable-shared, the test was removed, and we # assume the GNU/Linux dynamic linker is in use. dynamic_linker='GNU/Linux ld.so' ;; netbsdelf*-gnu) version_type=linux need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes dynamic_linker='NetBSD ld.elf_so' ;; netbsd*) version_type=sunos need_lib_prefix=no need_version=no if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' dynamic_linker='NetBSD (a.out) ld.so' else library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' dynamic_linker='NetBSD ld.elf_so' fi shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; newsos6) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes ;; *nto* | *qnx*) version_type=qnx need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes dynamic_linker='ldqnx.so' ;; openbsd*) version_type=sunos sys_lib_dlsearch_path_spec="/usr/lib" need_lib_prefix=no # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs. 
case $host_os in openbsd3.3 | openbsd3.3.*) need_version=yes ;; *) need_version=no ;; esac library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' shlibpath_var=LD_LIBRARY_PATH if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then case $host_os in openbsd2.[89] | openbsd2.[89].*) shlibpath_overrides_runpath=no ;; *) shlibpath_overrides_runpath=yes ;; esac else shlibpath_overrides_runpath=yes fi ;; os2*) libname_spec='$name' shrext_cmds=".dll" need_lib_prefix=no library_names_spec='$libname${shared_ext} $libname.a' dynamic_linker='OS/2 ld.exe' shlibpath_var=LIBPATH ;; osf3* | osf4* | osf5*) version_type=osf need_lib_prefix=no need_version=no soname_spec='${libname}${release}${shared_ext}$major' library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' shlibpath_var=LD_LIBRARY_PATH sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec" ;; rdos*) dynamic_linker=no ;; solaris*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes # ldd complains unless libraries are executable postinstall_cmds='chmod +x $lib' ;; sunos4*) version_type=sunos library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes if test "$with_gnu_ld" = yes; then need_lib_prefix=no fi need_version=yes ;; sysv4 | sysv4.3*) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH case $host_vendor in sni) shlibpath_overrides_runpath=no need_lib_prefix=no runpath_var=LD_RUN_PATH ;; siemens) need_lib_prefix=no ;; motorola) need_lib_prefix=no need_version=no shlibpath_overrides_runpath=no sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' ;; esac ;; sysv4*MP*) if test -d /usr/nec ;then version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}' soname_spec='$libname${shared_ext}.$major' shlibpath_var=LD_LIBRARY_PATH fi ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) version_type=freebsd-elf need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes if test "$with_gnu_ld" = yes; then sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' else sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' case $host_os in sco3.2v5*) sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" ;; esac fi sys_lib_dlsearch_path_spec='/usr/lib' 
;; tpf*) # TPF is a cross-target only. Preferred cross-host = GNU/Linux. version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; uts4*) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH ;; *) dynamic_linker=no ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: result: $dynamic_linker" >&5 $as_echo "$dynamic_linker" >&6; } test "$dynamic_linker" = no && can_build_shared=no variables_saved_for_relink="PATH $shlibpath_var $runpath_var" if test "$GCC" = yes; then variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" fi if test "${lt_cv_sys_lib_search_path_spec+set}" = set; then sys_lib_search_path_spec="$lt_cv_sys_lib_search_path_spec" fi if test "${lt_cv_sys_lib_dlsearch_path_spec+set}" = set; then sys_lib_dlsearch_path_spec="$lt_cv_sys_lib_dlsearch_path_spec" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5 $as_echo_n "checking how to hardcode library paths into programs... " >&6; } hardcode_action_CXX= if test -n "$hardcode_libdir_flag_spec_CXX" || test -n "$runpath_var_CXX" || test "X$hardcode_automatic_CXX" = "Xyes" ; then # We can hardcode non-existent directories. if test "$hardcode_direct_CXX" != no && # If the only mechanism to avoid hardcoding is shlibpath_var, we # have to relink, otherwise we might link with an installed library # when we should be linking with a yet-to-be-installed one ## test "$_LT_TAGVAR(hardcode_shlibpath_var, CXX)" != no && test "$hardcode_minus_L_CXX" != no; then # Linking always hardcodes the temporary library directory. hardcode_action_CXX=relink else # We can link without hardcoding, and we can hardcode nonexisting dirs. hardcode_action_CXX=immediate fi else # We cannot hardcode anything, or else we can only hardcode existing # directories. 
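# Outcome computed here: hardcode_action_CXX becomes "relink" when the only way to avoid
# hardcoding a not-yet-installed library path is to relink at install time, "immediate"
# when paths can be hardcoded even for directories that do not exist yet, and
# "unsupported" in this branch, where no hardcoding mechanism is available. A "relink"
# result later turns off fast installation, while shlibpath_overrides_runpath=yes makes
# fast installation needless.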
hardcode_action_CXX=unsupported fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $hardcode_action_CXX" >&5 $as_echo "$hardcode_action_CXX" >&6; } if test "$hardcode_action_CXX" = relink || test "$inherit_rpath_CXX" = yes; then # Fast installation is not supported enable_fast_install=no elif test "$shlibpath_overrides_runpath" = yes || test "$enable_shared" = no; then # Fast installation is not necessary enable_fast_install=needless fi fi # test -n "$compiler" CC=$lt_save_CC CFLAGS=$lt_save_CFLAGS LDCXX=$LD LD=$lt_save_LD GCC=$lt_save_GCC with_gnu_ld=$lt_save_with_gnu_ld lt_cv_path_LDCXX=$lt_cv_path_LD lt_cv_path_LD=$lt_save_path_LD lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld fi # test "$_lt_caught_CXX_error" != yes ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu ac_config_commands="$ac_config_commands libtool" # Only expand once: LT_DLLOADERS= ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu LIBADD_DLOPEN= { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing dlopen" >&5 $as_echo_n "checking for library containing dlopen... " >&6; } if ${ac_cv_search_dlopen+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char dlopen (); int main () { return dlopen (); ; return 0; } _ACEOF for ac_lib in '' dl; do if test -z "$ac_lib"; then ac_res="none required" else ac_res=-l$ac_lib LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : ac_cv_search_dlopen=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext if ${ac_cv_search_dlopen+:} false; then : break fi done if ${ac_cv_search_dlopen+:} false; then : else ac_cv_search_dlopen=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_dlopen" >&5 $as_echo "$ac_cv_search_dlopen" >&6; } ac_res=$ac_cv_search_dlopen if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" $as_echo "#define HAVE_LIBDL 1" >>confdefs.h if test "$ac_cv_search_dlopen" != "none required" ; then LIBADD_DLOPEN="-ldl" fi libltdl_cv_lib_dl_dlopen="yes" LT_DLLOADERS="$LT_DLLOADERS ${lt_dlopen_dir+$lt_dlopen_dir/}dlopen.la" else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #if HAVE_DLFCN_H # include #endif int main () { dlopen(0, 0); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : $as_echo "#define HAVE_LIBDL 1" >>confdefs.h libltdl_cv_func_dlopen="yes" LT_DLLOADERS="$LT_DLLOADERS ${lt_dlopen_dir+$lt_dlopen_dir/}dlopen.la" else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -lsvld" >&5 $as_echo_n "checking for dlopen in -lsvld... " >&6; } if ${ac_cv_lib_svld_dlopen+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lsvld $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. 
Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char dlopen (); int main () { return dlopen (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_svld_dlopen=yes else ac_cv_lib_svld_dlopen=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_svld_dlopen" >&5 $as_echo "$ac_cv_lib_svld_dlopen" >&6; } if test "x$ac_cv_lib_svld_dlopen" = xyes; then : $as_echo "#define HAVE_LIBDL 1" >>confdefs.h LIBADD_DLOPEN="-lsvld" libltdl_cv_func_dlopen="yes" LT_DLLOADERS="$LT_DLLOADERS ${lt_dlopen_dir+$lt_dlopen_dir/}dlopen.la" fi fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi if test x"$libltdl_cv_func_dlopen" = xyes || test x"$libltdl_cv_lib_dl_dlopen" = xyes then lt_save_LIBS="$LIBS" LIBS="$LIBS $LIBADD_DLOPEN" for ac_func in dlerror do : ac_fn_c_check_func "$LINENO" "dlerror" "ac_cv_func_dlerror" if test "x$ac_cv_func_dlerror" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_DLERROR 1 _ACEOF fi done LIBS="$lt_save_LIBS" fi LIBADD_SHL_LOAD= ac_fn_c_check_func "$LINENO" "shl_load" "ac_cv_func_shl_load" if test "x$ac_cv_func_shl_load" = xyes; then : $as_echo "#define HAVE_SHL_LOAD 1" >>confdefs.h LT_DLLOADERS="$LT_DLLOADERS ${lt_dlopen_dir+$lt_dlopen_dir/}shl_load.la" else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for shl_load in -ldld" >&5 $as_echo_n "checking for shl_load in -ldld... " >&6; } if ${ac_cv_lib_dld_shl_load+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ldld $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char shl_load (); int main () { return shl_load (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_dld_shl_load=yes else ac_cv_lib_dld_shl_load=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_shl_load" >&5 $as_echo "$ac_cv_lib_dld_shl_load" >&6; } if test "x$ac_cv_lib_dld_shl_load" = xyes; then : $as_echo "#define HAVE_SHL_LOAD 1" >>confdefs.h LT_DLLOADERS="$LT_DLLOADERS ${lt_dlopen_dir+$lt_dlopen_dir/}shl_load.la" LIBADD_SHL_LOAD="-ldld" fi fi case $host_os in darwin[1567].*) # We only want this for pre-Mac OS X 10.4. ac_fn_c_check_func "$LINENO" "_dyld_func_lookup" "ac_cv_func__dyld_func_lookup" if test "x$ac_cv_func__dyld_func_lookup" = xyes; then : $as_echo "#define HAVE_DYLD 1" >>confdefs.h LT_DLLOADERS="$LT_DLLOADERS ${lt_dlopen_dir+$lt_dlopen_dir/}dyld.la" fi ;; beos*) LT_DLLOADERS="$LT_DLLOADERS ${lt_dlopen_dir+$lt_dlopen_dir/}load_add_on.la" ;; cygwin* | mingw* | os2* | pw32*) ac_fn_c_check_decl "$LINENO" "cygwin_conv_path" "ac_cv_have_decl_cygwin_conv_path" "#include " if test "x$ac_cv_have_decl_cygwin_conv_path" = xyes; then : ac_have_decl=1 else ac_have_decl=0 fi cat >>confdefs.h <<_ACEOF #define HAVE_DECL_CYGWIN_CONV_PATH $ac_have_decl _ACEOF LT_DLLOADERS="$LT_DLLOADERS ${lt_dlopen_dir+$lt_dlopen_dir/}loadlibrary.la" ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dld_link in -ldld" >&5 $as_echo_n "checking for dld_link in -ldld... 
" >&6; } if ${ac_cv_lib_dld_dld_link+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ldld $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char dld_link (); int main () { return dld_link (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_dld_dld_link=yes else ac_cv_lib_dld_dld_link=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_dld_link" >&5 $as_echo "$ac_cv_lib_dld_dld_link" >&6; } if test "x$ac_cv_lib_dld_dld_link" = xyes; then : $as_echo "#define HAVE_DLD 1" >>confdefs.h LT_DLLOADERS="$LT_DLLOADERS ${lt_dlopen_dir+$lt_dlopen_dir/}dld_link.la" fi LT_DLPREOPEN= if test -n "$LT_DLLOADERS" then for lt_loader in $LT_DLLOADERS; do LT_DLPREOPEN="$LT_DLPREOPEN-dlpreopen $lt_loader " done $as_echo "#define HAVE_LIBDLLOADER 1" >>confdefs.h fi LIBADD_DL="$LIBADD_DLOPEN $LIBADD_SHL_LOAD" ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu if test x$use_libtool != xno; then DEFAULT_OBJECT_TYPE=".lo" DEFAULT_ARCHIVE_TYPE=".la" DEFAULT_SHARED_OBJECT_TYPE=".la" LIBTOOL="\$(LIBTOOL)" LIBTOOL_INSTALL_TARGET="libtool-install" LIBTOOL_UNINSTALL_TARGET="libtool-uninstall" LIBTOOL_CLEAN_TARGET="libtool-clean" QMAKE_LIBTOOL="${BUILD_DIR}/libtool" FD_PLUGIN_DIR="src/plugins/fd" DIR_PLUGIN_DIR="src/plugins/dir" DLIB="-rpath \$(libdir)" have_plugins=yes else DEFAULT_OBJECT_TYPE=".o" DEFAULT_ARCHIVE_TYPE=".a" DEFAULT_SHARED_OBJECT_TYPE=".so" LIBTOOL="# \$(LIBTOOL)" LIBTOOL_INSTALL_TARGET="" LIBTOOL_UNINSTALL_TARGET="" LIBTOOL_CLEAN_TARGET="" QMAKE_LIBTOOL="# ${BUILD_DIR}/libtool" FD_PLUGIN_DIR="" DIR_PLUGIN_DIR="" have_plugins=no fi # Check whether --enable-includes was given. 
if test "${enable_includes+set}" = set; then : enableval=$enable_includes; if test x$enableval = xyes; then install_includes=yes fi fi if test x$use_libtool != xno -a x$install_includes = xyes; then INCLUDE_INSTALL_TARGET="install-includes" INCLUDE_UNINSTALL_TARGET="uninstall-includes" else INCLUDE_INSTALL_TARGET="" INCLUDE_UNINSTALL_TARGET="" fi case $host_os in *cygwin* ) CYGWIN=yes;; * ) CYGWIN=no;; esac if test $HAVE_UNAME=yes -a x`uname -s` = xSunOS then if $TRUEPRG; then HAVE_SUN_OS_TRUE= HAVE_SUN_OS_FALSE='#' else HAVE_SUN_OS_TRUE='#' HAVE_SUN_OS_FALSE= fi $as_echo "#define HAVE_SUN_OS 1" >>confdefs.h else if $FALSEPRG; then HAVE_SUN_OS_TRUE= HAVE_SUN_OS_FALSE='#' else HAVE_SUN_OS_TRUE='#' HAVE_SUN_OS_FALSE= fi fi if test $HAVE_UNAME=yes -a x`uname -s` = xGNU then if $TRUEPRG; then HAVE_HURD_OS_TRUE= HAVE_HURD_OS_FALSE='#' else HAVE_HURD_OS_TRUE='#' HAVE_HURD_OS_FALSE= fi $as_echo "#define HAVE_HURD_OS 1" >>confdefs.h else if $FALSEPRG; then HAVE_HURD_OS_TRUE= HAVE_HURD_OS_FALSE='#' else HAVE_HURD_OS_TRUE='#' HAVE_HURD_OS_FALSE= fi fi if test $HAVE_UNAME=yes -a x`uname -s` = xOSF1 then if $TRUEPRG; then HAVE_OSF1_OS_TRUE= HAVE_OSF1_OS_FALSE='#' else HAVE_OSF1_OS_TRUE='#' HAVE_OSF1_OS_FALSE= fi $as_echo "#define HAVE_OSF1_OS 1" >>confdefs.h else if $FALSEPRG; then HAVE_OSF1_OS_TRUE= HAVE_OSF1_OS_FALSE='#' else HAVE_OSF1_OS_TRUE='#' HAVE_OSF1_OS_FALSE= fi fi if test $HAVE_UNAME=yes -a x`uname -s` = xAIX then if $TRUEPRG; then HAVE_AIX_OS_TRUE= HAVE_AIX_OS_FALSE='#' else HAVE_AIX_OS_TRUE='#' HAVE_AIX_OS_FALSE= fi $as_echo "#define HAVE_AIX_OS 1" >>confdefs.h else if $FALSEPRG; then HAVE_AIX_OS_TRUE= HAVE_AIX_OS_FALSE='#' else HAVE_AIX_OS_TRUE='#' HAVE_AIX_OS_FALSE= fi fi if test $HAVE_UNAME=yes -a x`uname -s` = xHP-UX then if $TRUEPRG; then HAVE_HPUX_OS_TRUE= HAVE_HPUX_OS_FALSE='#' else HAVE_HPUX_OS_TRUE='#' HAVE_HPUX_OS_FALSE= fi $as_echo "#define HAVE_HPUX_OS 1" >>confdefs.h else if $FALSEPRG; then HAVE_HPUX_OS_TRUE= HAVE_HPUX_OS_FALSE='#' else HAVE_HPUX_OS_TRUE='#' HAVE_HPUX_OS_FALSE= fi fi if test $HAVE_UNAME=yes -a x`uname -s` = xLinux then if $TRUEPRG; then HAVE_LINUX_OS_TRUE= HAVE_LINUX_OS_FALSE='#' else HAVE_LINUX_OS_TRUE='#' HAVE_LINUX_OS_FALSE= fi $as_echo "#define HAVE_LINUX_OS 1" >>confdefs.h else if $FALSEPRG; then HAVE_LINUX_OS_TRUE= HAVE_LINUX_OS_FALSE='#' else HAVE_LINUX_OS_TRUE='#' HAVE_LINUX_OS_FALSE= fi fi if test $HAVE_UNAME=yes -a x`uname -s` = xFreeBSD then if $TRUEPRG; then HAVE_FREEBSD_OS_TRUE= HAVE_FREEBSD_OS_FALSE='#' else HAVE_FREEBSD_OS_TRUE='#' HAVE_FREEBSD_OS_FALSE= fi $as_echo "#define HAVE_FREEBSD_OS 1" >>confdefs.h else if $FALSEPRG; then HAVE_FREEBSD_OS_TRUE= HAVE_FREEBSD_OS_FALSE='#' else HAVE_FREEBSD_OS_TRUE='#' HAVE_FREEBSD_OS_FALSE= fi fi if test $HAVE_UNAME=yes -a x`uname -s` = xNetBSD then if $TRUEPRG; then HAVE_NETBSD_OS_TRUE= HAVE_NETBSD_OS_FALSE='#' else HAVE_NETBSD_OS_TRUE='#' HAVE_NETBSD_OS_FALSE= fi $as_echo "#define HAVE_NETBSD_OS 1" >>confdefs.h else if $FALSEPRG; then HAVE_NETBSD_OS_TRUE= HAVE_NETBSD_OS_FALSE='#' else HAVE_NETBSD_OS_TRUE='#' HAVE_NETBSD_OS_FALSE= fi fi if test $HAVE_UNAME=yes -a x`uname -s` = xOpenBSD then if $TRUEPRG; then HAVE_OPENBSD_OS_TRUE= HAVE_OPENBSD_OS_FALSE='#' else HAVE_OPENBSD_OS_TRUE='#' HAVE_OPENBSD_OS_FALSE= fi $as_echo "#define HAVE_OPENBSD_OS 1" >>confdefs.h else if $FALSEPRG; then HAVE_OPENBSD_OS_TRUE= HAVE_OPENBSD_OS_FALSE='#' else HAVE_OPENBSD_OS_TRUE='#' HAVE_OPENBSD_OS_FALSE= fi fi if test $HAVE_UNAME=yes -a x`uname -s` = xBSD/OS then if $TRUEPRG; then HAVE_BSDI_OS_TRUE= HAVE_BSDI_OS_FALSE='#' else 
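# Each uname-based block in this series follows the same pattern: it defines an
# automake-style conditional pair (HAVE_<OS>_OS_TRUE / HAVE_<OS>_OS_FALSE, one of the
# two commented out with '#') and, when the platform matches, appends the corresponding
# HAVE_<OS>_OS define to confdefs.h for use by the Bacula sources.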
HAVE_BSDI_OS_TRUE='#' HAVE_BSDI_OS_FALSE= fi $as_echo "#define HAVE_BSDI_OS 1" >>confdefs.h else if $FALSEPRG; then HAVE_BSDI_OS_TRUE= HAVE_BSDI_OS_FALSE='#' else HAVE_BSDI_OS_TRUE='#' HAVE_BSDI_OS_FALSE= fi fi if test $HAVE_UNAME=yes -a x`uname -s` = xSGI then if $TRUEPRG; then HAVE_SGI_OS_TRUE= HAVE_SGI_OS_FALSE='#' else HAVE_SGI_OS_TRUE='#' HAVE_SGI_OS_FALSE= fi $as_echo "#define HAVE_SGI_OS 1" >>confdefs.h else if $FALSEPRG; then HAVE_SGI_OS_TRUE= HAVE_SGI_OS_FALSE='#' else HAVE_SGI_OS_TRUE='#' HAVE_SGI_OS_FALSE= fi fi if test $HAVE_UNAME=yes -a x`uname -s` = xIRIX -o x`uname -s` = xIRIX64 then if $TRUEPRG; then HAVE_IRIX_OS_TRUE= HAVE_IRIX_OS_FALSE='#' else HAVE_IRIX_OS_TRUE='#' HAVE_IRIX_OS_FALSE= fi $as_echo "#define HAVE_IRIX_OS 1" >>confdefs.h else if $FALSEPRG; then HAVE_IRIX_OS_TRUE= HAVE_IRIX_OS_FALSE='#' else HAVE_IRIX_OS_TRUE='#' HAVE_IRIX_OS_FALSE= fi fi if test $HAVE_UNAME=yes -a x`uname -s` = xDarwin then if $TRUEPRG; then HAVE_DARWIN_OS_TRUE= HAVE_DARWIN_OS_FALSE='#' else HAVE_DARWIN_OS_TRUE='#' HAVE_DARWIN_OS_FALSE= fi $as_echo "#define HAVE_DARWIN_OS 1" >>confdefs.h else if $FALSEPRG; then HAVE_DARWIN_OS_TRUE= HAVE_DARWIN_OS_FALSE='#' else HAVE_DARWIN_OS_TRUE='#' HAVE_DARWIN_OS_FALSE= fi fi if test $HAVE_UNAME=yes -a x`uname -s` = xGNU/kFreeBSD then if $TRUEPRG; then HAVE_KFREEBSD_OS_TRUE= HAVE_KFREEBSD_OS_FALSE='#' else HAVE_KFREEBSD_OS_TRUE='#' HAVE_KFREEBSD_OS_FALSE= fi $as_echo "#define HAVE_KFREEBSD_OS 1" >>confdefs.h else if $FALSEPRG; then HAVE_KFREEBSD_OS_TRUE= HAVE_KFREEBSD_OS_FALSE='#' else HAVE_KFREEBSD_OS_TRUE='#' HAVE_KFREEBSD_OS_FALSE= fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Operating System Distribution" >&5 $as_echo_n "checking for Operating System Distribution... " >&6; } if test "x$DISTNAME" != "x" then echo "distname set to $DISTNAME" elif test $HAVE_UNAME=yes -a x`uname -s` = xOSF1 then DISTNAME=alpha elif test $HAVE_UNAME=yes -a x`uname -s` = xAIX then DISTNAME=aix elif test $HAVE_UNAME=yes -a x`uname -s` = xHP-UX then DISTNAME=hpux elif test $HAVE_UNAME=yes -a x`uname -s` = xSunOS then DISTNAME=solaris elif test $HAVE_UNAME=yes -a x`uname -s` = xGNU then DISTNAME=hurd elif test $HAVE_UNAME=yes -a x`uname -s` = xFreeBSD then DISTNAME=freebsd elif test $HAVE_UNAME=yes -a x`uname -s` = xNetBSD then DISTNAME=netbsd elif test $HAVE_UNAME=yes -a x`uname -s` = xOpenBSD then DISTNAME=openbsd elif test $HAVE_UNAME=yes -a x`uname -s` = xIRIX then DISTNAME=irix elif test $HAVE_UNAME=yes -a x`uname -s` = xBSD/OS then DISTNAME=bsdi elif test -f /etc/SuSE-release then DISTNAME=suse elif test -d /etc/SuSEconfig then DISTNAME=suse5 elif test -f /etc/mandrake-release then DISTNAME=mandrake elif test -f /etc/whitebox-release then DISTNAME=redhat elif test -f /etc/redhat-release then DISTNAME=redhat elif test -f /etc/gentoo-release then DISTNAME=gentoo elif test -f /etc/debian_version then DISTNAME=debian elif test -f /etc/slackware-version then DISTNAME=slackware elif test x$host_vendor = xapple then DISTNAME=osx elif test $HAVE_UNAME=yes -a x`uname -s` = xDarwin then DISTNAME=darwin elif test -f /etc/engarde-version then DISTNAME=engarde elif test -f /etc/arch-release then DISTNAME=archlinux elif test "$CYGWIN" = yes then DISTNAME=cygwin $as_echo "#define HAVE_CYGWIN 1" >>confdefs.h else DISTNAME=unknown_distro fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: done" >&5 $as_echo "done" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} sets \$(MAKE)" >&5 $as_echo_n "checking whether ${MAKE-make} sets \$(MAKE)... 
" >&6; } set x ${MAKE-make} ac_make=`$as_echo "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'` if eval \${ac_cv_prog_make_${ac_make}_set+:} false; then : $as_echo_n "(cached) " >&6 else cat >conftest.make <<\_ACEOF SHELL = /bin/sh all: @echo '@@@%%%=$(MAKE)=@@@%%%' _ACEOF # GNU make sometimes prints "make[1]: Entering ...", which would confuse us. case `${MAKE-make} -f conftest.make 2>/dev/null` in *@@@%%%=?*=@@@%%%*) eval ac_cv_prog_make_${ac_make}_set=yes;; *) eval ac_cv_prog_make_${ac_make}_set=no;; esac rm -f conftest.make fi if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } SET_MAKE= else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } SET_MAKE="MAKE=${MAKE-make}" fi MKINSTALLDIRS= if test -n "$ac_aux_dir"; then case "$ac_aux_dir" in /*) MKINSTALLDIRS="$ac_aux_dir/mkinstalldirs" ;; *) MKINSTALLDIRS="\$(top_builddir)/$ac_aux_dir/mkinstalldirs" ;; esac fi if test -z "$MKINSTALLDIRS"; then MKINSTALLDIRS="\$(top_srcdir)/mkinstalldirs" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether NLS is requested" >&5 $as_echo_n "checking whether NLS is requested... " >&6; } # Check whether --enable-nls was given. if test "${enable_nls+set}" = set; then : enableval=$enable_nls; USE_NLS=$enableval else USE_NLS=yes fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $USE_NLS" >&5 $as_echo "$USE_NLS" >&6; } # Prepare PATH_SEPARATOR. # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then echo "#! /bin/sh" >conf$$.sh echo "exit 0" >>conf$$.sh chmod +x conf$$.sh if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then PATH_SEPARATOR=';' else PATH_SEPARATOR=: fi rm -f conf$$.sh fi # Find out how to test for executable files. Don't use a zero-byte file, # as systems may use methods other than mode bits to determine executability. cat >conf$$.file <<_ASEOF #! /bin/sh exit 0 _ASEOF chmod +x conf$$.file if test -x conf$$.file >/dev/null 2>&1; then ac_executable_p="test -x" else ac_executable_p="test -f" fi rm -f conf$$.file # Extract the first word of "msgfmt", so it can be a program name with args. set dummy msgfmt; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_MSGFMT+:} false; then : $as_echo_n "(cached) " >&6 else case "$MSGFMT" in [\\/]* | ?:[\\/]*) ac_cv_path_MSGFMT="$MSGFMT" # Let the user override the test with a path. ;; *) ac_save_IFS="$IFS"; IFS=$PATH_SEPARATOR for ac_dir in $PATH; do IFS="$ac_save_IFS" test -z "$ac_dir" && ac_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if $ac_executable_p "$ac_dir/$ac_word$ac_exec_ext"; then echo "$as_me: trying $ac_dir/$ac_word..." >&5 if $ac_dir/$ac_word --statistics /dev/null >&5 2>&1 && (if $ac_dir/$ac_word --statistics /dev/null 2>&1 >/dev/null | grep usage >/dev/null; then exit 1; else exit 0; fi); then ac_cv_path_MSGFMT="$ac_dir/$ac_word$ac_exec_ext" break 2 fi fi done done IFS="$ac_save_IFS" test -z "$ac_cv_path_MSGFMT" && ac_cv_path_MSGFMT=":" ;; esac fi MSGFMT="$ac_cv_path_MSGFMT" if test "$MSGFMT" != ":"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MSGFMT" >&5 $as_echo "$MSGFMT" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi # Extract the first word of "gmsgfmt", so it can be a program name with args. set dummy gmsgfmt; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... 
" >&6; } if ${ac_cv_path_GMSGFMT+:} false; then : $as_echo_n "(cached) " >&6 else case $GMSGFMT in [\\/]* | ?:[\\/]*) ac_cv_path_GMSGFMT="$GMSGFMT" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_GMSGFMT="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_GMSGFMT" && ac_cv_path_GMSGFMT="$MSGFMT" ;; esac fi GMSGFMT=$ac_cv_path_GMSGFMT if test -n "$GMSGFMT"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $GMSGFMT" >&5 $as_echo "$GMSGFMT" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi # Prepare PATH_SEPARATOR. # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then echo "#! /bin/sh" >conf$$.sh echo "exit 0" >>conf$$.sh chmod +x conf$$.sh if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then PATH_SEPARATOR=';' else PATH_SEPARATOR=: fi rm -f conf$$.sh fi # Find out how to test for executable files. Don't use a zero-byte file, # as systems may use methods other than mode bits to determine executability. cat >conf$$.file <<_ASEOF #! /bin/sh exit 0 _ASEOF chmod +x conf$$.file if test -x conf$$.file >/dev/null 2>&1; then ac_executable_p="test -x" else ac_executable_p="test -f" fi rm -f conf$$.file # Extract the first word of "xgettext", so it can be a program name with args. set dummy xgettext; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_XGETTEXT+:} false; then : $as_echo_n "(cached) " >&6 else case "$XGETTEXT" in [\\/]* | ?:[\\/]*) ac_cv_path_XGETTEXT="$XGETTEXT" # Let the user override the test with a path. ;; *) ac_save_IFS="$IFS"; IFS=$PATH_SEPARATOR for ac_dir in $PATH; do IFS="$ac_save_IFS" test -z "$ac_dir" && ac_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if $ac_executable_p "$ac_dir/$ac_word$ac_exec_ext"; then echo "$as_me: trying $ac_dir/$ac_word..." >&5 if $ac_dir/$ac_word --omit-header --copyright-holder= --msgid-bugs-address= /dev/null >&5 2>&1 && (if $ac_dir/$ac_word --omit-header --copyright-holder= --msgid-bugs-address= /dev/null 2>&1 >/dev/null | grep usage >/dev/null; then exit 1; else exit 0; fi); then ac_cv_path_XGETTEXT="$ac_dir/$ac_word$ac_exec_ext" break 2 fi fi done done IFS="$ac_save_IFS" test -z "$ac_cv_path_XGETTEXT" && ac_cv_path_XGETTEXT=":" ;; esac fi XGETTEXT="$ac_cv_path_XGETTEXT" if test "$XGETTEXT" != ":"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $XGETTEXT" >&5 $as_echo "$XGETTEXT" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi rm -f messages.po # Prepare PATH_SEPARATOR. # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then echo "#! /bin/sh" >conf$$.sh echo "exit 0" >>conf$$.sh chmod +x conf$$.sh if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then PATH_SEPARATOR=';' else PATH_SEPARATOR=: fi rm -f conf$$.sh fi # Find out how to test for executable files. Don't use a zero-byte file, # as systems may use methods other than mode bits to determine executability. cat >conf$$.file <<_ASEOF #! 
/bin/sh exit 0 _ASEOF chmod +x conf$$.file if test -x conf$$.file >/dev/null 2>&1; then ac_executable_p="test -x" else ac_executable_p="test -f" fi rm -f conf$$.file # Extract the first word of "msgmerge", so it can be a program name with args. set dummy msgmerge; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_MSGMERGE+:} false; then : $as_echo_n "(cached) " >&6 else case "$MSGMERGE" in [\\/]* | ?:[\\/]*) ac_cv_path_MSGMERGE="$MSGMERGE" # Let the user override the test with a path. ;; *) ac_save_IFS="$IFS"; IFS=$PATH_SEPARATOR for ac_dir in $PATH; do IFS="$ac_save_IFS" test -z "$ac_dir" && ac_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if $ac_executable_p "$ac_dir/$ac_word$ac_exec_ext"; then echo "$as_me: trying $ac_dir/$ac_word..." >&5 if $ac_dir/$ac_word --update -q /dev/null /dev/null >&5 2>&1; then ac_cv_path_MSGMERGE="$ac_dir/$ac_word$ac_exec_ext" break 2 fi fi done done IFS="$ac_save_IFS" test -z "$ac_cv_path_MSGMERGE" && ac_cv_path_MSGMERGE=":" ;; esac fi MSGMERGE="$ac_cv_path_MSGMERGE" if test "$MSGMERGE" != ":"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MSGMERGE" >&5 $as_echo "$MSGMERGE" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "$GMSGFMT" != ":"; then if $GMSGFMT --statistics /dev/null >/dev/null 2>&1 && (if $GMSGFMT --statistics /dev/null 2>&1 >/dev/null | grep usage >/dev/null; then exit 1; else exit 0; fi); then : ; else GMSGFMT=`echo "$GMSGFMT" | sed -e 's,^.*/,,'` { $as_echo "$as_me:${as_lineno-$LINENO}: result: found $GMSGFMT program is not GNU msgfmt; ignore it" >&5 $as_echo "found $GMSGFMT program is not GNU msgfmt; ignore it" >&6; } GMSGFMT=":" fi fi if test "$XGETTEXT" != ":"; then if $XGETTEXT --omit-header --copyright-holder= --msgid-bugs-address= /dev/null >/dev/null 2>&1 && (if $XGETTEXT --omit-header --copyright-holder= --msgid-bugs-address= /dev/null 2>&1 >/dev/null | grep usage >/dev/null; then exit 1; else exit 0; fi); then : ; else { $as_echo "$as_me:${as_lineno-$LINENO}: result: found xgettext program is not GNU xgettext; ignore it" >&5 $as_echo "found xgettext program is not GNU xgettext; ignore it" >&6; } XGETTEXT=":" fi rm -f messages.po fi ac_config_commands="$ac_config_commands default-1" if test "X$prefix" = "XNONE"; then acl_final_prefix="$ac_default_prefix" else acl_final_prefix="$prefix" fi if test "X$exec_prefix" = "XNONE"; then acl_final_exec_prefix='${prefix}' else acl_final_exec_prefix="$exec_prefix" fi acl_save_prefix="$prefix" prefix="$acl_final_prefix" eval acl_final_exec_prefix=\"$acl_final_exec_prefix\" prefix="$acl_save_prefix" # Check whether --with-gnu-ld was given. if test "${with_gnu_ld+set}" = set; then : withval=$with_gnu_ld; test "$withval" = no || with_gnu_ld=yes else with_gnu_ld=no fi # Prepare PATH_SEPARATOR. # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then echo "#! /bin/sh" >conf$$.sh echo "exit 0" >>conf$$.sh chmod +x conf$$.sh if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then PATH_SEPARATOR=';' else PATH_SEPARATOR=: fi rm -f conf$$.sh fi ac_prog=ld if test "$GCC" = yes; then # Check if gcc -print-prog-name=ld gives a path. { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ld used by GCC" >&5 $as_echo_n "checking for ld used by GCC... 
" >&6; } case $host in *-*-mingw*) # gcc leaves a trailing carriage return which upsets mingw ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; *) ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; esac case $ac_prog in # Accept absolute paths. [\\/]* | [A-Za-z]:[\\/]*) re_direlt='/[^/][^/]*/\.\./' # Canonicalize the path of ld ac_prog=`echo $ac_prog| sed 's%\\\\%/%g'` while echo $ac_prog | grep "$re_direlt" > /dev/null 2>&1; do ac_prog=`echo $ac_prog| sed "s%$re_direlt%/%"` done test -z "$LD" && LD="$ac_prog" ;; "") # If it fails, then pretend we aren't using GCC. ac_prog=ld ;; *) # If it is relative, then search for the first ld in PATH. with_gnu_ld=unknown ;; esac elif test "$with_gnu_ld" = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5 $as_echo_n "checking for GNU ld... " >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5 $as_echo_n "checking for non-GNU ld... " >&6; } fi if ${acl_cv_path_LD+:} false; then : $as_echo_n "(cached) " >&6 else if test -z "$LD"; then IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS="${IFS}${PATH_SEPARATOR-:}" for ac_dir in $PATH; do test -z "$ac_dir" && ac_dir=. if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then acl_cv_path_LD="$ac_dir/$ac_prog" # Check to see if the program is GNU ld. I'd rather use --version, # but apparently some GNU ld's only accept -v. # Break only if it was the GNU/non-GNU ld that we prefer. case `"$acl_cv_path_LD" -v 2>&1 < /dev/null` in *GNU* | *'with BFD'*) test "$with_gnu_ld" != no && break ;; *) test "$with_gnu_ld" != yes && break ;; esac fi done IFS="$ac_save_ifs" else acl_cv_path_LD="$LD" # Let the user override the test with a path. fi fi LD="$acl_cv_path_LD" if test -n "$LD"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LD" >&5 $as_echo "$LD" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -z "$LD" && as_fn_error $? "no acceptable ld found in \$PATH" "$LINENO" 5 { $as_echo "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5 $as_echo_n "checking if the linker ($LD) is GNU ld... " >&6; } if ${acl_cv_prog_gnu_ld+:} false; then : $as_echo_n "(cached) " >&6 else # I'd rather use --version here, but apparently some GNU ld's only accept -v. case `$LD -v 2>&1 &5 $as_echo "$acl_cv_prog_gnu_ld" >&6; } with_gnu_ld=$acl_cv_prog_gnu_ld { $as_echo "$as_me:${as_lineno-$LINENO}: checking for shared library run path origin" >&5 $as_echo_n "checking for shared library run path origin... " >&6; } if ${acl_cv_rpath+:} false; then : $as_echo_n "(cached) " >&6 else CC="$CC" GCC="$GCC" LDFLAGS="$LDFLAGS" LD="$LD" with_gnu_ld="$with_gnu_ld" \ ${CONFIG_SHELL-/bin/sh} "$ac_aux_dir/config.rpath" "$host" > conftest.sh . ./conftest.sh rm -f ./conftest.sh acl_cv_rpath=done fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $acl_cv_rpath" >&5 $as_echo "$acl_cv_rpath" >&6; } wl="$acl_cv_wl" libext="$acl_cv_libext" shlibext="$acl_cv_shlibext" hardcode_libdir_flag_spec="$acl_cv_hardcode_libdir_flag_spec" hardcode_libdir_separator="$acl_cv_hardcode_libdir_separator" hardcode_direct="$acl_cv_hardcode_direct" hardcode_minus_L="$acl_cv_hardcode_minus_L" # Check whether --enable-rpath was given. 
if test "${enable_rpath+set}" = set; then : enableval=$enable_rpath; : else enable_rpath=yes fi use_additional=yes acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval additional_includedir=\"$includedir\" eval additional_libdir=\"$libdir\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" # Check whether --with-libiconv-prefix was given. if test "${with_libiconv_prefix+set}" = set; then : withval=$with_libiconv_prefix; if test "X$withval" = "Xno"; then use_additional=no else if test "X$withval" = "X"; then acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval additional_includedir=\"$includedir\" eval additional_libdir=\"$libdir\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" else additional_includedir="$withval/include" additional_libdir="$withval/lib" fi fi fi LIBICONV= LTLIBICONV= INCICONV= rpathdirs= ltrpathdirs= names_already_handled= names_next_round='iconv ' while test -n "$names_next_round"; do names_this_round="$names_next_round" names_next_round= for name in $names_this_round; do already_handled= for n in $names_already_handled; do if test "$n" = "$name"; then already_handled=yes break fi done if test -z "$already_handled"; then names_already_handled="$names_already_handled $name" uppername=`echo "$name" | sed -e 'y|abcdefghijklmnopqrstuvwxyz./-|ABCDEFGHIJKLMNOPQRSTUVWXYZ___|'` eval value=\"\$HAVE_LIB$uppername\" if test -n "$value"; then if test "$value" = yes; then eval value=\"\$LIB$uppername\" test -z "$value" || LIBICONV="${LIBICONV}${LIBICONV:+ }$value" eval value=\"\$LTLIB$uppername\" test -z "$value" || LTLIBICONV="${LTLIBICONV}${LTLIBICONV:+ }$value" else : fi else found_dir= found_la= found_so= found_a= if test $use_additional = yes; then if test -n "$shlibext" && test -f "$additional_libdir/lib$name.$shlibext"; then found_dir="$additional_libdir" found_so="$additional_libdir/lib$name.$shlibext" if test -f "$additional_libdir/lib$name.la"; then found_la="$additional_libdir/lib$name.la" fi else if test -f "$additional_libdir/lib$name.$libext"; then found_dir="$additional_libdir" found_a="$additional_libdir/lib$name.$libext" if test -f "$additional_libdir/lib$name.la"; then found_la="$additional_libdir/lib$name.la" fi fi fi fi if test "X$found_dir" = "X"; then for x in $LDFLAGS $LTLIBICONV; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" case "$x" in -L*) dir=`echo "X$x" | sed -e 's/^X-L//'` if test -n "$shlibext" && test -f "$dir/lib$name.$shlibext"; then found_dir="$dir" found_so="$dir/lib$name.$shlibext" if test -f "$dir/lib$name.la"; then found_la="$dir/lib$name.la" fi else if test -f "$dir/lib$name.$libext"; then found_dir="$dir" found_a="$dir/lib$name.$libext" if test -f "$dir/lib$name.la"; then found_la="$dir/lib$name.la" fi fi fi ;; esac if test "X$found_dir" != "X"; then break fi done fi if test "X$found_dir" != "X"; then LTLIBICONV="${LTLIBICONV}${LTLIBICONV:+ }-L$found_dir -l$name" if test "X$found_so" != "X"; then if test "$enable_rpath" = no || test "X$found_dir" = "X/usr/lib"; then LIBICONV="${LIBICONV}${LIBICONV:+ }$found_so" else haveit= for x in $ltrpathdirs; do if test "X$x" = "X$found_dir"; then haveit=yes break fi done if test -z "$haveit"; then ltrpathdirs="$ltrpathdirs $found_dir" fi if test "$hardcode_direct" = 
yes; then LIBICONV="${LIBICONV}${LIBICONV:+ }$found_so" else if test -n "$hardcode_libdir_flag_spec" && test "$hardcode_minus_L" = no; then LIBICONV="${LIBICONV}${LIBICONV:+ }$found_so" haveit= for x in $rpathdirs; do if test "X$x" = "X$found_dir"; then haveit=yes break fi done if test -z "$haveit"; then rpathdirs="$rpathdirs $found_dir" fi else haveit= for x in $LDFLAGS $LIBICONV; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X-L$found_dir"; then haveit=yes break fi done if test -z "$haveit"; then LIBICONV="${LIBICONV}${LIBICONV:+ }-L$found_dir" fi if test "$hardcode_minus_L" != no; then LIBICONV="${LIBICONV}${LIBICONV:+ }$found_so" else LIBICONV="${LIBICONV}${LIBICONV:+ }-l$name" fi fi fi fi else if test "X$found_a" != "X"; then LIBICONV="${LIBICONV}${LIBICONV:+ }$found_a" else LIBICONV="${LIBICONV}${LIBICONV:+ }-L$found_dir -l$name" fi fi additional_includedir= case "$found_dir" in */lib | */lib/) basedir=`echo "X$found_dir" | sed -e 's,^X,,' -e 's,/lib/*$,,'` additional_includedir="$basedir/include" ;; esac if test "X$additional_includedir" != "X"; then if test "X$additional_includedir" != "X/usr/include"; then haveit= if test "X$additional_includedir" = "X/usr/local/include"; then if test -n "$GCC"; then case $host_os in linux* | gnu* | k*bsd*-gnu) haveit=yes;; esac fi fi if test -z "$haveit"; then for x in $CPPFLAGS $INCICONV; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X-I$additional_includedir"; then haveit=yes break fi done if test -z "$haveit"; then if test -d "$additional_includedir"; then INCICONV="${INCICONV}${INCICONV:+ }-I$additional_includedir" fi fi fi fi fi if test -n "$found_la"; then save_libdir="$libdir" case "$found_la" in */* | *\\*) . "$found_la" ;; *) . 
"./$found_la" ;; esac libdir="$save_libdir" for dep in $dependency_libs; do case "$dep" in -L*) additional_libdir=`echo "X$dep" | sed -e 's/^X-L//'` if test "X$additional_libdir" != "X/usr/lib"; then haveit= if test "X$additional_libdir" = "X/usr/local/lib"; then if test -n "$GCC"; then case $host_os in linux* | gnu* | k*bsd*-gnu) haveit=yes;; esac fi fi if test -z "$haveit"; then haveit= for x in $LDFLAGS $LIBICONV; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X-L$additional_libdir"; then haveit=yes break fi done if test -z "$haveit"; then if test -d "$additional_libdir"; then LIBICONV="${LIBICONV}${LIBICONV:+ }-L$additional_libdir" fi fi haveit= for x in $LDFLAGS $LTLIBICONV; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X-L$additional_libdir"; then haveit=yes break fi done if test -z "$haveit"; then if test -d "$additional_libdir"; then LTLIBICONV="${LTLIBICONV}${LTLIBICONV:+ }-L$additional_libdir" fi fi fi fi ;; -R*) dir=`echo "X$dep" | sed -e 's/^X-R//'` if test "$enable_rpath" != no; then haveit= for x in $rpathdirs; do if test "X$x" = "X$dir"; then haveit=yes break fi done if test -z "$haveit"; then rpathdirs="$rpathdirs $dir" fi haveit= for x in $ltrpathdirs; do if test "X$x" = "X$dir"; then haveit=yes break fi done if test -z "$haveit"; then ltrpathdirs="$ltrpathdirs $dir" fi fi ;; -l*) names_next_round="$names_next_round "`echo "X$dep" | sed -e 's/^X-l//'` ;; *.la) names_next_round="$names_next_round "`echo "X$dep" | sed -e 's,^X.*/,,' -e 's,^lib,,' -e 's,\.la$,,'` ;; *) LIBICONV="${LIBICONV}${LIBICONV:+ }$dep" LTLIBICONV="${LTLIBICONV}${LTLIBICONV:+ }$dep" ;; esac done fi else LIBICONV="${LIBICONV}${LIBICONV:+ }-l$name" LTLIBICONV="${LTLIBICONV}${LTLIBICONV:+ }-l$name" fi fi fi done done if test "X$rpathdirs" != "X"; then if test -n "$hardcode_libdir_separator"; then alldirs= for found_dir in $rpathdirs; do alldirs="${alldirs}${alldirs:+$hardcode_libdir_separator}$found_dir" done acl_save_libdir="$libdir" libdir="$alldirs" eval flag=\"$hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" LIBICONV="${LIBICONV}${LIBICONV:+ }$flag" else for found_dir in $rpathdirs; do acl_save_libdir="$libdir" libdir="$found_dir" eval flag=\"$hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" LIBICONV="${LIBICONV}${LIBICONV:+ }$flag" done fi fi if test "X$ltrpathdirs" != "X"; then for found_dir in $ltrpathdirs; do LTLIBICONV="${LTLIBICONV}${LTLIBICONV:+ }-R$found_dir" done fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for CFPreferencesCopyAppValue" >&5 $as_echo_n "checking for CFPreferencesCopyAppValue... " >&6; } if ${gt_cv_func_CFPreferencesCopyAppValue+:} false; then : $as_echo_n "(cached) " >&6 else gt_save_CPPFLAGS="$CPPFLAGS" CPPFLAGS="$CPPFLAGS -I/System/Library/Frameworks/CoreFoundation.framework/Headers" gt_save_LIBS="$LIBS" LIBS="$LIBS -framework CoreFoundation" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include <CoreFoundation/CFPreferences.h> int main () { CFPreferencesCopyAppValue(NULL, NULL) ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : gt_cv_func_CFPreferencesCopyAppValue=yes else gt_cv_func_CFPreferencesCopyAppValue=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext CPPFLAGS="$gt_save_CPPFLAGS" LIBS="$gt_save_LIBS" fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_cv_func_CFPreferencesCopyAppValue" >&5 $as_echo "$gt_cv_func_CFPreferencesCopyAppValue" >&6; } if test $gt_cv_func_CFPreferencesCopyAppValue = yes; then $as_echo "#define HAVE_CFPREFERENCESCOPYAPPVALUE 1" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for CFLocaleCopyCurrent" >&5 $as_echo_n "checking for CFLocaleCopyCurrent... " >&6; } if ${gt_cv_func_CFLocaleCopyCurrent+:} false; then : $as_echo_n "(cached) " >&6 else gt_save_CPPFLAGS="$CPPFLAGS" CPPFLAGS="$CPPFLAGS -I/System/Library/Frameworks/CoreFoundation.framework/Headers" gt_save_LIBS="$LIBS" LIBS="$LIBS -framework CoreFoundation" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <CoreFoundation/CFLocale.h> int main () { CFLocaleCopyCurrent(); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : gt_cv_func_CFLocaleCopyCurrent=yes else gt_cv_func_CFLocaleCopyCurrent=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext CPPFLAGS="$gt_save_CPPFLAGS" LIBS="$gt_save_LIBS" fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_cv_func_CFLocaleCopyCurrent" >&5 $as_echo "$gt_cv_func_CFLocaleCopyCurrent" >&6; } if test $gt_cv_func_CFLocaleCopyCurrent = yes; then $as_echo "#define HAVE_CFLOCALECOPYCURRENT 1" >>confdefs.h fi INTL_MACOSX_LIBS= if test $gt_cv_func_CFPreferencesCopyAppValue = yes || test $gt_cv_func_CFLocaleCopyCurrent = yes; then INTL_MACOSX_LIBS="-Wl,-framework -Wl,CoreFoundation" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether NLS is requested" >&5 $as_echo_n "checking whether NLS is requested... " >&6; } # Check whether --enable-nls was given. if test "${enable_nls+set}" = set; then : enableval=$enable_nls; USE_NLS=$enableval else USE_NLS=yes fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $USE_NLS" >&5 $as_echo "$USE_NLS" >&6; } LIBINTL= LTLIBINTL= POSUB= if test "$USE_NLS" = "yes"; then gt_use_preinstalled_gnugettext=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU gettext in libc" >&5 $as_echo_n "checking for GNU gettext in libc... " >&6; } if ${gt_cv_func_gnugettext1_libc+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h.
*/ #include extern int _nl_msg_cat_cntr; extern int *_nl_domain_bindings; int main () { bindtextdomain ("", ""); return * gettext ("") + _nl_msg_cat_cntr + *_nl_domain_bindings ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : gt_cv_func_gnugettext1_libc=yes else gt_cv_func_gnugettext1_libc=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_cv_func_gnugettext1_libc" >&5 $as_echo "$gt_cv_func_gnugettext1_libc" >&6; } if test "$gt_cv_func_gnugettext1_libc" != "yes"; then am_save_CPPFLAGS="$CPPFLAGS" for element in $INCICONV; do haveit= for x in $CPPFLAGS; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X$element"; then haveit=yes break fi done if test -z "$haveit"; then CPPFLAGS="${CPPFLAGS}${CPPFLAGS:+ }$element" fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking for iconv" >&5 $as_echo_n "checking for iconv... " >&6; } if ${am_cv_func_iconv+:} false; then : $as_echo_n "(cached) " >&6 else am_cv_func_iconv="no, consider installing GNU libiconv" am_cv_lib_iconv=no cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include int main () { iconv_t cd = iconv_open("",""); iconv(cd,NULL,NULL,NULL,NULL); iconv_close(cd); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : am_cv_func_iconv=yes fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext if test "$am_cv_func_iconv" != yes; then am_save_LIBS="$LIBS" LIBS="$LIBS $LIBICONV" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include int main () { iconv_t cd = iconv_open("",""); iconv(cd,NULL,NULL,NULL,NULL); iconv_close(cd); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : am_cv_lib_iconv=yes am_cv_func_iconv=yes fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS="$am_save_LIBS" fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_func_iconv" >&5 $as_echo "$am_cv_func_iconv" >&6; } if test "$am_cv_func_iconv" = yes; then $as_echo "#define HAVE_ICONV 1" >>confdefs.h fi if test "$am_cv_lib_iconv" = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to link with libiconv" >&5 $as_echo_n "checking how to link with libiconv... " >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LIBICONV" >&5 $as_echo "$LIBICONV" >&6; } else CPPFLAGS="$am_save_CPPFLAGS" LIBICONV= LTLIBICONV= fi use_additional=yes acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval additional_includedir=\"$includedir\" eval additional_libdir=\"$libdir\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" # Check whether --with-libintl-prefix was given. 
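# The same library search performed for libiconv above is now repeated for libintl: it
# fills LIBINTL, LTLIBINTL and INCINTL, and records any rpath directories that have to
# be added at link time. As an illustration (the path is only an example), configuring
# with --with-libintl-prefix=/opt/gettext makes the code below look for
# /opt/gettext/lib/libintl.$shlibext and /opt/gettext/include before the standard
# locations.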
if test "${with_libintl_prefix+set}" = set; then : withval=$with_libintl_prefix; if test "X$withval" = "Xno"; then use_additional=no else if test "X$withval" = "X"; then acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval additional_includedir=\"$includedir\" eval additional_libdir=\"$libdir\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" else additional_includedir="$withval/include" additional_libdir="$withval/lib" fi fi fi LIBINTL= LTLIBINTL= INCINTL= rpathdirs= ltrpathdirs= names_already_handled= names_next_round='intl ' while test -n "$names_next_round"; do names_this_round="$names_next_round" names_next_round= for name in $names_this_round; do already_handled= for n in $names_already_handled; do if test "$n" = "$name"; then already_handled=yes break fi done if test -z "$already_handled"; then names_already_handled="$names_already_handled $name" uppername=`echo "$name" | sed -e 'y|abcdefghijklmnopqrstuvwxyz./-|ABCDEFGHIJKLMNOPQRSTUVWXYZ___|'` eval value=\"\$HAVE_LIB$uppername\" if test -n "$value"; then if test "$value" = yes; then eval value=\"\$LIB$uppername\" test -z "$value" || LIBINTL="${LIBINTL}${LIBINTL:+ }$value" eval value=\"\$LTLIB$uppername\" test -z "$value" || LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }$value" else : fi else found_dir= found_la= found_so= found_a= if test $use_additional = yes; then if test -n "$shlibext" && test -f "$additional_libdir/lib$name.$shlibext"; then found_dir="$additional_libdir" found_so="$additional_libdir/lib$name.$shlibext" if test -f "$additional_libdir/lib$name.la"; then found_la="$additional_libdir/lib$name.la" fi else if test -f "$additional_libdir/lib$name.$libext"; then found_dir="$additional_libdir" found_a="$additional_libdir/lib$name.$libext" if test -f "$additional_libdir/lib$name.la"; then found_la="$additional_libdir/lib$name.la" fi fi fi fi if test "X$found_dir" = "X"; then for x in $LDFLAGS $LTLIBINTL; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" case "$x" in -L*) dir=`echo "X$x" | sed -e 's/^X-L//'` if test -n "$shlibext" && test -f "$dir/lib$name.$shlibext"; then found_dir="$dir" found_so="$dir/lib$name.$shlibext" if test -f "$dir/lib$name.la"; then found_la="$dir/lib$name.la" fi else if test -f "$dir/lib$name.$libext"; then found_dir="$dir" found_a="$dir/lib$name.$libext" if test -f "$dir/lib$name.la"; then found_la="$dir/lib$name.la" fi fi fi ;; esac if test "X$found_dir" != "X"; then break fi done fi if test "X$found_dir" != "X"; then LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }-L$found_dir -l$name" if test "X$found_so" != "X"; then if test "$enable_rpath" = no || test "X$found_dir" = "X/usr/lib"; then LIBINTL="${LIBINTL}${LIBINTL:+ }$found_so" else haveit= for x in $ltrpathdirs; do if test "X$x" = "X$found_dir"; then haveit=yes break fi done if test -z "$haveit"; then ltrpathdirs="$ltrpathdirs $found_dir" fi if test "$hardcode_direct" = yes; then LIBINTL="${LIBINTL}${LIBINTL:+ }$found_so" else if test -n "$hardcode_libdir_flag_spec" && test "$hardcode_minus_L" = no; then LIBINTL="${LIBINTL}${LIBINTL:+ }$found_so" haveit= for x in $rpathdirs; do if test "X$x" = "X$found_dir"; then haveit=yes break fi done if test -z "$haveit"; then rpathdirs="$rpathdirs $found_dir" fi else haveit= for x in $LDFLAGS $LIBINTL; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" 
acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X-L$found_dir"; then haveit=yes break fi done if test -z "$haveit"; then LIBINTL="${LIBINTL}${LIBINTL:+ }-L$found_dir" fi if test "$hardcode_minus_L" != no; then LIBINTL="${LIBINTL}${LIBINTL:+ }$found_so" else LIBINTL="${LIBINTL}${LIBINTL:+ }-l$name" fi fi fi fi else if test "X$found_a" != "X"; then LIBINTL="${LIBINTL}${LIBINTL:+ }$found_a" else LIBINTL="${LIBINTL}${LIBINTL:+ }-L$found_dir -l$name" fi fi additional_includedir= case "$found_dir" in */lib | */lib/) basedir=`echo "X$found_dir" | sed -e 's,^X,,' -e 's,/lib/*$,,'` additional_includedir="$basedir/include" ;; esac if test "X$additional_includedir" != "X"; then if test "X$additional_includedir" != "X/usr/include"; then haveit= if test "X$additional_includedir" = "X/usr/local/include"; then if test -n "$GCC"; then case $host_os in linux* | gnu* | k*bsd*-gnu) haveit=yes;; esac fi fi if test -z "$haveit"; then for x in $CPPFLAGS $INCINTL; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X-I$additional_includedir"; then haveit=yes break fi done if test -z "$haveit"; then if test -d "$additional_includedir"; then INCINTL="${INCINTL}${INCINTL:+ }-I$additional_includedir" fi fi fi fi fi if test -n "$found_la"; then save_libdir="$libdir" case "$found_la" in */* | *\\*) . "$found_la" ;; *) . "./$found_la" ;; esac libdir="$save_libdir" for dep in $dependency_libs; do case "$dep" in -L*) additional_libdir=`echo "X$dep" | sed -e 's/^X-L//'` if test "X$additional_libdir" != "X/usr/lib"; then haveit= if test "X$additional_libdir" = "X/usr/local/lib"; then if test -n "$GCC"; then case $host_os in linux* | gnu* | k*bsd*-gnu) haveit=yes;; esac fi fi if test -z "$haveit"; then haveit= for x in $LDFLAGS $LIBINTL; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X-L$additional_libdir"; then haveit=yes break fi done if test -z "$haveit"; then if test -d "$additional_libdir"; then LIBINTL="${LIBINTL}${LIBINTL:+ }-L$additional_libdir" fi fi haveit= for x in $LDFLAGS $LTLIBINTL; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X-L$additional_libdir"; then haveit=yes break fi done if test -z "$haveit"; then if test -d "$additional_libdir"; then LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }-L$additional_libdir" fi fi fi fi ;; -R*) dir=`echo "X$dep" | sed -e 's/^X-R//'` if test "$enable_rpath" != no; then haveit= for x in $rpathdirs; do if test "X$x" = "X$dir"; then haveit=yes break fi done if test -z "$haveit"; then rpathdirs="$rpathdirs $dir" fi haveit= for x in $ltrpathdirs; do if test "X$x" = "X$dir"; then haveit=yes break fi done if test -z "$haveit"; then ltrpathdirs="$ltrpathdirs $dir" fi fi ;; -l*) names_next_round="$names_next_round "`echo "X$dep" | sed -e 's/^X-l//'` ;; *.la) names_next_round="$names_next_round "`echo "X$dep" | sed -e 's,^X.*/,,' -e 's,^lib,,' -e 's,\.la$,,'` ;; *) LIBINTL="${LIBINTL}${LIBINTL:+ }$dep" LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }$dep" ;; esac done fi else 
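# No pre-installed copy of the library was located in the additional prefix or in the
# directories named by LDFLAGS, so simply add -l$name (for the first round, -lintl);
# the link tests that follow decide whether gettext comes from libc, from an external
# libintl, or from the intl directory bundled with the source.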
LIBINTL="${LIBINTL}${LIBINTL:+ }-l$name" LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }-l$name" fi fi fi done done if test "X$rpathdirs" != "X"; then if test -n "$hardcode_libdir_separator"; then alldirs= for found_dir in $rpathdirs; do alldirs="${alldirs}${alldirs:+$hardcode_libdir_separator}$found_dir" done acl_save_libdir="$libdir" libdir="$alldirs" eval flag=\"$hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" LIBINTL="${LIBINTL}${LIBINTL:+ }$flag" else for found_dir in $rpathdirs; do acl_save_libdir="$libdir" libdir="$found_dir" eval flag=\"$hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" LIBINTL="${LIBINTL}${LIBINTL:+ }$flag" done fi fi if test "X$ltrpathdirs" != "X"; then for found_dir in $ltrpathdirs; do LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }-R$found_dir" done fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU gettext in libintl" >&5 $as_echo_n "checking for GNU gettext in libintl... " >&6; } if ${gt_cv_func_gnugettext1_libintl+:} false; then : $as_echo_n "(cached) " >&6 else gt_save_CPPFLAGS="$CPPFLAGS" CPPFLAGS="$CPPFLAGS $INCINTL" gt_save_LIBS="$LIBS" LIBS="$LIBS $LIBINTL" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include extern int _nl_msg_cat_cntr; extern #ifdef __cplusplus "C" #endif const char *_nl_expand_alias (const char *); int main () { bindtextdomain ("", ""); return * gettext ("") + _nl_msg_cat_cntr + *_nl_expand_alias ("") ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : gt_cv_func_gnugettext1_libintl=yes else gt_cv_func_gnugettext1_libintl=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext if test "$gt_cv_func_gnugettext1_libintl" != yes && test -n "$LIBICONV"; then LIBS="$LIBS $LIBICONV" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include extern int _nl_msg_cat_cntr; extern #ifdef __cplusplus "C" #endif const char *_nl_expand_alias (const char *); int main () { bindtextdomain ("", ""); return * gettext ("") + _nl_msg_cat_cntr + *_nl_expand_alias ("") ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : LIBINTL="$LIBINTL $LIBICONV" LTLIBINTL="$LTLIBINTL $LTLIBICONV" gt_cv_func_gnugettext1_libintl=yes fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi CPPFLAGS="$gt_save_CPPFLAGS" LIBS="$gt_save_LIBS" fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_cv_func_gnugettext1_libintl" >&5 $as_echo "$gt_cv_func_gnugettext1_libintl" >&6; } fi if test "$gt_cv_func_gnugettext1_libc" = "yes" \ || { test "$gt_cv_func_gnugettext1_libintl" = "yes" \ && test "$PACKAGE" != gettext-runtime \ && test "$PACKAGE" != gettext-tools; }; then gt_use_preinstalled_gnugettext=yes else LIBINTL= LTLIBINTL= INCINTL= fi if test -n "$INTL_MACOSX_LIBS"; then if test "$gt_use_preinstalled_gnugettext" = "yes" \ || test "$nls_cv_use_gnu_gettext" = "yes"; then LIBINTL="$LIBINTL $INTL_MACOSX_LIBS" LTLIBINTL="$LTLIBINTL $INTL_MACOSX_LIBS" fi fi if test "$gt_use_preinstalled_gnugettext" = "yes" \ || test "$nls_cv_use_gnu_gettext" = "yes"; then $as_echo "#define ENABLE_NLS 1" >>confdefs.h else USE_NLS=no fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to use NLS" >&5 $as_echo_n "checking whether to use NLS... " >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: $USE_NLS" >&5 $as_echo "$USE_NLS" >&6; } if test "$USE_NLS" = "yes"; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking where the gettext function comes from" >&5 $as_echo_n "checking where the gettext function comes from... 
" >&6; } if test "$gt_use_preinstalled_gnugettext" = "yes"; then if test "$gt_cv_func_gnugettext1_libintl" = "yes"; then gt_source="external libintl" else gt_source="libc" fi else gt_source="included intl directory" fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_source" >&5 $as_echo "$gt_source" >&6; } fi if test "$USE_NLS" = "yes"; then if test "$gt_use_preinstalled_gnugettext" = "yes"; then if test "$gt_cv_func_gnugettext1_libintl" = "yes"; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to link with libintl" >&5 $as_echo_n "checking how to link with libintl... " >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LIBINTL" >&5 $as_echo "$LIBINTL" >&6; } for element in $INCINTL; do haveit= for x in $CPPFLAGS; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X$element"; then haveit=yes break fi done if test -z "$haveit"; then CPPFLAGS="${CPPFLAGS}${CPPFLAGS:+ }$element" fi done fi $as_echo "#define HAVE_GETTEXT 1" >>confdefs.h $as_echo "#define HAVE_DCGETTEXT 1" >>confdefs.h fi POSUB=po fi INTLLIBS="$LIBINTL" os_name=`uname -s 2>/dev/null` if test x${prefix} = xNONE ; then if test `eval echo ${sysconfdir}` = NONE/etc ; then sysconfdir=/etc/bacula fi if test `eval echo ${libdir}` = NONE/lib ; then case ${os_name} in Linux) os_processor=`uname -p 2>/dev/null` case ${os_processor} in x86_64) libdir=/usr/lib64 ;; *) libdir=/usr/lib ;; esac ;; *) libdir=/usr/lib ;; esac fi if test `eval echo ${includedir}` = NONE/include ; then includedir=/usr/include fi if test `eval echo ${datarootdir}` = NONE/share ; then datarootdir=/usr/share fi prefix= fi if test x${exec_prefix} = xNONE ; then exec_prefix=${prefix} fi sysconfdir=`eval echo ${sysconfdir}` datarootdir=`eval echo ${datarootdir}` docdir=`eval echo ${docdir}` htmldir=`eval echo ${htmldir}` libdir=`eval echo ${libdir}` includedir=`eval echo ${includedir}` localedir=`eval echo ${datarootdir}/locale` cat >>confdefs.h <<_ACEOF #define SYSCONFDIR "$sysconfdir" _ACEOF cat >>confdefs.h <<_ACEOF #define LOCALEDIR "$localedir" _ACEOF if test x$sbindir = x'${exec_prefix}/sbin' ; then sbindir=${exec_prefix}/sbin fi sbindir=`eval echo ${sbindir}` if test x$mandir = x'${datarootdir}/man' ; then mandir=/usr/share/man fi if test x$htmldir = x'/usr/share/doc/bacula/' ; then htmldir=`eval echo ${docdir}html` fi if test x$docdir = x'/usr/share/doc/' ; then docdir=`eval echo ${docdir}bacula` fi for ac_prog in msgfmt do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_MSGFMT+:} false; then : $as_echo_n "(cached) " >&6 else case $MSGFMT in [\\/]* | ?:[\\/]*) ac_cv_path_MSGFMT="$MSGFMT" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_MSGFMT="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi MSGFMT=$ac_cv_path_MSGFMT if test -n "$MSGFMT"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MSGFMT" >&5 $as_echo "$MSGFMT" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$MSGFMT" && break done test -n "$MSGFMT" || MSGFMT="no" if test "$MSGFMT" = "no" then echo 'msgfmt program not found, disabling NLS !' USE_NLS=no USE_INCLUDED_LIBINTL=no #else { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C Library 2 or newer" >&5 $as_echo_n "checking whether we are using the GNU C Library 2 or newer... " >&6; } if ${ac_cv_gnu_library_2+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #ifdef __GNU_LIBRARY__ #if (__GLIBC__ >= 2) Lucky GNU user #endif #endif _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "Lucky GNU user" >/dev/null 2>&1; then : ac_cv_gnu_library_2=yes else ac_cv_gnu_library_2=no fi rm -f conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_gnu_library_2" >&5 $as_echo "$ac_cv_gnu_library_2" >&6; } GLIBC2="$ac_cv_gnu_library_2" if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args. set dummy ${ac_tool_prefix}ranlib; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_RANLIB+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$RANLIB"; then ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi RANLIB=$ac_cv_prog_RANLIB if test -n "$RANLIB"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $RANLIB" >&5 $as_echo "$RANLIB" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_RANLIB"; then ac_ct_RANLIB=$RANLIB # Extract the first word of "ranlib", so it can be a program name with args. set dummy ranlib; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_RANLIB+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_RANLIB"; then ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
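# ${ac_tool_prefix}ranlib was not found (or no tool prefix is set), so probe the
# PATH for a plain "ranlib"; under cross-compilation this fallback triggers the
# "using cross tools not prefixed with host triplet" warning below.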
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_RANLIB="ranlib" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB if test -n "$ac_ct_RANLIB"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_RANLIB" >&5 $as_echo "$ac_ct_RANLIB" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_RANLIB" = x; then RANLIB=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac RANLIB=$ac_ct_RANLIB fi else RANLIB="$ac_cv_prog_RANLIB" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for strerror in -lcposix" >&5 $as_echo_n "checking for strerror in -lcposix... " >&6; } if ${ac_cv_lib_cposix_strerror+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lcposix $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char strerror (); int main () { return strerror (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_cposix_strerror=yes else ac_cv_lib_cposix_strerror=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_cposix_strerror" >&5 $as_echo "$ac_cv_lib_cposix_strerror" >&6; } if test "x$ac_cv_lib_cposix_strerror" = xyes; then : LIBS="$LIBS -lcposix" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for an ANSI C-conforming const" >&5 $as_echo_n "checking for an ANSI C-conforming const... " >&6; } if ${ac_cv_c_const+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { #ifndef __cplusplus /* Ultrix mips cc rejects this sort of thing. */ typedef int charset[2]; const charset cs = { 0, 0 }; /* SunOS 4.1.1 cc rejects this. */ char const *const *pcpcc; char **ppc; /* NEC SVR4.0.2 mips cc rejects this. */ struct point {int x, y;}; static struct point const zero = {0,0}; /* AIX XL C 1.02.0.0 rejects this. It does not let you subtract one const X* pointer from another in an arm of an if-expression whose if-part is not a constant expression */ const char *g = "string"; pcpcc = &g + (g ? g-g : 0); /* HPUX 7.0 cc rejects these. */ ++pcpcc; ppc = (char**) pcpcc; pcpcc = (char const *const *) ppc; { /* SCO 3.2v4 cc rejects this sort of thing. */ char tx; char *t = &tx; char const *s = 0 ? (char *) 0 : (char const *) 0; *t++ = 0; if (s) return 0; } { /* Someone thinks the Sun supposedly-ANSI compiler will reject this. */ int x[] = {25, 17}; const int *foo = &x[0]; ++foo; } { /* Sun SC1.0 ANSI compiler rejects this -- but not the above. */ typedef const int *iptr; iptr p = 0; ++p; } { /* AIX XL C 1.02.0.0 rejects this sort of thing, saying "k.c", line 2.27: 1506-025 (S) Operand must be a modifiable lvalue. 
*/ struct s { int j; const int *ap[3]; } bx; struct s *b = &bx; b->j = 5; } { /* ULTRIX-32 V3.1 (Rev 9) vcc rejects this */ const int foo = 10; if (!foo) return 0; } return !cs[0] && !zero.x; #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_c_const=yes else ac_cv_c_const=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_const" >&5 $as_echo "$ac_cv_c_const" >&6; } if test $ac_cv_c_const = no; then $as_echo "#define const /**/" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for signed" >&5 $as_echo_n "checking for signed... " >&6; } if ${bh_cv_c_signed+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { signed char x; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : bh_cv_c_signed=yes else bh_cv_c_signed=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $bh_cv_c_signed" >&5 $as_echo "$bh_cv_c_signed" >&6; } if test $bh_cv_c_signed = no; then $as_echo "#define signed /**/" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for inline" >&5 $as_echo_n "checking for inline... " >&6; } if ${ac_cv_c_inline+:} false; then : $as_echo_n "(cached) " >&6 else ac_cv_c_inline=no for ac_kw in inline __inline__ __inline; do cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifndef __cplusplus typedef int foo_t; static $ac_kw foo_t static_foo () {return 0; } $ac_kw foo_t foo () {return 0; } #endif _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_c_inline=$ac_kw fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext test "$ac_cv_c_inline" != no && break done fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_inline" >&5 $as_echo "$ac_cv_c_inline" >&6; } case $ac_cv_c_inline in inline | yes) ;; *) case $ac_cv_c_inline in no) ac_val=;; *) ac_val=$ac_cv_c_inline;; esac cat >>confdefs.h <<_ACEOF #ifndef __cplusplus #define inline $ac_val #endif _ACEOF ;; esac ac_fn_c_check_type "$LINENO" "off_t" "ac_cv_type_off_t" "$ac_includes_default" if test "x$ac_cv_type_off_t" = xyes; then : else cat >>confdefs.h <<_ACEOF #define off_t long int _ACEOF fi ac_fn_c_check_type "$LINENO" "size_t" "ac_cv_type_size_t" "$ac_includes_default" if test "x$ac_cv_type_size_t" = xyes; then : else cat >>confdefs.h <<_ACEOF #define size_t unsigned int _ACEOF fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for long long" >&5 $as_echo_n "checking for long long... " >&6; } if ${ac_cv_type_long_long+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ long long ll = 1LL; int i = 63; int main () { long long llmax = (long long) -1; return ll << i | ll >> i | llmax / ll | llmax % ll; ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_type_long_long=yes else ac_cv_type_long_long=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_type_long_long" >&5 $as_echo "$ac_cv_type_long_long" >&6; } if test $ac_cv_type_long_long = yes; then $as_echo "#define HAVE_LONG_LONG 1" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for long double" >&5 $as_echo_n "checking for long double... 
" >&6; } if ${gt_cv_c_long_double+:} false; then : $as_echo_n "(cached) " >&6 else if test "$GCC" = yes; then gt_cv_c_long_double=yes else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* The Stardent Vistra knows sizeof(long double), but does not support it. */ long double foo = 0.0; /* On Ultrix 4.3 cc, long double is 4 and double is 8. */ int array [2*(sizeof(long double) >= sizeof(double)) - 1]; int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : gt_cv_c_long_double=yes else gt_cv_c_long_double=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_cv_c_long_double" >&5 $as_echo "$gt_cv_c_long_double" >&6; } if test $gt_cv_c_long_double = yes; then $as_echo "#define HAVE_LONG_DOUBLE 1" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for wchar_t" >&5 $as_echo_n "checking for wchar_t... " >&6; } if ${gt_cv_c_wchar_t+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include wchar_t foo = (wchar_t)'\0'; int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : gt_cv_c_wchar_t=yes else gt_cv_c_wchar_t=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_cv_c_wchar_t" >&5 $as_echo "$gt_cv_c_wchar_t" >&6; } if test $gt_cv_c_wchar_t = yes; then $as_echo "#define HAVE_WCHAR_T 1" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for wint_t" >&5 $as_echo_n "checking for wint_t... " >&6; } if ${gt_cv_c_wint_t+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include wint_t foo = (wchar_t)'\0'; int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : gt_cv_c_wint_t=yes else gt_cv_c_wint_t=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_cv_c_wint_t" >&5 $as_echo "$gt_cv_c_wint_t" >&6; } if test $gt_cv_c_wint_t = yes; then $as_echo "#define HAVE_WINT_T 1" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for inttypes.h" >&5 $as_echo_n "checking for inttypes.h... " >&6; } if ${gl_cv_header_inttypes_h+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include int main () { uintmax_t i = (uintmax_t) -1; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : gl_cv_header_inttypes_h=yes else gl_cv_header_inttypes_h=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gl_cv_header_inttypes_h" >&5 $as_echo "$gl_cv_header_inttypes_h" >&6; } if test $gl_cv_header_inttypes_h = yes; then cat >>confdefs.h <<_ACEOF #define HAVE_INTTYPES_H_WITH_UINTMAX 1 _ACEOF fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for stdint.h" >&5 $as_echo_n "checking for stdint.h... " >&6; } if ${gl_cv_header_stdint_h+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include #include int main () { uintmax_t i = (uintmax_t) -1; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : gl_cv_header_stdint_h=yes else gl_cv_header_stdint_h=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gl_cv_header_stdint_h" >&5 $as_echo "$gl_cv_header_stdint_h" >&6; } if test $gl_cv_header_stdint_h = yes; then cat >>confdefs.h <<_ACEOF #define HAVE_STDINT_H_WITH_UINTMAX 1 _ACEOF fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for intmax_t" >&5 $as_echo_n "checking for intmax_t... " >&6; } if ${gt_cv_c_intmax_t+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include #if HAVE_STDINT_H_WITH_UINTMAX #include #endif #if HAVE_INTTYPES_H_WITH_UINTMAX #include #endif int main () { intmax_t x = -1; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : gt_cv_c_intmax_t=yes else gt_cv_c_intmax_t=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_cv_c_intmax_t" >&5 $as_echo "$gt_cv_c_intmax_t" >&6; } if test $gt_cv_c_intmax_t = yes; then $as_echo "#define HAVE_INTMAX_T 1" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether printf() supports POSIX/XSI format strings" >&5 $as_echo_n "checking whether printf() supports POSIX/XSI format strings... " >&6; } if ${gt_cv_func_printf_posix+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #if defined __NetBSD__ || defined _MSC_VER || defined __MINGW32__ || defined __CYGWIN__ notposix #endif _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "notposix" >/dev/null 2>&1; then : gt_cv_func_printf_posix="guessing no" else gt_cv_func_printf_posix="guessing yes" fi rm -f conftest* else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include /* The string "%2$d %1$d", with dollar characters protected from the shell's dollar expansion (possibly an autoconf bug). */ static char format[] = { '%', '2', '$', 'd', ' ', '%', '1', '$', 'd', '\0' }; static char buf[100]; int main () { sprintf (buf, format, 33, 55); return (strcmp (buf, "55 33") != 0); } _ACEOF if ac_fn_c_try_run "$LINENO"; then : gt_cv_func_printf_posix=yes else gt_cv_func_printf_posix=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_cv_func_printf_posix" >&5 $as_echo "$gt_cv_func_printf_posix" >&6; } case $gt_cv_func_printf_posix in *yes) $as_echo "#define HAVE_POSIX_PRINTF 1" >>confdefs.h ;; esac # The Ultrix 4.2 mips builtin alloca declared by alloca.h only works # for constant arguments. Useless! { $as_echo "$as_me:${as_lineno-$LINENO}: checking for working alloca.h" >&5 $as_echo_n "checking for working alloca.h... " >&6; } if ${ac_cv_working_alloca_h+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include int main () { char *p = (char *) alloca (2 * sizeof (int)); if (p) return 0; ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_working_alloca_h=yes else ac_cv_working_alloca_h=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_working_alloca_h" >&5 $as_echo "$ac_cv_working_alloca_h" >&6; } if test $ac_cv_working_alloca_h = yes; then $as_echo "#define HAVE_ALLOCA_H 1" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for alloca" >&5 $as_echo_n "checking for alloca... " >&6; } if ${ac_cv_func_alloca_works+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef __GNUC__ # define alloca __builtin_alloca #else # ifdef _MSC_VER # include # define alloca _alloca # else # ifdef HAVE_ALLOCA_H # include # else # ifdef _AIX #pragma alloca # else # ifndef alloca /* predefined by HP cc +Olibcalls */ void *alloca (size_t); # endif # endif # endif # endif #endif int main () { char *p = (char *) alloca (1); if (p) return 0; ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_func_alloca_works=yes else ac_cv_func_alloca_works=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_alloca_works" >&5 $as_echo "$ac_cv_func_alloca_works" >&6; } if test $ac_cv_func_alloca_works = yes; then $as_echo "#define HAVE_ALLOCA 1" >>confdefs.h else # The SVR3 libPW and SVR4 libucb both contain incompatible functions # that cause trouble. Some versions do not even contain alloca or # contain a buggy version. If you still want to use their alloca, # use ar to extract alloca.o from them instead of compiling alloca.c. ALLOCA=\${LIBOBJDIR}alloca.$ac_objext $as_echo "#define C_ALLOCA 1" >>confdefs.h { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether \`alloca.c' needs Cray hooks" >&5 $as_echo_n "checking whether \`alloca.c' needs Cray hooks... " >&6; } if ${ac_cv_os_cray+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #if defined CRAY && ! defined CRAY2 webecray #else wenotbecray #endif _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "webecray" >/dev/null 2>&1; then : ac_cv_os_cray=yes else ac_cv_os_cray=no fi rm -f conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_os_cray" >&5 $as_echo "$ac_cv_os_cray" >&6; } if test $ac_cv_os_cray = yes; then for ac_func in _getb67 GETB67 getb67; do as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" if eval test \"x\$"$as_ac_var"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define CRAY_STACKSEG_END $ac_func _ACEOF break fi done fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking stack direction for C alloca" >&5 $as_echo_n "checking stack direction for C alloca... " >&6; } if ${ac_cv_c_stack_direction+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : ac_cv_c_stack_direction=0 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $ac_includes_default int find_stack_direction (int *addr, int depth) { int dir, dummy = 0; if (! addr) addr = &dummy; *addr = addr < &dummy ? 1 : addr == &dummy ? 0 : -1; dir = depth ? 
find_stack_direction (addr, depth - 1) : 0; return dir + dummy; } int main (int argc, char **argv) { return find_stack_direction (0, argc + !argv + 20) < 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : ac_cv_c_stack_direction=1 else ac_cv_c_stack_direction=-1 fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_stack_direction" >&5 $as_echo "$ac_cv_c_stack_direction" >&6; } cat >>confdefs.h <<_ACEOF #define STACK_DIRECTION $ac_cv_c_stack_direction _ACEOF fi for ac_header in $ac_header_list do : as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default " if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done for ac_func in getpagesize do : ac_fn_c_check_func "$LINENO" "getpagesize" "ac_cv_func_getpagesize" if test "x$ac_cv_func_getpagesize" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_GETPAGESIZE 1 _ACEOF fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking for working mmap" >&5 $as_echo_n "checking for working mmap... " >&6; } if ${ac_cv_func_mmap_fixed_mapped+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : ac_cv_func_mmap_fixed_mapped=no else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $ac_includes_default /* malloc might have been renamed as rpl_malloc. */ #undef malloc /* Thanks to Mike Haertel and Jim Avera for this test. Here is a matrix of mmap possibilities: mmap private not fixed mmap private fixed at somewhere currently unmapped mmap private fixed at somewhere already mapped mmap shared not fixed mmap shared fixed at somewhere currently unmapped mmap shared fixed at somewhere already mapped For private mappings, we should verify that changes cannot be read() back from the file, nor mmap's back from the file at a different address. (There have been systems where private was not correctly implemented like the infamous i386 svr4.0, and systems where the VM page cache was not coherent with the file system buffer cache like early versions of FreeBSD and possibly contemporary NetBSD.) For shared mappings, we should conversely verify that changes get propagated back to all the places they're supposed to be. Grep wants private fixed already mapped. The main things grep needs to know about mmap are: * does it exist and is it safe to write into the mmap'd area * how to use it (BSD variants) */ #include #include #if !defined STDC_HEADERS && !defined HAVE_STDLIB_H char *malloc (); #endif /* This mess was copied from the GNU getpagesize.h. 
*/ #ifndef HAVE_GETPAGESIZE # ifdef _SC_PAGESIZE # define getpagesize() sysconf(_SC_PAGESIZE) # else /* no _SC_PAGESIZE */ # ifdef HAVE_SYS_PARAM_H # include # ifdef EXEC_PAGESIZE # define getpagesize() EXEC_PAGESIZE # else /* no EXEC_PAGESIZE */ # ifdef NBPG # define getpagesize() NBPG * CLSIZE # ifndef CLSIZE # define CLSIZE 1 # endif /* no CLSIZE */ # else /* no NBPG */ # ifdef NBPC # define getpagesize() NBPC # else /* no NBPC */ # ifdef PAGESIZE # define getpagesize() PAGESIZE # endif /* PAGESIZE */ # endif /* no NBPC */ # endif /* no NBPG */ # endif /* no EXEC_PAGESIZE */ # else /* no HAVE_SYS_PARAM_H */ # define getpagesize() 8192 /* punt totally */ # endif /* no HAVE_SYS_PARAM_H */ # endif /* no _SC_PAGESIZE */ #endif /* no HAVE_GETPAGESIZE */ int main () { char *data, *data2, *data3; const char *cdata2; int i, pagesize; int fd, fd2; pagesize = getpagesize (); /* First, make a file with some known garbage in it. */ data = (char *) malloc (pagesize); if (!data) return 1; for (i = 0; i < pagesize; ++i) *(data + i) = rand (); umask (0); fd = creat ("conftest.mmap", 0600); if (fd < 0) return 2; if (write (fd, data, pagesize) != pagesize) return 3; close (fd); /* Next, check that the tail of a page is zero-filled. File must have non-zero length, otherwise we risk SIGBUS for entire page. */ fd2 = open ("conftest.txt", O_RDWR | O_CREAT | O_TRUNC, 0600); if (fd2 < 0) return 4; cdata2 = ""; if (write (fd2, cdata2, 1) != 1) return 5; data2 = (char *) mmap (0, pagesize, PROT_READ | PROT_WRITE, MAP_SHARED, fd2, 0L); if (data2 == MAP_FAILED) return 6; for (i = 0; i < pagesize; ++i) if (*(data2 + i)) return 7; close (fd2); if (munmap (data2, pagesize)) return 8; /* Next, try to mmap the file at a fixed address which already has something else allocated at it. If we can, also make sure that we see the same garbage. */ fd = open ("conftest.mmap", O_RDWR); if (fd < 0) return 9; if (data2 != mmap (data2, pagesize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED, fd, 0L)) return 10; for (i = 0; i < pagesize; ++i) if (*(data + i) != *(data2 + i)) return 11; /* Finally, make sure that changes to the mapped area do not percolate back to the file as seen by read(). (This is a bug on some variants of i386 svr4.0.) */ for (i = 0; i < pagesize; ++i) *(data2 + i) = *(data2 + i) + 1; data3 = (char *) malloc (pagesize); if (!data3) return 12; if (read (fd, data3, pagesize) != pagesize) return 13; for (i = 0; i < pagesize; ++i) if (*(data + i) != *(data3 + i)) return 14; close (fd); free (data); free (data3); return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : ac_cv_func_mmap_fixed_mapped=yes else ac_cv_func_mmap_fixed_mapped=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_mmap_fixed_mapped" >&5 $as_echo "$ac_cv_func_mmap_fixed_mapped" >&6; } if test $ac_cv_func_mmap_fixed_mapped = yes; then $as_echo "#define HAVE_MMAP 1" >>confdefs.h fi rm -f conftest.mmap conftest.txt { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C Library 2.1 or newer" >&5 $as_echo_n "checking whether we are using the GNU C Library 2.1 or newer... " >&6; } if ${ac_cv_gnu_library_2_1+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include #ifdef __GNU_LIBRARY__ #if (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 1) || (__GLIBC__ > 2) Lucky GNU user #endif #endif _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "Lucky GNU user" >/dev/null 2>&1; then : ac_cv_gnu_library_2_1=yes else ac_cv_gnu_library_2_1=no fi rm -f conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_gnu_library_2_1" >&5 $as_echo "$ac_cv_gnu_library_2_1" >&6; } GLIBC21="$ac_cv_gnu_library_2_1" { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether integer division by zero raises SIGFPE" >&5 $as_echo_n "checking whether integer division by zero raises SIGFPE... " >&6; } if ${gt_cv_int_divbyzero_sigfpe+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : # Guess based on the CPU. case "$host_cpu" in alpha* | i3456786 | m68k | s390*) gt_cv_int_divbyzero_sigfpe="guessing yes";; *) gt_cv_int_divbyzero_sigfpe="guessing no";; esac else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include static void #ifdef __cplusplus sigfpe_handler (int sig) #else sigfpe_handler (sig) int sig; #endif { /* Exit with code 0 if SIGFPE, with code 1 if any other signal. */ exit (sig != SIGFPE); } int x = 1; int y = 0; int z; int nan; int main () { signal (SIGFPE, sigfpe_handler); /* IRIX and AIX (when "xlc -qcheck" is used) yield signal SIGTRAP. */ #if (defined (__sgi) || defined (_AIX)) && defined (SIGTRAP) signal (SIGTRAP, sigfpe_handler); #endif /* Linux/SPARC yields signal SIGILL. */ #if defined (__sparc__) && defined (__linux__) signal (SIGILL, sigfpe_handler); #endif z = x / y; nan = y / y; exit (1); } _ACEOF if ac_fn_c_try_run "$LINENO"; then : gt_cv_int_divbyzero_sigfpe=yes else gt_cv_int_divbyzero_sigfpe=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_cv_int_divbyzero_sigfpe" >&5 $as_echo "$gt_cv_int_divbyzero_sigfpe" >&6; } case "$gt_cv_int_divbyzero_sigfpe" in *yes) value=1;; *) value=0;; esac cat >>confdefs.h <<_ACEOF #define INTDIV0_RAISES_SIGFPE $value _ACEOF { $as_echo "$as_me:${as_lineno-$LINENO}: checking for unsigned long long" >&5 $as_echo_n "checking for unsigned long long... " >&6; } if ${ac_cv_type_unsigned_long_long+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ unsigned long long ull = 1ULL; int i = 63; int main () { unsigned long long ullmax = (unsigned long long) -1; return ull << i | ull >> i | ullmax / ull | ullmax % ull; ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_type_unsigned_long_long=yes else ac_cv_type_unsigned_long_long=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_type_unsigned_long_long" >&5 $as_echo "$ac_cv_type_unsigned_long_long" >&6; } if test $ac_cv_type_unsigned_long_long = yes; then $as_echo "#define HAVE_UNSIGNED_LONG_LONG 1" >>confdefs.h fi if test $gl_cv_header_inttypes_h = no && test $gl_cv_header_stdint_h = no; then test $ac_cv_type_unsigned_long_long = yes \ && ac_type='unsigned long long' \ || ac_type='unsigned long' cat >>confdefs.h <<_ACEOF #define uintmax_t $ac_type _ACEOF else $as_echo "#define HAVE_UINTMAX_T 1" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for inttypes.h" >&5 $as_echo_n "checking for inttypes.h... 
" >&6; } if ${gt_cv_header_inttypes_h+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : gt_cv_header_inttypes_h=yes else gt_cv_header_inttypes_h=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_cv_header_inttypes_h" >&5 $as_echo "$gt_cv_header_inttypes_h" >&6; } if test $gt_cv_header_inttypes_h = yes; then cat >>confdefs.h <<_ACEOF #define HAVE_INTTYPES_H 1 _ACEOF fi if test $gt_cv_header_inttypes_h = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the inttypes.h PRIxNN macros are broken" >&5 $as_echo_n "checking whether the inttypes.h PRIxNN macros are broken... " >&6; } if ${gt_cv_inttypes_pri_broken+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #ifdef PRId32 char *p = PRId32; #endif int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : gt_cv_inttypes_pri_broken=no else gt_cv_inttypes_pri_broken=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_cv_inttypes_pri_broken" >&5 $as_echo "$gt_cv_inttypes_pri_broken" >&6; } fi if test "$gt_cv_inttypes_pri_broken" = yes; then cat >>confdefs.h <<_ACEOF #define PRI_MACROS_BROKEN 1 _ACEOF fi for ac_header in stdint.h do : ac_fn_c_check_header_mongrel "$LINENO" "stdint.h" "ac_cv_header_stdint_h" "$ac_includes_default" if test "x$ac_cv_header_stdint_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_STDINT_H 1 _ACEOF fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking for SIZE_MAX" >&5 $as_echo_n "checking for SIZE_MAX... " >&6; } result= cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #if HAVE_STDINT_H #include #endif #ifdef SIZE_MAX Found it #endif _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "Found it" >/dev/null 2>&1; then : result=yes fi rm -f conftest* if test -z "$result"; then if ac_fn_c_compute_int "$LINENO" "~(size_t)0 / 10" "res_hi" "#include "; then : else result=? fi if ac_fn_c_compute_int "$LINENO" "~(size_t)0 % 10" "res_lo" "#include "; then : else result=? fi if ac_fn_c_compute_int "$LINENO" "sizeof (size_t) <= sizeof (unsigned int)" "fits_in_uint" "#include "; then : else result=? fi if test "$fits_in_uint" = 1; then cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include extern size_t foo; extern unsigned long foo; int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : fits_in_uint=0 fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi if test -z "$result"; then if test "$fits_in_uint" = 1; then result="$res_hi$res_lo"U else result="$res_hi$res_lo"UL fi else result='~(size_t)0' fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $result" >&5 $as_echo "$result" >&6; } if test "$result" != yes; then cat >>confdefs.h <<_ACEOF #define SIZE_MAX $result _ACEOF fi for ac_header in stdint.h do : ac_fn_c_check_header_mongrel "$LINENO" "stdint.h" "ac_cv_header_stdint_h" "$ac_includes_default" if test "x$ac_cv_header_stdint_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_STDINT_H 1 _ACEOF fi done ac_fn_c_check_type "$LINENO" "ptrdiff_t" "ac_cv_type_ptrdiff_t" "$ac_includes_default" if test "x$ac_cv_type_ptrdiff_t" = xyes; then : else $as_echo "#define ptrdiff_t long" >>confdefs.h fi for ac_header in argz.h limits.h locale.h nl_types.h malloc.h stddef.h \ stdlib.h string.h unistd.h sys/param.h do : as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default" if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done for ac_func in asprintf fwprintf getcwd getegid geteuid getgid getuid \ mempcpy munmap putenv setenv setlocale snprintf stpcpy strcasecmp strdup \ strtoul tsearch wcslen __argz_count __argz_stringify __argz_next \ __fsetlocking do : as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" if eval test \"x\$"$as_ac_var"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether _snprintf is declared" >&5 $as_echo_n "checking whether _snprintf is declared... " >&6; } if ${ac_cv_have_decl__snprintf+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { #ifndef _snprintf char *p = (char *) _snprintf; #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_have_decl__snprintf=yes else ac_cv_have_decl__snprintf=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_have_decl__snprintf" >&5 $as_echo "$ac_cv_have_decl__snprintf" >&6; } if test $ac_cv_have_decl__snprintf = yes; then gt_value=1 else gt_value=0 fi cat >>confdefs.h <<_ACEOF #define HAVE_DECL__SNPRINTF $gt_value _ACEOF { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether _snwprintf is declared" >&5 $as_echo_n "checking whether _snwprintf is declared... " >&6; } if ${ac_cv_have_decl__snwprintf+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include int main () { #ifndef _snwprintf char *p = (char *) _snwprintf; #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_have_decl__snwprintf=yes else ac_cv_have_decl__snwprintf=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_have_decl__snwprintf" >&5 $as_echo "$ac_cv_have_decl__snwprintf" >&6; } if test $ac_cv_have_decl__snwprintf = yes; then gt_value=1 else gt_value=0 fi cat >>confdefs.h <<_ACEOF #define HAVE_DECL__SNWPRINTF $gt_value _ACEOF { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether feof_unlocked is declared" >&5 $as_echo_n "checking whether feof_unlocked is declared... " >&6; } if ${ac_cv_have_decl_feof_unlocked+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { #ifndef feof_unlocked char *p = (char *) feof_unlocked; #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_have_decl_feof_unlocked=yes else ac_cv_have_decl_feof_unlocked=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_have_decl_feof_unlocked" >&5 $as_echo "$ac_cv_have_decl_feof_unlocked" >&6; } if test $ac_cv_have_decl_feof_unlocked = yes; then gt_value=1 else gt_value=0 fi cat >>confdefs.h <<_ACEOF #define HAVE_DECL_FEOF_UNLOCKED $gt_value _ACEOF { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether fgets_unlocked is declared" >&5 $as_echo_n "checking whether fgets_unlocked is declared... " >&6; } if ${ac_cv_have_decl_fgets_unlocked+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { #ifndef fgets_unlocked char *p = (char *) fgets_unlocked; #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_have_decl_fgets_unlocked=yes else ac_cv_have_decl_fgets_unlocked=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_have_decl_fgets_unlocked" >&5 $as_echo "$ac_cv_have_decl_fgets_unlocked" >&6; } if test $ac_cv_have_decl_fgets_unlocked = yes; then gt_value=1 else gt_value=0 fi cat >>confdefs.h <<_ACEOF #define HAVE_DECL_FGETS_UNLOCKED $gt_value _ACEOF { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether getc_unlocked is declared" >&5 $as_echo_n "checking whether getc_unlocked is declared... " >&6; } if ${ac_cv_have_decl_getc_unlocked+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include int main () { #ifndef getc_unlocked char *p = (char *) getc_unlocked; #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_have_decl_getc_unlocked=yes else ac_cv_have_decl_getc_unlocked=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_have_decl_getc_unlocked" >&5 $as_echo "$ac_cv_have_decl_getc_unlocked" >&6; } if test $ac_cv_have_decl_getc_unlocked = yes; then gt_value=1 else gt_value=0 fi cat >>confdefs.h <<_ACEOF #define HAVE_DECL_GETC_UNLOCKED $gt_value _ACEOF case $gt_cv_func_printf_posix in *yes) HAVE_POSIX_PRINTF=1 ;; *) HAVE_POSIX_PRINTF=0 ;; esac if test "$ac_cv_func_asprintf" = yes; then HAVE_ASPRINTF=1 else HAVE_ASPRINTF=0 fi if test "$ac_cv_func_snprintf" = yes; then HAVE_SNPRINTF=1 else HAVE_SNPRINTF=0 fi if test "$ac_cv_func_wprintf" = yes; then HAVE_WPRINTF=1 else HAVE_WPRINTF=0 fi am_save_CPPFLAGS="$CPPFLAGS" for element in $INCICONV; do haveit= for x in $CPPFLAGS; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X$element"; then haveit=yes break fi done if test -z "$haveit"; then CPPFLAGS="${CPPFLAGS}${CPPFLAGS:+ }$element" fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking for iconv" >&5 $as_echo_n "checking for iconv... " >&6; } if ${am_cv_func_iconv+:} false; then : $as_echo_n "(cached) " >&6 else am_cv_func_iconv="no, consider installing GNU libiconv" am_cv_lib_iconv=no cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include int main () { iconv_t cd = iconv_open("",""); iconv(cd,NULL,NULL,NULL,NULL); iconv_close(cd); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : am_cv_func_iconv=yes fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext if test "$am_cv_func_iconv" != yes; then am_save_LIBS="$LIBS" LIBS="$LIBS $LIBICONV" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include int main () { iconv_t cd = iconv_open("",""); iconv(cd,NULL,NULL,NULL,NULL); iconv_close(cd); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : am_cv_lib_iconv=yes am_cv_func_iconv=yes fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS="$am_save_LIBS" fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_func_iconv" >&5 $as_echo "$am_cv_func_iconv" >&6; } if test "$am_cv_func_iconv" = yes; then $as_echo "#define HAVE_ICONV 1" >>confdefs.h fi if test "$am_cv_lib_iconv" = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to link with libiconv" >&5 $as_echo_n "checking how to link with libiconv... " >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LIBICONV" >&5 $as_echo "$LIBICONV" >&6; } else CPPFLAGS="$am_save_CPPFLAGS" LIBICONV= LTLIBICONV= fi if test "$am_cv_func_iconv" = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for iconv declaration" >&5 $as_echo_n "checking for iconv declaration... " >&6; } if ${am_cv_proto_iconv+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include #include extern #ifdef __cplusplus "C" #endif #if defined(__STDC__) || defined(__cplusplus) size_t iconv (iconv_t cd, char * *inbuf, size_t *inbytesleft, char * *outbuf, size_t *outbytesleft); #else size_t iconv(); #endif int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : am_cv_proto_iconv_arg1="" else am_cv_proto_iconv_arg1="const" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext am_cv_proto_iconv="extern size_t iconv (iconv_t cd, $am_cv_proto_iconv_arg1 char * *inbuf, size_t *inbytesleft, char * *outbuf, size_t *outbytesleft);" fi am_cv_proto_iconv=`echo "$am_cv_proto_iconv" | tr -s ' ' | sed -e 's/( /(/'` { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${ac_t:- }$am_cv_proto_iconv" >&5 $as_echo "${ac_t:- }$am_cv_proto_iconv" >&6; } cat >>confdefs.h <<_ACEOF #define ICONV_CONST $am_cv_proto_iconv_arg1 _ACEOF fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for nl_langinfo and CODESET" >&5 $as_echo_n "checking for nl_langinfo and CODESET... " >&6; } if ${am_cv_langinfo_codeset+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { char* cs = nl_langinfo(CODESET); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : am_cv_langinfo_codeset=yes else am_cv_langinfo_codeset=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_langinfo_codeset" >&5 $as_echo "$am_cv_langinfo_codeset" >&6; } if test $am_cv_langinfo_codeset = yes; then $as_echo "#define HAVE_LANGINFO_CODESET 1" >>confdefs.h fi if test $ac_cv_header_locale_h = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for LC_MESSAGES" >&5 $as_echo_n "checking for LC_MESSAGES... " >&6; } if ${gt_cv_val_LC_MESSAGES+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { return LC_MESSAGES ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : gt_cv_val_LC_MESSAGES=yes else gt_cv_val_LC_MESSAGES=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_cv_val_LC_MESSAGES" >&5 $as_echo "$gt_cv_val_LC_MESSAGES" >&6; } if test $gt_cv_val_LC_MESSAGES = yes; then $as_echo "#define HAVE_LC_MESSAGES 1" >>confdefs.h fi fi if test -n "$INTL_MACOSX_LIBS"; then CPPFLAGS="$CPPFLAGS -I/System/Library/Frameworks/CoreFoundation.framework/Headers" fi for ac_prog in bison do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_INTLBISON+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$INTLBISON"; then ac_cv_prog_INTLBISON="$INTLBISON" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
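# Look for a bison executable for the gettext machinery; the version check just
# below accepts GNU Bison 1.26 or newer and otherwise sets INTLBISON to ":".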
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_INTLBISON="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi INTLBISON=$ac_cv_prog_INTLBISON if test -n "$INTLBISON"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $INTLBISON" >&5 $as_echo "$INTLBISON" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$INTLBISON" && break done if test -z "$INTLBISON"; then ac_verc_fail=yes else { $as_echo "$as_me:${as_lineno-$LINENO}: checking version of bison" >&5 $as_echo_n "checking version of bison... " >&6; } ac_prog_version=`$INTLBISON --version 2>&1 | sed -n 's/^.*GNU Bison.* \([0-9]*\.[0-9.]*\).*$/\1/p'` case $ac_prog_version in '') ac_prog_version="v. ?.??, bad"; ac_verc_fail=yes;; 1.2[6-9]* | 1.[3-9][0-9]* | [2-9].*) ac_prog_version="$ac_prog_version, ok"; ac_verc_fail=no;; *) ac_prog_version="$ac_prog_version, bad"; ac_verc_fail=yes;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_prog_version" >&5 $as_echo "$ac_prog_version" >&6; } fi if test $ac_verc_fail = yes; then INTLBISON=: fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for CFPreferencesCopyAppValue" >&5 $as_echo_n "checking for CFPreferencesCopyAppValue... " >&6; } if ${gt_cv_func_CFPreferencesCopyAppValue+:} false; then : $as_echo_n "(cached) " >&6 else gt_save_CPPFLAGS="$CPPFLAGS" CPPFLAGS="$CPPFLAGS -I/System/Library/Frameworks/CoreFoundation.framework/Headers" gt_save_LIBS="$LIBS" LIBS="$LIBS -framework CoreFoundation" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { CFPreferencesCopyAppValue(NULL, NULL) ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : gt_cv_func_CFPreferencesCopyAppValue=yes else gt_cv_func_CFPreferencesCopyAppValue=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext CPPFLAGS="$gt_save_CPPFLAGS" LIBS="$gt_save_LIBS" fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_cv_func_CFPreferencesCopyAppValue" >&5 $as_echo "$gt_cv_func_CFPreferencesCopyAppValue" >&6; } if test $gt_cv_func_CFPreferencesCopyAppValue = yes; then $as_echo "#define HAVE_CFPREFERENCESCOPYAPPVALUE 1" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for CFLocaleCopyCurrent" >&5 $as_echo_n "checking for CFLocaleCopyCurrent... " >&6; } if ${gt_cv_func_CFLocaleCopyCurrent+:} false; then : $as_echo_n "(cached) " >&6 else gt_save_CPPFLAGS="$CPPFLAGS" CPPFLAGS="$CPPFLAGS -I/System/Library/Frameworks/CoreFoundation.framework/Headers" gt_save_LIBS="$LIBS" LIBS="$LIBS -framework CoreFoundation" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include int main () { CFLocaleCopyCurrent(); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : gt_cv_func_CFLocaleCopyCurrent=yes else gt_cv_func_CFLocaleCopyCurrent=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext CPPFLAGS="$gt_save_CPPFLAGS" LIBS="$gt_save_LIBS" fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_cv_func_CFLocaleCopyCurrent" >&5 $as_echo "$gt_cv_func_CFLocaleCopyCurrent" >&6; } if test $gt_cv_func_CFLocaleCopyCurrent = yes; then $as_echo "#define HAVE_CFLOCALECOPYCURRENT 1" >>confdefs.h fi INTL_MACOSX_LIBS= if test $gt_cv_func_CFPreferencesCopyAppValue = yes || test $gt_cv_func_CFLocaleCopyCurrent = yes; then INTL_MACOSX_LIBS="-Wl,-framework -Wl,CoreFoundation" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether NLS is requested" >&5 $as_echo_n "checking whether NLS is requested... " >&6; } # Check whether --enable-nls was given. if test "${enable_nls+set}" = set; then : enableval=$enable_nls; USE_NLS=$enableval else USE_NLS=yes fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $USE_NLS" >&5 $as_echo "$USE_NLS" >&6; } BUILD_INCLUDED_LIBINTL=no USE_INCLUDED_LIBINTL=no LIBINTL= LTLIBINTL= POSUB= if test "$USE_NLS" = "yes"; then gt_use_preinstalled_gnugettext=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether included gettext is requested" >&5 $as_echo_n "checking whether included gettext is requested... " >&6; } # Check whether --with-included-gettext was given. if test "${with_included_gettext+set}" = set; then : withval=$with_included_gettext; nls_cv_force_use_gnu_gettext=$withval else nls_cv_force_use_gnu_gettext=no fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $nls_cv_force_use_gnu_gettext" >&5 $as_echo "$nls_cv_force_use_gnu_gettext" >&6; } nls_cv_use_gnu_gettext="$nls_cv_force_use_gnu_gettext" if test "$nls_cv_force_use_gnu_gettext" != "yes"; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU gettext in libc" >&5 $as_echo_n "checking for GNU gettext in libc... " >&6; } if ${gt_cv_func_gnugettext1_libc+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include extern int _nl_msg_cat_cntr; extern int *_nl_domain_bindings; int main () { bindtextdomain ("", ""); return * gettext ("") + _nl_msg_cat_cntr + *_nl_domain_bindings ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : gt_cv_func_gnugettext1_libc=yes else gt_cv_func_gnugettext1_libc=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_cv_func_gnugettext1_libc" >&5 $as_echo "$gt_cv_func_gnugettext1_libc" >&6; } if test "$gt_cv_func_gnugettext1_libc" != "yes"; then use_additional=yes acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval additional_includedir=\"$includedir\" eval additional_libdir=\"$libdir\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" # Check whether --with-libintl-prefix was given. 
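# An explicit --with-libintl-prefix=DIR makes configure look for libintl headers
# in DIR/include and libraries in DIR/lib; --without-libintl-prefix disables the
# additional search location entirely.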
if test "${with_libintl_prefix+set}" = set; then : withval=$with_libintl_prefix; if test "X$withval" = "Xno"; then use_additional=no else if test "X$withval" = "X"; then acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval additional_includedir=\"$includedir\" eval additional_libdir=\"$libdir\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" else additional_includedir="$withval/include" additional_libdir="$withval/lib" fi fi fi LIBINTL= LTLIBINTL= INCINTL= rpathdirs= ltrpathdirs= names_already_handled= names_next_round='intl ' while test -n "$names_next_round"; do names_this_round="$names_next_round" names_next_round= for name in $names_this_round; do already_handled= for n in $names_already_handled; do if test "$n" = "$name"; then already_handled=yes break fi done if test -z "$already_handled"; then names_already_handled="$names_already_handled $name" uppername=`echo "$name" | sed -e 'y|abcdefghijklmnopqrstuvwxyz./-|ABCDEFGHIJKLMNOPQRSTUVWXYZ___|'` eval value=\"\$HAVE_LIB$uppername\" if test -n "$value"; then if test "$value" = yes; then eval value=\"\$LIB$uppername\" test -z "$value" || LIBINTL="${LIBINTL}${LIBINTL:+ }$value" eval value=\"\$LTLIB$uppername\" test -z "$value" || LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }$value" else : fi else found_dir= found_la= found_so= found_a= if test $use_additional = yes; then if test -n "$shlibext" && test -f "$additional_libdir/lib$name.$shlibext"; then found_dir="$additional_libdir" found_so="$additional_libdir/lib$name.$shlibext" if test -f "$additional_libdir/lib$name.la"; then found_la="$additional_libdir/lib$name.la" fi else if test -f "$additional_libdir/lib$name.$libext"; then found_dir="$additional_libdir" found_a="$additional_libdir/lib$name.$libext" if test -f "$additional_libdir/lib$name.la"; then found_la="$additional_libdir/lib$name.la" fi fi fi fi if test "X$found_dir" = "X"; then for x in $LDFLAGS $LTLIBINTL; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" case "$x" in -L*) dir=`echo "X$x" | sed -e 's/^X-L//'` if test -n "$shlibext" && test -f "$dir/lib$name.$shlibext"; then found_dir="$dir" found_so="$dir/lib$name.$shlibext" if test -f "$dir/lib$name.la"; then found_la="$dir/lib$name.la" fi else if test -f "$dir/lib$name.$libext"; then found_dir="$dir" found_a="$dir/lib$name.$libext" if test -f "$dir/lib$name.la"; then found_la="$dir/lib$name.la" fi fi fi ;; esac if test "X$found_dir" != "X"; then break fi done fi if test "X$found_dir" != "X"; then LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }-L$found_dir -l$name" if test "X$found_so" != "X"; then if test "$enable_rpath" = no || test "X$found_dir" = "X/usr/lib"; then LIBINTL="${LIBINTL}${LIBINTL:+ }$found_so" else haveit= for x in $ltrpathdirs; do if test "X$x" = "X$found_dir"; then haveit=yes break fi done if test -z "$haveit"; then ltrpathdirs="$ltrpathdirs $found_dir" fi if test "$hardcode_direct" = yes; then LIBINTL="${LIBINTL}${LIBINTL:+ }$found_so" else if test -n "$hardcode_libdir_flag_spec" && test "$hardcode_minus_L" = no; then LIBINTL="${LIBINTL}${LIBINTL:+ }$found_so" haveit= for x in $rpathdirs; do if test "X$x" = "X$found_dir"; then haveit=yes break fi done if test -z "$haveit"; then rpathdirs="$rpathdirs $found_dir" fi else haveit= for x in $LDFLAGS $LIBINTL; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" 
acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X-L$found_dir"; then haveit=yes break fi done if test -z "$haveit"; then LIBINTL="${LIBINTL}${LIBINTL:+ }-L$found_dir" fi if test "$hardcode_minus_L" != no; then LIBINTL="${LIBINTL}${LIBINTL:+ }$found_so" else LIBINTL="${LIBINTL}${LIBINTL:+ }-l$name" fi fi fi fi else if test "X$found_a" != "X"; then LIBINTL="${LIBINTL}${LIBINTL:+ }$found_a" else LIBINTL="${LIBINTL}${LIBINTL:+ }-L$found_dir -l$name" fi fi additional_includedir= case "$found_dir" in */lib | */lib/) basedir=`echo "X$found_dir" | sed -e 's,^X,,' -e 's,/lib/*$,,'` additional_includedir="$basedir/include" ;; esac if test "X$additional_includedir" != "X"; then if test "X$additional_includedir" != "X/usr/include"; then haveit= if test "X$additional_includedir" = "X/usr/local/include"; then if test -n "$GCC"; then case $host_os in linux* | gnu* | k*bsd*-gnu) haveit=yes;; esac fi fi if test -z "$haveit"; then for x in $CPPFLAGS $INCINTL; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X-I$additional_includedir"; then haveit=yes break fi done if test -z "$haveit"; then if test -d "$additional_includedir"; then INCINTL="${INCINTL}${INCINTL:+ }-I$additional_includedir" fi fi fi fi fi if test -n "$found_la"; then save_libdir="$libdir" case "$found_la" in */* | *\\*) . "$found_la" ;; *) . "./$found_la" ;; esac libdir="$save_libdir" for dep in $dependency_libs; do case "$dep" in -L*) additional_libdir=`echo "X$dep" | sed -e 's/^X-L//'` if test "X$additional_libdir" != "X/usr/lib"; then haveit= if test "X$additional_libdir" = "X/usr/local/lib"; then if test -n "$GCC"; then case $host_os in linux* | gnu* | k*bsd*-gnu) haveit=yes;; esac fi fi if test -z "$haveit"; then haveit= for x in $LDFLAGS $LIBINTL; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X-L$additional_libdir"; then haveit=yes break fi done if test -z "$haveit"; then if test -d "$additional_libdir"; then LIBINTL="${LIBINTL}${LIBINTL:+ }-L$additional_libdir" fi fi haveit= for x in $LDFLAGS $LTLIBINTL; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X-L$additional_libdir"; then haveit=yes break fi done if test -z "$haveit"; then if test -d "$additional_libdir"; then LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }-L$additional_libdir" fi fi fi fi ;; -R*) dir=`echo "X$dep" | sed -e 's/^X-R//'` if test "$enable_rpath" != no; then haveit= for x in $rpathdirs; do if test "X$x" = "X$dir"; then haveit=yes break fi done if test -z "$haveit"; then rpathdirs="$rpathdirs $dir" fi haveit= for x in $ltrpathdirs; do if test "X$x" = "X$dir"; then haveit=yes break fi done if test -z "$haveit"; then ltrpathdirs="$ltrpathdirs $dir" fi fi ;; -l*) names_next_round="$names_next_round "`echo "X$dep" | sed -e 's/^X-l//'` ;; *.la) names_next_round="$names_next_round "`echo "X$dep" | sed -e 's,^X.*/,,' -e 's,^lib,,' -e 's,\.la$,,'` ;; *) LIBINTL="${LIBINTL}${LIBINTL:+ }$dep" LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }$dep" ;; esac done fi else 
LIBINTL="${LIBINTL}${LIBINTL:+ }-l$name" LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }-l$name" fi fi fi done done if test "X$rpathdirs" != "X"; then if test -n "$hardcode_libdir_separator"; then alldirs= for found_dir in $rpathdirs; do alldirs="${alldirs}${alldirs:+$hardcode_libdir_separator}$found_dir" done acl_save_libdir="$libdir" libdir="$alldirs" eval flag=\"$hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" LIBINTL="${LIBINTL}${LIBINTL:+ }$flag" else for found_dir in $rpathdirs; do acl_save_libdir="$libdir" libdir="$found_dir" eval flag=\"$hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" LIBINTL="${LIBINTL}${LIBINTL:+ }$flag" done fi fi if test "X$ltrpathdirs" != "X"; then for found_dir in $ltrpathdirs; do LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }-R$found_dir" done fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU gettext in libintl" >&5 $as_echo_n "checking for GNU gettext in libintl... " >&6; } if ${gt_cv_func_gnugettext1_libintl+:} false; then : $as_echo_n "(cached) " >&6 else gt_save_CPPFLAGS="$CPPFLAGS" CPPFLAGS="$CPPFLAGS $INCINTL" gt_save_LIBS="$LIBS" LIBS="$LIBS $LIBINTL" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include extern int _nl_msg_cat_cntr; extern #ifdef __cplusplus "C" #endif const char *_nl_expand_alias (const char *); int main () { bindtextdomain ("", ""); return * gettext ("") + _nl_msg_cat_cntr + *_nl_expand_alias ("") ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : gt_cv_func_gnugettext1_libintl=yes else gt_cv_func_gnugettext1_libintl=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext if test "$gt_cv_func_gnugettext1_libintl" != yes && test -n "$LIBICONV"; then LIBS="$LIBS $LIBICONV" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include extern int _nl_msg_cat_cntr; extern #ifdef __cplusplus "C" #endif const char *_nl_expand_alias (const char *); int main () { bindtextdomain ("", ""); return * gettext ("") + _nl_msg_cat_cntr + *_nl_expand_alias ("") ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : LIBINTL="$LIBINTL $LIBICONV" LTLIBINTL="$LTLIBINTL $LTLIBICONV" gt_cv_func_gnugettext1_libintl=yes fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi CPPFLAGS="$gt_save_CPPFLAGS" LIBS="$gt_save_LIBS" fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_cv_func_gnugettext1_libintl" >&5 $as_echo "$gt_cv_func_gnugettext1_libintl" >&6; } fi if test "$gt_cv_func_gnugettext1_libc" = "yes" \ || { test "$gt_cv_func_gnugettext1_libintl" = "yes" \ && test "$PACKAGE" != gettext-runtime \ && test "$PACKAGE" != gettext-tools; }; then gt_use_preinstalled_gnugettext=yes else LIBINTL= LTLIBINTL= INCINTL= fi if test "$gt_use_preinstalled_gnugettext" != "yes"; then nls_cv_use_gnu_gettext=yes fi fi if test "$nls_cv_use_gnu_gettext" = "yes"; then BUILD_INCLUDED_LIBINTL=yes USE_INCLUDED_LIBINTL=yes LIBINTL="\${top_builddir}/intl/libintl.a $LIBICONV" LTLIBINTL="\${top_builddir}/intl/libintl.a $LTLIBICONV" LIBS=`echo " $LIBS " | sed -e 's/ -lintl / /' -e 's/^ //' -e 's/ $//'` fi CATOBJEXT= if test "$gt_use_preinstalled_gnugettext" = "yes" \ || test "$nls_cv_use_gnu_gettext" = "yes"; then CATOBJEXT=.gmo fi if test -n "$INTL_MACOSX_LIBS"; then if test "$gt_use_preinstalled_gnugettext" = "yes" \ || test "$nls_cv_use_gnu_gettext" = "yes"; then LIBINTL="$LIBINTL $INTL_MACOSX_LIBS" LTLIBINTL="$LTLIBINTL $INTL_MACOSX_LIBS" fi fi if test "$gt_use_preinstalled_gnugettext" = "yes" \ || test "$nls_cv_use_gnu_gettext" = "yes"; then $as_echo "#define ENABLE_NLS 1" >>confdefs.h else USE_NLS=no fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to use NLS" >&5 $as_echo_n "checking whether to use NLS... " >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: $USE_NLS" >&5 $as_echo "$USE_NLS" >&6; } if test "$USE_NLS" = "yes"; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking where the gettext function comes from" >&5 $as_echo_n "checking where the gettext function comes from... " >&6; } if test "$gt_use_preinstalled_gnugettext" = "yes"; then if test "$gt_cv_func_gnugettext1_libintl" = "yes"; then gt_source="external libintl" else gt_source="libc" fi else gt_source="included intl directory" fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_source" >&5 $as_echo "$gt_source" >&6; } fi if test "$USE_NLS" = "yes"; then if test "$gt_use_preinstalled_gnugettext" = "yes"; then if test "$gt_cv_func_gnugettext1_libintl" = "yes"; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to link with libintl" >&5 $as_echo_n "checking how to link with libintl... 
" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LIBINTL" >&5 $as_echo "$LIBINTL" >&6; } for element in $INCINTL; do haveit= for x in $CPPFLAGS; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X$element"; then haveit=yes break fi done if test -z "$haveit"; then CPPFLAGS="${CPPFLAGS}${CPPFLAGS:+ }$element" fi done fi $as_echo "#define HAVE_GETTEXT 1" >>confdefs.h $as_echo "#define HAVE_DCGETTEXT 1" >>confdefs.h fi POSUB=po fi if test "$PACKAGE" = gettext-runtime || test "$PACKAGE" = gettext-tools; then BUILD_INCLUDED_LIBINTL=yes fi nls_cv_header_intl= nls_cv_header_libgt= DATADIRNAME=share INSTOBJEXT=.mo GENCAT=gencat INTLOBJS= if test "$USE_INCLUDED_LIBINTL" = yes; then INTLOBJS="\$(GETTOBJS)" fi INTL_LIBTOOL_SUFFIX_PREFIX= INTLLIBS="$LIBINTL" fi support_smartalloc=yes support_readline=yes support_lzo=yes support_zstd=yes support_s3=yes support_aws=yes support_conio=yes support_bat=no support_tls=no support_crypto=no support_static_tools=no support_static_fd=no support_static_sd=no support_static_dir=no support_static_cons=no build_client_only=no build_dird=yes build_stored=yes db_backends="" batch_insert_db_backends="" support_lockmgr=no # Check whether --enable-bat was given. if test "${enable_bat+set}" = set; then : enableval=$enable_bat; if test x$enableval = xyes; then $as_echo "#define HAVE_BAT 1" >>confdefs.h support_bat=yes fi fi BAT_DIR= if test x$support_bat = xyes; then abc=`$PKG_CONFIG QtGui` pkg=$? if test $pkg = 0; then BAT_DIR=src/qt-console else abc=`$PKG_CONFIG Qt5Gui` pkg=$? if test $pkg = 0; then BAT_DIR="src/qt-console src/qt-console/tray-monitor" else as_fn_error $? "Unable to find either Qt4 or Qt5 installation needed by bat" "$LINENO" 5 fi fi fi # Check whether --enable-smartalloc was given. if test "${enable_smartalloc+set}" = set; then : enableval=$enable_smartalloc; if test x$enableval = xno; then support_smartalloc=no fi fi if test x$support_smartalloc = xyes; then $as_echo "#define SMARTALLOC 1" >>confdefs.h fi # Check whether --enable-lockmgr was given. if test "${enable_lockmgr+set}" = set; then : enableval=$enable_lockmgr; if test x$enableval = xyes; then support_lockmgr=yes fi fi if test x$support_lockmgr = xyes; then $as_echo "#define USE_LOCKMGR 1" >>confdefs.h fi # Check whether --enable-static-tools was given. if test "${enable_static_tools+set}" = set; then : enableval=$enable_static_tools; if test x$enableval = xyes; then if test x$use_libtool = xyes; then as_fn_error $? "Libtool is enabled, not compatible with static tools, please rerun configure with --disable-libtool" "$LINENO" 5 fi support_static_tools=yes fi fi TTOOL_LDFLAGS= if test x$support_static_tools = xyes; then TTOOL_LDFLAGS="-static" fi # Check whether --enable-static-fd was given. if test "${enable_static_fd+set}" = set; then : enableval=$enable_static_fd; if test x$enableval = xyes; then if test x$use_libtool = xyes; then as_fn_error $? "Libtool is enabled, not compatible with static tools, please rerun configure with --disable-libtool" "$LINENO" 5 fi support_static_fd=yes fi fi STATIC_FD= if test x$support_static_fd = xyes; then STATIC_FD="static-bacula-fd" fi # Check whether --enable-static-sd was given. if test "${enable_static_sd+set}" = set; then : enableval=$enable_static_sd; if test x$enableval = xyes; then if test x$use_libtool = xyes; then as_fn_error $? 
"Libtool is enabled, not compatible with static tools, please rerun configure with --disable-libtool" "$LINENO" 5 fi support_static_sd=yes fi fi STATIC_SD= if test x$support_static_sd = xyes; then STATIC_SD="static-bacula-sd" fi # Check whether --enable-static-dir was given. if test "${enable_static_dir+set}" = set; then : enableval=$enable_static_dir; if test x$enableval = xyes; then if test x$use_libtool = xyes; then as_fn_error $? "Libtool is enabled, not compatible with static tools, please rerun configure with --disable-libtool" "$LINENO" 5 fi support_static_dir=yes fi fi STATIC_DIR= if test x$support_static_dir = xyes; then STATIC_DIR="static-bacula-dir" fi # Check whether --enable-static-cons was given. if test "${enable_static_cons+set}" = set; then : enableval=$enable_static_cons; if test x$enableval = xyes; then if test x$use_libtool = xyes; then as_fn_error $? "Libtool is enabled, not compatible with static tools, please rerun configure with --disable-libtool" "$LINENO" 5 fi support_static_cons=yes fi fi STATIC_CONS= if test x$support_static_cons = xyes; then STATIC_CONS="static-bconsole" fi # Check whether --enable-client-only was given. if test "${enable_client_only+set}" = set; then : enableval=$enable_client_only; if test x$enableval = xyes; then build_client_only=yes db_backends="None" DB_BACKENDS="none" fi fi if test x$build_client_only = xno; then ALL_DIRS="subdirs" $as_echo "#define HAVE_CLIENT_ONLY 1" >>confdefs.h else ALL_DIRS="" fi # Check whether --enable-build-dird was given. if test "${enable_build_dird+set}" = set; then : enableval=$enable_build_dird; if test x$enableval = xno; then build_dird=no fi fi DIRD_DIR="src/dird" DIR_TOOLS="DIRTOOLS" # DIR_PLUGIN_DIR="src/plugins/dir" if test x$build_dird = xyes; then DIRD_DIR="src/dird" DIR_TOOLS="DIRTOOLS" else DIRD_DIR="" DIR_TOOLS="NODIRTOOLS" fi # Check whether --enable-build-stored was given. if test "${enable_build_stored+set}" = set; then : enableval=$enable_build_stored; if test x$enableval = xno; then build_stored=no fi fi if test x$build_stored = xyes; then STORED_DIR="src/stored" else STORED_DIR="" fi # Check whether --enable-conio was given. if test "${enable_conio+set}" = set; then : enableval=$enable_conio; if test x$enableval = xno; then support_conio=no fi fi support_ipv6=yes # Check whether --enable-ipv6 was given. if test "${enable_ipv6+set}" = set; then : enableval=$enable_ipv6; if test x$enableval = xno; then support_ipv6=no fi fi if test x$support_ipv6 = xyes; then cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include #include int main () { struct in6_addr t=in6addr_any; t.s6_addr[0] = 0; ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : support_in6addr_any=yes else support_in6addr_any=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext if test x$support_in6addr_any = xno ; then in6addr_any="const struct in6_addr in6addr_any" else in6addr_any="1" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include #include #include int main () { $in6addr_any; struct sockaddr_in6 s; struct in6_addr t=in6addr_any; int i=AF_INET6; s; t.s6_addr[0] = 0; ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : support_ipv6=yes else support_ipv6=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi if test x$support_ipv6 = xyes; then $as_echo "#define HAVE_IPV6 1" >>confdefs.h if test x$support_in6addr_any = xno ; then $as_echo "#define NEED_IN6ADDR_ANY 1" >>confdefs.h fi fi TERM_LIB="" ac_fn_c_check_header_mongrel "$LINENO" "curses.h" "ac_cv_header_curses_h" "$ac_includes_default" if test "x$ac_cv_header_curses_h" = xyes; then : { $as_echo "$as_me:${as_lineno-$LINENO}: checking for tgetent in -ltinfo" >&5 $as_echo_n "checking for tgetent in -ltinfo... " >&6; } if ${ac_cv_lib_tinfo_tgetent+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ltinfo $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char tgetent (); int main () { return tgetent (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_tinfo_tgetent=yes else ac_cv_lib_tinfo_tgetent=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_tinfo_tgetent" >&5 $as_echo "$ac_cv_lib_tinfo_tgetent" >&6; } if test "x$ac_cv_lib_tinfo_tgetent" = xyes; then : TERM_LIB="-ltinfo" else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for tgetent in -lncurses" >&5 $as_echo_n "checking for tgetent in -lncurses... " >&6; } if ${ac_cv_lib_ncurses_tgetent+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lncurses $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char tgetent (); int main () { return tgetent (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_ncurses_tgetent=yes else ac_cv_lib_ncurses_tgetent=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_ncurses_tgetent" >&5 $as_echo "$ac_cv_lib_ncurses_tgetent" >&6; } if test "x$ac_cv_lib_ncurses_tgetent" = xyes; then : TERM_LIB="-lncurses" else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for tgetent in -ltermcap" >&5 $as_echo_n "checking for tgetent in -ltermcap... " >&6; } if ${ac_cv_lib_termcap_tgetent+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ltermcap $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char tgetent (); int main () { return tgetent (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_termcap_tgetent=yes else ac_cv_lib_termcap_tgetent=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_termcap_tgetent" >&5 $as_echo "$ac_cv_lib_termcap_tgetent" >&6; } if test "x$ac_cv_lib_termcap_tgetent" = xyes; then : TERM_LIB="-ltermcap" fi fi fi else for ac_header in curses.h do : ac_fn_c_check_header_mongrel "$LINENO" "curses.h" "ac_cv_header_curses_h" "$ac_includes_default" if test "x$ac_cv_header_curses_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_CURSES_H 1 _ACEOF fi done ac_fn_c_check_header_mongrel "$LINENO" "term.h" "ac_cv_header_term_h" "$ac_includes_default" if test "x$ac_cv_header_term_h" = xyes; then : { $as_echo "$as_me:${as_lineno-$LINENO}: checking for tgetent in -lcurses" >&5 $as_echo_n "checking for tgetent in -lcurses... " >&6; } if ${ac_cv_lib_curses_tgetent+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lcurses $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char tgetent (); int main () { return tgetent (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_curses_tgetent=yes else ac_cv_lib_curses_tgetent=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_curses_tgetent" >&5 $as_echo "$ac_cv_lib_curses_tgetent" >&6; } if test "x$ac_cv_lib_curses_tgetent" = xyes; then : TERM_LIB="-lcurses" fi fi fi got_conio="no" if test x$support_conio = xyes; then if test x$TERM_LIB != x; then CONS_LIBS=$TERM_LIB CONS_OBJ="conio.o" CONS_SRC="conio.c" got_conio="yes" support_readline=no $as_echo "#define HAVE_CONIO 1" >>confdefs.h else echo " "; echo "Required libraries not found. CONIO turned off ..."; echo " " fi fi # Check whether --enable-readline was given. if test "${enable_readline+set}" = set; then : enableval=$enable_readline; if test x$enableval = xno; then support_readline=no fi fi got_readline="no" READLINE_SRC= if test x$support_readline = xyes; then # Check whether --with-readline was given. if test "${with_readline+set}" = set; then : withval=$with_readline; case "$with_readline" in no) : ;; yes|*) if test -f ${with_readline}/readline.h; then CONS_INC="-I${with_readline}" CONS_LDFLAGS="-L$with_readline" elif test -f ${with_readline}/include/readline/readline.h; then CONS_INC="-I${with_readline}/include/readline" CONS_LDFLAGS="-L${with_readline}/lib" with_readline="${with_readline}/include/readline" else with_readline="/usr/include/readline" fi as_ac_Header=`$as_echo "ac_cv_header_${with_readline}/readline.h" | $as_tr_sh` ac_fn_c_check_header_mongrel "$LINENO" "${with_readline}/readline.h" "$as_ac_Header" "$ac_includes_default" if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : $as_echo "#define HAVE_READLINE 1" >>confdefs.h CONS_LIBS="-lreadline -lhistory ${TERM_LIB}" got_readline="yes" else echo " " echo "readline.h not found. readline turned off ..." 
echo " " fi ;; esac else ac_fn_c_check_header_mongrel "$LINENO" "/usr/include/readline/readline.h" "ac_cv_header__usr_include_readline_readline_h" "$ac_includes_default" if test "x$ac_cv_header__usr_include_readline_readline_h" = xyes; then : $as_echo "#define HAVE_READLINE 1" >>confdefs.h got_readline="yes" CONS_INC="-I/usr/include/readline" CONS_LIBS="-lreadline ${TERM_LIB}" else as_ac_Header=`$as_echo "ac_cv_header_${TOP_DIR}/depkgs/readline/readline.h" | $as_tr_sh` ac_fn_c_check_header_mongrel "$LINENO" "${TOP_DIR}/depkgs/readline/readline.h" "$as_ac_Header" "$ac_includes_default" if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : $as_echo "#define HAVE_READLINE 1" >>confdefs.h got_readline="yes" CONS_INC="-I${TOP_DIR}/depkgs/readline" CONS_LIBS="-lreadline -lhistory ${TERM_LIB}" CONS_LDFLAGS="-L${TOP_DIR}/depkgs/readline" PRTREADLINE_SRC="${TOP_DIR}/depkgs/readline" else echo " " echo "readline.h not found. readline turned off ..." echo " " fi fi fi fi MAKE_SHELL=/bin/sh { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether stat file-mode macros are broken" >&5 $as_echo_n "checking whether stat file-mode macros are broken... " >&6; } if ${ac_cv_header_stat_broken+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include #if defined S_ISBLK && defined S_IFDIR extern char c1[S_ISBLK (S_IFDIR) ? -1 : 1]; #endif #if defined S_ISBLK && defined S_IFCHR extern char c2[S_ISBLK (S_IFCHR) ? -1 : 1]; #endif #if defined S_ISLNK && defined S_IFREG extern char c3[S_ISLNK (S_IFREG) ? -1 : 1]; #endif #if defined S_ISSOCK && defined S_IFREG extern char c4[S_ISSOCK (S_IFREG) ? -1 : 1]; #endif _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_header_stat_broken=no else ac_cv_header_stat_broken=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stat_broken" >&5 $as_echo "$ac_cv_header_stat_broken" >&6; } if test $ac_cv_header_stat_broken = yes; then $as_echo "#define STAT_MACROS_BROKEN 1" >>confdefs.h fi ac_header_dirent=no for ac_hdr in dirent.h sys/ndir.h sys/dir.h ndir.h; do as_ac_Header=`$as_echo "ac_cv_header_dirent_$ac_hdr" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_hdr that defines DIR" >&5 $as_echo_n "checking for $ac_hdr that defines DIR... " >&6; } if eval \${$as_ac_Header+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include <$ac_hdr> int main () { if ((DIR *) 0) return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : eval "$as_ac_Header=yes" else eval "$as_ac_Header=no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi eval ac_res=\$$as_ac_Header { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_hdr" | $as_tr_cpp` 1 _ACEOF ac_header_dirent=$ac_hdr; break fi done # Two versions of opendir et al. are in -ldir and -lx on SCO Xenix. if test $ac_header_dirent = dirent.h; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing opendir" >&5 $as_echo_n "checking for library containing opendir... " >&6; } if ${ac_cv_search_opendir+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. 
Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char opendir (); int main () { return opendir (); ; return 0; } _ACEOF for ac_lib in '' dir; do if test -z "$ac_lib"; then ac_res="none required" else ac_res=-l$ac_lib LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : ac_cv_search_opendir=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext if ${ac_cv_search_opendir+:} false; then : break fi done if ${ac_cv_search_opendir+:} false; then : else ac_cv_search_opendir=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_opendir" >&5 $as_echo "$ac_cv_search_opendir" >&6; } ac_res=$ac_cv_search_opendir if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" fi else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing opendir" >&5 $as_echo_n "checking for library containing opendir... " >&6; } if ${ac_cv_search_opendir+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char opendir (); int main () { return opendir (); ; return 0; } _ACEOF for ac_lib in '' x; do if test -z "$ac_lib"; then ac_res="none required" else ac_res=-l$ac_lib LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : ac_cv_search_opendir=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext if ${ac_cv_search_opendir+:} false; then : break fi done if ${ac_cv_search_opendir+:} false; then : else ac_cv_search_opendir=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_opendir" >&5 $as_echo "$ac_cv_search_opendir" >&6; } ac_res=$ac_cv_search_opendir if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" fi fi for ac_header in sys/fsuid.h do : ac_fn_c_check_header_mongrel "$LINENO" "sys/fsuid.h" "ac_cv_header_sys_fsuid_h" "$ac_includes_default" if test "x$ac_cv_header_sys_fsuid_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_SYS_FSUID_H 1 _ACEOF fi done for ac_func in setfsgid setfsuid do : as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" if eval test \"x\$"$as_ac_var"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF fi done for ac_func in strcasecmp select setenv putenv tcgetattr tzset do : as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" if eval test \"x\$"$as_ac_var"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF fi done for ac_func in lstat lchown lchmod futimes fchmod fchown lutimes do : as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" if eval test \"x\$"$as_ac_var"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF fi done for ac_func in nanosleep nl_langinfo do : as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` ac_fn_c_check_func "$LINENO" 
"$ac_func" "$as_ac_var" if eval test \"x\$"$as_ac_var"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF fi done for ac_func in be64toh htobe64 do : as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" if eval test \"x\$"$as_ac_var"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF fi done for ac_header in varargs.h do : ac_fn_c_check_header_mongrel "$LINENO" "varargs.h" "ac_cv_header_varargs_h" "$ac_includes_default" if test "x$ac_cv_header_varargs_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_VARARGS_H 1 _ACEOF fi done ac_fn_c_check_func "$LINENO" "socket" "ac_cv_func_socket" if test "x$ac_cv_func_socket" = xyes; then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: using libc's socket" >&5 $as_echo "using libc's socket" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for socket in -lxnet" >&5 $as_echo_n "checking for socket in -lxnet... " >&6; } if ${ac_cv_lib_xnet_socket+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lxnet $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char socket (); int main () { return socket (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_xnet_socket=yes else ac_cv_lib_xnet_socket=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_xnet_socket" >&5 $as_echo "$ac_cv_lib_xnet_socket" >&6; } if test "x$ac_cv_lib_xnet_socket" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LIBXNET 1 _ACEOF LIBS="-lxnet $LIBS" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for socket in -lsocket" >&5 $as_echo_n "checking for socket in -lsocket... " >&6; } if ${ac_cv_lib_socket_socket+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lsocket $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char socket (); int main () { return socket (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_socket_socket=yes else ac_cv_lib_socket_socket=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_socket_socket" >&5 $as_echo "$ac_cv_lib_socket_socket" >&6; } if test "x$ac_cv_lib_socket_socket" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LIBSOCKET 1 _ACEOF LIBS="-lsocket $LIBS" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for socket in -linet" >&5 $as_echo_n "checking for socket in -linet... " >&6; } if ${ac_cv_lib_inet_socket+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-linet $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. 
Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char socket (); int main () { return socket (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_inet_socket=yes else ac_cv_lib_inet_socket=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_inet_socket" >&5 $as_echo "$ac_cv_lib_inet_socket" >&6; } if test "x$ac_cv_lib_inet_socket" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LIBINET 1 _ACEOF LIBS="-linet $LIBS" fi fi TCPW_MSG="no" WRAPLIBS="" # Check whether --with-tcp-wrappers was given. if test "${with_tcp_wrappers+set}" = set; then : withval=$with_tcp_wrappers; if test "x$withval" != "xno" ; then saved_LIBS="$LIBS" LIBS="$saved_LIBS -lwrap" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing nanosleep" >&5 $as_echo_n "checking for library containing nanosleep... " >&6; } if ${ac_cv_search_nanosleep+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char nanosleep (); int main () { return nanosleep (); ; return 0; } _ACEOF for ac_lib in '' rt; do if test -z "$ac_lib"; then ac_res="none required" else ac_res=-l$ac_lib LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : ac_cv_search_nanosleep=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext if ${ac_cv_search_nanosleep+:} false; then : break fi done if ${ac_cv_search_nanosleep+:} false; then : else ac_cv_search_nanosleep=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_nanosleep" >&5 $as_echo "$ac_cv_search_nanosleep" >&6; } ac_res=$ac_cv_search_nanosleep if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for libwrap" >&5 $as_echo_n "checking for libwrap... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include int deny_severity = 0; int allow_severity = 0; struct request_info *req; int main () { hosts_access(req); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } $as_echo "#define HAVE_LIBWRAP 1" >>confdefs.h TCPW_MSG="yes" LIBS="$saved_LIBS" WRAPLIBS="-lwrap" else LIBS="$saved_LIBS -lwrap -lnsl" WRAPLIBS="$saved_LIBS -lwrap -lnsl" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include int deny_severity = 0; int allow_severity = 0; struct request_info *req; int main () { hosts_access(req); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } $as_echo "#define HAVE_LIBWRAP 1" >>confdefs.h TCPW_MSG="yes" LIBS="$saved_LIBS" WRAPLIBS="-lwrap" else as_fn_error $? 
"*** libwrap missing" "$LINENO" 5 fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi fi with_curl_directory=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for CURL" >&5 $as_echo_n "checking for CURL... " >&6; } # Check whether --with-curl was given. if test "${with_curl+set}" = set; then : withval=$with_curl; with_curl_directory=${withval} fi CURL_LIBS="-lcurl" CURL_INC="" saved_LIBS="${LIBS}" saved_CFLAGS="${CFLAGS}" if test "x$with_curl_directory" != "xno"; then if test "x$with_curl_directory" != "xyes" && test x"${with_curl_directory}" != "x"; then # # Make sure the $with_curl_directory also makes sense # if test -d "$with_curl_directory/include"; then CURL_INC="-I$with_curl_directory/include $CURL_INC" fi if test -d "$with_curl_directory/lib"; then CURL_LIBS="-L$with_curl_directory/lib $CURL_LIBS" fi if test -d "$with_curl_directory/lib64"; then CURL_LIBS="-L$with_curl_directory/lib64 $CURL_LIBS" fi fi LIBS="${saved_LIBS} ${CURL_LIBS}" CFLAGS="${saved_CFLAGS} ${CURL_INC}" fi ac_fn_c_check_header_mongrel "$LINENO" "curl/curl.h" "ac_cv_header_curl_curl_h" "$ac_includes_default" if test "x$ac_cv_header_curl_curl_h" = xyes; then : else error="Can't find CURL header." fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for curl_version_info in -lcurl" >&5 $as_echo_n "checking for curl_version_info in -lcurl... " >&6; } if ${ac_cv_lib_curl_curl_version_info+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lcurl $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char curl_version_info (); int main () { return curl_version_info (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_curl_curl_version_info=yes else ac_cv_lib_curl_curl_version_info=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_curl_curl_version_info" >&5 $as_echo "$ac_cv_lib_curl_curl_version_info" >&6; } if test "x$ac_cv_lib_curl_curl_version_info" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LIBCURL 1 _ACEOF LIBS="-lcurl $LIBS" else error="Can't find CURL library." fi if test x"${error}" == "x"; then support_curl=yes fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $support_curl" >&5 $as_echo "$support_curl" >&6; } if test "$support_curl" = "yes"; then $as_echo "#define HAVE_CURL 1" >>confdefs.h fi LIBS="${saved_LIBS}" CFLAGS="${saved_CFLAGS}" with_zstd_directory=no # Check whether --with-zstd was given. 
if test "${with_zstd+set}" = set; then : withval=$with_zstd; with_zstd_directory=${withval} fi ZSTD_LIBS="-lzstd" ZSTD_INC="" saved_LIBS="${LIBS}" saved_CFLAGS="${CFLAGS}" if test "x$with_zstd_directory" != "xno"; then if test "x$with_zstd_directory" != "xyes" && test x"${with_zstd_directory}" != "x"; then # # Make sure the $with_zstd_directory also makes sense # if test -d "$with_zstd_directory/include"; then ZSTD_INC="-I$with_zstd_directory/include $ZSTD_INC" fi if test -d "$with_zstd_directory/lib"; then ZSTD_LIBS="$with_zstd_directory/lib/libzstd.a" fi if test -d "$with_zstd_directory/lib64"; then ZSTD_LIBS="$with_zstd_directory/lib64/libzstd.a" fi fi LIBS="${saved_LIBS} ${ZSTD_LIBS}" CFLAGS="${saved_CFLAGS} ${ZSTD_INC}" fi ac_fn_c_check_header_mongrel "$LINENO" "zstd.h" "ac_cv_header_zstd_h" "$ac_includes_default" if test "x$ac_cv_header_zstd_h" = xyes; then : else error="Can't find ZSTD header." fi if test x"${error}" == "x"; then support_zstd=yes else support_zstd=no ZSTD_LIBS="" ZSTD_INC="" fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $support_zstd" >&5 $as_echo "$support_zstd" >&6; } if test "$support_zstd" = "yes"; then $as_echo "#define HAVE_ZSTD 1" >>confdefs.h fi LIBS="${saved_LIBS}" CFLAGS="${saved_CFLAGS}" TOKYOCABINET_LIBS= TOKYOCABINET_INC= { $as_echo "$as_me:${as_lineno-$LINENO}: checking for OpenSSL" >&5 $as_echo_n "checking for OpenSSL... " >&6; } # Check whether --with-openssl was given. if test "${with_openssl+set}" = set; then : withval=$with_openssl; with_openssl_directory=${withval} fi if test "x$with_openssl_directory" != "xno"; then OPENSSL_LIBS="-lssl -lcrypto" OPENSSL_INC="" if test "x$with_openssl_directory" != "xyes" && test x"${with_openssl_directory}" != "x"; then # # Make sure the $with_openssl_directory also makes sense # if test -d "$with_openssl_directory/lib" -a -d "$with_openssl_directory/include"; then OPENSSL_LIBS="-L$with_openssl_directory/lib $OPENSSL_LIBS" OPENSSL_INC="-I$with_openssl_directory/include $OPENSSL_INC" fi fi saved_LIBS="${LIBS}" saved_CFLAGS="${CFLAGS}" LIBS="${saved_LIBS} ${OPENSSL_LIBS}" CFLAGS="${saved_CFLAGS} ${OPENSSL_INC}" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { CRYPTO_set_id_callback(NULL); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : support_tls="yes" support_crypto="yes" else support_tls="no" support_crypto="no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { SSL_CTX_set_psk_client_callback(NULL, NULL); SSL_CTX_set_psk_server_callback(NULL, NULL); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : support_tls_psk="yes" else support_tls_psk="no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { EVP_sha512(); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_openssl_sha2="yes" else ac_cv_openssl_sha2="no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { EVP_aes_192_cbc(); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_openssl_export="no" else ac_cv_openssl_export="yes" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include int main () { FIPS_mode_set(1); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : $as_echo "#define HAVE_FIPS 1" >>confdefs.h { $as_echo "$as_me:${as_lineno-$LINENO}: result: checking for FIPS... yes" >&5 $as_echo "checking for FIPS... yes" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: checking for FIPS... no" >&5 $as_echo "checking for FIPS... no" >&6; } fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { OPENSSL_VERSION_TEXT; ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : $as_echo "#define HAVE_OPENSSL_VERSION_TEXT 1" >>confdefs.h fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $support_tls" >&5 $as_echo "$support_tls" >&6; } if test "$support_tls_psk" = "yes"; then $as_echo "#define HAVE_TLS_PSK 1" >>confdefs.h fi if test "$support_tls" = "yes"; then $as_echo "#define HAVE_OPENSSL 1" >>confdefs.h $as_echo "#define HAVE_TLS 1" >>confdefs.h $as_echo "#define HAVE_CRYPTO 1" >>confdefs.h fi if test "$ac_cv_openssl_sha2" = "yes"; then $as_echo "#define HAVE_SHA2 1" >>confdefs.h fi if test "$ac_cv_openssl_export" = "yes"; then $as_echo "#define HAVE_OPENSSL_EXPORT_LIBRARY 1" >>confdefs.h fi if test "$support_crypto" = "yes"; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for EVP_PKEY_encrypt_old in -lcrypto" >&5 $as_echo_n "checking for EVP_PKEY_encrypt_old in -lcrypto... " >&6; } if ${ac_cv_lib_crypto_EVP_PKEY_encrypt_old+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lcrypto $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char EVP_PKEY_encrypt_old (); int main () { return EVP_PKEY_encrypt_old (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_crypto_EVP_PKEY_encrypt_old=yes else ac_cv_lib_crypto_EVP_PKEY_encrypt_old=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_crypto_EVP_PKEY_encrypt_old" >&5 $as_echo "$ac_cv_lib_crypto_EVP_PKEY_encrypt_old" >&6; } if test "x$ac_cv_lib_crypto_EVP_PKEY_encrypt_old" = xyes; then : $as_echo "#define HAVE_OPENSSLv1 1" >>confdefs.h fi fi LIBS="${saved_LIBS}" CFLAGS="${saved_CFLAGS}" else support_tls="no" support_crypto="no" { $as_echo "$as_me:${as_lineno-$LINENO}: result: $support_tls" >&5 $as_echo "$support_tls" >&6; } fi if test "$support_tls" = "no" -o "$support_crypto" = "no"; then OPENSSL_LIBS="" OPENSSL_INC="" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing dlopen" >&5 $as_echo_n "checking for library containing dlopen... " >&6; } if ${ac_cv_search_dlopen+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply.
*/ #ifdef __cplusplus extern "C" #endif char dlopen (); int main () { return dlopen (); ; return 0; } _ACEOF for ac_lib in '' dl; do if test -z "$ac_lib"; then ac_res="none required" else ac_res=-l$ac_lib LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : ac_cv_search_dlopen=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext if ${ac_cv_search_dlopen+:} false; then : break fi done if ${ac_cv_search_dlopen+:} false; then : else ac_cv_search_dlopen=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_dlopen" >&5 $as_echo "$ac_cv_search_dlopen" >&6; } ac_res=$ac_cv_search_dlopen if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" fi IOKITLIBS="" if test x${HAVE_DARWIN_OS_TRUE} = x; then ac_fn_c_check_header_mongrel "$LINENO" "IOKit/pwr_mgt/IOPMLib.h" "ac_cv_header_IOKit_pwr_mgt_IOPMLib_h" "$ac_includes_default" if test "x$ac_cv_header_IOKit_pwr_mgt_IOPMLib_h" = xyes; then : IOKITLIBS="-framework IOKit" fi fi working_dir=`eval echo /opt/bacula/working` # Check whether --with-working-dir was given. if test "${with_working_dir+set}" = set; then : withval=$with_working_dir; if test "x$withval" != "xno" ; then working_dir=$withval fi fi archivedir=/tmp # Check whether --with-archivedir was given. if test "${with_archivedir+set}" = set; then : withval=$with_archivedir; if test "x$withval" != "xno" ; then archivedir=$withval fi fi basename=`hostname` # Check whether --with-basename was given. if test "${with_basename+set}" = set; then : withval=$with_basename; if test "x$withval" != "xno" ; then basename=$withval fi fi hostname=`uname -n | cut -d '.' -f 1` if test x${hostname} = x ; then hostname="localhost" fi ping -c 1 $hostname 2>/dev/null 1>/dev/null if test ! $? = 0; then hostname="localhost" fi # Check whether --with-hostname was given. if test "${with_hostname+set}" = set; then : withval=$with_hostname; if test "x$withval" != "xno" ; then hostname=$withval fi fi scriptdir=`eval echo ${sysconfdir}` # Check whether --with-scriptdir was given. if test "${with_scriptdir+set}" = set; then : withval=$with_scriptdir; if test "x$withval" != "xno" ; then scriptdir=$withval fi fi bsrdir=`eval echo /opt/bacula/bsr` # Check whether --with-bsrdir was given. if test "${with_bsrdir+set}" = set; then : withval=$with_bsrdir; if test "x$withval" != "xno" ; then bsrdir=$withval fi fi logdir=`eval echo /opt/bacula/log` # Check whether --with-logdir was given. if test "${with_logdir+set}" = set; then : withval=$with_logdir; if test "x$withval" != "xno" ; then logdir=$withval fi fi # ------------------------------------------ # Where to place plugindir (plugin files) # ------------------------------------------ plugindir=`eval echo ${libdir}` # Check whether --with-plugindir was given. if test "${with_plugindir+set}" = set; then : withval=$with_plugindir; if test "x$withval" != "xno" ; then plugindir=$withval fi fi dump_email=root@localhost # Check whether --with-dump-email was given. if test "${with_dump_email+set}" = set; then : withval=$with_dump_email; if test "x$withval" != "xno" ; then dump_email=$withval fi fi job_email=root@localhost # Check whether --with-job-email was given. if test "${with_job_email+set}" = set; then : withval=$with_job_email; if test "x$withval" != "xno" ; then job_email=$withval fi fi smtp_host=localhost # Check whether --with-smtp_host was given. 
if test "${with_smtp_host+set}" = set; then : withval=$with_smtp_host; if test "x$withval" != "xno" ; then smtp_host=$withval fi fi piddir=/var/run # Check whether --with-pid-dir was given. if test "${with_pid_dir+set}" = set; then : withval=$with_pid_dir; if test "x$withval" != "xno" ; then piddir=$withval fi fi cat >>confdefs.h <<_ACEOF #define _PATH_BACULA_PIDDIR "$piddir" _ACEOF subsysdir=/var/run/subsys if test -d /var/run/subsys; then subsysdir=/var/run/subsys elif test -d /var/lock/subsys; then subsysdir=/var/lock/subsys else subsysdir=/var/run/subsys fi # Check whether --with-subsys-dir was given. if test "${with_subsys_dir+set}" = set; then : withval=$with_subsys_dir; if test "x$withval" != "xno" ; then subsysdir=$withval fi fi baseport=9101 # Check whether --with-baseport was given. if test "${with_baseport+set}" = set; then : withval=$with_baseport; if test "x$withval" != "xno" ; then baseport=$withval fi fi dir_port=`expr $baseport` fd_port=`expr $baseport + 1` sd_port=`expr $fd_port + 1` dir_password= # Check whether --with-dir-password was given. if test "${with_dir_password+set}" = set; then : withval=$with_dir_password; if test "x$withval" != "xno" ; then dir_password=$withval fi fi if test "x$dir_password" = "x" ; then if test "x$OPENSSL" = "xnone" ; then key=`autoconf/randpass 33` else key=`openssl rand -base64 33` fi dir_password=$key fi fd_password= # Check whether --with-fd-password was given. if test "${with_fd_password+set}" = set; then : withval=$with_fd_password; if test "x$withval" != "xno" ; then fd_password=$withval fi fi if test "x$fd_password" = "x" ; then if test "x$OPENSSL" = "xnone" ; then key=`autoconf/randpass 37` else key=`openssl rand -base64 33` fi fd_password=$key fi sd_password= # Check whether --with-sd-password was given. if test "${with_sd_password+set}" = set; then : withval=$with_sd_password; if test "x$withval" != "xno" ; then sd_password=$withval fi fi if test "x$sd_password" = "x" ; then if test "x$OPENSSL" = "xnone" ; then key=`autoconf/randpass 41` else key=`openssl rand -base64 33` fi sd_password=$key fi mon_dir_password= # Check whether --with-mon-dir-password was given. if test "${with_mon_dir_password+set}" = set; then : withval=$with_mon_dir_password; if test "x$withval" != "xno" ; then mon_dir_password=$withval fi fi if test "x$mon_dir_password" = "x" ; then if test "x$OPENSSL" = "xnone" ; then key=`autoconf/randpass 33` else key=`openssl rand -base64 33` fi mon_dir_password=$key fi mon_fd_password= # Check whether --with-mon-fd-password was given. if test "${with_mon_fd_password+set}" = set; then : withval=$with_mon_fd_password; if test "x$withval" != "xno" ; then mon_fd_password=$withval fi fi if test "x$mon_fd_password" = "x" ; then if test "x$OPENSSL" = "xnone" ; then key=`autoconf/randpass 37` else key=`openssl rand -base64 33` fi mon_fd_password=$key fi mon_sd_password= # Check whether --with-mon-sd-password was given. if test "${with_mon_sd_password+set}" = set; then : withval=$with_mon_sd_password; if test "x$withval" != "xno" ; then mon_sd_password=$withval fi fi if test "x$mon_sd_password" = "x" ; then if test "x$OPENSSL" = "xnone" ; then key=`autoconf/randpass 41` else key=`openssl rand -base64 33` fi mon_sd_password=$key fi db_name=bacula # Check whether --with-db_name was given. if test "${with_db_name+set}" = set; then : withval=$with_db_name; if test "x$withval" != "x" ; then db_name=$withval fi fi db_user=bacula # Check whether --with-db_user was given. 
if test "${with_db_user+set}" = set; then : withval=$with_db_user; if test "x$withval" != "x" ; then db_user=$withval fi fi db_password= # Check whether --with-db_password was given. if test "${with_db_password+set}" = set; then : withval=$with_db_password; if test "x$withval" != "x" ; then db_password=$withval fi fi db_port=" " # Check whether --with-db_port was given. if test "${with_db_port+set}" = set; then : withval=$with_db_port; if test "x$withval" != "x" ; then db_port=$withval fi fi db_ssl_options= # Check whether --with-db_ssl_options was given. if test "${with_db_ssl_options+set}" = set; then : withval=$with_db_ssl_options; if test "x$withval" != "x" ; then db_ssl_options=$withval fi fi # # Handle users and groups for each daemon # dir_user= # Check whether --with-dir_user was given. if test "${with_dir_user+set}" = set; then : withval=$with_dir_user; if test "x$withval" != "x" ; then dir_user=$withval fi fi dir_group= # Check whether --with-dir_group was given. if test "${with_dir_group+set}" = set; then : withval=$with_dir_group; if test "x$withval" != "x" ; then dir_group=$withval fi fi sd_user= # Check whether --with-sd_user was given. if test "${with_sd_user+set}" = set; then : withval=$with_sd_user; if test "x$withval" != "x" ; then sd_user=$withval fi fi sd_group= # Check whether --with-sd_group was given. if test "${with_sd_group+set}" = set; then : withval=$with_sd_group; if test "x$withval" != "x" ; then sd_group=$withval fi fi fd_user= # Check whether --with-fd_user was given. if test "${with_fd_user+set}" = set; then : withval=$with_fd_user; if test "x$withval" != "x" ; then fd_user=$withval fi fi fd_group= # Check whether --with-fd_group was given. if test "${with_fd_group+set}" = set; then : withval=$with_fd_group; if test "x$withval" != "x" ; then fd_group=$withval fi fi SBINPERM=0750 # Check whether --with-sbin-perm was given. if test "${with_sbin_perm+set}" = set; then : withval=$with_sbin_perm; if test "x$withval" != "x" ; then SBINPERM=$withval fi fi CYTHON=cython CYTHON_LIBS= CYTHON_INC= # Check whether --with-cython was given. if test "${with_cython+set}" = set; then : withval=$with_cython; if test "x$withval" != "xno" ; then CYTHON=$withval fi fi $CYTHON -h > /dev/null 2> /dev/null if test $? = 0; then if echo $CYTHON | grep cython3 > /dev/null; then PYTHON=python3 CYTHON="$CYTHON -3" else PYTHON=python CYTHON="$CYTHON -2" fi # Thanks to Python, in 3.8 the new --embed flag is necessary # but it break any previous version. So, let's do it 2 times CYTHON_LIBS=`${PYTHON}-config --ldflags --embed 2>/dev/null` if test $? != 0; then CYTHON_LIBS=`${PYTHON}-config --ldflags 2>/dev/null` fi CYTHON_INC=`${PYTHON}-config --cflags 2>/dev/null` else CYTHON= fi support_batch_insert=yes # Check whether --enable-batch-insert was given. if test "${enable_batch_insert+set}" = set; then : enableval=$enable_batch_insert; if test x$enableval = xno; then support_batch_insert=no fi fi if test x$support_batch_insert = xyes; then $as_echo "#define USE_BATCH_FILE_INSERT 1" >>confdefs.h fi uncomment_dbi="#" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for PostgreSQL support" >&5 $as_echo_n "checking for PostgreSQL support... " >&6; } # Check whether --with-postgresql was given. 
if test "${with_postgresql+set}" = set; then : withval=$with_postgresql; if test "$withval" != "no"; then if test "$withval" = "yes"; then PG_CONFIG=`which pg_config 2>/dev/null` if test -n "$PG_CONFIG"; then POSTGRESQL_INCDIR=`"$PG_CONFIG" --includedir` POSTGRESQL_LIBDIR=`"$PG_CONFIG" --libdir` POSTGRESQL_BINDIR=`"$PG_CONFIG" --bindir` elif test -f /usr/local/include/libpq-fe.h; then POSTGRESQL_INCDIR=/usr/local/include if test -d /usr/local/lib64; then POSTGRESQL_LIBDIR=/usr/local/lib64 else POSTGRESQL_LIBDIR=/usr/local/lib fi POSTGRESQL_BINDIR=/usr/local/bin elif test -f /usr/include/libpq-fe.h; then POSTGRESQL_INCDIR=/usr/include if test -d /usr/lib64; then POSTGRESQL_LIBDIR=/usr/lib64 else POSTGRESQL_LIBDIR=/usr/lib fi POSTGRESQL_BINDIR=/usr/bin elif test -f /usr/include/pgsql/libpq-fe.h; then POSTGRESQL_INCDIR=/usr/include/pgsql if test -d /usr/lib64/pgsql; then POSTGRESQL_LIBDIR=/usr/lib64/pgsql else POSTGRESQL_LIBDIR=/usr/lib/pgsql fi POSTGRESQL_BINDIR=/usr/bin elif test -f /usr/include/postgresql/libpq-fe.h; then POSTGRESQL_INCDIR=/usr/include/postgresql if test -d /usr/lib64/postgresql; then POSTGRESQL_LIBDIR=/usr/lib64/postgresql else POSTGRESQL_LIBDIR=/usr/lib/postgresql fi POSTGRESQL_BINDIR=/usr/bin else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } as_fn_error $? "Unable to find libpq-fe.h in standard locations" "$LINENO" 5 fi elif test -f $withval/include/libpq-fe.h; then POSTGRESQL_INCDIR=$withval/include POSTGRESQL_LIBDIR=$withval/lib POSTGRESQL_BINDIR=$withval/bin elif test -f $withval/include/postgresql/libpq-fe.h; then POSTGRESQL_INCDIR=$withval/include/postgresql if test -d $withval/lib64; then POSTGRESQL_LIBDIR=$withval/lib64 else POSTGRESQL_LIBDIR=$withval/lib fi POSTGRESQL_BINDIR=$withval/bin else localloc=`find $withval/include/postgresql* -name libpq-fe.h | head -1` if test "x$localloc" != "x"; then inclocaldir=`ls -d $withval/include/postgresql* | head -1` liblocaldir=`ls -d $withval/lib/postgresql* | head -1` POSTGRESQL_INCDIR=$inclocaldir POSTGRESQL_LIBDIR=$liblocaldir POSTGRESQL_BINDIR=$withval/bin else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } as_fn_error $? "Invalid PostgreSQL directory $withval - unable to find libpq-fe.h under $withval" "$LINENO" 5 fi fi $as_echo "#define HAVE_POSTGRESQL 1" >>confdefs.h { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } POSTGRESQL_INCLUDE=-I$POSTGRESQL_INCDIR if test x$use_libtool != xno; then POSTGRESQL_LIBS="-R $POSTGRESQL_LIBDIR -L$POSTGRESQL_LIBDIR -lpq" else POSTGRESQL_LIBS="-L$POSTGRESQL_LIBDIR -lpq" fi ac_fn_c_check_func "$LINENO" "crypt" "ac_cv_func_crypt" if test "x$ac_cv_func_crypt" = xyes; then : else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for crypt in -lcrypt" >&5 $as_echo_n "checking for crypt in -lcrypt... " >&6; } if ${ac_cv_lib_crypt_crypt+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lcrypt $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char crypt (); int main () { return crypt (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_crypt_crypt=yes else ac_cv_lib_crypt_crypt=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_crypt_crypt" >&5 $as_echo "$ac_cv_lib_crypt_crypt" >&6; } if test "x$ac_cv_lib_crypt_crypt" = xyes; then : POSTGRESQL_LIBS="$POSTGRESQL_LIBS -lcrypt" fi fi POSTGRESQL_LIB=$POSTGRESQL_LIBDIR/libpq.a DB_LIBS="${DB_LIBS} ${POSTGRESQL_LIBS}" if test -z "${db_backends}"; then db_backends="PostgreSQL" else db_backends="${db_backends} PostgreSQL" fi if test -z "${DB_BACKENDS}"; then DB_BACKENDS="postgresql" else DB_BACKENDS="${DB_BACKENDS} postgresql" fi if test "x$support_batch_insert" = "xyes"; then saved_LDFLAGS="${LDFLAGS}" LDFLAGS="${saved_LDFLAGS} -L$POSTGRESQL_LIBDIR" saved_LIBS="${LIBS}" if test "x$ac_cv_lib_crypt_crypt" = "xyes" ; then LIBS="${saved_LIBS} -lcrypt" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for PQisthreadsafe in -lpq" >&5 $as_echo_n "checking for PQisthreadsafe in -lpq... " >&6; } if ${ac_cv_lib_pq_PQisthreadsafe+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpq $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char PQisthreadsafe (); int main () { return PQisthreadsafe (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_pq_PQisthreadsafe=yes else ac_cv_lib_pq_PQisthreadsafe=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_pq_PQisthreadsafe" >&5 $as_echo "$ac_cv_lib_pq_PQisthreadsafe" >&6; } if test "x$ac_cv_lib_pq_PQisthreadsafe" = xyes; then : $as_echo "#define HAVE_PQISTHREADSAFE 1" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for PQputCopyData in -lpq" >&5 $as_echo_n "checking for PQputCopyData in -lpq... " >&6; } if ${ac_cv_lib_pq_PQputCopyData+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpq $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char PQputCopyData (); int main () { return PQputCopyData (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_pq_PQputCopyData=yes else ac_cv_lib_pq_PQputCopyData=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_pq_PQputCopyData" >&5 $as_echo "$ac_cv_lib_pq_PQputCopyData" >&6; } if test "x$ac_cv_lib_pq_PQputCopyData" = xyes; then : $as_echo "#define HAVE_PQ_COPY 1" >>confdefs.h fi if test "x$ac_cv_lib_pq_PQputCopyData" = "xyes"; then if test $support_batch_insert = yes ; then $as_echo "#define HAVE_POSTGRESQL_BATCH_FILE_INSERT 1" >>confdefs.h if test -z "${batch_insert_db_backends}"; then batch_insert_db_backends="PostgreSQL" else batch_insert_db_backends="${batch_insert_db_backends} PostgreSQL" fi fi fi if test x$ac_cv_lib_pq_PQisthreadsafe != xyes -a x$support_batch_insert = xyes then echo "WARNING: Your PostgreSQL client library is too old to detect " echo "if it was compiled with --enable-thread-safety, consider to " echo "upgrade it in order to avoid problems with Batch insert mode" fi LDFLAGS="${saved_LDFLAGS}" LIBS="${saved_LIBS}" fi else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for MySQL support" >&5 $as_echo_n "checking for MySQL support... " >&6; } # Check whether --with-mysql was given. if test "${with_mysql+set}" = set; then : withval=$with_mysql; HAVE_LIBSR="no" if test "$withval" != "no"; then if test "$withval" = "yes"; then MYSQL_CONFIG=`which mysql_config 2>/dev/null` if test "x${MYSQL_CONFIG}" != x; then MYSQL_BINDIR="${MYSQL_CONFIG%/*}" ${MYSQL_CONFIG} --libs_r >/dev/null 2>&1 if test $? = 0; then MYSQL_LIBDIR=`${MYSQL_CONFIG} --libs_r` MYSQL_INCDIR=`${MYSQL_CONFIG} --include` HAVE_LIBSR="yes" else ${MYSQL_CONFIG} --variable=pkglibdir > /dev/null 2>&1 if test $? 
= 0 ; then MYSQL_LIBDIR=`${MYSQL_CONFIG} --variable=pkglibdir` MYSQL_INCDIR=`${MYSQL_CONFIG} --variable=pkgincludedir` fi fi fi # if something wrong fall back to old method if test "x${MYSQL_LIBDIR}" = x -o "x${MYSQL_INCDIR}" = x ; then if test -f /usr/local/mysql/include/mysql/mysql.h; then MYSQL_INCDIR=/usr/local/mysql/include/mysql if test -f /usr/local/mysql/lib64/mysql/libmysqlclient_r.a \ -o -f /usr/local/mysql/lib64/mysql/libmysqlclient_r.so; then MYSQL_LIBDIR=/usr/local/mysql/lib64/mysql else MYSQL_LIBDIR=/usr/local/mysql/lib/mysql fi MYSQL_BINDIR=/usr/local/mysql/bin elif test -f /usr/include/mysql/mysql.h; then MYSQL_INCDIR=/usr/include/mysql if test -f /usr/lib64/mysql/libmysqlclient_r.a \ -o -f /usr/lib64/mysql/libmysqlclient_r.so; then MYSQL_LIBDIR=/usr/lib64/mysql elif test -f /usr/lib64/libmysqlclient_r.a \ -o -f /usr/lib64/libmysqlclient_r.so; then MYSQL_LIBDIR=/usr/lib64 elif test -f /usr/lib/x86_64-linux-gnu/libmysqlclient_r.a \ -o -f /usr/lib/x86_64-linux-gnu/libmysqlclient_r.so; then MYSQL_LIBDIR=/usr/lib/x86_64-linux-gnu elif test -f /usr/lib/mysql/libmysqlclient_r.a \ -o -f /usr/lib/mysql/libmysqlclient_r.so; then MYSQL_LIBDIR=/usr/lib/mysql else MYSQL_LIBDIR=/usr/lib fi MYSQL_BINDIR=/usr/bin elif test -f /usr/include/mysql.h; then MYSQL_INCDIR=/usr/include if test -f /usr/lib64/libmysqlclient_r.a \ -o -f /usr/lib64/libmysqlclient_r.so; then MYSQL_LIBDIR=/usr/lib64 else MYSQL_LIBDIR=/usr/lib fi MYSQL_BINDIR=/usr/bin elif test -f /usr/local/include/mysql/mysql.h; then MYSQL_INCDIR=/usr/local/include/mysql if test -f /usr/local/lib64/mysql/libmysqlclient_r.a \ -o -f /usr/local/lib64/mysql/libmysqlclient_r.so; then MYSQL_LIBDIR=/usr/local/lib64/mysql else MYSQL_LIBDIR=/usr/local/lib/mysql fi MYSQL_BINDIR=/usr/local/bin elif test -f /usr/local/include/mysql.h; then MYSQL_INCDIR=/usr/local/include if test -f /usr/local/lib64/libmysqlclient_r.a \ -o -f /usr/local/lib64/libmysqlclient_r.so; then MYSQL_LIBDIR=/usr/local/lib64 else MYSQL_LIBDIR=/usr/local/lib fi MYSQL_BINDIR=/usr/local/bin else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } as_fn_error $? 
"Unable to find mysql.h in standard locations" "$LINENO" 5 fi fi else if test -f $withval/include/mysql/mysql.h; then MYSQL_INCDIR=$withval/include/mysql if test -f $withval/lib64/mysql/libmysqlclient_r.a \ -o -f $withval/lib64/mysql/libmysqlclient_r.so; then MYSQL_LIBDIR=$withval/lib64/mysql elif test -f $withval/lib64/libmysqlclient_r.a \ -o -f $withval/lib64/libmysqlclient_r.so; then MYSQL_LIBDIR=$withval/lib64 elif test -f $withval/lib/libmysqlclient_r.a \ -o -f $withval/lib/libmysqlclient_r.so; then MYSQL_LIBDIR=$withval/lib else MYSQL_LIBDIR=$withval/lib/mysql fi MYSQL_BINDIR=$withval/bin elif test -f $withval/include/mysql.h; then MYSQL_INCDIR=$withval/include if test -f $withval/lib64/libmysqlclient_r.a \ -o -f $withval/lib64/libmysqlclient_r.so; then MYSQL_LIBDIR=$withval/lib64 else MYSQL_LIBDIR=$withval/lib fi MYSQL_BINDIR=$withval/bin elif test -f $withval/mysql.h; then MYSQL_INCDIR=$withval { $as_echo "$as_me:${as_lineno-$LINENO}: Got with-mysql variable $MYSQL_INCDIR checking MySQL version" >&5 $as_echo "$as_me: Got with-mysql variable $MYSQL_INCDIR checking MySQL version" >&6;} case $MYSQL_INCDIR in *mysql55*) { $as_echo "$as_me:${as_lineno-$LINENO}: Assuming MacPorts MySQL 5.5 variant installed" >&5 $as_echo "$as_me: Assuming MacPorts MySQL 5.5 variant installed" >&6;} if test -f $prefix/lib/mysql55/mysql/libmysqlclient_r.a \ -o -f $prefix/lib/mysql55/mysql/libmysqlclient_r.so; then { $as_echo "$as_me:${as_lineno-$LINENO}: Found MySQL 5.5 library in $prefix/lib/mysql55/mysql" >&5 $as_echo "$as_me: Found MySQL 5.5 library in $prefix/lib/mysql55/mysql" >&6;} MYSQL_LIBDIR=$prefix/lib/mysql55/mysql fi MYSQL_BINDIR=$prefix/lib/mysql55/bin ;; *mysql51*) { $as_echo "$as_me:${as_lineno-$LINENO}: Assuming MacPorts MySQL 5.1 variant installed" >&5 $as_echo "$as_me: Assuming MacPorts MySQL 5.1 variant installed" >&6;} if test -f $prefix/lib/mysql51/mysql/libmysqlclient_r.a \ -o -f $prefix/lib/mysql51/mysql/libmysqlclient_r.so; then { $as_echo "$as_me:${as_lineno-$LINENO}: Found MySQL 5.1 library in $prefix/lib/mysql55/mysql" >&5 $as_echo "$as_me: Found MySQL 5.1 library in $prefix/lib/mysql55/mysql" >&6;} MYSQL_LIBDIR=$prefix/lib/mysql51/mysql fi MYSQL_BINDIR=$prefix/lib/mysql51/bin ;; esac if test -z "${MYSQL_LIBDIR}" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } as_fn_error $? "MySQL $withval - unable to find MySQL libraries" "$LINENO" 5 fi else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } as_fn_error $? 
"Invalid MySQL directory $withval - unable to find mysql.h under $withval" "$LINENO" 5 fi fi if test "x${MYSQL_LIBDIR}" != x; then MYSQL_INCLUDE=-I$MYSQL_INCDIR if test "x$HAVE_LIBSR" = "xyes"; then DB_LIBS="${DB_LIBS} ${MYSQL_LIBS}" MYSQL_LIBS="$MYSQL_LIBDIR" MYSQL_INCLUDE="$MYSQL_INCDIR" $as_echo "#define HAVE_MYSQL_THREAD_SAFE 1" >>confdefs.h if test -z "${batch_insert_db_backends}"; then batch_insert_db_backends="MySQL" else batch_insert_db_backends="${batch_insert_db_backends} MySQL" fi elif test -f $MYSQL_LIBDIR/libmysqlclient_r.a \ -o -f $MYSQL_LIBDIR/libmysqlclient_r.so; then if test x$use_libtool != xno; then MYSQL_LIBS="-R $MYSQL_LIBDIR -L$MYSQL_LIBDIR -lmysqlclient_r -lz" else MYSQL_LIBS="-L$MYSQL_LIBDIR -lmysqlclient_r -lz" fi DB_LIBS="${DB_LIBS} ${MYSQL_LIBS}" fi if test "x${MYSQL_LIBS}" = x; then MYSQL_LIBS=$MYSQL_LIBDIR/libmysqlclient_r.a fi $as_echo "#define HAVE_MYSQL 1" >>confdefs.h { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } if test -z "${db_backends}" ; then db_backends="MySQL" else db_backends="${db_backends} MySQL" fi if test -z "${DB_BACKENDS}" ; then DB_BACKENDS="mysql" else DB_BACKENDS="${DB_BACKENDS} mysql" fi if test "x$HAVE_LIBSR" = "xno"; then if test "x$support_batch_insert" = "xyes"; then saved_LDFLAGS="${LDFLAGS}" LDFLAGS="${saved_LDFLAGS} -L$MYSQL_LIBDIR" saved_LIBS="${LIBS}" LIBS="${saved_LIBS} -lz" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for mysql_thread_safe in -lmysqlclient_r" >&5 $as_echo_n "checking for mysql_thread_safe in -lmysqlclient_r... " >&6; } if ${ac_cv_lib_mysqlclient_r_mysql_thread_safe+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lmysqlclient_r $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char mysql_thread_safe (); int main () { return mysql_thread_safe (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_mysqlclient_r_mysql_thread_safe=yes else ac_cv_lib_mysqlclient_r_mysql_thread_safe=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_mysqlclient_r_mysql_thread_safe" >&5 $as_echo "$ac_cv_lib_mysqlclient_r_mysql_thread_safe" >&6; } if test "x$ac_cv_lib_mysqlclient_r_mysql_thread_safe" = xyes; then : $as_echo "#define HAVE_MYSQL_THREAD_SAFE 1" >>confdefs.h fi if test "x$ac_cv_lib_mysqlclient_r_mysql_thread_safe" = "xyes"; then if test -z "${batch_insert_db_backends}"; then batch_insert_db_backends="MySQL" else batch_insert_db_backends="${batch_insert_db_backends} MySQL" fi fi LDFLAGS="${saved_LDFLAGS}" LIBS="${saved_LIBS}" fi fi else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for MySQL embedded support" >&5 $as_echo_n "checking for MySQL embedded support... " >&6; } # Check whether --with-embedded-mysql was given. 
if test "${with_embedded_mysql+set}" = set; then : withval=$with_embedded_mysql; if test "$withval" != "no"; then if test "$withval" = "yes"; then if test -f /usr/local/mysql/include/mysql/mysql.h; then MYSQL_INCDIR=/usr/local/mysql/include/mysql if test -d /usr/local/mysql/lib64/mysql; then MYSQL_LIBDIR=/usr/local/mysql/lib64/mysql else MYSQL_LIBDIR=/usr/local/mysql/lib/mysql fi MYSQL_BINDIR=/usr/local/mysql/bin elif test -f /usr/include/mysql/mysql.h; then MYSQL_INCDIR=/usr/include/mysql if test -d /usr/lib64/mysql; then MYSQL_LIBDIR=/usr/lib64/mysql else MYSQL_LIBDIR=/usr/lib/mysql fi MYSQL_BINDIR=/usr/bin elif test -f /usr/include/mysql.h; then MYSQL_INCDIR=/usr/include if test -d /usr/lib64; then MYSQL_LIBDIR=/usr/lib64 else MYSQL_LIBDIR=/usr/lib fi MYSQL_BINDIR=/usr/bin elif test -f /usr/local/include/mysql/mysql.h; then MYSQL_INCDIR=/usr/local/include/mysql if test -d /usr/local/lib64/mysql; then MYSQL_LIBDIR=/usr/local/lib64/mysql else MYSQL_LIBDIR=/usr/local/lib/mysql fi MYSQL_BINDIR=/usr/local/bin elif test -f /usr/local/include/mysql.h; then MYSQL_INCDIR=/usr/local/include if test -d /usr/local/lib64; then MYSQL_LIBDIR=/usr/local/lib64 else MYSQL_LIBDIR=/usr/local/lib fi MYSQL_BINDIR=/usr/local/bin else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } as_fn_error $? "Unable to find mysql.h in standard locations" "$LINENO" 5 fi else if test -f $withval/include/mysql/mysql.h; then MYSQL_INCDIR=$withval/include/mysql if test -d $withval/lib64/mysql; then MYSQL_LIBDIR=$withval/lib64/mysql else MYSQL_LIBDIR=$withval/lib/mysql fi MYSQL_BINDIR=$withval/bin elif test -f $withval/include/mysql.h; then MYSQL_INCDIR=$withval/include if test -d $withval/lib64; then MYSQL_LIBDIR=$withval/lib64 else MYSQL_LIBDIR=$withval/lib fi MYSQL_BINDIR=$withval/bin else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } as_fn_error $? "Invalid MySQL directory $withval - unable to find mysql.h under $withval" "$LINENO" 5 fi fi MYSQL_INCLUDE=-I$MYSQL_INCDIR if test x$use_libtool != xno; then MYSQL_LIBS="-R $MYSQL_LIBDIR -L$MYSQL_LIBDIR -lmysqld -lz -lm -lcrypt" else MYSQL_LIBS="-L$MYSQL_LIBDIR -lmysqld -lz -lm -lcrypt" fi MYSQL_LIB=$MYSQL_LIBDIR/libmysqld.a DB_LIBS="${DB_LIBS} ${MYSQL_LIBS}" $as_echo "#define HAVE_MYSQL 1" >>confdefs.h $as_echo "#define HAVE_EMBEDDED_MYSQL 1" >>confdefs.h { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } if test -z "${db_backends}"; then db_backends="MySQL" else db_backends="${db_backends} MySQL" fi if test -z "${DB_BACKENDS}"; then DB_BACKENDS="mysql" else DB_BACKENDS="${DB_BACKENDS} mysql" fi if test "x$support_batch_insert" = "xyes"; then saved_LDFLAGS="${LDFLAGS}" LDFLAGS="${saved_LDFLAGS} -L$MYSQL_LIBDIR" saved_LIBS="${LIBS}" LIBS="${saved_LIBS} -lz -lm -lcrypt" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for mysql_thread_safe in -lmysqlclient_r" >&5 $as_echo_n "checking for mysql_thread_safe in -lmysqlclient_r... " >&6; } if ${ac_cv_lib_mysqlclient_r_mysql_thread_safe+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lmysqlclient_r $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char mysql_thread_safe (); int main () { return mysql_thread_safe (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_mysqlclient_r_mysql_thread_safe=yes else ac_cv_lib_mysqlclient_r_mysql_thread_safe=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_mysqlclient_r_mysql_thread_safe" >&5 $as_echo "$ac_cv_lib_mysqlclient_r_mysql_thread_safe" >&6; } if test "x$ac_cv_lib_mysqlclient_r_mysql_thread_safe" = xyes; then : $as_echo "#define HAVE_MYSQL_THREAD_SAFE 1" >>confdefs.h fi if test "x$ac_cv_lib_mysqlclient_r_mysql_thread_safe" = "xyes"; then if test -z "${batch_insert_db_backends}"; then batch_insert_db_backends="MySQL" else batch_insert_db_backends="${batch_insert_db_backends} MySQL" fi fi LDFLAGS="${saved_LDFLAGS}" LIBS="${saved_LIBS}" fi else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for SQLite3 support" >&5 $as_echo_n "checking for SQLite3 support... " >&6; } # Check whether --with-sqlite3 was given. if test "${with_sqlite3+set}" = set; then : withval=$with_sqlite3; if test "$withval" != "no"; then if test "$withval" = "yes"; then if test -f /usr/local/include/sqlite3.h; then SQLITE_INCDIR=/usr/local/include if test -d /usr/local/lib64; then SQLITE_LIBDIR=/usr/local/lib64 else SQLITE_LIBDIR=/usr/local/lib fi SQLITE_BINDIR=/usr/local/bin elif test -f /usr/include/sqlite3.h; then SQLITE_INCDIR=/usr/include if test -n $multiarch -a -d /usr/lib/$multiarch; then SQLITE_LIBDIR=/usr/lib/$multiarch elif test -d /usr/lib64; then SQLITE_LIBDIR=/usr/lib64 else SQLITE_LIBDIR=/usr/lib fi SQLITE_BINDIR=/usr/bin elif test -f $prefix/include/sqlite3.h; then SQLITE_INCDIR=$prefix/include if test -d $prefix/lib64; then SQLITE_LIBDIR=$prefix/lib64 else SQLITE_LIBDIR=$prefix/lib fi SQLITE_BINDIR=$prefix/bin else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } as_fn_error $? "Unable to find sqlite3.h in standard locations" "$LINENO" 5 fi else if test -f $withval/sqlite3.h; then SQLITE_INCDIR=$withval SQLITE_LIBDIR=$withval SQLITE_BINDIR=$withval elif test -f $withval/include/sqlite3.h; then SQLITE_INCDIR=$withval/include if test -d $withval/lib64; then SQLITE_LIBDIR=$withval/lib64 else SQLITE_LIBDIR=$withval/lib fi SQLITE_BINDIR=$withval/bin else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } as_fn_error $? 
"Invalid SQLite3 directory $withval - unable to find sqlite3.h under $withval" "$LINENO" 5 fi fi SQLITE_INCLUDE=-I$SQLITE_INCDIR if test x$use_libtool != xno; then SQLITE_LIBS="-R $SQLITE_LIBDIR -L$SQLITE_LIBDIR -lsqlite3" else SQLITE_LIBS="-L$SQLITE_LIBDIR -lsqlite3" fi SQLITE_LIB=$SQLITE_LIBDIR/libsqlite3.a DB_LIBS="${DB_LIBS} ${SQLITE_LIBS}" $as_echo "#define HAVE_SQLITE3 1" >>confdefs.h { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } if test -z "${db_backends}"; then db_backends="SQLite3" else db_backends="${db_backends} SQLite3" fi if test -z "${DB_BACKENDS}"; then DB_BACKENDS="sqlite3" else DB_BACKENDS="${DB_BACKENDS} sqlite3" fi if test "x$support_batch_insert" = "xyes"; then saved_LDFLAGS="${LDFLAGS}" LDFLAGS="${saved_LDFLAGS} -lpthread -lm -L$SQLITE_LIBDIR" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for sqlite3_threadsafe in -lsqlite3" >&5 $as_echo_n "checking for sqlite3_threadsafe in -lsqlite3... " >&6; } if ${ac_cv_lib_sqlite3_sqlite3_threadsafe+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lsqlite3 $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char sqlite3_threadsafe (); int main () { return sqlite3_threadsafe (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_sqlite3_sqlite3_threadsafe=yes else ac_cv_lib_sqlite3_sqlite3_threadsafe=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_sqlite3_sqlite3_threadsafe" >&5 $as_echo "$ac_cv_lib_sqlite3_sqlite3_threadsafe" >&6; } if test "x$ac_cv_lib_sqlite3_sqlite3_threadsafe" = xyes; then : $as_echo "#define HAVE_SQLITE3_THREADSAFE 1" >>confdefs.h fi if test "x$ac_cv_lib_sqlite3_sqlite3_threadsafe" = "xyes"; then if test -z "${batch_insert_db_backends}"; then batch_insert_db_backends="SQLite3" else batch_insert_db_backends="${batch_insert_db_backends} SQLite3" fi fi LDFLAGS="${saved_LDFLAGS}" fi else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test -z "${batch_insert_db_backends}"; then batch_insert_db_backends="None" fi if test "x${db_backends}" = "x" ; then echo " " echo " " echo "You have not specified either --enable-client-only or one of the" echo "supported databases: MySQL, PostgreSQL, or SQLite3." echo "This is not permitted. Please reconfigure." echo " " echo "Aborting the configuration ..." echo " " echo " " exit 1 fi case `echo $DB_BACKENDS | wc -w | sed -e 's/^ *//'` in 1) DEFAULT_DB_TYPE="${DB_BACKENDS}" if test x$use_libtool = xno; then SHARED_CATALOG_TARGETS="" else SHARED_CATALOG_TARGETS="libbaccats-${DEFAULT_DB_TYPE}.la" fi ;; *) DEFAULT_DB_TYPE=`echo ${DB_BACKENDS} | cut -d' ' -f1` if test x$use_libtool = xno; then echo " " echo " " echo "You have specified two or more of the" echo "supported databases: MySQL, PostgreSQL, or SQLite3." echo "This is not permitted when not using libtool Please reconfigure." echo " " echo "Aborting the configuration ..." 
echo " " echo " " exit 1 fi SHARED_CATALOG_TARGETS="" for db_type in ${DB_BACKENDS} do if test -z "${SHARED_CATALOG_TARGETS}"; then SHARED_CATALOG_TARGETS="libbaccats-${db_type}.la" else SHARED_CATALOG_TARGETS="${SHARED_CATALOG_TARGETS} libbaccats-${db_type}.la" fi done ;; esac if test x$use_libtool = xyes; then DB_LIBS="" fi $as_echo "#define PROTOTYPES 1" >>confdefs.h support_bee=no extra_prefix=org nb=`ls src/*/bee_* 2>/dev/null| wc -l` if test $nb -gt 1; then support_bee=yes extra_prefix=bee fi # Check whether --enable-bee was given. if test "${enable_bee+set}" = set; then : enableval=$enable_bee; if test x$enableval = xyes; then support_bee=yes extra_prefix=bee elif test x$enableval = xno; then support_bee=no extra_prefix=org fi fi if test $support_bee = no then $as_echo "#define COMMUNITY 1" >>confdefs.h fi EXTRA_CATS_SRCS=`cd src/cats ; ls ${extra_prefix}_*.c 2> /dev/null | xargs echo` EXTRA_FILED_SRCS=`cd src/filed ; ls ${extra_prefix}_*.c 2> /dev/null | xargs echo` EXTRA_LIB_SRCS=`cd src/lib ; ls ${extra_prefix}_*.c 2> /dev/null | xargs echo` EXTRA_DIRD_SRCS=`cd src/dird ; ls ${extra_prefix}_*.c 2> /dev/null | xargs echo` EXTRA_STORED_SRCS=`cd src/stored ; ls ${extra_prefix}_stored_*.c 2> /dev/null | xargs echo` EXTRA_BLS_SRCS=`cd src/stored ; ls ${extra_prefix}_bls_*.c 2> /dev/null | xargs echo` EXTRA_LIBSD_SRCS=`cd src/stored ; ls ${extra_prefix}_lib*.c 2> /dev/null | xargs echo` EXTRA_FINDLIB_SRCS=`cd src/findlib ; ls ${extra_prefix}_*.c 2> /dev/null | xargs echo` if test -z "$CFLAGS" -o "$CFLAGS" = "-g -O2"; then if test -z "$CCOPTS"; then CCOPTS='-g -O2 -Wall' fi CFLAGS="$CCOPTS" fi largefile_support="no" # Check whether --enable-largefile was given. if test "${enable_largefile+set}" = set; then : enableval=$enable_largefile; fi if test "$enable_largefile" != no; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}getconf", so it can be a program name with args. set dummy ${ac_tool_prefix}getconf; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_GETCONF+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$GETCONF"; then ac_cv_prog_GETCONF="$GETCONF" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_GETCONF="${ac_tool_prefix}getconf" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi GETCONF=$ac_cv_prog_GETCONF if test -n "$GETCONF"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $GETCONF" >&5 $as_echo "$GETCONF" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_GETCONF"; then ac_ct_GETCONF=$GETCONF # Extract the first word of "getconf", so it can be a program name with args. set dummy getconf; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_GETCONF+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_GETCONF"; then ac_cv_prog_ac_ct_GETCONF="$ac_ct_GETCONF" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_GETCONF="getconf" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_GETCONF=$ac_cv_prog_ac_ct_GETCONF if test -n "$ac_ct_GETCONF"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_GETCONF" >&5 $as_echo "$ac_ct_GETCONF" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_GETCONF" = x; then GETCONF="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac GETCONF=$ac_ct_GETCONF fi else GETCONF="$ac_cv_prog_GETCONF" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for CFLAGS value to request large file support" >&5 $as_echo_n "checking for CFLAGS value to request large file support... " >&6; } if ${ac_cv_sys_largefile_CFLAGS+:} false; then : $as_echo_n "(cached) " >&6 else ac_cv_sys_largefile_CFLAGS=`($GETCONF LFS_CFLAGS) 2>/dev/null` || { ac_cv_sys_largefile_CFLAGS=no case "$host_os" in # IRIX 6.2 and later require cc -n32. irix6.[2-9]* | irix6.1[0-9]* | irix[7-9].* | irix[1-9][0-9]*) if test "$GCC" != yes; then ac_cv_sys_largefile_CFLAGS=-n32 fi ac_save_CC="$CC" CC="$CC $ac_cv_sys_largefile_CFLAGS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : else ac_cv_sys_largefile_CFLAGS=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext CC="$ac_save_CC" esac } fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_largefile_CFLAGS" >&5 $as_echo "$ac_cv_sys_largefile_CFLAGS" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking for LDFLAGS value to request large file support" >&5 $as_echo_n "checking for LDFLAGS value to request large file support... " >&6; } if ${ac_cv_sys_largefile_LDFLAGS+:} false; then : $as_echo_n "(cached) " >&6 else ac_cv_sys_largefile_LDFLAGS=`($GETCONF LFS_LDFLAGS) 2>/dev/null` || { ac_cv_sys_largefile_LDFLAGS=no } fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_largefile_LDFLAGS" >&5 $as_echo "$ac_cv_sys_largefile_LDFLAGS" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking for LIBS value to request large file support" >&5 $as_echo_n "checking for LIBS value to request large file support... 
" >&6; } if ${ac_cv_sys_largefile_LIBS+:} false; then : $as_echo_n "(cached) " >&6 else ac_cv_sys_largefile_LIBS=`($GETCONF LFS_LIBS) 2>/dev/null` || { ac_cv_sys_largefile_LIBS=no } fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_largefile_LIBS" >&5 $as_echo "$ac_cv_sys_largefile_LIBS" >&6; } for ac_flag in $ac_cv_sys_largefile_CFLAGS no; do case "$ac_flag" in no) ;; -D_FILE_OFFSET_BITS=*) ;; -D_LARGEFILE_SOURCE | -D_LARGEFILE_SOURCE=*) ;; -D_LARGE_FILES | -D_LARGE_FILES=*) ;; -D?* | -I?*) case "$ac_flag" in no) ;; ?*) case "$CPPFLAGS" in '') CPPFLAGS="$ac_flag" ;; *) CPPFLAGS=$CPPFLAGS' '"$ac_flag" ;; esac ;; esac ;; *) case "$ac_flag" in no) ;; ?*) case "$CFLAGS" in '') CFLAGS="$ac_flag" ;; *) CFLAGS=$CFLAGS' '"$ac_flag" ;; esac ;; esac ;; esac done case "$ac_cv_sys_largefile_LDFLAGS" in no) ;; ?*) case "$LDFLAGS" in '') LDFLAGS="$ac_cv_sys_largefile_LDFLAGS" ;; *) LDFLAGS=$LDFLAGS' '"$ac_cv_sys_largefile_LDFLAGS" ;; esac ;; esac case "$ac_cv_sys_largefile_LIBS" in no) ;; ?*) case "$LIBS" in '') LIBS="$ac_cv_sys_largefile_LIBS" ;; *) LIBS=$LIBS' '"$ac_cv_sys_largefile_LIBS" ;; esac ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking for _FILE_OFFSET_BITS" >&5 $as_echo_n "checking for _FILE_OFFSET_BITS... " >&6; } if ${ac_cv_sys_file_offset_bits+:} false; then : $as_echo_n "(cached) " >&6 else ac_cv_sys_file_offset_bits=no ac_cv_sys_file_offset_bits=64 for ac_flag in $ac_cv_sys_largefile_CFLAGS no; do case "$ac_flag" in -D_FILE_OFFSET_BITS) ac_cv_sys_file_offset_bits=1 ;; -D_FILE_OFFSET_BITS=*) ac_cv_sys_file_offset_bits=`expr " $ac_flag" : '[^=]*=\(.*\)'` ;; esac done fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_file_offset_bits" >&5 $as_echo "$ac_cv_sys_file_offset_bits" >&6; } if test "$ac_cv_sys_file_offset_bits" != no; then cat >>confdefs.h <<_ACEOF #define _FILE_OFFSET_BITS $ac_cv_sys_file_offset_bits _ACEOF fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for _LARGEFILE_SOURCE" >&5 $as_echo_n "checking for _LARGEFILE_SOURCE... " >&6; } if ${ac_cv_sys_largefile_source+:} false; then : $as_echo_n "(cached) " >&6 else ac_cv_sys_largefile_source=no ac_cv_sys_largefile_source=1 for ac_flag in $ac_cv_sys_largefile_CFLAGS no; do case "$ac_flag" in -D_LARGEFILE_SOURCE) ac_cv_sys_largefile_source=1 ;; -D_LARGEFILE_SOURCE=*) ac_cv_sys_largefile_source=`expr " $ac_flag" : '[^=]*=\(.*\)'` ;; esac done fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_largefile_source" >&5 $as_echo "$ac_cv_sys_largefile_source" >&6; } if test "$ac_cv_sys_largefile_source" != no; then cat >>confdefs.h <<_ACEOF #define _LARGEFILE_SOURCE $ac_cv_sys_largefile_source _ACEOF fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for _LARGE_FILES" >&5 $as_echo_n "checking for _LARGE_FILES... " >&6; } if ${ac_cv_sys_large_files+:} false; then : $as_echo_n "(cached) " >&6 else ac_cv_sys_large_files=no ac_cv_sys_large_files=1 for ac_flag in $ac_cv_sys_largefile_CFLAGS no; do case "$ac_flag" in -D_LARGE_FILES) ac_cv_sys_large_files=1 ;; -D_LARGE_FILES=*) ac_cv_sys_large_files=`expr " $ac_flag" : '[^=]*=\(.*\)'` ;; esac done fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_large_files" >&5 $as_echo "$ac_cv_sys_large_files" >&6; } if test "$ac_cv_sys_large_files" != no; then cat >>confdefs.h <<_ACEOF #define _LARGE_FILES $ac_cv_sys_large_files _ACEOF fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for X" >&5 $as_echo_n "checking for X... " >&6; } # Check whether --with-x was given. 
if test "${with_x+set}" = set; then : withval=$with_x; fi # $have_x is `yes', `no', `disabled', or empty when we do not yet know. if test "x$with_x" = xno; then # The user explicitly disabled X. have_x=disabled else case $x_includes,$x_libraries in #( *\'*) as_fn_error $? "cannot use X directory names containing '" "$LINENO" 5;; #( *,NONE | NONE,*) if ${ac_cv_have_x+:} false; then : $as_echo_n "(cached) " >&6 else # One or both of the vars are not set, and there is no cached value. ac_x_includes=no ac_x_libraries=no rm -f -r conftest.dir if mkdir conftest.dir; then cd conftest.dir cat >Imakefile <<'_ACEOF' incroot: @echo incroot='${INCROOT}' usrlibdir: @echo usrlibdir='${USRLIBDIR}' libdir: @echo libdir='${LIBDIR}' _ACEOF if (export CC; ${XMKMF-xmkmf}) >/dev/null 2>/dev/null && test -f Makefile; then # GNU make sometimes prints "make[1]: Entering ...", which would confuse us. for ac_var in incroot usrlibdir libdir; do eval "ac_im_$ac_var=\`\${MAKE-make} $ac_var 2>/dev/null | sed -n 's/^$ac_var=//p'\`" done # Open Windows xmkmf reportedly sets LIBDIR instead of USRLIBDIR. for ac_extension in a so sl dylib la dll; do if test ! -f "$ac_im_usrlibdir/libX11.$ac_extension" && test -f "$ac_im_libdir/libX11.$ac_extension"; then ac_im_usrlibdir=$ac_im_libdir; break fi done # Screen out bogus values from the imake configuration. They are # bogus both because they are the default anyway, and because # using them would break gcc on systems where it needs fixed includes. case $ac_im_incroot in /usr/include) ac_x_includes= ;; *) test -f "$ac_im_incroot/X11/Xos.h" && ac_x_includes=$ac_im_incroot;; esac case $ac_im_usrlibdir in /usr/lib | /usr/lib64 | /lib | /lib64) ;; *) test -d "$ac_im_usrlibdir" && ac_x_libraries=$ac_im_usrlibdir ;; esac fi cd .. rm -f -r conftest.dir fi # Standard set of common directories for X headers. # Check X11 before X11Rn because it is often a symlink to the current release. ac_x_header_dirs=' /usr/X11/include /usr/X11R7/include /usr/X11R6/include /usr/X11R5/include /usr/X11R4/include /usr/include/X11 /usr/include/X11R7 /usr/include/X11R6 /usr/include/X11R5 /usr/include/X11R4 /usr/local/X11/include /usr/local/X11R7/include /usr/local/X11R6/include /usr/local/X11R5/include /usr/local/X11R4/include /usr/local/include/X11 /usr/local/include/X11R7 /usr/local/include/X11R6 /usr/local/include/X11R5 /usr/local/include/X11R4 /usr/X386/include /usr/x386/include /usr/XFree86/include/X11 /usr/include /usr/local/include /usr/unsupported/include /usr/athena/include /usr/local/x11r5/include /usr/lpp/Xamples/include /usr/openwin/include /usr/openwin/share/include' if test "$ac_x_includes" = no; then # Guess where to find include files, by looking for Xlib.h. # First, try using that file with no special directory specified. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : # We can compile using X headers with no special include directory. ac_x_includes= else for ac_dir in $ac_x_header_dirs; do if test -r "$ac_dir/X11/Xlib.h"; then ac_x_includes=$ac_dir break fi done fi rm -f conftest.err conftest.i conftest.$ac_ext fi # $ac_x_includes = no if test "$ac_x_libraries" = no; then # Check for the libraries. # See if we find them without any special options. # Don't add to $LIBS permanently. ac_save_LIBS=$LIBS LIBS="-lX11 $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include int main () { XrmInitialize () ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : LIBS=$ac_save_LIBS # We can link X programs with no special library path. ac_x_libraries= else LIBS=$ac_save_LIBS for ac_dir in `$as_echo "$ac_x_includes $ac_x_header_dirs" | sed s/include/lib/g` do # Don't even attempt the hair of trying to link an X program! for ac_extension in a so sl dylib la dll; do if test -r "$ac_dir/libX11.$ac_extension"; then ac_x_libraries=$ac_dir break 2 fi done done fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi # $ac_x_libraries = no case $ac_x_includes,$ac_x_libraries in #( no,* | *,no | *\'*) # Didn't find X, or a directory has "'" in its name. ac_cv_have_x="have_x=no";; #( *) # Record where we found X for the cache. ac_cv_have_x="have_x=yes\ ac_x_includes='$ac_x_includes'\ ac_x_libraries='$ac_x_libraries'" esac fi ;; #( *) have_x=yes;; esac eval "$ac_cv_have_x" fi # $with_x != no if test "$have_x" != yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $have_x" >&5 $as_echo "$have_x" >&6; } no_x=yes else # If each of the values was on the command line, it overrides each guess. test "x$x_includes" = xNONE && x_includes=$ac_x_includes test "x$x_libraries" = xNONE && x_libraries=$ac_x_libraries # Update the cache value to reflect the command line values. ac_cv_have_x="have_x=yes\ ac_x_includes='$x_includes'\ ac_x_libraries='$x_libraries'" { $as_echo "$as_me:${as_lineno-$LINENO}: result: libraries $x_libraries, headers $x_includes" >&5 $as_echo "libraries $x_libraries, headers $x_includes" >&6; } fi if test "$no_x" = yes; then # Not all programs may use this symbol, but it does not hurt to define it. $as_echo "#define X_DISPLAY_MISSING 1" >>confdefs.h X_CFLAGS= X_PRE_LIBS= X_LIBS= X_EXTRA_LIBS= else if test -n "$x_includes"; then X_CFLAGS="$X_CFLAGS -I$x_includes" fi # It would also be nice to do this for all -L options, not just this one. if test -n "$x_libraries"; then X_LIBS="$X_LIBS -L$x_libraries" # For Solaris; some versions of Sun CC require a space after -R and # others require no space. Words are not sufficient . . . . { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether -R must be followed by a space" >&5 $as_echo_n "checking whether -R must be followed by a space... " >&6; } ac_xsave_LIBS=$LIBS; LIBS="$LIBS -R$x_libraries" ac_xsave_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } X_LIBS="$X_LIBS -R$x_libraries" else LIBS="$ac_xsave_LIBS -R $x_libraries" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } X_LIBS="$X_LIBS -R $x_libraries" else { $as_echo "$as_me:${as_lineno-$LINENO}: result: neither works" >&5 $as_echo "neither works" >&6; } fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext ac_c_werror_flag=$ac_xsave_c_werror_flag LIBS=$ac_xsave_LIBS fi # Check for system-dependent libraries X programs must link with. # Do this before checking for the system-independent R6 libraries # (-lICE), since we may need -lsocket or whatever for X linking. 
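#
# Note (illustrative): the probe above leaves its results in X_CFLAGS,
# X_LIBS, X_PRE_LIBS and X_EXTRA_LIBS (or defines X_DISPLAY_MISSING when X
# is absent).  Makefiles conventionally combine them as
#
#   $(X_CFLAGS)                                       # compile
#   $(X_LIBS) $(X_PRE_LIBS) -lX11 $(X_EXTRA_LIBS)     # link
#
# so a hypothetical OpenWindows-style result might be
#   X_LIBS="-L/usr/openwin/lib -R /usr/openwin/lib"  X_PRE_LIBS="-lSM -lICE"
# The checks below populate X_EXTRA_LIBS with any additional system libraries
# (dnet, nsl, socket, posix, ipc) that X clients need on older platforms.
#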
if test "$ISC" = yes; then X_EXTRA_LIBS="$X_EXTRA_LIBS -lnsl_s -linet" else # Martyn Johnson says this is needed for Ultrix, if the X # libraries were built with DECnet support. And Karl Berry says # the Alpha needs dnet_stub (dnet does not exist). ac_xsave_LIBS="$LIBS"; LIBS="$LIBS $X_LIBS -lX11" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char XOpenDisplay (); int main () { return XOpenDisplay (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dnet_ntoa in -ldnet" >&5 $as_echo_n "checking for dnet_ntoa in -ldnet... " >&6; } if ${ac_cv_lib_dnet_dnet_ntoa+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ldnet $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char dnet_ntoa (); int main () { return dnet_ntoa (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_dnet_dnet_ntoa=yes else ac_cv_lib_dnet_dnet_ntoa=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dnet_dnet_ntoa" >&5 $as_echo "$ac_cv_lib_dnet_dnet_ntoa" >&6; } if test "x$ac_cv_lib_dnet_dnet_ntoa" = xyes; then : X_EXTRA_LIBS="$X_EXTRA_LIBS -ldnet" fi if test $ac_cv_lib_dnet_dnet_ntoa = no; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dnet_ntoa in -ldnet_stub" >&5 $as_echo_n "checking for dnet_ntoa in -ldnet_stub... " >&6; } if ${ac_cv_lib_dnet_stub_dnet_ntoa+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ldnet_stub $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char dnet_ntoa (); int main () { return dnet_ntoa (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_dnet_stub_dnet_ntoa=yes else ac_cv_lib_dnet_stub_dnet_ntoa=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dnet_stub_dnet_ntoa" >&5 $as_echo "$ac_cv_lib_dnet_stub_dnet_ntoa" >&6; } if test "x$ac_cv_lib_dnet_stub_dnet_ntoa" = xyes; then : X_EXTRA_LIBS="$X_EXTRA_LIBS -ldnet_stub" fi fi fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS="$ac_xsave_LIBS" # msh@cis.ufl.edu says -lnsl (and -lsocket) are needed for his 386/AT, # to get the SysV transport functions. # Chad R. Larson says the Pyramis MIS-ES running DC/OSx (SVR4) # needs -lnsl. # The nsl library prevents programs from opening the X display # on Irix 5.2, according to T.E. Dickey. # The functions gethostbyname, getservbyname, and inet_addr are # in -lbsd on LynxOS 3.0.1/i386, according to Lars Hecking. 
ac_fn_c_check_func "$LINENO" "gethostbyname" "ac_cv_func_gethostbyname" if test "x$ac_cv_func_gethostbyname" = xyes; then : fi if test $ac_cv_func_gethostbyname = no; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for gethostbyname in -lnsl" >&5 $as_echo_n "checking for gethostbyname in -lnsl... " >&6; } if ${ac_cv_lib_nsl_gethostbyname+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lnsl $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char gethostbyname (); int main () { return gethostbyname (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_nsl_gethostbyname=yes else ac_cv_lib_nsl_gethostbyname=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_nsl_gethostbyname" >&5 $as_echo "$ac_cv_lib_nsl_gethostbyname" >&6; } if test "x$ac_cv_lib_nsl_gethostbyname" = xyes; then : X_EXTRA_LIBS="$X_EXTRA_LIBS -lnsl" fi if test $ac_cv_lib_nsl_gethostbyname = no; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for gethostbyname in -lbsd" >&5 $as_echo_n "checking for gethostbyname in -lbsd... " >&6; } if ${ac_cv_lib_bsd_gethostbyname+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lbsd $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char gethostbyname (); int main () { return gethostbyname (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_bsd_gethostbyname=yes else ac_cv_lib_bsd_gethostbyname=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_bsd_gethostbyname" >&5 $as_echo "$ac_cv_lib_bsd_gethostbyname" >&6; } if test "x$ac_cv_lib_bsd_gethostbyname" = xyes; then : X_EXTRA_LIBS="$X_EXTRA_LIBS -lbsd" fi fi fi # lieder@skyler.mavd.honeywell.com says without -lsocket, # socket/setsockopt and other routines are undefined under SCO ODT # 2.0. But -lsocket is broken on IRIX 5.2 (and is not necessary # on later versions), says Simon Leinen: it contains gethostby* # variants that don't use the name server (or something). -lsocket # must be given before -lnsl if both are needed. We assume that # if connect needs -lnsl, so does gethostbyname. ac_fn_c_check_func "$LINENO" "connect" "ac_cv_func_connect" if test "x$ac_cv_func_connect" = xyes; then : fi if test $ac_cv_func_connect = no; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for connect in -lsocket" >&5 $as_echo_n "checking for connect in -lsocket... " >&6; } if ${ac_cv_lib_socket_connect+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lsocket $X_EXTRA_LIBS $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char connect (); int main () { return connect (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_socket_connect=yes else ac_cv_lib_socket_connect=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_socket_connect" >&5 $as_echo "$ac_cv_lib_socket_connect" >&6; } if test "x$ac_cv_lib_socket_connect" = xyes; then : X_EXTRA_LIBS="-lsocket $X_EXTRA_LIBS" fi fi # Guillermo Gomez says -lposix is necessary on A/UX. ac_fn_c_check_func "$LINENO" "remove" "ac_cv_func_remove" if test "x$ac_cv_func_remove" = xyes; then : fi if test $ac_cv_func_remove = no; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for remove in -lposix" >&5 $as_echo_n "checking for remove in -lposix... " >&6; } if ${ac_cv_lib_posix_remove+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lposix $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char remove (); int main () { return remove (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_posix_remove=yes else ac_cv_lib_posix_remove=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_posix_remove" >&5 $as_echo "$ac_cv_lib_posix_remove" >&6; } if test "x$ac_cv_lib_posix_remove" = xyes; then : X_EXTRA_LIBS="$X_EXTRA_LIBS -lposix" fi fi # BSDI BSD/OS 2.1 needs -lipc for XOpenDisplay. ac_fn_c_check_func "$LINENO" "shmat" "ac_cv_func_shmat" if test "x$ac_cv_func_shmat" = xyes; then : fi if test $ac_cv_func_shmat = no; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for shmat in -lipc" >&5 $as_echo_n "checking for shmat in -lipc... " >&6; } if ${ac_cv_lib_ipc_shmat+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lipc $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char shmat (); int main () { return shmat (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_ipc_shmat=yes else ac_cv_lib_ipc_shmat=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_ipc_shmat" >&5 $as_echo "$ac_cv_lib_ipc_shmat" >&6; } if test "x$ac_cv_lib_ipc_shmat" = xyes; then : X_EXTRA_LIBS="$X_EXTRA_LIBS -lipc" fi fi fi # Check for libraries that X11R6 Xt/Xaw programs need. ac_save_LDFLAGS=$LDFLAGS test -n "$x_libraries" && LDFLAGS="$LDFLAGS -L$x_libraries" # SM needs ICE to (dynamically) link under SunOS 4.x (so we have to # check for ICE first), but we must link in the order -lSM -lICE or # we get undefined symbols. So assume we have SM if we have ICE. # These have to be linked with before -lX11, unlike the other # libraries we check for below, so use a different variable. 
# John Interrante, Karl Berry { $as_echo "$as_me:${as_lineno-$LINENO}: checking for IceConnectionNumber in -lICE" >&5 $as_echo_n "checking for IceConnectionNumber in -lICE... " >&6; } if ${ac_cv_lib_ICE_IceConnectionNumber+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lICE $X_EXTRA_LIBS $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char IceConnectionNumber (); int main () { return IceConnectionNumber (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_ICE_IceConnectionNumber=yes else ac_cv_lib_ICE_IceConnectionNumber=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_ICE_IceConnectionNumber" >&5 $as_echo "$ac_cv_lib_ICE_IceConnectionNumber" >&6; } if test "x$ac_cv_lib_ICE_IceConnectionNumber" = xyes; then : X_PRE_LIBS="$X_PRE_LIBS -lSM -lICE" fi LDFLAGS=$ac_save_LDFLAGS fi for ac_header in \ assert.h \ fcntl.h \ grp.h \ pwd.h \ libc.h \ limits.h \ stdarg.h \ stdlib.h \ stdint.h \ inttypes.h \ string.h \ strings.h \ termios.h \ termcap.h \ term.h \ unistd.h \ sys/bitypes.h \ sys/byteorder.h \ sys/ioctl.h \ sys/select.h \ sys/socket.h \ sys/sockio.h \ sys/stat.h \ sys/time.h \ sys/types.h \ arpa/nameser.h \ mtio.h \ sys/mtio.h \ sys/tape.h \ regex.h \ attr/attributes.h \ attr/xattr.h \ do : as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default" if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 $as_echo_n "checking for ANSI C header files... " >&6; } if ${ac_cv_header_stdc+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include #include #include int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_header_stdc=yes else ac_cv_header_stdc=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test $ac_cv_header_stdc = yes; then # SunOS 4.x string.h does not declare mem*, contrary to ANSI. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "memchr" >/dev/null 2>&1; then : else ac_cv_header_stdc=no fi rm -f conftest* fi if test $ac_cv_header_stdc = yes; then # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "free" >/dev/null 2>&1; then : else ac_cv_header_stdc=no fi rm -f conftest* fi if test $ac_cv_header_stdc = yes; then # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. if test "$cross_compiling" = yes; then : : else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include #if ((' ' & 0x0FF) == 0x020) # define ISLOWER(c) ('a' <= (c) && (c) <= 'z') # define TOUPPER(c) (ISLOWER(c) ? 
'A' + ((c) - 'a') : (c)) #else # define ISLOWER(c) \ (('a' <= (c) && (c) <= 'i') \ || ('j' <= (c) && (c) <= 'r') \ || ('s' <= (c) && (c) <= 'z')) # define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c)) #endif #define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) int main () { int i; for (i = 0; i < 256; i++) if (XOR (islower (i), ISLOWER (i)) || toupper (i) != TOUPPER (i)) return 2; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : else ac_cv_header_stdc=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 $as_echo "$ac_cv_header_stdc" >&6; } if test $ac_cv_header_stdc = yes; then $as_echo "#define STDC_HEADERS 1" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether sys/types.h defines makedev" >&5 $as_echo_n "checking whether sys/types.h defines makedev... " >&6; } if ${ac_cv_header_sys_types_h_makedev+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { return makedev(0, 0); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_header_sys_types_h_makedev=yes else ac_cv_header_sys_types_h_makedev=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_sys_types_h_makedev" >&5 $as_echo "$ac_cv_header_sys_types_h_makedev" >&6; } if test $ac_cv_header_sys_types_h_makedev = no; then ac_fn_c_check_header_mongrel "$LINENO" "sys/mkdev.h" "ac_cv_header_sys_mkdev_h" "$ac_includes_default" if test "x$ac_cv_header_sys_mkdev_h" = xyes; then : $as_echo "#define MAJOR_IN_MKDEV 1" >>confdefs.h fi if test $ac_cv_header_sys_mkdev_h = no; then ac_fn_c_check_header_mongrel "$LINENO" "sys/sysmacros.h" "ac_cv_header_sys_sysmacros_h" "$ac_includes_default" if test "x$ac_cv_header_sys_sysmacros_h" = xyes; then : $as_echo "#define MAJOR_IN_SYSMACROS 1" >>confdefs.h fi fi fi ac_header_dirent=no for ac_hdr in dirent.h sys/ndir.h sys/dir.h ndir.h; do as_ac_Header=`$as_echo "ac_cv_header_dirent_$ac_hdr" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_hdr that defines DIR" >&5 $as_echo_n "checking for $ac_hdr that defines DIR... " >&6; } if eval \${$as_ac_Header+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include <$ac_hdr> int main () { if ((DIR *) 0) return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : eval "$as_ac_Header=yes" else eval "$as_ac_Header=no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi eval ac_res=\$$as_ac_Header { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_hdr" | $as_tr_cpp` 1 _ACEOF ac_header_dirent=$ac_hdr; break fi done # Two versions of opendir et al. are in -ldir and -lx on SCO Xenix. if test $ac_header_dirent = dirent.h; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing opendir" >&5 $as_echo_n "checking for library containing opendir... " >&6; } if ${ac_cv_search_opendir+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. 
Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char opendir (); int main () { return opendir (); ; return 0; } _ACEOF for ac_lib in '' dir; do if test -z "$ac_lib"; then ac_res="none required" else ac_res=-l$ac_lib LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : ac_cv_search_opendir=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext if ${ac_cv_search_opendir+:} false; then : break fi done if ${ac_cv_search_opendir+:} false; then : else ac_cv_search_opendir=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_opendir" >&5 $as_echo "$ac_cv_search_opendir" >&6; } ac_res=$ac_cv_search_opendir if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" fi else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing opendir" >&5 $as_echo_n "checking for library containing opendir... " >&6; } if ${ac_cv_search_opendir+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char opendir (); int main () { return opendir (); ; return 0; } _ACEOF for ac_lib in '' x; do if test -z "$ac_lib"; then ac_res="none required" else ac_res=-l$ac_lib LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : ac_cv_search_opendir=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext if ${ac_cv_search_opendir+:} false; then : break fi done if ${ac_cv_search_opendir+:} false; then : else ac_cv_search_opendir=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_opendir" >&5 $as_echo "$ac_cv_search_opendir" >&6; } ac_res=$ac_cv_search_opendir if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether stat file-mode macros are broken" >&5 $as_echo_n "checking whether stat file-mode macros are broken... " >&6; } if ${ac_cv_header_stat_broken+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include #if defined S_ISBLK && defined S_IFDIR extern char c1[S_ISBLK (S_IFDIR) ? -1 : 1]; #endif #if defined S_ISBLK && defined S_IFCHR extern char c2[S_ISBLK (S_IFCHR) ? -1 : 1]; #endif #if defined S_ISLNK && defined S_IFREG extern char c3[S_ISLNK (S_IFREG) ? -1 : 1]; #endif #if defined S_ISSOCK && defined S_IFREG extern char c4[S_ISSOCK (S_IFREG) ? -1 : 1]; #endif _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_header_stat_broken=no else ac_cv_header_stat_broken=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stat_broken" >&5 $as_echo "$ac_cv_header_stat_broken" >&6; } if test $ac_cv_header_stat_broken = yes; then $as_echo "#define STAT_MACROS_BROKEN 1" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for sys/wait.h that is POSIX.1 compatible" >&5 $as_echo_n "checking for sys/wait.h that is POSIX.1 compatible... 
" >&6; } if ${ac_cv_header_sys_wait_h+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include #ifndef WEXITSTATUS # define WEXITSTATUS(stat_val) ((unsigned int) (stat_val) >> 8) #endif #ifndef WIFEXITED # define WIFEXITED(stat_val) (((stat_val) & 255) == 0) #endif int main () { int s; wait (&s); s = WIFEXITED (s) ? WEXITSTATUS (s) : 1; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_header_sys_wait_h=yes else ac_cv_header_sys_wait_h=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_sys_wait_h" >&5 $as_echo "$ac_cv_header_sys_wait_h" >&6; } if test $ac_cv_header_sys_wait_h = yes; then $as_echo "#define HAVE_SYS_WAIT_H 1" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether time.h and sys/time.h may both be included" >&5 $as_echo_n "checking whether time.h and sys/time.h may both be included... " >&6; } if ${ac_cv_header_time+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include #include int main () { if ((struct tm *) 0) return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_header_time=yes else ac_cv_header_time=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_time" >&5 $as_echo "$ac_cv_header_time" >&6; } if test $ac_cv_header_time = yes; then $as_echo "#define TIME_WITH_SYS_TIME 1" >>confdefs.h fi ac_fn_c_check_member "$LINENO" "struct stat" "st_blksize" "ac_cv_member_struct_stat_st_blksize" "$ac_includes_default" if test "x$ac_cv_member_struct_stat_st_blksize" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_STRUCT_STAT_ST_BLKSIZE 1 _ACEOF $as_echo "#define HAVE_ST_BLKSIZE 1" >>confdefs.h fi ac_fn_c_check_member "$LINENO" "struct stat" "st_blocks" "ac_cv_member_struct_stat_st_blocks" "$ac_includes_default" if test "x$ac_cv_member_struct_stat_st_blocks" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_STRUCT_STAT_ST_BLOCKS 1 _ACEOF $as_echo "#define HAVE_ST_BLOCKS 1" >>confdefs.h else case " $LIBOBJS " in *" fileblocks.$ac_objext "* ) ;; *) LIBOBJS="$LIBOBJS fileblocks.$ac_objext" ;; esac fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether struct tm is in sys/time.h or time.h" >&5 $as_echo_n "checking whether struct tm is in sys/time.h or time.h... " >&6; } if ${ac_cv_struct_tm+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include <sys/types.h> #include <time.h> int main () { struct tm tm; int *p = &tm.tm_sec; return !p; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_struct_tm=time.h else ac_cv_struct_tm=sys/time.h fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_struct_tm" >&5 $as_echo "$ac_cv_struct_tm" >&6; } if test $ac_cv_struct_tm = sys/time.h; then $as_echo "#define TM_IN_SYS_TIME 1" >>confdefs.h fi ac_fn_c_check_member "$LINENO" "struct tm" "tm_zone" "ac_cv_member_struct_tm_tm_zone" "#include <sys/types.h> #include <$ac_cv_struct_tm> " if test "x$ac_cv_member_struct_tm_tm_zone" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_STRUCT_TM_TM_ZONE 1 _ACEOF fi if test "$ac_cv_member_struct_tm_tm_zone" = yes; then $as_echo "#define HAVE_TM_ZONE 1" >>confdefs.h else ac_fn_c_check_decl "$LINENO" "tzname" "ac_cv_have_decl_tzname" "#include <time.h> " if test "x$ac_cv_have_decl_tzname" = xyes; then : ac_have_decl=1 else ac_have_decl=0 fi cat >>confdefs.h <<_ACEOF #define HAVE_DECL_TZNAME $ac_have_decl _ACEOF { $as_echo "$as_me:${as_lineno-$LINENO}: checking for tzname" >&5 $as_echo_n "checking for tzname... " >&6; } if ${ac_cv_var_tzname+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <time.h> #if !HAVE_DECL_TZNAME extern char *tzname[]; #endif int main () { return tzname[0][0]; ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_var_tzname=yes else ac_cv_var_tzname=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_var_tzname" >&5 $as_echo "$ac_cv_var_tzname" >&6; } if test $ac_cv_var_tzname = yes; then $as_echo "#define HAVE_TZNAME 1" >>confdefs.h fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for utime.h" >&5 $as_echo_n "checking for utime.h... " >&6; } if ${ba_cv_header_utime_h+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <sys/types.h> #include <utime.h> int main () { struct utimbuf foo ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ba_cv_header_utime_h=yes else ba_cv_header_utime_h=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ba_cv_header_utime_h" >&5 $as_echo "$ba_cv_header_utime_h" >&6; } test $ba_cv_header_utime_h = yes && $as_echo "#define HAVE_UTIME_H 1" >>confdefs.h { $as_echo "$as_me:${as_lineno-$LINENO}: checking for socklen_t" >&5 $as_echo_n "checking for socklen_t... " >&6; } if ${ba_cv_header_socklen_t+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <sys/types.h> #include <sys/socket.h> int main () { socklen_t x ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ba_cv_header_socklen_t=yes else ba_cv_header_socklen_t=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ba_cv_header_socklen_t" >&5 $as_echo "$ba_cv_header_socklen_t" >&6; } test $ba_cv_header_socklen_t = yes && $as_echo "#define HAVE_SOCKLEN_T 1" >>confdefs.h ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ioctl_req_t" >&5 $as_echo_n "checking for ioctl_req_t...
" >&6; } if ${ba_cv_header_ioctl_req_t+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include #include int main () { int (*d_ioctl)(int fd, unsigned long int request, ...); d_ioctl = ::ioctl; ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : ba_cv_header_ioctl_req_t=yes else ba_cv_header_ioctl_req_t=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ba_cv_header_ioctl_req_t" >&5 $as_echo "$ba_cv_header_ioctl_req_t" >&6; } test $ba_cv_header_ioctl_req_t = yes && $as_echo "#define HAVE_IOCTL_ULINT_REQUEST 1" >>confdefs.h ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu { $as_echo "$as_me:${as_lineno-$LINENO}: checking for typeof" >&5 $as_echo_n "checking for typeof... " >&6; } if ${ba_cv_have_typeof+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : ba_cv_have_typeof=no else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ main(){char *a = 0; a = (typeof a)a;} _ACEOF if ac_fn_cxx_try_run "$LINENO"; then : ba_cv_have_typeof=yes else ba_cv_have_typeof=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ba_cv_have_typeof" >&5 $as_echo "$ba_cv_have_typeof" >&6; } if test $ba_cv_have_typeof = yes; then $as_echo "#define HAVE_TYPEOF 1" >>confdefs.h $as_echo "#define TYPEOF_FUNC typeof" >>confdefs.h else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for __typeof" >&5 $as_echo_n "checking for __typeof... " >&6; } if ${ba_cv_have_utypeof+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : ba_cv_have_utypeof=no else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ main(){char *a = 0; a = (__typeof a)a;} _ACEOF if ac_fn_cxx_try_run "$LINENO"; then : ba_cv_have_utypeof=yes else ba_cv_have_utypeof=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ba_cv_have_utypeof" >&5 $as_echo "$ba_cv_have_utypeof" >&6; } if test $ba_cv_have_utypeof = yes; then $as_echo "#define HAVE_TYPEOF 1" >>confdefs.h $as_echo "#define TYPEOF_FUNC __typeof" >>confdefs.h fi fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu { $as_echo "$as_me:${as_lineno-$LINENO}: checking for an ANSI C-conforming const" >&5 $as_echo_n "checking for an ANSI C-conforming const... " >&6; } if ${ac_cv_c_const+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { #ifndef __cplusplus /* Ultrix mips cc rejects this sort of thing. */ typedef int charset[2]; const charset cs = { 0, 0 }; /* SunOS 4.1.1 cc rejects this. 
*/ char const *const *pcpcc; char **ppc; /* NEC SVR4.0.2 mips cc rejects this. */ struct point {int x, y;}; static struct point const zero = {0,0}; /* AIX XL C 1.02.0.0 rejects this. It does not let you subtract one const X* pointer from another in an arm of an if-expression whose if-part is not a constant expression */ const char *g = "string"; pcpcc = &g + (g ? g-g : 0); /* HPUX 7.0 cc rejects these. */ ++pcpcc; ppc = (char**) pcpcc; pcpcc = (char const *const *) ppc; { /* SCO 3.2v4 cc rejects this sort of thing. */ char tx; char *t = &tx; char const *s = 0 ? (char *) 0 : (char const *) 0; *t++ = 0; if (s) return 0; } { /* Someone thinks the Sun supposedly-ANSI compiler will reject this. */ int x[] = {25, 17}; const int *foo = &x[0]; ++foo; } { /* Sun SC1.0 ANSI compiler rejects this -- but not the above. */ typedef const int *iptr; iptr p = 0; ++p; } { /* AIX XL C 1.02.0.0 rejects this sort of thing, saying "k.c", line 2.27: 1506-025 (S) Operand must be a modifiable lvalue. */ struct s { int j; const int *ap[3]; } bx; struct s *b = &bx; b->j = 5; } { /* ULTRIX-32 V3.1 (Rev 9) vcc rejects this */ const int foo = 10; if (!foo) return 0; } return !cs[0] && !zero.x; #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_c_const=yes else ac_cv_c_const=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_const" >&5 $as_echo "$ac_cv_c_const" >&6; } if test $ac_cv_c_const = no; then $as_echo "#define const /**/" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether byte ordering is bigendian" >&5 $as_echo_n "checking whether byte ordering is bigendian... " >&6; } if ${ac_cv_c_bigendian+:} false; then : $as_echo_n "(cached) " >&6 else ac_cv_c_bigendian=unknown # See if we're dealing with a universal compiler. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifndef __APPLE_CC__ not a universal capable compiler #endif typedef int dummy; _ACEOF if ac_fn_c_try_compile "$LINENO"; then : # Check for potential -arch flags. It is not universal unless # there are at least two -arch flags with different values. ac_arch= ac_prev= for ac_word in $CC $CFLAGS $CPPFLAGS $LDFLAGS; do if test -n "$ac_prev"; then case $ac_word in i?86 | x86_64 | ppc | ppc64) if test -z "$ac_arch" || test "$ac_arch" = "$ac_word"; then ac_arch=$ac_word else ac_cv_c_bigendian=universal break fi ;; esac ac_prev= elif test "x$ac_word" = "x-arch"; then ac_prev=arch fi done fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test $ac_cv_c_bigendian = unknown; then # See if sys/param.h defines the BYTE_ORDER macro. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include int main () { #if ! (defined BYTE_ORDER && defined BIG_ENDIAN \ && defined LITTLE_ENDIAN && BYTE_ORDER && BIG_ENDIAN \ && LITTLE_ENDIAN) bogus endian macros #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : # It does; now see whether it defined to BIG_ENDIAN or not. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include int main () { #if BYTE_ORDER != BIG_ENDIAN not big endian #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_c_bigendian=yes else ac_cv_c_bigendian=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi if test $ac_cv_c_bigendian = unknown; then # See if defines _LITTLE_ENDIAN or _BIG_ENDIAN (e.g., Solaris). 
cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { #if ! (defined _LITTLE_ENDIAN || defined _BIG_ENDIAN) bogus endian macros #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : # It does; now see whether it defined to _BIG_ENDIAN or not. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { #ifndef _BIG_ENDIAN not big endian #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_c_bigendian=yes else ac_cv_c_bigendian=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi if test $ac_cv_c_bigendian = unknown; then # Compile a test program. if test "$cross_compiling" = yes; then : # Try to guess by grepping values from an object file. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ short int ascii_mm[] = { 0x4249, 0x4765, 0x6E44, 0x6961, 0x6E53, 0x7953, 0 }; short int ascii_ii[] = { 0x694C, 0x5454, 0x656C, 0x6E45, 0x6944, 0x6E61, 0 }; int use_ascii (int i) { return ascii_mm[i] + ascii_ii[i]; } short int ebcdic_ii[] = { 0x89D3, 0xE3E3, 0x8593, 0x95C5, 0x89C4, 0x9581, 0 }; short int ebcdic_mm[] = { 0xC2C9, 0xC785, 0x95C4, 0x8981, 0x95E2, 0xA8E2, 0 }; int use_ebcdic (int i) { return ebcdic_mm[i] + ebcdic_ii[i]; } extern int foo; int main () { return use_ascii (foo) == use_ebcdic (foo); ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : if grep BIGenDianSyS conftest.$ac_objext >/dev/null; then ac_cv_c_bigendian=yes fi if grep LiTTleEnDian conftest.$ac_objext >/dev/null ; then if test "$ac_cv_c_bigendian" = unknown; then ac_cv_c_bigendian=no else # finding both strings is unlikely to happen, but who knows? ac_cv_c_bigendian=unknown fi fi fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $ac_includes_default int main () { /* Are we little or big endian? From Harbison&Steele. */ union { long int l; char c[sizeof (long int)]; } u; u.l = 1; return u.c[sizeof (long int) - 1] == 1; ; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : ac_cv_c_bigendian=no else ac_cv_c_bigendian=yes fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_bigendian" >&5 $as_echo "$ac_cv_c_bigendian" >&6; } case $ac_cv_c_bigendian in #( yes) $as_echo "#define HAVE_BIG_ENDIAN 1" >>confdefs.h ;; #( no) $as_echo "#define HAVE_LITTLE_ENDIAN 1" >>confdefs.h ;; #( universal) $as_echo "#define AC_APPLE_UNIVERSAL_BUILD 1" >>confdefs.h ;; #( *) as_fn_error $? "unknown endianness presetting ac_cv_c_bigendian=no (or yes) will help" "$LINENO" 5 ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to get filesystem type" >&5 $as_echo_n "checking how to get filesystem type... " >&6; } fstype=no # The order of these tests is important. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : $as_echo "#define FSTYPE_STATVFS 1" >>confdefs.h fstype=SVR4 fi rm -f conftest.err conftest.i conftest.$ac_ext if test $fstype = no; then cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include <sys/statfs.h> #include <sys/fstyp.h> _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : $as_echo "#define FSTYPE_USG_STATFS 1" >>confdefs.h fstype=SVR3 fi rm -f conftest.err conftest.i conftest.$ac_ext fi if test $fstype = no; then cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <sys/statfs.h> #include <sys/vmount.h> _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : $as_echo "#define FSTYPE_AIX_STATFS 1" >>confdefs.h fstype=AIX fi rm -f conftest.err conftest.i conftest.$ac_ext fi if test $fstype = no; then cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <mntent.h> _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : $as_echo "#define FSTYPE_MNTENT 1" >>confdefs.h fstype=4.3BSD fi rm -f conftest.err conftest.i conftest.$ac_ext fi if test $fstype = no; then cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <sys/mount.h> _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "f_type;" >/dev/null 2>&1; then : $as_echo "#define FSTYPE_STATFS 1" >>confdefs.h fstype=4.4BSD/OSF1 fi rm -f conftest* fi if test $fstype = no; then cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <sys/mount.h> #include <sys/fs_types.h> _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : $as_echo "#define FSTYPE_GETMNT 1" >>confdefs.h fstype=Ultrix fi rm -f conftest.err conftest.i conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $fstype" >&5 $as_echo "$fstype" >&6; } ac_fn_c_check_header_mongrel "$LINENO" "sys/statvfs.h" "ac_cv_header_sys_statvfs_h" "$ac_includes_default" if test "x$ac_cv_header_sys_statvfs_h" = xyes; then : $as_echo "#define HAVE_SYS_STATVFS_H 1" >>confdefs.h fi ac_fn_c_check_decl "$LINENO" "O_CLOEXEC" "ac_cv_have_decl_O_CLOEXEC" " #ifdef HAVE_FCNTL_H # include <fcntl.h> #endif " if test "x$ac_cv_have_decl_O_CLOEXEC" = xyes; then : ac_have_decl=1 else ac_have_decl=0 fi cat >>confdefs.h <<_ACEOF #define HAVE_DECL_O_CLOEXEC $ac_have_decl _ACEOF if test $ac_have_decl = 1; then : else $as_echo "#define O_CLOEXEC 0" >>confdefs.h fi ac_fn_c_check_decl "$LINENO" "FD_CLOEXEC" "ac_cv_have_decl_FD_CLOEXEC" " #ifdef HAVE_FCNTL_H # include <fcntl.h> #endif " if test "x$ac_cv_have_decl_FD_CLOEXEC" = xyes; then : ac_have_decl=1 else ac_have_decl=0 fi cat >>confdefs.h <<_ACEOF #define HAVE_DECL_FD_CLOEXEC $ac_have_decl _ACEOF if test $ac_have_decl = 1; then : else $as_echo "#define FD_CLOEXEC 0" >>confdefs.h fi ac_fn_c_check_decl "$LINENO" "SOCK_CLOEXEC" "ac_cv_have_decl_SOCK_CLOEXEC" " #ifdef HAVE_SYS_SOCKET_H # include <sys/socket.h> #endif " if test "x$ac_cv_have_decl_SOCK_CLOEXEC" = xyes; then : ac_have_decl=1 else ac_have_decl=0 fi cat >>confdefs.h <<_ACEOF #define HAVE_DECL_SOCK_CLOEXEC $ac_have_decl _ACEOF if test $ac_have_decl = 1; then : else $as_echo "#define SOCK_CLOEXEC 0" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for close on exec modifier for fopen()" >&5 $as_echo_n "checking for close on exec modifier for fopen()...
" >&6; } if ${ac_cv_feature_stream_cloexec_flag+:} false; then : $as_echo_n "(cached) " >&6 else if test $ac_cv_have_decl_O_CLOEXEC = yes ; then if test $ac_cv_have_decl_SOCK_CLOEXEC = yes ; then ac_cv_feature_stream_cloexec_flag="e" fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_feature_stream_cloexec_flag" >&5 $as_echo "$ac_cv_feature_stream_cloexec_flag" >&6; } if test "x$ac_cv_feature_stream_cloexec_flag" = "xe" ; then $as_echo "#define HAVE_STREAM_CLOEXEC 0" >>confdefs.h fi cat >>confdefs.h <<_ACEOF #define STREAM_CLOEXEC "$ac_cv_feature_stream_cloexec_flag" _ACEOF ac_fn_c_check_func "$LINENO" "accept4" "ac_cv_func_accept4" if test "x$ac_cv_func_accept4" = xyes; then : $as_echo "#define HAVE_ACCEPT4 1" >>confdefs.h fi CLOUD_DRIVERS= CLOUD_INSTALL_TARGETS= # Check whether --enable-s3 was given. if test "${enable_s3+set}" = set; then : enableval=$enable_s3; if test x$enableval = xno; then support_s3=no fi fi S3_INC= S3_LIBS= S3_LDFLAGS= have_libs3=no if test x$support_s3 = xyes; then # Check whether --with-s3 was given. if test "${with_s3+set}" = set; then : withval=$with_s3; case "$with_s3" in no) : ;; yes|*) if test -f ${with_s3}/include/libs3.h; then S3_INC="-I${with_s3}/include" S3_LDFLAGS="-L${with_s3}/lib" with_s3="${with_s3}/include" else with_s3="/usr/include" fi as_ac_Header=`$as_echo "ac_cv_header_${with_s3}/libs3.h" | $as_tr_sh` ac_fn_c_check_header_mongrel "$LINENO" "${with_s3}/libs3.h" "$as_ac_Header" "$ac_includes_default" if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : $as_echo "#define HAVE_LIBS3 1" >>confdefs.h S3_LIBS="${S3_LDFLAGS} -ls3" have_libs3="yes" CLOUD_DRIVERS="${CLOUD_DRIVERS} s3-driver" CLOUD_INSTALL_TARGETS="${CLOUD_INSTALL_TARGETS} install-s3-cloud" else echo " " echo "libs3.h not found. s3 turned off ..." echo " " fi ;; esac else ac_fn_c_check_header_mongrel "$LINENO" "libs3.h" "ac_cv_header_libs3_h" "$ac_includes_default" if test "x$ac_cv_header_libs3_h" = xyes; then : { $as_echo "$as_me:${as_lineno-$LINENO}: checking for S3_initialize in -ls3" >&5 $as_echo_n "checking for S3_initialize in -ls3... " >&6; } if ${ac_cv_lib_s3_S3_initialize+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ls3 $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char S3_initialize (); int main () { return S3_initialize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_s3_S3_initialize=yes else ac_cv_lib_s3_S3_initialize=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_s3_S3_initialize" >&5 $as_echo "$ac_cv_lib_s3_S3_initialize" >&6; } if test "x$ac_cv_lib_s3_S3_initialize" = xyes; then : S3_LIBS="-ls3" $as_echo "#define HAVE_LIBS3 1" >>confdefs.h have_libs3=yes CLOUD_DRIVERS="${CLOUD_DRIVERS} s3-driver" CLOUD_INSTALL_TARGETS="${CLOUD_INSTALL_TARGETS} install-s3-cloud" fi fi fi fi have_aws=no if test x$support_aws = xyes; then # Check whether --with-aws was given. 
if test "${with_aws+set}" = set; then : withval=$with_aws; case "$with_aws" in no) : ;; yes|*) $as_echo "#define HAVE_GENERIC_CLOUD 1" >>confdefs.h have_aws="yes" CLOUD_DRIVERS="${CLOUD_DRIVERS} generic-driver" CLOUD_INSTALL_TARGETS="${CLOUD_INSTALL_TARGETS} install-aws-cloud" ;; esac fi fi ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu for ac_func in backtrace do : ac_fn_cxx_check_func "$LINENO" "backtrace" "ac_cv_func_backtrace" if test "x$ac_cv_func_backtrace" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_BACKTRACE 1 _ACEOF fi done ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu { $as_echo "$as_me:${as_lineno-$LINENO}: checking return type of signal handlers" >&5 $as_echo_n "checking return type of signal handlers... " >&6; } if ${ac_cv_type_signal+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include int main () { return *(signal (0, 0)) (0) == 1; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_type_signal=int else ac_cv_type_signal=void fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_type_signal" >&5 $as_echo "$ac_cv_type_signal" >&6; } cat >>confdefs.h <<_ACEOF #define RETSIGTYPE $ac_cv_type_signal _ACEOF { $as_echo "$as_me:${as_lineno-$LINENO}: checking for type of signal functions" >&5 $as_echo_n "checking for type of signal functions... " >&6; } if ${bash_cv_signal_vintage+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { sigset_t ss; struct sigaction sa; sigemptyset(&ss); sigsuspend(&ss); sigaction(SIGINT, &sa, (struct sigaction *) 0); sigprocmask(SIG_BLOCK, &ss, (sigset_t *) 0); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : bash_cv_signal_vintage="posix" else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { int mask = sigmask(SIGINT); sigsetmask(mask); sigblock(mask); sigpause(mask); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : bash_cv_signal_vintage="4.2bsd" else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include RETSIGTYPE foo() { } int main () { int mask = sigmask(SIGINT); sigset(SIGINT, foo); sigrelse(SIGINT); sighold(SIGINT); sigpause(SIGINT); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : bash_cv_signal_vintage="svr3" else bash_cv_signal_vintage="v7" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $bash_cv_signal_vintage" >&5 $as_echo "$bash_cv_signal_vintage" >&6; } if test "$bash_cv_signal_vintage" = "posix"; then $as_echo "#define HAVE_POSIX_SIGNALS 1" >>confdefs.h elif test "$bash_cv_signal_vintage" = "4.2bsd"; then $as_echo "#define HAVE_BSD_SIGNALS 1" >>confdefs.h elif test "$bash_cv_signal_vintage" = "svr3"; then $as_echo "#define HAVE_USG_SIGHOLD 1" >>confdefs.h fi ac_fn_c_check_type "$LINENO" "mode_t" "ac_cv_type_mode_t" "$ac_includes_default" if test "x$ac_cv_type_mode_t" = xyes; then : else cat >>confdefs.h <<_ACEOF #define mode_t int _ACEOF fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for uid_t in sys/types.h" >&5 $as_echo_n "checking for uid_t in sys/types.h... " >&6; } if ${ac_cv_type_uid_t+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "uid_t" >/dev/null 2>&1; then : ac_cv_type_uid_t=yes else ac_cv_type_uid_t=no fi rm -f conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_type_uid_t" >&5 $as_echo "$ac_cv_type_uid_t" >&6; } if test $ac_cv_type_uid_t = no; then $as_echo "#define uid_t int" >>confdefs.h $as_echo "#define gid_t int" >>confdefs.h fi ac_fn_c_check_type "$LINENO" "size_t" "ac_cv_type_size_t" "$ac_includes_default" if test "x$ac_cv_type_size_t" = xyes; then : else cat >>confdefs.h <<_ACEOF #define size_t unsigned int _ACEOF fi ac_fn_c_check_type "$LINENO" "pid_t" "ac_cv_type_pid_t" "$ac_includes_default" if test "x$ac_cv_type_pid_t" = xyes; then : else cat >>confdefs.h <<_ACEOF #define pid_t int _ACEOF fi ac_fn_c_check_type "$LINENO" "off_t" "ac_cv_type_off_t" "$ac_includes_default" if test "x$ac_cv_type_off_t" = xyes; then : else cat >>confdefs.h <<_ACEOF #define off_t long int _ACEOF fi ac_fn_c_check_type "$LINENO" "intptr_t" "ac_cv_type_intptr_t" "$ac_includes_default" if test "x$ac_cv_type_intptr_t" = xyes; then : $as_echo "#define HAVE_INTPTR_T 1" >>confdefs.h else for ac_type in 'int' 'long int' 'long long int'; do cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $ac_includes_default int main () { static int test_array [1 - 2 * !(sizeof (void *) <= sizeof ($ac_type))]; test_array [0] = 0; return test_array [0]; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : cat >>confdefs.h <<_ACEOF #define intptr_t $ac_type _ACEOF ac_type= fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext test -z "$ac_type" && break done fi ac_fn_c_check_type "$LINENO" "uintptr_t" "ac_cv_type_uintptr_t" "$ac_includes_default" if test "x$ac_cv_type_uintptr_t" = xyes; then : $as_echo "#define HAVE_UINTPTR_T 1" >>confdefs.h else for ac_type in 'unsigned int' 'unsigned long int' \ 'unsigned long long int'; do cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ $ac_includes_default int main () { static int test_array [1 - 2 * !(sizeof (void *) <= sizeof ($ac_type))]; test_array [0] = 0; return test_array [0]; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : cat >>confdefs.h <<_ACEOF #define uintptr_t $ac_type _ACEOF ac_type= fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext test -z "$ac_type" && break done fi ac_fn_c_check_type "$LINENO" "ino_t" "ac_cv_type_ino_t" "$ac_includes_default" if test "x$ac_cv_type_ino_t" = xyes; then : else cat >>confdefs.h <<_ACEOF #define ino_t unsigned long _ACEOF fi ac_fn_c_check_type "$LINENO" "dev_t" "ac_cv_type_dev_t" "$ac_includes_default" if test "x$ac_cv_type_dev_t" = xyes; then : else cat >>confdefs.h <<_ACEOF #define dev_t unsigned long _ACEOF fi ac_fn_c_check_type "$LINENO" "daddr_t" "ac_cv_type_daddr_t" "$ac_includes_default" if test "x$ac_cv_type_daddr_t" = xyes; then : else cat >>confdefs.h <<_ACEOF #define daddr_t long _ACEOF fi ac_fn_c_check_type "$LINENO" "major_t" "ac_cv_type_major_t" "$ac_includes_default" if test "x$ac_cv_type_major_t" = xyes; then : else cat >>confdefs.h <<_ACEOF #define major_t int _ACEOF fi ac_fn_c_check_type "$LINENO" "minor_t" "ac_cv_type_minor_t" "$ac_includes_default" if test "x$ac_cv_type_minor_t" = xyes; then : else cat >>confdefs.h <<_ACEOF #define minor_t int _ACEOF fi ac_fn_c_check_type "$LINENO" "ssize_t" "ac_cv_type_ssize_t" "$ac_includes_default" if test "x$ac_cv_type_ssize_t" = xyes; then : else cat >>confdefs.h <<_ACEOF #define ssize_t int _ACEOF fi ac_fn_c_check_member "$LINENO" "struct stat" "st_blocks" "ac_cv_member_struct_stat_st_blocks" "$ac_includes_default" if test "x$ac_cv_member_struct_stat_st_blocks" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_STRUCT_STAT_ST_BLOCKS 1 _ACEOF $as_echo "#define HAVE_ST_BLOCKS 1" >>confdefs.h else case " $LIBOBJS " in *" fileblocks.$ac_objext "* ) ;; *) LIBOBJS="$LIBOBJS fileblocks.$ac_objext" ;; esac fi ac_fn_c_check_member "$LINENO" "struct stat" "st_rdev" "ac_cv_member_struct_stat_st_rdev" "$ac_includes_default" if test "x$ac_cv_member_struct_stat_st_rdev" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_STRUCT_STAT_ST_RDEV 1 _ACEOF $as_echo "#define HAVE_ST_RDEV 1" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether struct tm is in sys/time.h or time.h" >&5 $as_echo_n "checking whether struct tm is in sys/time.h or time.h... " >&6; } if ${ac_cv_struct_tm+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include int main () { struct tm tm; int *p = &tm.tm_sec; return !p; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_struct_tm=time.h else ac_cv_struct_tm=sys/time.h fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_struct_tm" >&5 $as_echo "$ac_cv_struct_tm" >&6; } if test $ac_cv_struct_tm = sys/time.h; then $as_echo "#define TM_IN_SYS_TIME 1" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for an ANSI C-conforming const" >&5 $as_echo_n "checking for an ANSI C-conforming const... " >&6; } if ${ac_cv_c_const+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { #ifndef __cplusplus /* Ultrix mips cc rejects this sort of thing. */ typedef int charset[2]; const charset cs = { 0, 0 }; /* SunOS 4.1.1 cc rejects this. */ char const *const *pcpcc; char **ppc; /* NEC SVR4.0.2 mips cc rejects this. 
*/ struct point {int x, y;}; static struct point const zero = {0,0}; /* AIX XL C 1.02.0.0 rejects this. It does not let you subtract one const X* pointer from another in an arm of an if-expression whose if-part is not a constant expression */ const char *g = "string"; pcpcc = &g + (g ? g-g : 0); /* HPUX 7.0 cc rejects these. */ ++pcpcc; ppc = (char**) pcpcc; pcpcc = (char const *const *) ppc; { /* SCO 3.2v4 cc rejects this sort of thing. */ char tx; char *t = &tx; char const *s = 0 ? (char *) 0 : (char const *) 0; *t++ = 0; if (s) return 0; } { /* Someone thinks the Sun supposedly-ANSI compiler will reject this. */ int x[] = {25, 17}; const int *foo = &x[0]; ++foo; } { /* Sun SC1.0 ANSI compiler rejects this -- but not the above. */ typedef const int *iptr; iptr p = 0; ++p; } { /* AIX XL C 1.02.0.0 rejects this sort of thing, saying "k.c", line 2.27: 1506-025 (S) Operand must be a modifiable lvalue. */ struct s { int j; const int *ap[3]; } bx; struct s *b = &bx; b->j = 5; } { /* ULTRIX-32 V3.1 (Rev 9) vcc rejects this */ const int foo = 10; if (!foo) return 0; } return !cs[0] && !zero.x; #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_c_const=yes else ac_cv_c_const=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_const" >&5 $as_echo "$ac_cv_c_const" >&6; } if test $ac_cv_c_const = no; then $as_echo "#define const /**/" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for C/C++ restrict keyword" >&5 $as_echo_n "checking for C/C++ restrict keyword... " >&6; } if ${ac_cv_c_restrict+:} false; then : $as_echo_n "(cached) " >&6 else ac_cv_c_restrict=no # The order here caters to the fact that C++ does not require restrict. for ac_kw in __restrict __restrict__ _Restrict restrict; do cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ typedef int * int_ptr; int foo (int_ptr $ac_kw ip) { return ip[0]; } int main () { int s[1]; int * $ac_kw t = s; t[0] = 0; return foo(t) ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_c_restrict=$ac_kw fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext test "$ac_cv_c_restrict" != no && break done fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_restrict" >&5 $as_echo "$ac_cv_c_restrict" >&6; } case $ac_cv_c_restrict in restrict) ;; no) $as_echo "#define restrict /**/" >>confdefs.h ;; *) cat >>confdefs.h <<_ACEOF #define restrict $ac_cv_c_restrict _ACEOF ;; esac # The cast to long int works around a bug in the HP C Compiler # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. # This bug is HP SR number 8606223364. { $as_echo "$as_me:${as_lineno-$LINENO}: checking size of char" >&5 $as_echo_n "checking size of char... 
" >&6; } if ${ac_cv_sizeof_char+:} false; then : $as_echo_n "(cached) " >&6 else if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (char))" "ac_cv_sizeof_char" "$ac_includes_default"; then : else if test "$ac_cv_type_char" = yes; then { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error 77 "cannot compute sizeof (char) See \`config.log' for more details" "$LINENO" 5; } else ac_cv_sizeof_char=0 fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_char" >&5 $as_echo "$ac_cv_sizeof_char" >&6; } cat >>confdefs.h <<_ACEOF #define SIZEOF_CHAR $ac_cv_sizeof_char _ACEOF # The cast to long int works around a bug in the HP C Compiler # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. # This bug is HP SR number 8606223364. { $as_echo "$as_me:${as_lineno-$LINENO}: checking size of short int" >&5 $as_echo_n "checking size of short int... " >&6; } if ${ac_cv_sizeof_short_int+:} false; then : $as_echo_n "(cached) " >&6 else if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (short int))" "ac_cv_sizeof_short_int" "$ac_includes_default"; then : else if test "$ac_cv_type_short_int" = yes; then { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error 77 "cannot compute sizeof (short int) See \`config.log' for more details" "$LINENO" 5; } else ac_cv_sizeof_short_int=0 fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_short_int" >&5 $as_echo "$ac_cv_sizeof_short_int" >&6; } cat >>confdefs.h <<_ACEOF #define SIZEOF_SHORT_INT $ac_cv_sizeof_short_int _ACEOF # The cast to long int works around a bug in the HP C Compiler # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. # This bug is HP SR number 8606223364. { $as_echo "$as_me:${as_lineno-$LINENO}: checking size of int" >&5 $as_echo_n "checking size of int... " >&6; } if ${ac_cv_sizeof_int+:} false; then : $as_echo_n "(cached) " >&6 else if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (int))" "ac_cv_sizeof_int" "$ac_includes_default"; then : else if test "$ac_cv_type_int" = yes; then { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error 77 "cannot compute sizeof (int) See \`config.log' for more details" "$LINENO" 5; } else ac_cv_sizeof_int=0 fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_int" >&5 $as_echo "$ac_cv_sizeof_int" >&6; } cat >>confdefs.h <<_ACEOF #define SIZEOF_INT $ac_cv_sizeof_int _ACEOF # The cast to long int works around a bug in the HP C Compiler # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. # This bug is HP SR number 8606223364. { $as_echo "$as_me:${as_lineno-$LINENO}: checking size of long int" >&5 $as_echo_n "checking size of long int... 
" >&6; } if ${ac_cv_sizeof_long_int+:} false; then : $as_echo_n "(cached) " >&6 else if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (long int))" "ac_cv_sizeof_long_int" "$ac_includes_default"; then : else if test "$ac_cv_type_long_int" = yes; then { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error 77 "cannot compute sizeof (long int) See \`config.log' for more details" "$LINENO" 5; } else ac_cv_sizeof_long_int=0 fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_long_int" >&5 $as_echo "$ac_cv_sizeof_long_int" >&6; } cat >>confdefs.h <<_ACEOF #define SIZEOF_LONG_INT $ac_cv_sizeof_long_int _ACEOF # The cast to long int works around a bug in the HP C Compiler # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. # This bug is HP SR number 8606223364. { $as_echo "$as_me:${as_lineno-$LINENO}: checking size of long long int" >&5 $as_echo_n "checking size of long long int... " >&6; } if ${ac_cv_sizeof_long_long_int+:} false; then : $as_echo_n "(cached) " >&6 else if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (long long int))" "ac_cv_sizeof_long_long_int" "$ac_includes_default"; then : else if test "$ac_cv_type_long_long_int" = yes; then { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error 77 "cannot compute sizeof (long long int) See \`config.log' for more details" "$LINENO" 5; } else ac_cv_sizeof_long_long_int=0 fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_long_long_int" >&5 $as_echo "$ac_cv_sizeof_long_long_int" >&6; } cat >>confdefs.h <<_ACEOF #define SIZEOF_LONG_LONG_INT $ac_cv_sizeof_long_long_int _ACEOF # The cast to long int works around a bug in the HP C Compiler # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. # This bug is HP SR number 8606223364. { $as_echo "$as_me:${as_lineno-$LINENO}: checking size of int *" >&5 $as_echo_n "checking size of int *... " >&6; } if ${ac_cv_sizeof_int_p+:} false; then : $as_echo_n "(cached) " >&6 else if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (int *))" "ac_cv_sizeof_int_p" "$ac_includes_default"; then : else if test "$ac_cv_type_int_p" = yes; then { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error 77 "cannot compute sizeof (int *) See \`config.log' for more details" "$LINENO" 5; } else ac_cv_sizeof_int_p=0 fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_int_p" >&5 $as_echo "$ac_cv_sizeof_int_p" >&6; } cat >>confdefs.h <<_ACEOF #define SIZEOF_INT_P $ac_cv_sizeof_int_p _ACEOF { $as_echo "$as_me:${as_lineno-$LINENO}: checking for u_int type" >&5 $as_echo_n "checking for u_int type... " >&6; } if ${ac_cv_have_u_int+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include int main () { u_int a; a = 1; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_have_u_int="yes" else ac_cv_have_u_int="no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_have_u_int" >&5 $as_echo "$ac_cv_have_u_int" >&6; } if test "x$ac_cv_have_u_int" = "xyes" ; then $as_echo "#define HAVE_U_INT 1" >>confdefs.h have_u_int=1 fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for intmax_t type" >&5 $as_echo_n "checking for intmax_t type... " >&6; } if ${ac_cv_have_intmax_t+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { intmax_t a; a = 1; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_have_intmax_t="yes" else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { intmax_t a; a = 1; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_have_intmax_t="yes" else ac_cv_have_intmax_t="no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_have_intmax_t" >&5 $as_echo "$ac_cv_have_intmax_t" >&6; } if test "x$ac_cv_have_intmax_t" = "xyes" ; then $as_echo "#define HAVE_INTMAX_T 1" >>confdefs.h have_intmax_t=1 fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for u_intmax_t type" >&5 $as_echo_n "checking for u_intmax_t type... " >&6; } if ${ac_cv_have_u_intmax_t+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { u_intmax_t a; a = 1; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_have_u_intmax_t="yes" else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { u_intmax_t a; a = 1; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_have_u_intmax_t="yes" else ac_cv_have_u_intmax_t="no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_have_u_intmax_t" >&5 $as_echo "$ac_cv_have_u_intmax_t" >&6; } if test "x$ac_cv_have_u_intmax_t" = "xyes" ; then $as_echo "#define HAVE_U_INTMAX_T 1" >>confdefs.h have_u_intmax_t=1 fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for intXX_t types" >&5 $as_echo_n "checking for intXX_t types... " >&6; } if ${ac_cv_have_intxx_t+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { int8_t a; int16_t b; int32_t c; a = b = c = 1; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_have_intxx_t="yes" else ac_cv_have_intxx_t="no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_have_intxx_t" >&5 $as_echo "$ac_cv_have_intxx_t" >&6; } if test "x$ac_cv_have_intxx_t" = "xyes" ; then $as_echo "#define HAVE_INTXX_T 1" >>confdefs.h have_intxx_t=1 fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for int64_t type" >&5 $as_echo_n "checking for int64_t type... " >&6; } if ${ac_cv_have_int64_t+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include int main () { int64_t a; a = 1; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_have_int64_t="yes" else ac_cv_have_int64_t="no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_have_int64_t" >&5 $as_echo "$ac_cv_have_int64_t" >&6; } if test "x$ac_cv_have_int64_t" = "xyes" ; then $as_echo "#define HAVE_INT64_T 1" >>confdefs.h have_int64_t=1 fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for u_intXX_t types" >&5 $as_echo_n "checking for u_intXX_t types... " >&6; } if ${ac_cv_have_u_intxx_t+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { u_int8_t a; u_int16_t b; u_int32_t c; a = b = c = 1; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_have_u_intxx_t="yes" else ac_cv_have_u_intxx_t="no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_have_u_intxx_t" >&5 $as_echo "$ac_cv_have_u_intxx_t" >&6; } if test "x$ac_cv_have_u_intxx_t" = "xyes" ; then $as_echo "#define HAVE_U_INTXX_T 1" >>confdefs.h have_u_intxx_t=1 fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for u_int64_t types" >&5 $as_echo_n "checking for u_int64_t types... " >&6; } if ${ac_cv_have_u_int64_t+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { u_int64_t a; a = 1; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_have_u_int64_t="yes" else ac_cv_have_u_int64_t="no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_have_u_int64_t" >&5 $as_echo "$ac_cv_have_u_int64_t" >&6; } if test "x$ac_cv_have_u_int64_t" = "xyes" ; then $as_echo "#define HAVE_U_INT64_T 1" >>confdefs.h have_u_int64_t=1 fi if (test -z "$have_u_intxx_t" || test -z "$have_intxx_t" && \ test "x$ac_cv_header_sys_bitypes_h" = "xyes") then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for intXX_t and u_intXX_t types in sys/bitypes.h" >&5 $as_echo_n "checking for intXX_t and u_intXX_t types in sys/bitypes.h... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { int8_t a; int16_t b; int32_t c; u_int8_t e; u_int16_t f; u_int32_t g; a = b = c = e = f = g = 1; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : $as_echo "#define HAVE_U_INTXX_T 1" >>confdefs.h $as_echo "#define HAVE_INTXX_T 1" >>confdefs.h $as_echo "#define HAVE_SYS_BITYPES_H 1" >>confdefs.h { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi if test -z "$have_u_intxx_t" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for uintXX_t types" >&5 $as_echo_n "checking for uintXX_t types... " >&6; } if ${ac_cv_have_uintxx_t+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include int main () { uint8_t a; uint16_t b; uint32_t c; a = b = c = 1; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_have_uintxx_t="yes" else ac_cv_have_uintxx_t="no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_have_uintxx_t" >&5 $as_echo "$ac_cv_have_uintxx_t" >&6; } if test "x$ac_cv_have_uintxx_t" = "xyes" ; then $as_echo "#define HAVE_UINTXX_T 1" >>confdefs.h fi fi if (test -z "$have_u_int64_t" || test -z "$have_int64_t" && \ test "x$ac_cv_header_sys_bitypes_h" = "xyes") then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for int64_t and u_int64_t types in sys/bitypes.h" >&5 $as_echo_n "checking for int64_t and u_int64_t types in sys/bitypes.h... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <sys/bitypes.h> int main () { int64_t a; u_int64_t b; a = b = 1; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : $as_echo "#define HAVE_U_INT64_T 1" >>confdefs.h $as_echo "#define HAVE_INT64_T 1" >>confdefs.h { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi if (test -z "$have_uintxx_t" && \ test "x$ac_cv_header_sys_bitypes_h" = "xyes") then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for uintXX_t types in sys/bitypes.h" >&5 $as_echo_n "checking for uintXX_t types in sys/bitypes.h... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <sys/bitypes.h> int main () { uint8_t a; uint16_t b; uint32_t c; a = b = c = 1; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : $as_echo "#define HAVE_UINTXX_T 1" >>confdefs.h { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi for ac_func in \ fork \ getcwd \ gethostname \ getpid \ gettimeofday \ setpgid \ setpgrp \ setsid \ signal \ strerror \ strncmp \ strncpy \ vfprintf \ do : as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" if eval test \"x\$"$as_ac_var"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF else echo 'configure: cannot find needed function.'; exit 1 fi done ac_fn_c_check_decl "$LINENO" "F_CLOSEM" "ac_cv_have_decl_F_CLOSEM" "#include <fcntl.h> " if test "x$ac_cv_have_decl_F_CLOSEM" = xyes; then : $as_echo "#define HAVE_FCNTL_F_CLOSEM 1" >>confdefs.h fi ac_fn_c_check_decl "$LINENO" "FS_NODUMP_FL" "ac_cv_have_decl_FS_NODUMP_FL" "#include <linux/fs.h> " if test "x$ac_cv_have_decl_FS_NODUMP_FL" = xyes; then : $as_echo "#define HAVE_NODUMP 1" >>confdefs.h fi ac_fn_c_check_decl "$LINENO" "FS_APPEND_FL" "ac_cv_have_decl_FS_APPEND_FL" "#include <linux/fs.h> " if test "x$ac_cv_have_decl_FS_APPEND_FL" = xyes; then : $as_echo "#define HAVE_APPEND_FL 1" >>confdefs.h fi ac_fn_c_check_decl "$LINENO" "FS_IMMUTABLE_FL" "ac_cv_have_decl_FS_IMMUTABLE_FL" "#include <linux/fs.h> " if test "x$ac_cv_have_decl_FS_IMMUTABLE_FL" = xyes; then : $as_echo "#define HAVE_IMMUTABLE_FL 1" >>confdefs.h fi ac_fn_c_check_decl "$LINENO" "FS_IOC_GETFLAGS" "ac_cv_have_decl_FS_IOC_GETFLAGS" "#include <linux/fs.h> " if test "x$ac_cv_have_decl_FS_IOC_GETFLAGS" = xyes; then : $as_echo "#define HAVE_FS_IOC_GETFLAGS 1" >>confdefs.h fi ac_fn_c_check_decl "$LINENO" "FS_IOC_SETFLAGS" "ac_cv_have_decl_FS_IOC_SETFLAGS"
"#include " if test "x$ac_cv_have_decl_FS_IOC_SETFLAGS" = xyes; then : $as_echo "#define HAVE_FS_IOC_SETFLAGS 1" >>confdefs.h fi ac_fn_c_check_decl "$LINENO" "F_SETLK" "ac_cv_have_decl_F_SETLK" "#include " if test "x$ac_cv_have_decl_F_SETLK" = xyes; then : $as_echo "#define HAVE_FCNTL_LOCK 1" >>confdefs.h fi ac_fn_c_check_func "$LINENO" "closefrom" "ac_cv_func_closefrom" if test "x$ac_cv_func_closefrom" = xyes; then : $as_echo "#define HAVE_CLOSEFROM 1" >>confdefs.h fi for ac_func in getpagesize do : ac_fn_c_check_func "$LINENO" "getpagesize" "ac_cv_func_getpagesize" if test "x$ac_cv_func_getpagesize" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_GETPAGESIZE 1 _ACEOF $as_echo "#define HAVE_GETPAGESIZE 1" >>confdefs.h fi done for ac_func in malloc_trim do : ac_fn_c_check_func "$LINENO" "malloc_trim" "ac_cv_func_malloc_trim" if test "x$ac_cv_func_malloc_trim" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_MALLOC_TRIM 1 _ACEOF $as_echo "#define HAVE_MALLOC_TRIM 1" >>confdefs.h fi done for ac_func in fchdir do : ac_fn_c_check_func "$LINENO" "fchdir" "ac_cv_func_fchdir" if test "x$ac_cv_func_fchdir" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_FCHDIR 1 _ACEOF $as_echo "#define HAVE_FCHDIR 1" >>confdefs.h fi done for ac_func in strtoll do : ac_fn_c_check_func "$LINENO" "strtoll" "ac_cv_func_strtoll" if test "x$ac_cv_func_strtoll" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_STRTOLL 1 _ACEOF $as_echo "#define HAVE_STRTOLL 1" >>confdefs.h fi done for ac_func in posix_fadvise do : ac_fn_c_check_func "$LINENO" "posix_fadvise" "ac_cv_func_posix_fadvise" if test "x$ac_cv_func_posix_fadvise" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_POSIX_FADVISE 1 _ACEOF fi done for ac_func in posix_fallocate do : ac_fn_c_check_func "$LINENO" "posix_fallocate" "ac_cv_func_posix_fallocate" if test "x$ac_cv_func_posix_fallocate" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_POSIX_FALLOCATE 1 _ACEOF fi done for ac_func in fdatasync do : ac_fn_c_check_func "$LINENO" "fdatasync" "ac_cv_func_fdatasync" if test "x$ac_cv_func_fdatasync" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_FDATASYNC 1 _ACEOF fi done for ac_func in realpath do : ac_fn_c_check_func "$LINENO" "realpath" "ac_cv_func_realpath" if test "x$ac_cv_func_realpath" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_REALPATH 1 _ACEOF fi done for ac_func in getrlimit do : ac_fn_c_check_func "$LINENO" "getrlimit" "ac_cv_func_getrlimit" if test "x$ac_cv_func_getrlimit" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_GETRLIMIT 1 _ACEOF fi done for ac_func in getpwent_r do : ac_fn_c_check_func "$LINENO" "getpwent_r" "ac_cv_func_getpwent_r" if test "x$ac_cv_func_getpwent_r" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_GETPWENT_R 1 _ACEOF fi done for ac_func in chflags do : ac_fn_c_check_func "$LINENO" "chflags" "ac_cv_func_chflags" if test "x$ac_cv_func_chflags" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_CHFLAGS 1 _ACEOF fi done cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int f(int b) { return __builtin_bswap32(b); } int main () { int a = f(10); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : $as_echo "#define HAVE_BSWAP32 1" >>confdefs.h fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext for ac_func in snprintf vsnprintf gethostid fseeko do : as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" if eval test \"x\$"$as_ac_var"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking for va_copy" >&5 $as_echo_n "checking for va_copy... " >&6; } if ${ba_cv_va_copy+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include void use_va_copy(va_list args){va_list args2; va_copy(args2,args); va_end(args2);} void call_use_va_copy(int junk,...){va_list args; va_start(args,junk); use_va_copy(args); va_end(args);} int main () { call_use_va_copy(1,2,3) ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ba_cv_va_copy=yes else ba_cv_va_copy=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ba_cv_va_copy" >&5 $as_echo "$ba_cv_va_copy" >&6; } test $ba_cv_va_copy = yes && $as_echo "#define HAVE_VA_COPY 1" >>confdefs.h for ac_func in localtime_r readdir_r strerror_r gethostbyname_r do : as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" if eval test \"x\$"$as_ac_var"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF fi done # If resolver functions are not in libc check for -lnsl or -lresolv. ac_fn_c_check_func "$LINENO" "gethostbyname_r" "ac_cv_func_gethostbyname_r" if test "x$ac_cv_func_gethostbyname_r" = xyes; then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: using libc's resolver" >&5 $as_echo "using libc's resolver" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for gethostbyname_r in -lnsl" >&5 $as_echo_n "checking for gethostbyname_r in -lnsl... " >&6; } if ${ac_cv_lib_nsl_gethostbyname_r+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lnsl $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char gethostbyname_r (); int main () { return gethostbyname_r (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_nsl_gethostbyname_r=yes else ac_cv_lib_nsl_gethostbyname_r=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_nsl_gethostbyname_r" >&5 $as_echo "$ac_cv_lib_nsl_gethostbyname_r" >&6; } if test "x$ac_cv_lib_nsl_gethostbyname_r" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LIBNSL 1 _ACEOF LIBS="-lnsl $LIBS" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for gethostbyname_r in -lresolv" >&5 $as_echo_n "checking for gethostbyname_r in -lresolv... " >&6; } if ${ac_cv_lib_resolv_gethostbyname_r+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lresolv $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char gethostbyname_r (); int main () { return gethostbyname_r (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_resolv_gethostbyname_r=yes else ac_cv_lib_resolv_gethostbyname_r=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_resolv_gethostbyname_r" >&5 $as_echo "$ac_cv_lib_resolv_gethostbyname_r" >&6; } if test "x$ac_cv_lib_resolv_gethostbyname_r" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LIBRESOLV 1 _ACEOF LIBS="-lresolv $LIBS" fi fi for ac_func in inet_pton do : ac_fn_c_check_func "$LINENO" "inet_pton" "ac_cv_func_inet_pton" if test "x$ac_cv_func_inet_pton" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_INET_PTON 1 _ACEOF $as_echo "#define HAVE_INET_PTON 1" >>confdefs.h fi done for ac_func in inet_ntop do : ac_fn_c_check_func "$LINENO" "inet_ntop" "ac_cv_func_inet_ntop" if test "x$ac_cv_func_inet_ntop" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_INET_NTOP 1 _ACEOF $as_echo "#define HAVE_INET_NTOP 1" >>confdefs.h fi done for ac_func in gethostbyname2 do : ac_fn_c_check_func "$LINENO" "gethostbyname2" "ac_cv_func_gethostbyname2" if test "x$ac_cv_func_gethostbyname2" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_GETHOSTBYNAME2 1 _ACEOF $as_echo "#define HAVE_GETHOSTBYNAME2 1" >>confdefs.h fi done for ac_func in getnameinfo do : ac_fn_c_check_func "$LINENO" "getnameinfo" "ac_cv_func_getnameinfo" if test "x$ac_cv_func_getnameinfo" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_GETNAMEINFO 1 _ACEOF $as_echo "#define HAVE_GETNAMEINFO 1" >>confdefs.h fi done for ac_func in poll do : ac_fn_c_check_func "$LINENO" "poll" "ac_cv_func_poll" if test "x$ac_cv_func_poll" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_POLL 1 _ACEOF $as_echo "#define HAVE_POLL 1" >>confdefs.h fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking for struct sockaddr has a sa_len field" >&5 $as_echo_n "checking for struct sockaddr has a sa_len field... " >&6; } if ${ac_cv_struct_sockaddr_sa_len+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { struct sockaddr s; s.sa_len; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_struct_sockaddr_sa_len=yes else ac_cv_struct_sockaddr_sa_len=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_struct_sockaddr_sa_len" >&5 $as_echo "$ac_cv_struct_sockaddr_sa_len" >&6; } if test $ac_cv_struct_sockaddr_sa_len = yes; then $as_echo "#define HAVE_SA_LEN 1" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for struct sockaddr_storage" >&5 $as_echo_n "checking for struct sockaddr_storage... " >&6; } if ${ac_cv_struct_sockaddr_storage+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include int main () { struct sockaddr_storage s; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_struct_sockaddr_storage=yes else ac_cv_struct_sockaddr_storage=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_struct_sockaddr_storage" >&5 $as_echo "$ac_cv_struct_sockaddr_storage" >&6; } if test $ac_cv_struct_sockaddr_storage = yes; then $as_echo "#define HAVE_SOCKADDR_STORAGE 1" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for working getaddrinfo" >&5 $as_echo_n "checking for working getaddrinfo... " >&6; } if ${ac_cv_working_getaddrinfo+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : ac_cv_working_getaddrinfo="yes" else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include #include #include int main(void) { struct addrinfo hints, *ai; int error; memset(&hints, 0, sizeof(hints)); hints.ai_family = AF_UNSPEC; hints.ai_socktype = SOCK_STREAM; error = getaddrinfo("127.0.0.1", NULL, &hints, &ai); if (error) { return(1); } if (ai->ai_addr->sa_family != AF_INET) { return(1); } return(0); } _ACEOF if ac_fn_c_try_run "$LINENO"; then : ac_cv_working_getaddrinfo="yes" else ac_cv_working_getaddrinfo="no" fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_working_getaddrinfo" >&5 $as_echo "$ac_cv_working_getaddrinfo" >&6; } ac_fn_c_check_func "$LINENO" "gai_strerror" "ac_cv_func_gai_strerror" if test "x$ac_cv_func_gai_strerror" = xyes; then : $as_echo "#define HAVE_GAI_STRERROR 1" >>confdefs.h fi if test "$ac_cv_working_getaddrinfo" = "yes"; then if test "$ac_cv_func_gai_strerror" != "yes"; then ac_cv_working_getaddrinfo="no" else $as_echo "#define HAVE_GETADDRINFO 1" >>confdefs.h fi fi for ac_func in strftime do : ac_fn_c_check_func "$LINENO" "strftime" "ac_cv_func_strftime" if test "x$ac_cv_func_strftime" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_STRFTIME 1 _ACEOF else # strftime is in -lintl on SCO UNIX. { $as_echo "$as_me:${as_lineno-$LINENO}: checking for strftime in -lintl" >&5 $as_echo_n "checking for strftime in -lintl... " >&6; } if ${ac_cv_lib_intl_strftime+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lintl $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char strftime (); int main () { return strftime (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_intl_strftime=yes else ac_cv_lib_intl_strftime=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_intl_strftime" >&5 $as_echo "$ac_cv_lib_intl_strftime" >&6; } if test "x$ac_cv_lib_intl_strftime" = xyes; then : $as_echo "#define HAVE_STRFTIME 1" >>confdefs.h LIBS="-lintl $LIBS" fi fi done for ac_func in vprintf do : ac_fn_c_check_func "$LINENO" "vprintf" "ac_cv_func_vprintf" if test "x$ac_cv_func_vprintf" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_VPRINTF 1 _ACEOF ac_fn_c_check_func "$LINENO" "_doprnt" "ac_cv_func__doprnt" if test "x$ac_cv_func__doprnt" = xyes; then : $as_echo "#define HAVE_DOPRNT 1" >>confdefs.h fi fi done # The Ultrix 4.2 mips builtin alloca declared by alloca.h only works # for constant arguments. Useless! { $as_echo "$as_me:${as_lineno-$LINENO}: checking for working alloca.h" >&5 $as_echo_n "checking for working alloca.h... " >&6; } if ${ac_cv_working_alloca_h+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { char *p = (char *) alloca (2 * sizeof (int)); if (p) return 0; ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_working_alloca_h=yes else ac_cv_working_alloca_h=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_working_alloca_h" >&5 $as_echo "$ac_cv_working_alloca_h" >&6; } if test $ac_cv_working_alloca_h = yes; then $as_echo "#define HAVE_ALLOCA_H 1" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for alloca" >&5 $as_echo_n "checking for alloca... " >&6; } if ${ac_cv_func_alloca_works+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef __GNUC__ # define alloca __builtin_alloca #else # ifdef _MSC_VER # include # define alloca _alloca # else # ifdef HAVE_ALLOCA_H # include # else # ifdef _AIX #pragma alloca # else # ifndef alloca /* predefined by HP cc +Olibcalls */ void *alloca (size_t); # endif # endif # endif # endif #endif int main () { char *p = (char *) alloca (1); if (p) return 0; ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_func_alloca_works=yes else ac_cv_func_alloca_works=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_alloca_works" >&5 $as_echo "$ac_cv_func_alloca_works" >&6; } if test $ac_cv_func_alloca_works = yes; then $as_echo "#define HAVE_ALLOCA 1" >>confdefs.h else # The SVR3 libPW and SVR4 libucb both contain incompatible functions # that cause trouble. Some versions do not even contain alloca or # contain a buggy version. If you still want to use their alloca, # use ar to extract alloca.o from them instead of compiling alloca.c. ALLOCA=\${LIBOBJDIR}alloca.$ac_objext $as_echo "#define C_ALLOCA 1" >>confdefs.h { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether \`alloca.c' needs Cray hooks" >&5 $as_echo_n "checking whether \`alloca.c' needs Cray hooks... " >&6; } if ${ac_cv_os_cray+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #if defined CRAY && ! 
defined CRAY2 webecray #else wenotbecray #endif _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "webecray" >/dev/null 2>&1; then : ac_cv_os_cray=yes else ac_cv_os_cray=no fi rm -f conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_os_cray" >&5 $as_echo "$ac_cv_os_cray" >&6; } if test $ac_cv_os_cray = yes; then for ac_func in _getb67 GETB67 getb67; do as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" if eval test \"x\$"$as_ac_var"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define CRAY_STACKSEG_END $ac_func _ACEOF break fi done fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking stack direction for C alloca" >&5 $as_echo_n "checking stack direction for C alloca... " >&6; } if ${ac_cv_c_stack_direction+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : ac_cv_c_stack_direction=0 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $ac_includes_default int find_stack_direction (int *addr, int depth) { int dir, dummy = 0; if (! addr) addr = &dummy; *addr = addr < &dummy ? 1 : addr == &dummy ? 0 : -1; dir = depth ? find_stack_direction (addr, depth - 1) : 0; return dir + dummy; } int main (int argc, char **argv) { return find_stack_direction (0, argc + !argv + 20) < 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : ac_cv_c_stack_direction=1 else ac_cv_c_stack_direction=-1 fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_stack_direction" >&5 $as_echo "$ac_cv_c_stack_direction" >&6; } cat >>confdefs.h <<_ACEOF #define STACK_DIRECTION $ac_cv_c_stack_direction _ACEOF fi # getmntent is in the standard C library on UNICOS, in -lsun on Irix 4, # -lseq on Dynix/PTX, -lgen on Unixware. { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing getmntent" >&5 $as_echo_n "checking for library containing getmntent... " >&6; } if ${ac_cv_search_getmntent+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char getmntent (); int main () { return getmntent (); ; return 0; } _ACEOF for ac_lib in '' sun seq gen; do if test -z "$ac_lib"; then ac_res="none required" else ac_res=-l$ac_lib LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : ac_cv_search_getmntent=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext if ${ac_cv_search_getmntent+:} false; then : break fi done if ${ac_cv_search_getmntent+:} false; then : else ac_cv_search_getmntent=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_getmntent" >&5 $as_echo "$ac_cv_search_getmntent" >&6; } ac_res=$ac_cv_search_getmntent if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" ac_cv_func_getmntent=yes $as_echo "#define HAVE_GETMNTENT 1" >>confdefs.h else ac_cv_func_getmntent=no fi for ac_func in getmntinfo do : ac_fn_c_check_func "$LINENO" "getmntinfo" "ac_cv_func_getmntinfo" if test "x$ac_cv_func_getmntinfo" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_GETMNTINFO 1 _ACEOF $as_echo "#define HAVE_GETMNTINFO 1" >>confdefs.h fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether closedir returns void" >&5 $as_echo_n "checking whether closedir returns void... " >&6; } if ${ac_cv_func_closedir_void+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : ac_cv_func_closedir_void=yes else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $ac_includes_default #include <$ac_header_dirent> #ifndef __cplusplus int closedir (); #endif int main () { return closedir (opendir (".")) != 0; ; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : ac_cv_func_closedir_void=no else ac_cv_func_closedir_void=yes fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_closedir_void" >&5 $as_echo "$ac_cv_func_closedir_void" >&6; } if test $ac_cv_func_closedir_void = yes; then $as_echo "#define CLOSEDIR_VOID 1" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether setpgrp takes no argument" >&5 $as_echo_n "checking whether setpgrp takes no argument... " >&6; } if ${ac_cv_func_setpgrp_void+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : as_fn_error $? "cannot check setpgrp when cross compiling" "$LINENO" 5 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $ac_includes_default int main () { /* If this system has a BSD-style setpgrp which takes arguments, setpgrp(1, 1) will fail with ESRCH and return -1, in that case exit successfully. */ return setpgrp (1,1) != -1; ; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : ac_cv_func_setpgrp_void=no else ac_cv_func_setpgrp_void=yes fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_setpgrp_void" >&5 $as_echo "$ac_cv_func_setpgrp_void" >&6; } if test $ac_cv_func_setpgrp_void = yes; then $as_echo "#define SETPGRP_VOID 1" >>confdefs.h fi # AC_FUNC_FNMATCH dnl use local version { $as_echo "$as_me:${as_lineno-$LINENO}: checking for gettext in -lintl" >&5 $as_echo_n "checking for gettext in -lintl... 
" >&6; } if ${ac_cv_lib_intl_gettext+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lintl $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char gettext (); int main () { return gettext (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_intl_gettext=yes else ac_cv_lib_intl_gettext=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_intl_gettext" >&5 $as_echo "$ac_cv_lib_intl_gettext" >&6; } if test "x$ac_cv_lib_intl_gettext" = xyes; then : LIBS="$LIBS -lintl" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for getpwnam in -lsun" >&5 $as_echo_n "checking for getpwnam in -lsun... " >&6; } if ${ac_cv_lib_sun_getpwnam+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lsun $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char getpwnam (); int main () { return getpwnam (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_sun_getpwnam=yes else ac_cv_lib_sun_getpwnam=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_sun_getpwnam" >&5 $as_echo "$ac_cv_lib_sun_getpwnam" >&6; } if test "x$ac_cv_lib_sun_getpwnam" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LIBSUN 1 _ACEOF LIBS="-lsun $LIBS" fi for ac_header in zlib.h do : ac_fn_c_check_header_mongrel "$LINENO" "zlib.h" "ac_cv_header_zlib_h" "$ac_includes_default" if test "x$ac_cv_header_zlib_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_ZLIB_H 1 _ACEOF fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking for deflate in -lz" >&5 $as_echo_n "checking for deflate in -lz... " >&6; } if ${ac_cv_lib_z_deflate+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lz $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char deflate (); int main () { return deflate (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_z_deflate=yes else ac_cv_lib_z_deflate=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_z_deflate" >&5 $as_echo "$ac_cv_lib_z_deflate" >&6; } if test "x$ac_cv_lib_z_deflate" = xyes; then : ZLIBS="-lz" fi have_zlib=no if test x$ZLIBS = x-lz; then $as_echo "#define HAVE_LIBZ 1" >>confdefs.h have_zlib=yes fi AFS_CFLAGS="" AFS_LIBS="" support_afs=auto # Check whether --enable-afs was given. 
if test "${enable_afs+set}" = set; then : enableval=$enable_afs; if test x$enableval = xyes; then support_afs=yes elif test x$enableval = xno; then support_afs=no fi fi have_afs=no if test x$support_afs = xyes -o x$support_afs = xauto; then # Check whether --with-afsdir was given. if test "${with_afsdir+set}" = set; then : withval=$with_afsdir; with_afsdir=$withval fi if test x$with_afsdir = x; then for root in /usr /usr/local; do if test -d ${root}/include/afs/ ; then with_afsdir=${root} break fi if test -d ${root}/include/openafs/afs/ ; then with_afsdir=${root} break fi done fi if test -d ${with_afsdir}/include/afs/ ; then AFS_CFLAGS="-I${with_afsdir}/include" else if test -d ${with_afsdir}/include/openafs/afs/ ; then AFS_CFLAGS="-I${with_afsdir}/include/openafs" fi fi saved_CFLAGS="${CFLAGS}" saved_CPPFLAGS="${CPPFLAGS}" CFLAGS="${AFS_CFLAGS} ${saved_CFLAGS}" CPPFLAGS="${AFS_CFLAGS} ${saved_CPPFLAGS}" for ac_header in afs/afsint.h do : ac_fn_c_check_header_mongrel "$LINENO" "afs/afsint.h" "ac_cv_header_afs_afsint_h" "$ac_includes_default" if test "x$ac_cv_header_afs_afsint_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_AFS_AFSINT_H 1 _ACEOF fi done cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : $as_echo "#define HAVE_AFS_VENUS_H 1" >>confdefs.h fi rm -f conftest.err conftest.i conftest.$ac_ext CFLAGS="${saved_CFLAGS}" CPPFLAGS="${saved_CPPFLAGS}" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for pioctl in AFS libsys" >&5 $as_echo_n "checking for pioctl in AFS libsys... " >&6; } for dir in ${with_afsdir}/lib \ ${with_afsdir}/lib/afs \ ${with_afsdir}/lib/openafs \ ${with_afsdir}/lib64 \ ${with_afsdir}/lib64/afs \ ${with_afsdir}/lib64/openafs do for arch_type in .a .so do A=`test -f ${dir}/libsys${arch_type} && nm ${dir}/libsys${arch_type} 2>/dev/null | grep pioctl` pkg=$? if test $pkg = 0; then have_afs=yes AFS_LIBS="-L${dir} -lsys -lrx -llwp ${dir}/util${arch_type}" break fi done done if test $have_afs = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test x$support_afs = xyes -a $have_afs != yes; then as_fn_error $? "afs support explicitly enabled but no supported afs implementation found, please either load the afs libraries or rerun configure without --enable-afs" "$LINENO" 5 else if test $have_afs = yes; then $as_echo "#define HAVE_AFS 1" >>confdefs.h $as_echo "#define HAVE_AFS_ACL 1" >>confdefs.h fi fi fi # Check whether --enable-lzo was given. if test "${enable_lzo+set}" = set; then : enableval=$enable_lzo; if test x$enableval = xno; then support_lzo=no fi fi LZO_INC= LZO_LIBS= LZO_LDFLAGS= have_lzo="no" if test x$support_lzo = xyes; then # Check whether --with-lzo was given. if test "${with_lzo+set}" = set; then : withval=$with_lzo; case "$with_lzo" in no) : ;; yes|*) if test -f ${with_lzo}/include/lzo/lzoconf.h; then LZO_INC="-I${with_lzo}/include" LZO_LDFLAGS="-L${with_lzo}/lib" with_lzo="${with_lzo}/include" else with_lzo="/usr/include" fi as_ac_Header=`$as_echo "ac_cv_header_${with_lzo}/lzo/lzoconf.h" | $as_tr_sh` ac_fn_c_check_header_mongrel "$LINENO" "${with_lzo}/lzo/lzoconf.h" "$as_ac_Header" "$ac_includes_default" if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : $as_echo "#define HAVE_LZO 1" >>confdefs.h LZO_LIBS="${LZO_LDFLAGS} -llzo2" have_lzo="yes" else echo " " echo "lzoconf.h not found. lzo turned off ..." 
echo " " fi ;; esac else ac_fn_c_check_header_mongrel "$LINENO" "lzo/lzoconf.h" "ac_cv_header_lzo_lzoconf_h" "$ac_includes_default" if test "x$ac_cv_header_lzo_lzoconf_h" = xyes; then : ac_fn_c_check_header_mongrel "$LINENO" "lzo/lzo1x.h" "ac_cv_header_lzo_lzo1x_h" "$ac_includes_default" if test "x$ac_cv_header_lzo_lzo1x_h" = xyes; then : { $as_echo "$as_me:${as_lineno-$LINENO}: checking for lzo1x_1_compress in -llzo2" >&5 $as_echo_n "checking for lzo1x_1_compress in -llzo2... " >&6; } if ${ac_cv_lib_lzo2_lzo1x_1_compress+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-llzo2 $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char lzo1x_1_compress (); int main () { return lzo1x_1_compress (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_lzo2_lzo1x_1_compress=yes else ac_cv_lib_lzo2_lzo1x_1_compress=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_lzo2_lzo1x_1_compress" >&5 $as_echo "$ac_cv_lib_lzo2_lzo1x_1_compress" >&6; } if test "x$ac_cv_lib_lzo2_lzo1x_1_compress" = xyes; then : LZO_LIBS="-llzo2" $as_echo "#define HAVE_LZO 1" >>confdefs.h have_lzo=yes fi fi fi fi fi acsls_support=yes have_acsls="no" # Check whether --enable-acsls was given. if test "${enable_acsls+set}" = set; then : enableval=$enable_acsls; if test x$enableval = xno; then acsls_support=no fi fi ACSLS_LIBDIR= ACSLS_OS_DEFINE= ACSLS_BUILD_TARGET= support_acl=auto # Check whether --enable-acl was given. if test "${enable_acl+set}" = set; then : enableval=$enable_acl; if test x$enableval = xyes; then support_acl=yes elif test x$enableval = xno; then support_acl=no fi fi have_acl=no have_extended_acl=no if test x$support_acl = xyes -o x$support_acl = xauto; then ac_fn_c_check_header_mongrel "$LINENO" "sys/acl.h" "ac_cv_header_sys_acl_h" "$ac_includes_default" if test "x$ac_cv_header_sys_acl_h" = xyes; then : $as_echo "#define HAVE_SYS_ACL_H 1" >>confdefs.h fi ac_fn_c_check_func "$LINENO" "acl_get_file" "ac_cv_func_acl_get_file" if test "x$ac_cv_func_acl_get_file" = xyes; then : have_acl=yes fi if test $have_acl = no; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for acl_get_file in -lacl" >&5 $as_echo_n "checking for acl_get_file in -lacl... " >&6; } if ${ac_cv_lib_acl_acl_get_file+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lacl $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char acl_get_file (); int main () { return acl_get_file (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_acl_acl_get_file=yes else ac_cv_lib_acl_acl_get_file=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_acl_acl_get_file" >&5 $as_echo "$ac_cv_lib_acl_acl_get_file" >&6; } if test "x$ac_cv_lib_acl_acl_get_file" = xyes; then : have_acl=yes if test $have_afs = yes; then if test -d /usr/lib64/; then FDLIBS="-L/usr/lib64 -lacl $FDLIBS" else FDLIBS="-L/usr/lib -lacl $FDLIBS" fi else FDLIBS="-lacl $FDLIBS" fi fi fi if test $have_acl = no -a x${HAVE_OSF1_OS_TRUE} = x; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for acl_get_file in -lpacl" >&5 $as_echo_n "checking for acl_get_file in -lpacl... " >&6; } if ${ac_cv_lib_pacl_acl_get_file+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpacl $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char acl_get_file (); int main () { return acl_get_file (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_pacl_acl_get_file=yes else ac_cv_lib_pacl_acl_get_file=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_pacl_acl_get_file" >&5 $as_echo "$ac_cv_lib_pacl_acl_get_file" >&6; } if test "x$ac_cv_lib_pacl_acl_get_file" = xyes; then : have_acl=yes FDLIBS="-lpacl $FDLIBS" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ACL_TYPE_DEFAULT_DIR in acl.h include file" >&5 $as_echo_n "checking for ACL_TYPE_DEFAULT_DIR in acl.h include file... " >&6; } grep ACL_TYPE_DEFAULT_DIR /usr/include/sys/acl.h > /dev/null 2>&1 if test $? = 0; then $as_echo "#define HAVE_ACL_TYPE_DEFAULT_DIR 1" >>confdefs.h { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test $have_acl = yes -a x${HAVE_DARWIN_OS_TRUE} = x; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ACL_TYPE_EXTENDED in acl.h include file" >&5 $as_echo_n "checking for ACL_TYPE_EXTENDED in acl.h include file... " >&6; } grep ACL_TYPE_EXTENDED /usr/include/sys/acl.h > /dev/null 2>&1 if test $? = 0; then $as_echo "#define HAVE_ACL_TYPE_EXTENDED 1" >>confdefs.h { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test $have_acl = yes -a \ x${HAVE_FREEBSD_OS_TRUE} = x; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ACL_TYPE_NFS4 in acl.h include file" >&5 $as_echo_n "checking for ACL_TYPE_NFS4 in acl.h include file... " >&6; } grep ACL_TYPE_NFS4 /usr/include/sys/acl.h > /dev/null 2>&1 if test $? 
= 0; then $as_echo "#define HAVE_ACL_TYPE_NFS4 1" >>confdefs.h { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test $have_acl = no -a \ x${HAVE_SUN_OS_TRUE} = x; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for acltotext in -lsec" >&5 $as_echo_n "checking for acltotext in -lsec... " >&6; } if ${ac_cv_lib_sec_acltotext+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lsec $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char acltotext (); int main () { return acltotext (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_sec_acltotext=yes else ac_cv_lib_sec_acltotext=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_sec_acltotext" >&5 $as_echo "$ac_cv_lib_sec_acltotext" >&6; } if test "x$ac_cv_lib_sec_acltotext" = xyes; then : have_acl=yes FDLIBS="-lsec $FDLIBS" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for acl_totext in -lsec" >&5 $as_echo_n "checking for acl_totext in -lsec... " >&6; } if ${ac_cv_lib_sec_acl_totext+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lsec $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char acl_totext (); int main () { return acl_totext (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_sec_acl_totext=yes else ac_cv_lib_sec_acl_totext=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_sec_acl_totext" >&5 $as_echo "$ac_cv_lib_sec_acl_totext" >&6; } if test "x$ac_cv_lib_sec_acl_totext" = xyes; then : have_extended_acl=yes fi fi fi if test $have_acl = no -a \ x${HAVE_AIX_OS_TRUE} = x; then ac_fn_c_check_func "$LINENO" "acl_get" "ac_cv_func_acl_get" if test "x$ac_cv_func_acl_get" = xyes; then : have_acl=yes ac_fn_c_check_func "$LINENO" "aclx_get" "ac_cv_func_aclx_get" if test "x$ac_cv_func_aclx_get" = xyes; then : have_extended_acl=yes fi fi fi ACLOBJS= if test x$support_acl = xyes -a $have_acl != yes; then as_fn_error $? "acl support explicitly enabled but no supported acl implementation found, please either load the acl libraries or rerun configure without --enable-acl" "$LINENO" 5 else if test $have_acl = yes; then $as_echo "#define HAVE_ACL 1" >>confdefs.h if test x${HAVE_LINUX_OS_TRUE} = x; then ACLOBJS="bacl.c bacl_linux.c" fi if test x${HAVE_SUN_OS_TRUE} = x; then ACLOBJS="bacl.c bacl_solaris.c" fi if test x${HAVE_FREEBSD_OS_TRUE} = x; then ACLOBJS="bacl.c bacl_freebsd.c" fi if test x${HAVE_DARWIN_OS_TRUE} = x; then ACLOBJS="bacl.c bacl_osx.c" fi fi if test $have_extended_acl = yes; then $as_echo "#define HAVE_EXTENDED_ACL 1" >>confdefs.h fi fi fi support_xattr=auto # Check whether --enable-xattr was given. 
if test "${enable_xattr+set}" = set; then : enableval=$enable_xattr; if test x$enableval = xyes; then support_xattr=yes elif test x$enableval = xno; then support_xattr=no fi fi have_xattr=no if test x$support_xattr = xyes -o x$support_xattr = xauto; then if test x${HAVE_FREEBSD_OS_TRUE} = x -o \ x${HAVE_NETBSD_OS_TRUE} = x -o \ x${HAVE_OPENBSD_OS_TRUE} = x; then ac_fn_c_check_header_mongrel "$LINENO" "sys/extattr.h" "ac_cv_header_sys_extattr_h" "$ac_includes_default" if test "x$ac_cv_header_sys_extattr_h" = xyes; then : $as_echo "#define HAVE_SYS_EXTATTR_H 1" >>confdefs.h fi ac_fn_c_check_header_mongrel "$LINENO" "libutil.h" "ac_cv_header_libutil_h" "$ac_includes_default" if test "x$ac_cv_header_libutil_h" = xyes; then : $as_echo "#define HAVE_LIBUTIL_H 1" >>confdefs.h fi for ac_func in extattr_get_link extattr_set_link extattr_list_link do : as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" if eval test \"x\$"$as_ac_var"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF have_xattr=yes $as_echo "#define HAVE_EXTATTR_GET_LINK 1" >>confdefs.h $as_echo "#define HAVE_EXTATTR_SET_LINK 1" >>confdefs.h $as_echo "#define HAVE_EXTATTR_LIST_LINK 1" >>confdefs.h fi done if test $have_xattr = no; then for ac_func in extattr_get_file extattr_set_file extattr_list_file do : as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" if eval test \"x\$"$as_ac_var"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF have_xattr=yes $as_echo "#define HAVE_EXTATTR_GET_FILE 1" >>confdefs.h $as_echo "#define HAVE_EXTATTR_SET_FILE 1" >>confdefs.h $as_echo "#define HAVE_EXTATTR_LIST_FILE 1" >>confdefs.h fi done fi if test $have_xattr = yes; then have_extattr_string_in_libc=no for ac_func in extattr_namespace_to_string extattr_string_to_namespace do : as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" if eval test \"x\$"$as_ac_var"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF have_extattr_string_in_libc=yes $as_echo "#define HAVE_EXTATTR_NAMESPACE_TO_STRING 1" >>confdefs.h $as_echo "#define HAVE_EXTATTR_STRING_TO_NAMESPACE 1" >>confdefs.h fi done if test $have_extattr_string_in_libc = no; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for extattr_namespace_to_string extattr_string_to_namespace in -lutil" >&5 $as_echo_n "checking for extattr_namespace_to_string extattr_string_to_namespace in -lutil... " >&6; } if ${ac_cv_lib_util_extattr_namespace_to_string_extattr_string_to_namespace+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lutil $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char extattr_namespace_to_string extattr_string_to_namespace (); int main () { return extattr_namespace_to_string extattr_string_to_namespace (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_util_extattr_namespace_to_string_extattr_string_to_namespace=yes else ac_cv_lib_util_extattr_namespace_to_string_extattr_string_to_namespace=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_util_extattr_namespace_to_string_extattr_string_to_namespace" >&5 $as_echo "$ac_cv_lib_util_extattr_namespace_to_string_extattr_string_to_namespace" >&6; } if test "x$ac_cv_lib_util_extattr_namespace_to_string_extattr_string_to_namespace" = xyes; then : $as_echo "#define HAVE_EXTATTR_NAMESPACE_TO_STRING 1" >>confdefs.h $as_echo "#define HAVE_EXTATTR_STRING_TO_NAMESPACE 1" >>confdefs.h FDLIBS="-lutil $FDLIBS" fi fi fi fi if test $have_xattr = no -a \ x${HAVE_AIX_OS_TRUE} = x; then ac_fn_c_check_header_mongrel "$LINENO" "sys/ea.h" "ac_cv_header_sys_ea_h" "$ac_includes_default" if test "x$ac_cv_header_sys_ea_h" = xyes; then : $as_echo "#define HAVE_SYS_EA_H 1" >>confdefs.h fi for ac_func in llistea lgetea lsetea do : as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" if eval test \"x\$"$as_ac_var"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF have_xattr=yes $as_echo "#define HAVE_LLISTEA 1" >>confdefs.h $as_echo "#define HAVE_LGETEA 1" >>confdefs.h $as_echo "#define HAVE_LSETEA 1" >>confdefs.h fi done if test $have_xattr = no; then for ac_func in listea getea setea do : as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" if eval test \"x\$"$as_ac_var"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF have_xattr=yes $as_echo "#define HAVE_LISTEA 1" >>confdefs.h $as_echo "#define HAVE_GETEA 1" >>confdefs.h $as_echo "#define HAVE_SETEA 1" >>confdefs.h fi done fi fi if test $have_xattr = no -a \ x${HAVE_OSF1_OS_TRUE} = x; then ac_fn_c_check_header_mongrel "$LINENO" "sys/proplist.h" "ac_cv_header_sys_proplist_h" "$ac_includes_default" if test "x$ac_cv_header_sys_proplist_h" = xyes; then : $as_echo "#define HAVE_SYS_PROPLIST_H 1" >>confdefs.h fi for ac_func in getproplist get_proplist_entry sizeof_proplist_entry add_proplist_entry setproplist do : as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" if eval test \"x\$"$as_ac_var"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF have_xattr=yes $as_echo "#define HAVE_GETPROPLIST 1" >>confdefs.h $as_echo "#define HAVE_GET_PROPLIST_ENTRY 1" >>confdefs.h $as_echo "#define HAVE_SIZEOF_PROPLIST_ENTRY 1" >>confdefs.h $as_echo "#define HAVE_ADD_PROPLIST_ENTRY 1" >>confdefs.h $as_echo "#define HAVE_SETPROPLIST 1" >>confdefs.h fi done fi if test $have_xattr = no -a \ x${HAVE_SUN_OS_TRUE} = x; then ac_fn_c_check_header_mongrel "$LINENO" "sys/attr.h" "ac_cv_header_sys_attr_h" "$ac_includes_default" if test "x$ac_cv_header_sys_attr_h" = xyes; then : $as_echo "#define HAVE_SYS_ATTR_H 1" >>confdefs.h fi ac_fn_c_check_header_mongrel "$LINENO" "sys/nvpair.h" "ac_cv_header_sys_nvpair_h" "$ac_includes_default" if test "x$ac_cv_header_sys_nvpair_h" = xyes; then : $as_echo "#define 
HAVE_SYS_NVPAIR_H 1" >>confdefs.h fi ac_fn_c_check_header_mongrel "$LINENO" "attr.h" "ac_cv_header_attr_h" "$ac_includes_default" if test "x$ac_cv_header_attr_h" = xyes; then : $as_echo "#define HAVE_ATTR_H 1" >>confdefs.h fi for ac_func in openat attropen unlinkat fchownat futimesat linkat do : as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" if eval test \"x\$"$as_ac_var"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF have_xattr=yes $as_echo "#define HAVE_OPENAT 1" >>confdefs.h $as_echo "#define HAVE_ATTROPEN 1" >>confdefs.h $as_echo "#define HAVE_UNLINKAT 1" >>confdefs.h $as_echo "#define HAVE_FCHOWNAT 1" >>confdefs.h $as_echo "#define HAVE_FUTIMESAT 1" >>confdefs.h $as_echo "#define HAVE_LINKAT 1" >>confdefs.h fi done if test $have_xattr = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for nvlist_next_nvpair in -lnvpair" >&5 $as_echo_n "checking for nvlist_next_nvpair in -lnvpair... " >&6; } if ${ac_cv_lib_nvpair_nvlist_next_nvpair+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lnvpair $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char nvlist_next_nvpair (); int main () { return nvlist_next_nvpair (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_nvpair_nvlist_next_nvpair=yes else ac_cv_lib_nvpair_nvlist_next_nvpair=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_nvpair_nvlist_next_nvpair" >&5 $as_echo "$ac_cv_lib_nvpair_nvlist_next_nvpair" >&6; } if test "x$ac_cv_lib_nvpair_nvlist_next_nvpair" = xyes; then : $as_echo "#define HAVE_NVLIST_NEXT_NVPAIR 1" >>confdefs.h FDLIBS="-lnvpair $FDLIBS" fi fi fi if test $have_xattr = no; then ac_fn_c_check_header_mongrel "$LINENO" "sys/xattr.h" "ac_cv_header_sys_xattr_h" "$ac_includes_default" if test "x$ac_cv_header_sys_xattr_h" = xyes; then : $as_echo "#define HAVE_SYS_XATTR_H 1" >>confdefs.h fi for ac_func in llistxattr lgetxattr lsetxattr do : as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" if eval test \"x\$"$as_ac_var"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF have_xattr=yes $as_echo "#define HAVE_LLISTXATTR 1" >>confdefs.h $as_echo "#define HAVE_LGETXATTR 1" >>confdefs.h $as_echo "#define HAVE_LSETXATTR 1" >>confdefs.h fi done if test $have_xattr = no; then for ac_func in listxattr getxattr setxattr do : as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" if eval test \"x\$"$as_ac_var"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF have_xattr=yes $as_echo "#define HAVE_LISTXATTR 1" >>confdefs.h $as_echo "#define HAVE_GETXATTR 1" >>confdefs.h $as_echo "#define HAVE_SETXATTR 1" >>confdefs.h fi done fi fi XATTROBJS= if test x$support_xattr = xyes -a $have_xattr != yes; then as_fn_error $? 
"xattr support explicitly enabled but no supported xattr implementation found, please either load the xattr libraries or rerun configure without --enable-xattr" "$LINENO" 5 else if test $have_xattr = yes; then $as_echo "#define HAVE_XATTR 1" >>confdefs.h if test x${HAVE_LINUX_OS_TRUE} = x; then XATTROBJS="bxattr.c bxattr_linux.c" fi if test x${HAVE_SUN_OS_TRUE} = x; then XATTROBJS="bxattr.c bxattr_solaris.c" fi if test x${HAVE_FREEBSD_OS_TRUE} = x; then XATTROBJS="bxattr.c bxattr_freebsd.c" fi if test x${HAVE_DARWIN_OS_TRUE} = x; then XATTROBJS="bxattr.c bxattr_osx.c" fi fi fi fi AFS_CFLAGS="" AFS_LIBS="" support_afs=auto # Check whether --enable-afs was given. if test "${enable_afs+set}" = set; then : enableval=$enable_afs; if test x$enableval = xyes; then support_afs=yes elif test x$enableval = xno; then support_afs=no fi fi GPFS_CFLAGS="" GPFS_LDFLAGS="" support_gpfs=auto # Check whether --enable-gpfs was given. if test "${enable_gpfs+set}" = set; then : enableval=$enable_gpfs; if test x$enableval = xyes; then support_gpfs=yes elif test x$enableval = xno; then support_gpfs=no fi fi have_gpfs=no have_gpfslib=no GPFSLIB="" if test x$have_acl = xno -a x$have_xattr = xno; then if test x$support_gpfs = xyes; then { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: ACL and XATTR engine disabled - disable dependent GPFS support." >&5 $as_echo "$as_me: WARNING: ACL and XATTR engine disabled - disable dependent GPFS support." >&2;} fi else if test x$support_gpfs = xyes -o x$support_gpfs = xauto; then # Check whether --with-gpfsdir was given. if test "${with_gpfsdir+set}" = set; then : withval=$with_gpfsdir; with_gpfsdir=$withval fi saved_CFLAGS="${CFLAGS}" saved_CPPFLAGS="${CPPFLAGS}" saved_LDFLAGS="${LDFLAGS}" if test x$with_gpfsdir != x; then GPFS_CFLAGS="-I${with_gpfsdir}/include" CFLAGS="${GPFS_CFLAGS} ${saved_CFLAGS}" CPPFLAGS="${GPFS_CFLAGS} ${saved_CPPFLAGS}" LDFLAGS="-L${with_gpfsdir}/lib ${saved_LDFLAGS}" fi ac_fn_c_check_header_mongrel "$LINENO" "gpfs.h" "ac_cv_header_gpfs_h" "$ac_includes_default" if test "x$ac_cv_header_gpfs_h" = xyes; then : have_gpfs=yes fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for gpfs_getacl in -lgpfs" >&5 $as_echo_n "checking for gpfs_getacl in -lgpfs... " >&6; } if ${ac_cv_lib_gpfs_gpfs_getacl+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lgpfs $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char gpfs_getacl (); int main () { return gpfs_getacl (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_gpfs_gpfs_getacl=yes else ac_cv_lib_gpfs_gpfs_getacl=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_gpfs_gpfs_getacl" >&5 $as_echo "$ac_cv_lib_gpfs_gpfs_getacl" >&6; } if test "x$ac_cv_lib_gpfs_gpfs_getacl" = xyes; then : have_gpfslib=yes fi if test x$with_gpfsdir != x; then CFLAGS="${saved_CFLAGS}" CPPFLAGS="${saved_CPPFLAGS}" LDFLAGS="${saved_LDFLAGS}" fi if test x$have_gpfslib = xyes; then if test -f ${with_gpfsdir}/lib/libgpfs.so; then GPFSLIB=`realpath ${with_gpfsdir}/lib/libgpfs.so` cat >>confdefs.h <<_ACEOF #define GPFSLIB_CUSTOM_PATH "${GPFSLIB}" _ACEOF fi fi if test x$have_gpfs = xyes; then $as_echo "#define HAVE_GPFS 1" >>confdefs.h $as_echo "#define HAVE_GPFS_ACL 1" >>confdefs.h $as_echo "#define HAVE_GPFS_XATTR 1" >>confdefs.h fi fi fi PTHREAD_LIB="" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for pthread_create in -lpthread" >&5 $as_echo_n "checking for pthread_create in -lpthread... " >&6; } if ${ac_cv_lib_pthread_pthread_create+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpthread $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char pthread_create (); int main () { return pthread_create (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_pthread_pthread_create=yes else ac_cv_lib_pthread_pthread_create=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_pthread_pthread_create" >&5 $as_echo "$ac_cv_lib_pthread_pthread_create" >&6; } if test "x$ac_cv_lib_pthread_pthread_create" = xyes; then : PTHREAD_LIB="-lpthread" else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for pthread_create in -lpthreads" >&5 $as_echo_n "checking for pthread_create in -lpthreads... " >&6; } if ${ac_cv_lib_pthreads_pthread_create+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpthreads $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char pthread_create (); int main () { return pthread_create (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_pthreads_pthread_create=yes else ac_cv_lib_pthreads_pthread_create=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_pthreads_pthread_create" >&5 $as_echo "$ac_cv_lib_pthreads_pthread_create" >&6; } if test "x$ac_cv_lib_pthreads_pthread_create" = xyes; then : PTHREAD_LIB="-lpthreads" else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for pthread_create in -lc_r" >&5 $as_echo_n "checking for pthread_create in -lc_r... 
" >&6; } if ${ac_cv_lib_c_r_pthread_create+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lc_r $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char pthread_create (); int main () { return pthread_create (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_c_r_pthread_create=yes else ac_cv_lib_c_r_pthread_create=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_c_r_pthread_create" >&5 $as_echo "$ac_cv_lib_c_r_pthread_create" >&6; } if test "x$ac_cv_lib_c_r_pthread_create" = xyes; then : PTHREAD_LIB="-lc_r" else ac_fn_c_check_func "$LINENO" "pthread_create" "ac_cv_func_pthread_create" if test "x$ac_cv_func_pthread_create" = xyes; then : fi fi fi fi LDAP_LIBS="" LDAP_LDFLAGS="" LDAP_INC="" LDAP_TARGET="" LDAP_TARGET_INSTALL="" ldap_support=no ldap_start_tls_support=no # Check whether --with-ldap was given. if test "${with_ldap+set}" = set; then : withval=$with_ldap; case "$with_ldap" in no) : ;; yes|*) if test -f ${with_ldap}/include/ldap.h; then LDAP_INC="-I${with_ldap}/include" LDAP_LDFLAGS="-L${with_ldap}/lib" with_ldap="${with_ldap}/include" else with_ldap="/usr/include" fi as_ac_Header=`$as_echo "ac_cv_header_${with_ldap}/ldap.h" | $as_tr_sh` ac_fn_c_check_header_mongrel "$LINENO" "${with_ldap}/ldap.h" "$as_ac_Header" "$ac_includes_default" if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : $as_echo "#define HAVE_LDAP 1" >>confdefs.h { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ldap_init in -lldap" >&5 $as_echo_n "checking for ldap_init in -lldap... " >&6; } if ${ac_cv_lib_ldap_ldap_init+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lldap $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char ldap_init (); int main () { return ldap_init (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_ldap_ldap_init=yes else ac_cv_lib_ldap_ldap_init=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_ldap_ldap_init" >&5 $as_echo "$ac_cv_lib_ldap_ldap_init" >&6; } if test "x$ac_cv_lib_ldap_ldap_init" = xyes; then : { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ber_bvfree in -llber" >&5 $as_echo_n "checking for ber_bvfree in -llber... " >&6; } if ${ac_cv_lib_lber_ber_bvfree+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-llber $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char ber_bvfree (); int main () { return ber_bvfree (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_lber_ber_bvfree=yes else ac_cv_lib_lber_ber_bvfree=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_lber_ber_bvfree" >&5 $as_echo "$ac_cv_lib_lber_ber_bvfree" >&6; } if test "x$ac_cv_lib_lber_ber_bvfree" = xyes; then : ldap_support=yes LDAP_LIBS="-lldap -llber" else ac_fn_c_check_func "$LINENO" "ber_bvfree" "ac_cv_func_ber_bvfree" if test "x$ac_cv_func_ber_bvfree" = xyes; then : ldap_support=yes LDAP_LIBS="-lldap -llber" fi fi else ac_fn_c_check_func "$LINENO" "ldap_init" "ac_cv_func_ldap_init" if test "x$ac_cv_func_ldap_init" = xyes; then : fi fi ac_fn_c_check_func "$LINENO" "ldap_start_tls" "ac_cv_func_ldap_start_tls" if test "x$ac_cv_func_ldap_start_tls" = xyes; then : ldap_start_tls_support=yes else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ldap_start_tls in -lldap" >&5 $as_echo_n "checking for ldap_start_tls in -lldap... " >&6; } if ${ac_cv_lib_ldap_ldap_start_tls+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lldap $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char ldap_start_tls (); int main () { return ldap_start_tls (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_ldap_ldap_start_tls=yes else ac_cv_lib_ldap_ldap_start_tls=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_ldap_ldap_start_tls" >&5 $as_echo "$ac_cv_lib_ldap_ldap_start_tls" >&6; } if test "x$ac_cv_lib_ldap_ldap_start_tls" = xyes; then : ldap_start_tls_support=yes $as_echo "#define HAVE_LDAP_START_TLS 1" >>confdefs.h fi fi else { $as_echo "$as_me:${as_lineno-$LINENO}: \"ldap.h not found. LDAP turned off ...\"" >&5 $as_echo "$as_me: \"ldap.h not found. LDAP turned off ...\"" >&6;} fi ;; esac fi DIRPLUG_INSTALL_TARGET= EXTRA_INSTALL_SCRIPTS= PLUGIN_INSTALL_TARGET= support_antivirus=no # Check whether --enable-antivirus-plugin was given. if test "${enable_antivirus_plugin+set}" = set; then : enableval=$enable_antivirus_plugin; if test x$enableval = xyes; then support_antivirus=yes elif test x$enableval = xno; then support_antivirus=disabled fi fi if test x$support_antivirus = xyes ; then FD_PLUGIN_INSTALL="$FD_PLUGIN_INSTALL install-antivirus" fi support_docker=auto # Check whether --enable-docker-plugin was given. if test "${enable_docker_plugin+set}" = set; then : enableval=$enable_docker_plugin; if test x$enableval = xyes; then support_docker=yes elif test x$enableval = xno; then support_docker=disabled fi fi if test x$support_docker = xyes -o x$support_docker = xauto; then # Extract the first word of "docker", so it can be a program name with args. set dummy docker; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_docker_bin+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$docker_bin"; then ac_cv_prog_docker_bin="$docker_bin" # Let the user override the test. 
else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in "/usr/bin/" do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_docker_bin="yes" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_prog_docker_bin" && ac_cv_prog_docker_bin="no" fi fi docker_bin=$ac_cv_prog_docker_bin if test -n "$docker_bin"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $docker_bin" >&5 $as_echo "$docker_bin" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test x$docker_bin = xyes; then support_docker=yes FD_PLUGIN_DIR="${FD_PLUGIN_DIR} src/plugins/fd/docker" FD_PLUGIN_INSTALL="${FD_PLUGIN_INSTALL} install-docker" else if test x$support_docker = xyes; then echo " " echo "You enabled a Docker Plugin build but 'docker' command not found." echo " " exit 1 fi support_docker=no fi fi support_k8s=auto # Check whether --enable-kubernetes-plugin was given. if test "${enable_kubernetes_plugin+set}" = set; then : enableval=$enable_kubernetes_plugin; if test x$enableval = xyes; then support_kubernetes=yes FD_PLUGIN_INSTALL="${FD_PLUGIN_INSTALL} install-kubernetes" elif test x$enableval = xno; then support_kubernetes=disabled fi fi if test x$support_kubernetes = xyes -o x$support_kubernetes = xauto; then FD_PLUGIN_DIR="${FD_PLUGIN_DIR} src/plugins/fd/kubernetes-backend" fi support_cdp=auto # Check whether --enable-cdp-plugin was given. if test "${enable_cdp_plugin+set}" = set; then : enableval=$enable_cdp_plugin; if test x$enableval = xyes; then support_cdp=yes FD_PLUGIN_INSTALL="${FD_PLUGIN_INSTALL} install-cdp" TOOLS_INSTALL="${TOOLS_INSTALL} install-cdp" elif test x$enableval = xno; then support_cdp=disabled fi fi support_totp_bpam=no # Check whether --enable-totp-bpam was given. if test "${enable_totp_bpam+set}" = set; then : enableval=$enable_totp_bpam; if test x$enableval = xyes; then support_totp_bpam=yes elif test x$enableval = xno; then support_totp_bpam=disabled fi fi if test x$support_totp_bpam = xyes ; then DIRPLUG_INSTALL_TARGET="$DIRPLUG_INSTALL_TARGET install-totp" DIR_PLUGIN_DIR="$DIR_PLUGIN_DIR src/plugins/dir/totp" fi support_ldap_bpam=auto # Check whether --enable-ldap-bpam was given. if test "${enable_ldap_bpam+set}" = set; then : enableval=$enable_ldap_bpam; if test x$enableval = xyes; then support_ldap_bpam=yes elif test x$enableval = xno; then support_ldap_bpam=disabled fi fi if test x$support_ldap_bpam = xyes -o x$support_ldap_bpam = xauto; then if test x$ldap_support = xyes; then support_ldap_bpam=yes BPAM_LDAP_TARGET="ldap-dir.la ldaptest" BPAM_LDAP_TARGET_INSTALL="all install-ldap install-ldaptest" else if test x$support_ldap_bpam = xyes; then as_fn_error $? "\"Director LDAP Plugin enabled but no required libraries found. Please check for LDAP library support and set a proper --with-ldap=... 
parameter.\"" "$LINENO" 5 fi support_ldap_bpam=no fi fi if test x$support_ldap_bpam = xyes; then DIR_PLUGIN_DIR="${DIR_PLUGIN_DIR} src/plugins/dir/ldap" fi for ac_header in sys/prctl.h sys/capability.h do : as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default" if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done for ac_func in prctl setreuid do : as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" if eval test \"x\$"$as_ac_var"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking for cap_set_proc in -lcap" >&5 $as_echo_n "checking for cap_set_proc in -lcap... " >&6; } if ${ac_cv_lib_cap_cap_set_proc+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lcap $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char cap_set_proc (); int main () { return cap_set_proc (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_cap_cap_set_proc=yes else ac_cv_lib_cap_cap_set_proc=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_cap_cap_set_proc" >&5 $as_echo "$ac_cv_lib_cap_cap_set_proc" >&6; } if test "x$ac_cv_lib_cap_cap_set_proc" = xyes; then : CAP_LIBS="-lcap" else CAP_LIBS= fi have_libcap="no" if test x$CAP_LIBS = x-lcap; then $as_echo "#define HAVE_LIBCAP 1" >>confdefs.h have_libcap="yes" fi $as_echo "#define FDLIBS 1" >>confdefs.h CFLAGS=${CFLAGS--O} if test x$have_gcc = xyes ; then CPPFLAGS="$CPPFLAGS -x c++ -fno-strict-aliasing -fno-exceptions -fno-rtti" CFLAGS="$CFLAGS -x c++ -fno-strict-aliasing -fno-exceptions -fno-rtti" fi LDFLAGS=${LDFLAGS--O} CPPFLAGS="$CPPFLAGS" CFLAGS="$CFLAGS" OBJLIST= lld="lld" llu="llu" WCFLAGS= WLDFLAGS= PSCMD="ps -e" WIN32= MACOSX= COMPRESS_MANPAGES=yes case "$DISTNAME" in aix) DISTVER=`uname -r` PSCMD="ps -e -o pid,comm" PFILES="${PFILES} platforms/aix/Makefile" TAPEDRIVE="/dev/rmt0.1" ;; alpha) DISTVER=`uname -r` PTHREAD_LIB="-lpthread -lexc" if test "${CC}" = "gcc" ; then lld="lld" llu="llu" else lld="ld" llu="lu" fi TAPEDRIVE="/dev/nrmt0" ;; bsdi) DISTVER=`uname -a |awk '{print $3}'` TAPEDRIVE="/dev/nrmt0" PTHREAD_LIB="-pthread" CFLAGS="${CFLAGS} -pthread" PSCMD="ps -ax -o pid,command" lld="qd" llu="qu" PFILES="${PFILES} \ platforms/bsdi/Makefile \ platforms/bsdi/bacula-fd \ platforms/bsdi/bacula-sd \ platforms/bsdi/bacula-dir" largefile_support="yes" ;; cygwin) DISTVER=`uname -a |awk '{print $3}'` TAPEDRIVE="/dev/nrst0" WIN32=win32 WCFLAGS="-mwindows" WLDFLAGS="-mwindows" ;; darwin) DISTVER=`uname -r` TAPEDRIVE="/dev/nst0" PSCMD="ps -e -o pid,command" MACOSX=macosx PFILES="${PFILES} \ platforms/darwin/Makefile" ;; osx) DISTVER=`uname -r` TAPEDRIVE="/dev/nst0" PSCMD="ps -e -o pid,command" MACOSX=macosx ;; debian) if `test -f /etc/apt/sources.list && grep -q ubuntu /etc/apt/sources.list`; then DISTNAME="ubuntu" fi DISTVER=`cat /etc/debian_version` if test -f /etc/lsb-release ; then . 
/etc/lsb-release if test "x$DISTRIB_ID" != "x" ; then DISTNAME=$DISTRIB_ID fi if test "x$DISTRIB_RELEASE" != "x" ; then DISTVER=$DISTRIB_RELEASE fi fi if test "$DISTNAME" = "Ubuntu" ; then DISTNAME="ubuntu" fi TAPEDRIVE="/dev/nst0" PSCMD="ps -e -o pid,command" if test "$DISTNAME" = "ubuntu" ; then PFILES="${PFILES} \ platforms/ubuntu/Makefile \ platforms/ubuntu/bacula-fd \ platforms/ubuntu/bacula-sd \ platforms/ubuntu/bacula-dir" else PFILES="${PFILES} \ platforms/debian/Makefile \ platforms/debian/bacula-fd \ platforms/debian/bacula-sd \ platforms/debian/bacula-dir" fi ;; freebsd) DISTVER=`uname -a |awk '{print $3}'` VER=`echo $DISTVER | cut -c 1` if test x$VER = x4 ; then PTHREAD_LIB="${PTHREAD_LIBS:--pthread}" CFLAGS="${CFLAGS} ${PTHREAD_CFLAGS:--pthread}" fi lld="qd" llu="qu" TAPEDRIVE="/dev/nrsa0" PSCMD="ps -ax -o pid,command" PFILES="${PFILES} \ platforms/freebsd/Makefile \ platforms/freebsd/bacula-fd \ platforms/freebsd/bacula-sd \ platforms/freebsd/bacula-dir" largefile_support="yes" ;; hurd) DISTVER=`uname -r` TAPEDRIVE="/dev/nst0" PSCMD="ps -e -o pid,command" PFILES="${PFILES} \ platforms/hurd/Makefile \ platforms/hurd/bacula-fd \ platforms/hurd/bacula-sd \ platforms/hurd/bacula-dir" ;; hpux) PSCMD="UNIX95=1; ps -e -o pid,comm" CFLAGS="${CFLAGS} -D_XOPEN_SOURCE_EXTENDED=1" DISTVER=`uname -r` TAPEDRIVE="/dev/rmt/0hnb" PTHREAD_LIB="-lpthread" $as_echo "#define _INCLUDE_LONGLONG 1" >>confdefs.h ;; irix) DISTVER=`uname -r` TAPEDRIVE="/dev/rmt/0cbn" PSCMD="ps -e -o pid,comm" PFILES="${PFILES} \ platforms/irix/Makefile \ platforms/irix/bacula-fd \ platforms/irix/bacula-sd \ platforms/irix/bacula-dir" ;; netbsd) DISTVER=`uname -a |awk '{print $3}'` lld="qd" llu="qu" TAPEDRIVE="/dev/nrst0" PSCMD="ps -ax -o pid,command" PTHREAD_LIB="-pthread" CFLAGS="${CFLAGS} -pthread" ;; openbsd) DISTVER=`uname -a |awk '{print $3}'` lld="qd" llu="qu" TAPEDRIVE="/dev/nrst0" PSCMD="ps -ax -o pid,command" PTHREAD_LIB="-pthread" CFLAGS="${CFLAGS} -pthread" PFILES="${PFILES} \ platforms/openbsd/Makefile \ platforms/openbsd/bacula-fd \ platforms/openbsd/bacula-sd \ platforms/openbsd/bacula-dir" ;; redhat) if test -f /etc/whitebox-release ; then f=/etc/whitebox-release else f=/etc/redhat-release fi if test `cat $f | grep release |\ cut -f 3 -d ' '`x = "Enterprise"x ; then DISTVER="Enterprise "`cat $f | grep release |\ cut -f 6 -d ' '` else DISTVER=`cat /etc/redhat-release | grep release |\ cut -f 5 -d ' '` fi TAPEDRIVE="/dev/nst0" PSCMD="ps -e -o pid,command" PFILES="${PFILES} \ platforms/redhat/Makefile \ platforms/redhat/bacula-fd \ platforms/redhat/bacula-sd \ platforms/redhat/bacula-dir " ;; mandrake) DISTVER=`cat /etc/mandrake-release | grep release |\ cut -f 5 -d ' '` TAPEDRIVE="/dev/nst0" PSCMD="ps -e -o pid,command" PFILES="${PFILES} \ platforms/mandrake/Makefile \ platforms/mandrake/bacula-fd \ platforms/mandrake/bacula-sd \ platforms/mandrake/bacula-dir \ " ;; gentoo) DISTVER=`awk '/version / {print $5}' < /etc/gentoo-release` TAPEDRIVE="/dev/nst0" PSCMD="ps -e -o pid,command" PFILES="${PFILES} \ platforms/gentoo/Makefile \ platforms/gentoo/bacula-init \ platforms/gentoo/bacula-fd \ platforms/gentoo/bacula-sd \ platforms/gentoo/bacula-dir" ;; slackware) DISTVER=`cat /etc/slackware-version` TAPEDRIVE="/dev/nst0" PSCMD="ps -e -o pid,command" PFILES="${PFILES} \ platforms/slackware/Makefile \ platforms/slackware/rc.bacula-fd \ platforms/slackware/rc.bacula-sd \ platforms/slackware/rc.bacula-dir\ platforms/slackware/functions.bacula" ;; solaris) DISTVER=`uname -r` TAPEDRIVE="/dev/rmt/0cbn" PSCMD="ps -e 
-o pid,comm" PFILES="${PFILES} \ platforms/solaris/Makefile \ platforms/solaris/bacula-fd \ platforms/solaris/bacula-sd \ platforms/solaris/bacula-dir" COMPRESS_MANPAGES= case ${DISTVER} in 5.5|5.6) $as_echo "#define HAVE_OLD_SOCKOPT 1" >>confdefs.h $as_echo "#define USE_THR_SETCONCURRENCY 1" >>confdefs.h ;; 5.7|5.8) $as_echo "#define USE_THR_SETCONCURRENCY 1" >>confdefs.h ;; 5.10) $as_echo "#define HAVE_SOLARIS10 1" >>confdefs.h ;; *) ;; esac LIBS="$LIBS -lresolv -lrt" ;; suse) DISTVER=`cat /etc/SuSE-release |grep VERSION|\ cut -f 3 -d ' '` TAPEDRIVE="/dev/nst0" PSCMD="ps -e -o pid,command" PFILES="${PFILES} \ platforms/suse/Makefile \ platforms/suse/bacula-fd \ platforms/suse/bacula-sd \ platforms/suse/bacula-dir \ platforms/suse/bacula" ;; suse5) DISTNAME=suse DISTVER=5.x TAPEDRIVE="/dev/nst0" PSCMD="ps -e -o pid,command" PFILES="${PFILES} \ platforms/suse/Makefile \ platforms/suse/bacula-fd \ platforms/suse/bacula-sd \ platforms/suse/bacula-dir" ;; unknown) DISTVER=unknown_distro_ver TAPEDRIVE="/dev/nst0" ;; *) echo " === Something went wrong. Unknown DISTNAME $DISTNAME ===" ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking for systemd support" >&5 $as_echo_n "checking for systemd support... " >&6; } # Check whether --with-systemd was given. if test "${with_systemd+set}" = set; then : withval=$with_systemd; if test "$withval" != "no"; then if test "$withval" = "yes"; then SYSTEMD_UNITDIR="/lib/systemd/system" else SYSTEMD_UNITDIR="${withval}" fi PFILES="${PFILES} \ platforms/systemd/Makefile \ platforms/systemd/bacula.conf \ platforms/systemd/bacula-dir.service \ platforms/systemd/bacula-fd.service \ platforms/systemd/bacula-sd.service" $as_echo "#define HAVE_SYSTEMD 1" >>confdefs.h { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } support_systemd="yes" else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } support_systemd="no" fi else support_systemd="no" { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi LIBS="$PTHREAD_LIB $LIBS" cat >>confdefs.h <<_ACEOF #define lld "$lld" _ACEOF cat >>confdefs.h <<_ACEOF #define llu "$llu" _ACEOF MCOMMON=./autoconf/Make.common if test "x${subsysdir}" = "x${sbindir}" ; then echo " " echo " " echo "You have set both --sbindir and --with-subsys-dir" echo " equal to: ${subsysdir} " echo "This is not permitted. Please reconfigure." echo " " echo "Aborting configuration ..." 
echo " " echo " " exit 1 fi if test -f src/plugins/fd/kubernetes-backend/Makefile.in; then PFILES="${PFILES} src/plugins/fd/kubernetes-backend/Makefile" fi if test -f src/plugins/fd/kubernetes-backend/baculak8s/plugins/k8sbackend/baculabackupimage.py.in; then PFILES="${PFILES} src/plugins/fd/kubernetes-backend/baculak8s/plugins/k8sbackend/baculabackupimage.py" fi ac_config_files="$ac_config_files autoconf/Make.common Makefile manpages/Makefile scripts/btraceback scripts/bconsole scripts/bacula scripts/bacula-ctl-dir scripts/bacula-ctl-fd scripts/bacula-ctl-sd scripts/devel_bacula scripts/Makefile scripts/logrotate scripts/mtx-changer scripts/disk-changer scripts/key-manager.py scripts/install-key-manager.sh scripts/logwatch/Makefile scripts/logwatch/logfile.bacula.conf scripts/bat.desktop scripts/bat.desktop.xsu scripts/bat.desktop.consolehelper scripts/bat.console_apps scripts/aws_cloud_driver scripts/bacula-tray-monitor.desktop scripts/kubernetes-bacula-backup/Dockerfile src/Makefile src/host.h src/console/Makefile src/console/bconsole.conf src/qt-console/bat.conf src/qt-console/bat.pro src/qt-console/bat.pro.mingw32 src/qt-console/bat.pro.mingw64 src/qt-console/install_conf_file src/qt-console/tray-monitor/tray-monitor.conf src/qt-console/tray-monitor/bacula-tray-monitor.conf src/qt-console/tray-monitor/tray-monitor.pro src/qt-console/tray-monitor/tray-monitor.pro.mingw32 src/qt-console/tray-monitor/tray-monitor.pro.mingw64 src/qt-console/tray-monitor/install_conf_file src/dird/Makefile src/dird/bacula-dir.conf src/lib/Makefile src/stored/Makefile src/stored/bacula-sd.conf src/filed/Makefile src/filed/bacula-fd.conf src/cats/Makefile src/cats/make_catalog_backup.pl src/cats/make_catalog_backup src/cats/delete_catalog_backup src/cats/create_postgresql_database src/cats/update_postgresql_tables src/cats/make_postgresql_tables src/cats/grant_postgresql_privileges src/cats/drop_postgresql_tables src/cats/drop_postgresql_database src/cats/create_mysql_database src/cats/update_mysql_tables src/cats/make_mysql_tables src/cats/grant_mysql_privileges src/cats/drop_mysql_tables src/cats/drop_mysql_database src/cats/create_sqlite3_database src/cats/update_sqlite3_tables src/cats/make_sqlite3_tables src/cats/grant_sqlite3_privileges src/cats/drop_sqlite3_tables src/cats/drop_sqlite3_database src/cats/sqlite src/cats/mysql src/cats/create_bacula_database src/cats/update_bacula_tables src/cats/grant_bacula_privileges src/cats/make_bacula_tables src/cats/drop_bacula_tables src/cats/drop_bacula_database src/cats/install-default-backend src/findlib/Makefile src/tools/Makefile src/tools/cdp-client/Makefile src/plugins/fd/Makefile.inc src/plugins/sd/Makefile src/plugins/dir/Makefile.inc src/plugins/fd/kubernetes-backend/baculak8s/plugins/k8sbackend/baculabackupimage.py po/Makefile.in updatedb/update_mysql_tables updatedb/update_sqlite3_tables updatedb/update_postgresql_tables updatedb/update_mysql_tables_9_to_10 updatedb/update_sqlite3_tables_9_to_10 updatedb/update_postgresql_tables_9_to_10 updatedb/update_mysql_tables_10_to_11 updatedb/update_sqlite3_tables_10_to_11 updatedb/update_postgresql_tables_10_to_11 updatedb/update_mysql_tables_11_to_12 updatedb/update_sqlite3_tables_11_to_12 updatedb/update_postgresql_tables_11_to_12 updatedb/update_postgresql_tables_1021_to_1022 updatedb/update_postgresql_tables_1022_to_1023 updatedb/update_postgresql_tables_1023_to_1024 updatedb/update_postgresql_tables_1024_to_1025 updatedb/update_postgresql_tables_1025_to_1026 updatedb/update_mysql_tables_1020_to_1021 
updatedb/update_mysql_tables_1021_to_1022 updatedb/update_mysql_tables_1022_to_1023 updatedb/update_mysql_tables_1023_to_1024 updatedb/update_mysql_tables_1024_to_1025 updatedb/update_mysql_tables_1025_to_1026 examples/nagios/check_bacula/Makefile platforms/rpms/redhat/bacula.spec platforms/rpms/redhat/bacula-bat.spec platforms/rpms/redhat/bacula-docs.spec platforms/rpms/redhat/bacula-mtx.spec platforms/rpms/suse/bacula.spec platforms/rpms/suse/bacula-bat.spec platforms/rpms/suse/bacula-docs.spec platforms/rpms/suse/bacula-mtx.spec $PFILES" cat >confcache <<\_ACEOF # This file is a shell script that caches the results of configure # tests run on this system so they can be shared between configure # scripts and configure runs, see configure's option --config-cache. # It is not useful on other systems. If it contains results you don't # want to keep, you may remove or edit it. # # config.status only pays attention to the cache file if you give it # the --recheck option to rerun configure. # # `ac_cv_env_foo' variables (set or unset) will be overridden when # loading this file, other *unset* `ac_cv_foo' will be assigned the # following values. _ACEOF # The following way of writing the cache mishandles newlines in values, # but we know of no workaround that is simple, portable, and efficient. # So, we kill variables containing newlines. # Ultrix sh set writes to stderr and can't be redirected directly, # and sets the high bit in the cache file unless we assign to the vars. ( for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do eval ac_val=\$$ac_var case $ac_val in #( *${as_nl}*) case $ac_var in #( *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 $as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; esac case $ac_var in #( _ | IFS | as_nl) ;; #( BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( *) { eval $ac_var=; unset $ac_var;} ;; esac ;; esac done (set) 2>&1 | case $as_nl`(ac_space=' '; set) 2>&1` in #( *${as_nl}ac_space=\ *) # `set' does not quote correctly, so add quotes: double-quote # substitution turns \\\\ into \\, and sed turns \\ into \. sed -n \ "s/'/'\\\\''/g; s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" ;; #( *) # `set' quotes correctly as required by POSIX, so do not add quotes. sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" ;; esac | sort ) | sed ' /^ac_cv_env_/b end t clear :clear s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ t end s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ :end' >>confcache if diff "$cache_file" confcache >/dev/null 2>&1; then :; else if test -w "$cache_file"; then if test "x$cache_file" != "x/dev/null"; then { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5 $as_echo "$as_me: updating cache $cache_file" >&6;} if test ! -f "$cache_file" || test -h "$cache_file"; then cat confcache >"$cache_file" else case $cache_file in #( */* | ?:*) mv -f confcache "$cache_file"$$ && mv -f "$cache_file"$$ "$cache_file" ;; #( *) mv -f confcache "$cache_file" ;; esac fi fi else { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 $as_echo "$as_me: not updating unwritable cache $cache_file" >&6;} fi fi rm -f confcache test "x$prefix" = xNONE && prefix=$ac_default_prefix # Let make expand exec_prefix. test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' DEFS=-DHAVE_CONFIG_H ac_libobjs= ac_ltlibobjs= U= for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue # 1. 
Remove the extension, and $U if already installed. ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' ac_i=`$as_echo "$ac_i" | sed "$ac_script"` # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR # will be set to the directory where LIBOBJS objects are built. as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext" as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo' done LIBOBJS=$ac_libobjs LTLIBOBJS=$ac_ltlibobjs : "${CONFIG_STATUS=./config.status}" ac_write_fail=0 ac_clean_files_save=$ac_clean_files ac_clean_files="$ac_clean_files $CONFIG_STATUS" { $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5 $as_echo "$as_me: creating $CONFIG_STATUS" >&6;} as_write_fail=0 cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1 #! $SHELL # Generated by $as_me. # Run this file to recreate the current configuration. # Compiler output produced by configure, useful for debugging # configure, is in config.log if it exists. debug=false ac_cs_recheck=false ac_cs_silent=false SHELL=\${CONFIG_SHELL-$SHELL} export SHELL _ASEOF cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1 ## -------------------- ## ## M4sh Initialization. ## ## -------------------- ## # Be more Bourne compatible DUALCASE=1; export DUALCASE # for MKS sh if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in #( *posix*) : set -o posix ;; #( *) : ;; esac fi as_nl=' ' export as_nl # Printing a long string crashes Solaris 7 /usr/bin/printf. as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo # Prefer a ksh shell builtin over an external printf program on Solaris, # but without wasting forks for bash or zsh. if test -z "$BASH_VERSION$ZSH_VERSION" \ && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='print -r --' as_echo_n='print -rn --' elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='printf %s\n' as_echo_n='printf %s' else if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' as_echo_n='/usr/ucb/echo -n' else as_echo_body='eval expr "X$1" : "X\\(.*\\)"' as_echo_n_body='eval arg=$1; case $arg in #( *"$as_nl"*) expr "X$arg" : "X\\(.*\\)$as_nl"; arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; esac; expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" ' export as_echo_n_body as_echo_n='sh -c $as_echo_n_body as_echo' fi export as_echo_body as_echo='sh -c $as_echo_body as_echo' fi # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || PATH_SEPARATOR=';' } fi # IFS # We need space, tab and new line, in precisely that order. Quoting is # there to prevent editors from complaining about space-tab. # (If _AS_PATH_WALK were called with IFS unset, it would disable word # splitting by setting IFS to empty value.) IFS=" "" $as_nl" # Find who we are. Look in the path if we contain no directory separator. as_myself= case $0 in #(( *[\\/]* ) as_myself=$0 ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break done IFS=$as_save_IFS ;; esac # We did not find ourselves, most probably we were run as `sh COMMAND' # in which case we are not to be found in the path. if test "x$as_myself" = x; then as_myself=$0 fi if test ! -f "$as_myself"; then $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 exit 1 fi # Unset variables that we do not need and which cause bugs (e.g. in # pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" # suppresses any "Segmentation fault" message there. '((' could # trigger a bug in pdksh 5.2.14. for as_var in BASH_ENV ENV MAIL MAILPATH do eval test x\${$as_var+set} = xset \ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : done PS1='$ ' PS2='> ' PS4='+ ' # NLS nuisances. LC_ALL=C export LC_ALL LANGUAGE=C export LANGUAGE # CDPATH. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH # as_fn_error STATUS ERROR [LINENO LOG_FD] # ---------------------------------------- # Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are # provided, also output the error to LOG_FD, referencing LINENO. Then exit the # script with STATUS, using 1 if that was 0. as_fn_error () { as_status=$1; test $as_status -eq 0 && as_status=1 if test "$4"; then as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 fi $as_echo "$as_me: error: $2" >&2 as_fn_exit $as_status } # as_fn_error # as_fn_set_status STATUS # ----------------------- # Set $? to STATUS, without forking. as_fn_set_status () { return $1 } # as_fn_set_status # as_fn_exit STATUS # ----------------- # Exit the shell with STATUS, even in a "trap 0" or "set -e" context. as_fn_exit () { set +e as_fn_set_status $1 exit $1 } # as_fn_exit # as_fn_unset VAR # --------------- # Portably unset VAR. as_fn_unset () { { eval $1=; unset $1;} } as_unset=as_fn_unset # as_fn_append VAR VALUE # ---------------------- # Append the text in VALUE to the end of the definition contained in VAR. Take # advantage of any shell optimizations that allow amortized linear growth over # repeated appends, instead of the typical quadratic growth present in naive # implementations. if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : eval 'as_fn_append () { eval $1+=\$2 }' else as_fn_append () { eval $1=\$$1\$2 } fi # as_fn_append # as_fn_arith ARG... # ------------------ # Perform arithmetic evaluation on the ARGs, and store the result in the # global $as_val. Take advantage of shells that can avoid forks. The arguments # must be portable across $(()) and expr. if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : eval 'as_fn_arith () { as_val=$(( $* )) }' else as_fn_arith () { as_val=`expr "$@" || test $? -eq 1` } fi # as_fn_arith if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then as_basename=basename else as_basename=false fi if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then as_dirname=dirname else as_dirname=false fi as_me=`$as_basename -- "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| . 2>/dev/null || $as_echo X/"$0" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/ q } /^X\/\(\/\/\)$/{ s//\1/ q } /^X\/\(\/\).*/{ s//\1/ q } s/.*/./; q'` # Avoid depending upon Character Ranges. 
as_cr_letters='abcdefghijklmnopqrstuvwxyz' as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' as_cr_Letters=$as_cr_letters$as_cr_LETTERS as_cr_digits='0123456789' as_cr_alnum=$as_cr_Letters$as_cr_digits ECHO_C= ECHO_N= ECHO_T= case `echo -n x` in #((((( -n*) case `echo 'xy\c'` in *c*) ECHO_T=' ';; # ECHO_T is single tab character. xy) ECHO_C='\c';; *) echo `echo ksh88 bug on AIX 6.1` > /dev/null ECHO_T=' ';; esac;; *) ECHO_N='-n';; esac rm -f conf$$ conf$$.exe conf$$.file if test -d conf$$.dir; then rm -f conf$$.dir/conf$$.file else rm -f conf$$.dir mkdir conf$$.dir 2>/dev/null fi if (echo >conf$$.file) 2>/dev/null; then if ln -s conf$$.file conf$$ 2>/dev/null; then as_ln_s='ln -s' # ... but there are two gotchas: # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. # In both cases, we have to default to `cp -pR'. ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || as_ln_s='cp -pR' elif ln conf$$.file conf$$ 2>/dev/null; then as_ln_s=ln else as_ln_s='cp -pR' fi else as_ln_s='cp -pR' fi rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file rmdir conf$$.dir 2>/dev/null # as_fn_mkdir_p # ------------- # Create "$as_dir" as a directory, including parents if necessary. as_fn_mkdir_p () { case $as_dir in #( -*) as_dir=./$as_dir;; esac test -d "$as_dir" || eval $as_mkdir_p || { as_dirs= while :; do case $as_dir in #( *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( *) as_qdir=$as_dir;; esac as_dirs="'$as_qdir' $as_dirs" as_dir=`$as_dirname -- "$as_dir" || $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_dir" : 'X\(//\)[^/]' \| \ X"$as_dir" : 'X\(//\)$' \| \ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_dir" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` test -d "$as_dir" && break done test -z "$as_dirs" || eval "mkdir $as_dirs" } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" } # as_fn_mkdir_p if mkdir -p . 2>/dev/null; then as_mkdir_p='mkdir -p "$as_dir"' else test -d ./-p && rmdir ./-p as_mkdir_p=false fi # as_fn_executable_p FILE # ----------------------- # Test if FILE is an executable regular file. as_fn_executable_p () { test -f "$1" && test -x "$1" } # as_fn_executable_p as_test_x='test -x' as_executable_p=as_fn_executable_p # Sed expression to map a string onto a valid CPP name. as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" # Sed expression to map a string onto a valid variable name. as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" exec 6>&1 ## ----------------------------------- ## ## Main body of $CONFIG_STATUS script. ## ## ----------------------------------- ## _ASEOF test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1 cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # Save the log message, to keep $0 and so on meaningful, and to # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" This file was extended by bacula $as_me 15.0.0, which was generated by GNU Autoconf 2.69. 
Invocation command line was CONFIG_FILES = $CONFIG_FILES CONFIG_HEADERS = $CONFIG_HEADERS CONFIG_LINKS = $CONFIG_LINKS CONFIG_COMMANDS = $CONFIG_COMMANDS $ $0 $@ on `(hostname || uname -n) 2>/dev/null | sed 1q` " _ACEOF case $ac_config_files in *" "*) set x $ac_config_files; shift; ac_config_files=$*;; esac case $ac_config_headers in *" "*) set x $ac_config_headers; shift; ac_config_headers=$*;; esac cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 # Files that config.status was made for. config_files="$ac_config_files" config_headers="$ac_config_headers" config_commands="$ac_config_commands" _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 ac_cs_usage="\ \`$as_me' instantiates files and other configuration actions from templates according to the current configuration. Unless the files and actions are specified as TAGs, all are instantiated by default. Usage: $0 [OPTION]... [TAG]... -h, --help print this help, then exit -V, --version print version number and configuration settings, then exit --config print configuration, then exit -q, --quiet, --silent do not print progress messages -d, --debug don't remove temporary files --recheck update $as_me by reconfiguring in the same conditions --file=FILE[:TEMPLATE] instantiate the configuration file FILE --header=FILE[:TEMPLATE] instantiate the configuration header FILE Configuration files: $config_files Configuration headers: $config_headers Configuration commands: $config_commands Report bugs to the package provider." _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" ac_cs_version="\\ bacula config.status 15.0.0 configured by $0, generated by GNU Autoconf 2.69, with options \\"\$ac_cs_config\\" Copyright (C) 2012 Free Software Foundation, Inc. This config.status script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it." ac_pwd='$ac_pwd' srcdir='$srcdir' INSTALL='$INSTALL' AWK='$AWK' test -n "\$AWK" || AWK=awk _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # The default lists apply if the user does not specify any file. ac_need_defaults=: while test $# != 0 do case $1 in --*=?*) ac_option=`expr "X$1" : 'X\([^=]*\)='` ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` ac_shift=: ;; --*=) ac_option=`expr "X$1" : 'X\([^=]*\)='` ac_optarg= ac_shift=: ;; *) ac_option=$1 ac_optarg=$2 ac_shift=shift ;; esac case $ac_option in # Handling of the options. -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) ac_cs_recheck=: ;; --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) $as_echo "$ac_cs_version"; exit ;; --config | --confi | --conf | --con | --co | --c ) $as_echo "$ac_cs_config"; exit ;; --debug | --debu | --deb | --de | --d | -d ) debug=: ;; --file | --fil | --fi | --f ) $ac_shift case $ac_optarg in *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; '') as_fn_error $? "missing file argument" ;; esac as_fn_append CONFIG_FILES " '$ac_optarg'" ac_need_defaults=false;; --header | --heade | --head | --hea ) $ac_shift case $ac_optarg in *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; esac as_fn_append CONFIG_HEADERS " '$ac_optarg'" ac_need_defaults=false;; --he | --h) # Conflict between --help and --header as_fn_error $? 
"ambiguous option: \`$1' Try \`$0 --help' for more information.";; --help | --hel | -h ) $as_echo "$ac_cs_usage"; exit ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil | --si | --s) ac_cs_silent=: ;; # This is an error. -*) as_fn_error $? "unrecognized option: \`$1' Try \`$0 --help' for more information." ;; *) as_fn_append ac_config_targets " $1" ac_need_defaults=false ;; esac shift done ac_configure_extra_args= if $ac_cs_silent; then exec 6>/dev/null ac_configure_extra_args="$ac_configure_extra_args --silent" fi _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 if \$ac_cs_recheck; then set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion shift \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6 CONFIG_SHELL='$SHELL' export CONFIG_SHELL exec "\$@" fi _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 exec 5>>config.log { echo sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX ## Running $as_me. ## _ASBOX $as_echo "$ac_log" } >&5 _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 # # INIT-COMMANDS # # The HP-UX ksh and POSIX shell print the target directory to stdout # if CDPATH is set. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH sed_quote_subst='$sed_quote_subst' double_quote_subst='$double_quote_subst' delay_variable_subst='$delay_variable_subst' macro_version='`$ECHO "$macro_version" | $SED "$delay_single_quote_subst"`' macro_revision='`$ECHO "$macro_revision" | $SED "$delay_single_quote_subst"`' enable_shared='`$ECHO "$enable_shared" | $SED "$delay_single_quote_subst"`' enable_static='`$ECHO "$enable_static" | $SED "$delay_single_quote_subst"`' pic_mode='`$ECHO "$pic_mode" | $SED "$delay_single_quote_subst"`' enable_fast_install='`$ECHO "$enable_fast_install" | $SED "$delay_single_quote_subst"`' SHELL='`$ECHO "$SHELL" | $SED "$delay_single_quote_subst"`' ECHO='`$ECHO "$ECHO" | $SED "$delay_single_quote_subst"`' PATH_SEPARATOR='`$ECHO "$PATH_SEPARATOR" | $SED "$delay_single_quote_subst"`' host_alias='`$ECHO "$host_alias" | $SED "$delay_single_quote_subst"`' host='`$ECHO "$host" | $SED "$delay_single_quote_subst"`' host_os='`$ECHO "$host_os" | $SED "$delay_single_quote_subst"`' build_alias='`$ECHO "$build_alias" | $SED "$delay_single_quote_subst"`' build='`$ECHO "$build" | $SED "$delay_single_quote_subst"`' build_os='`$ECHO "$build_os" | $SED "$delay_single_quote_subst"`' SED='`$ECHO "$SED" | $SED "$delay_single_quote_subst"`' Xsed='`$ECHO "$Xsed" | $SED "$delay_single_quote_subst"`' GREP='`$ECHO "$GREP" | $SED "$delay_single_quote_subst"`' EGREP='`$ECHO "$EGREP" | $SED "$delay_single_quote_subst"`' FGREP='`$ECHO "$FGREP" | $SED "$delay_single_quote_subst"`' LD='`$ECHO "$LD" | $SED "$delay_single_quote_subst"`' NM='`$ECHO "$NM" | $SED "$delay_single_quote_subst"`' LN_S='`$ECHO "$LN_S" | $SED "$delay_single_quote_subst"`' max_cmd_len='`$ECHO "$max_cmd_len" | $SED "$delay_single_quote_subst"`' ac_objext='`$ECHO "$ac_objext" | $SED "$delay_single_quote_subst"`' exeext='`$ECHO "$exeext" | $SED "$delay_single_quote_subst"`' lt_unset='`$ECHO "$lt_unset" | $SED "$delay_single_quote_subst"`' lt_SP2NL='`$ECHO "$lt_SP2NL" | $SED "$delay_single_quote_subst"`' lt_NL2SP='`$ECHO "$lt_NL2SP" | $SED "$delay_single_quote_subst"`' lt_cv_to_host_file_cmd='`$ECHO "$lt_cv_to_host_file_cmd" | $SED "$delay_single_quote_subst"`' lt_cv_to_tool_file_cmd='`$ECHO "$lt_cv_to_tool_file_cmd" | $SED "$delay_single_quote_subst"`' reload_flag='`$ECHO "$reload_flag" | $SED "$delay_single_quote_subst"`' 
reload_cmds='`$ECHO "$reload_cmds" | $SED "$delay_single_quote_subst"`' OBJDUMP='`$ECHO "$OBJDUMP" | $SED "$delay_single_quote_subst"`' deplibs_check_method='`$ECHO "$deplibs_check_method" | $SED "$delay_single_quote_subst"`' file_magic_cmd='`$ECHO "$file_magic_cmd" | $SED "$delay_single_quote_subst"`' file_magic_glob='`$ECHO "$file_magic_glob" | $SED "$delay_single_quote_subst"`' want_nocaseglob='`$ECHO "$want_nocaseglob" | $SED "$delay_single_quote_subst"`' DLLTOOL='`$ECHO "$DLLTOOL" | $SED "$delay_single_quote_subst"`' sharedlib_from_linklib_cmd='`$ECHO "$sharedlib_from_linklib_cmd" | $SED "$delay_single_quote_subst"`' AR='`$ECHO "$AR" | $SED "$delay_single_quote_subst"`' AR_FLAGS='`$ECHO "$AR_FLAGS" | $SED "$delay_single_quote_subst"`' archiver_list_spec='`$ECHO "$archiver_list_spec" | $SED "$delay_single_quote_subst"`' STRIP='`$ECHO "$STRIP" | $SED "$delay_single_quote_subst"`' RANLIB='`$ECHO "$RANLIB" | $SED "$delay_single_quote_subst"`' old_postinstall_cmds='`$ECHO "$old_postinstall_cmds" | $SED "$delay_single_quote_subst"`' old_postuninstall_cmds='`$ECHO "$old_postuninstall_cmds" | $SED "$delay_single_quote_subst"`' old_archive_cmds='`$ECHO "$old_archive_cmds" | $SED "$delay_single_quote_subst"`' lock_old_archive_extraction='`$ECHO "$lock_old_archive_extraction" | $SED "$delay_single_quote_subst"`' CC='`$ECHO "$CC" | $SED "$delay_single_quote_subst"`' CFLAGS='`$ECHO "$CFLAGS" | $SED "$delay_single_quote_subst"`' compiler='`$ECHO "$compiler" | $SED "$delay_single_quote_subst"`' GCC='`$ECHO "$GCC" | $SED "$delay_single_quote_subst"`' lt_cv_sys_global_symbol_pipe='`$ECHO "$lt_cv_sys_global_symbol_pipe" | $SED "$delay_single_quote_subst"`' lt_cv_sys_global_symbol_to_cdecl='`$ECHO "$lt_cv_sys_global_symbol_to_cdecl" | $SED "$delay_single_quote_subst"`' lt_cv_sys_global_symbol_to_c_name_address='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address" | $SED "$delay_single_quote_subst"`' lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address_lib_prefix" | $SED "$delay_single_quote_subst"`' nm_file_list_spec='`$ECHO "$nm_file_list_spec" | $SED "$delay_single_quote_subst"`' lt_sysroot='`$ECHO "$lt_sysroot" | $SED "$delay_single_quote_subst"`' objdir='`$ECHO "$objdir" | $SED "$delay_single_quote_subst"`' MAGIC_CMD='`$ECHO "$MAGIC_CMD" | $SED "$delay_single_quote_subst"`' lt_prog_compiler_no_builtin_flag='`$ECHO "$lt_prog_compiler_no_builtin_flag" | $SED "$delay_single_quote_subst"`' lt_prog_compiler_pic='`$ECHO "$lt_prog_compiler_pic" | $SED "$delay_single_quote_subst"`' lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`' lt_prog_compiler_static='`$ECHO "$lt_prog_compiler_static" | $SED "$delay_single_quote_subst"`' lt_cv_prog_compiler_c_o='`$ECHO "$lt_cv_prog_compiler_c_o" | $SED "$delay_single_quote_subst"`' need_locks='`$ECHO "$need_locks" | $SED "$delay_single_quote_subst"`' MANIFEST_TOOL='`$ECHO "$MANIFEST_TOOL" | $SED "$delay_single_quote_subst"`' DSYMUTIL='`$ECHO "$DSYMUTIL" | $SED "$delay_single_quote_subst"`' NMEDIT='`$ECHO "$NMEDIT" | $SED "$delay_single_quote_subst"`' LIPO='`$ECHO "$LIPO" | $SED "$delay_single_quote_subst"`' OTOOL='`$ECHO "$OTOOL" | $SED "$delay_single_quote_subst"`' OTOOL64='`$ECHO "$OTOOL64" | $SED "$delay_single_quote_subst"`' libext='`$ECHO "$libext" | $SED "$delay_single_quote_subst"`' shrext_cmds='`$ECHO "$shrext_cmds" | $SED "$delay_single_quote_subst"`' extract_expsyms_cmds='`$ECHO "$extract_expsyms_cmds" | $SED "$delay_single_quote_subst"`' archive_cmds_need_lc='`$ECHO 
"$archive_cmds_need_lc" | $SED "$delay_single_quote_subst"`' enable_shared_with_static_runtimes='`$ECHO "$enable_shared_with_static_runtimes" | $SED "$delay_single_quote_subst"`' export_dynamic_flag_spec='`$ECHO "$export_dynamic_flag_spec" | $SED "$delay_single_quote_subst"`' whole_archive_flag_spec='`$ECHO "$whole_archive_flag_spec" | $SED "$delay_single_quote_subst"`' compiler_needs_object='`$ECHO "$compiler_needs_object" | $SED "$delay_single_quote_subst"`' old_archive_from_new_cmds='`$ECHO "$old_archive_from_new_cmds" | $SED "$delay_single_quote_subst"`' old_archive_from_expsyms_cmds='`$ECHO "$old_archive_from_expsyms_cmds" | $SED "$delay_single_quote_subst"`' archive_cmds='`$ECHO "$archive_cmds" | $SED "$delay_single_quote_subst"`' archive_expsym_cmds='`$ECHO "$archive_expsym_cmds" | $SED "$delay_single_quote_subst"`' module_cmds='`$ECHO "$module_cmds" | $SED "$delay_single_quote_subst"`' module_expsym_cmds='`$ECHO "$module_expsym_cmds" | $SED "$delay_single_quote_subst"`' with_gnu_ld='`$ECHO "$with_gnu_ld" | $SED "$delay_single_quote_subst"`' allow_undefined_flag='`$ECHO "$allow_undefined_flag" | $SED "$delay_single_quote_subst"`' no_undefined_flag='`$ECHO "$no_undefined_flag" | $SED "$delay_single_quote_subst"`' hardcode_libdir_flag_spec='`$ECHO "$hardcode_libdir_flag_spec" | $SED "$delay_single_quote_subst"`' hardcode_libdir_separator='`$ECHO "$hardcode_libdir_separator" | $SED "$delay_single_quote_subst"`' hardcode_direct='`$ECHO "$hardcode_direct" | $SED "$delay_single_quote_subst"`' hardcode_direct_absolute='`$ECHO "$hardcode_direct_absolute" | $SED "$delay_single_quote_subst"`' hardcode_minus_L='`$ECHO "$hardcode_minus_L" | $SED "$delay_single_quote_subst"`' hardcode_shlibpath_var='`$ECHO "$hardcode_shlibpath_var" | $SED "$delay_single_quote_subst"`' hardcode_automatic='`$ECHO "$hardcode_automatic" | $SED "$delay_single_quote_subst"`' inherit_rpath='`$ECHO "$inherit_rpath" | $SED "$delay_single_quote_subst"`' link_all_deplibs='`$ECHO "$link_all_deplibs" | $SED "$delay_single_quote_subst"`' always_export_symbols='`$ECHO "$always_export_symbols" | $SED "$delay_single_quote_subst"`' export_symbols_cmds='`$ECHO "$export_symbols_cmds" | $SED "$delay_single_quote_subst"`' exclude_expsyms='`$ECHO "$exclude_expsyms" | $SED "$delay_single_quote_subst"`' include_expsyms='`$ECHO "$include_expsyms" | $SED "$delay_single_quote_subst"`' prelink_cmds='`$ECHO "$prelink_cmds" | $SED "$delay_single_quote_subst"`' postlink_cmds='`$ECHO "$postlink_cmds" | $SED "$delay_single_quote_subst"`' file_list_spec='`$ECHO "$file_list_spec" | $SED "$delay_single_quote_subst"`' variables_saved_for_relink='`$ECHO "$variables_saved_for_relink" | $SED "$delay_single_quote_subst"`' need_lib_prefix='`$ECHO "$need_lib_prefix" | $SED "$delay_single_quote_subst"`' need_version='`$ECHO "$need_version" | $SED "$delay_single_quote_subst"`' version_type='`$ECHO "$version_type" | $SED "$delay_single_quote_subst"`' runpath_var='`$ECHO "$runpath_var" | $SED "$delay_single_quote_subst"`' shlibpath_var='`$ECHO "$shlibpath_var" | $SED "$delay_single_quote_subst"`' shlibpath_overrides_runpath='`$ECHO "$shlibpath_overrides_runpath" | $SED "$delay_single_quote_subst"`' libname_spec='`$ECHO "$libname_spec" | $SED "$delay_single_quote_subst"`' library_names_spec='`$ECHO "$library_names_spec" | $SED "$delay_single_quote_subst"`' soname_spec='`$ECHO "$soname_spec" | $SED "$delay_single_quote_subst"`' install_override_mode='`$ECHO "$install_override_mode" | $SED "$delay_single_quote_subst"`' postinstall_cmds='`$ECHO 
"$postinstall_cmds" | $SED "$delay_single_quote_subst"`' postuninstall_cmds='`$ECHO "$postuninstall_cmds" | $SED "$delay_single_quote_subst"`' finish_cmds='`$ECHO "$finish_cmds" | $SED "$delay_single_quote_subst"`' finish_eval='`$ECHO "$finish_eval" | $SED "$delay_single_quote_subst"`' hardcode_into_libs='`$ECHO "$hardcode_into_libs" | $SED "$delay_single_quote_subst"`' sys_lib_search_path_spec='`$ECHO "$sys_lib_search_path_spec" | $SED "$delay_single_quote_subst"`' sys_lib_dlsearch_path_spec='`$ECHO "$sys_lib_dlsearch_path_spec" | $SED "$delay_single_quote_subst"`' hardcode_action='`$ECHO "$hardcode_action" | $SED "$delay_single_quote_subst"`' enable_dlopen='`$ECHO "$enable_dlopen" | $SED "$delay_single_quote_subst"`' enable_dlopen_self='`$ECHO "$enable_dlopen_self" | $SED "$delay_single_quote_subst"`' enable_dlopen_self_static='`$ECHO "$enable_dlopen_self_static" | $SED "$delay_single_quote_subst"`' old_striplib='`$ECHO "$old_striplib" | $SED "$delay_single_quote_subst"`' striplib='`$ECHO "$striplib" | $SED "$delay_single_quote_subst"`' compiler_lib_search_dirs='`$ECHO "$compiler_lib_search_dirs" | $SED "$delay_single_quote_subst"`' predep_objects='`$ECHO "$predep_objects" | $SED "$delay_single_quote_subst"`' postdep_objects='`$ECHO "$postdep_objects" | $SED "$delay_single_quote_subst"`' predeps='`$ECHO "$predeps" | $SED "$delay_single_quote_subst"`' postdeps='`$ECHO "$postdeps" | $SED "$delay_single_quote_subst"`' compiler_lib_search_path='`$ECHO "$compiler_lib_search_path" | $SED "$delay_single_quote_subst"`' LD_CXX='`$ECHO "$LD_CXX" | $SED "$delay_single_quote_subst"`' reload_flag_CXX='`$ECHO "$reload_flag_CXX" | $SED "$delay_single_quote_subst"`' reload_cmds_CXX='`$ECHO "$reload_cmds_CXX" | $SED "$delay_single_quote_subst"`' old_archive_cmds_CXX='`$ECHO "$old_archive_cmds_CXX" | $SED "$delay_single_quote_subst"`' compiler_CXX='`$ECHO "$compiler_CXX" | $SED "$delay_single_quote_subst"`' GCC_CXX='`$ECHO "$GCC_CXX" | $SED "$delay_single_quote_subst"`' lt_prog_compiler_no_builtin_flag_CXX='`$ECHO "$lt_prog_compiler_no_builtin_flag_CXX" | $SED "$delay_single_quote_subst"`' lt_prog_compiler_pic_CXX='`$ECHO "$lt_prog_compiler_pic_CXX" | $SED "$delay_single_quote_subst"`' lt_prog_compiler_wl_CXX='`$ECHO "$lt_prog_compiler_wl_CXX" | $SED "$delay_single_quote_subst"`' lt_prog_compiler_static_CXX='`$ECHO "$lt_prog_compiler_static_CXX" | $SED "$delay_single_quote_subst"`' lt_cv_prog_compiler_c_o_CXX='`$ECHO "$lt_cv_prog_compiler_c_o_CXX" | $SED "$delay_single_quote_subst"`' archive_cmds_need_lc_CXX='`$ECHO "$archive_cmds_need_lc_CXX" | $SED "$delay_single_quote_subst"`' enable_shared_with_static_runtimes_CXX='`$ECHO "$enable_shared_with_static_runtimes_CXX" | $SED "$delay_single_quote_subst"`' export_dynamic_flag_spec_CXX='`$ECHO "$export_dynamic_flag_spec_CXX" | $SED "$delay_single_quote_subst"`' whole_archive_flag_spec_CXX='`$ECHO "$whole_archive_flag_spec_CXX" | $SED "$delay_single_quote_subst"`' compiler_needs_object_CXX='`$ECHO "$compiler_needs_object_CXX" | $SED "$delay_single_quote_subst"`' old_archive_from_new_cmds_CXX='`$ECHO "$old_archive_from_new_cmds_CXX" | $SED "$delay_single_quote_subst"`' old_archive_from_expsyms_cmds_CXX='`$ECHO "$old_archive_from_expsyms_cmds_CXX" | $SED "$delay_single_quote_subst"`' archive_cmds_CXX='`$ECHO "$archive_cmds_CXX" | $SED "$delay_single_quote_subst"`' archive_expsym_cmds_CXX='`$ECHO "$archive_expsym_cmds_CXX" | $SED "$delay_single_quote_subst"`' module_cmds_CXX='`$ECHO "$module_cmds_CXX" | $SED "$delay_single_quote_subst"`' 
module_expsym_cmds_CXX='`$ECHO "$module_expsym_cmds_CXX" | $SED "$delay_single_quote_subst"`' with_gnu_ld_CXX='`$ECHO "$with_gnu_ld_CXX" | $SED "$delay_single_quote_subst"`' allow_undefined_flag_CXX='`$ECHO "$allow_undefined_flag_CXX" | $SED "$delay_single_quote_subst"`' no_undefined_flag_CXX='`$ECHO "$no_undefined_flag_CXX" | $SED "$delay_single_quote_subst"`' hardcode_libdir_flag_spec_CXX='`$ECHO "$hardcode_libdir_flag_spec_CXX" | $SED "$delay_single_quote_subst"`' hardcode_libdir_separator_CXX='`$ECHO "$hardcode_libdir_separator_CXX" | $SED "$delay_single_quote_subst"`' hardcode_direct_CXX='`$ECHO "$hardcode_direct_CXX" | $SED "$delay_single_quote_subst"`' hardcode_direct_absolute_CXX='`$ECHO "$hardcode_direct_absolute_CXX" | $SED "$delay_single_quote_subst"`' hardcode_minus_L_CXX='`$ECHO "$hardcode_minus_L_CXX" | $SED "$delay_single_quote_subst"`' hardcode_shlibpath_var_CXX='`$ECHO "$hardcode_shlibpath_var_CXX" | $SED "$delay_single_quote_subst"`' hardcode_automatic_CXX='`$ECHO "$hardcode_automatic_CXX" | $SED "$delay_single_quote_subst"`' inherit_rpath_CXX='`$ECHO "$inherit_rpath_CXX" | $SED "$delay_single_quote_subst"`' link_all_deplibs_CXX='`$ECHO "$link_all_deplibs_CXX" | $SED "$delay_single_quote_subst"`' always_export_symbols_CXX='`$ECHO "$always_export_symbols_CXX" | $SED "$delay_single_quote_subst"`' export_symbols_cmds_CXX='`$ECHO "$export_symbols_cmds_CXX" | $SED "$delay_single_quote_subst"`' exclude_expsyms_CXX='`$ECHO "$exclude_expsyms_CXX" | $SED "$delay_single_quote_subst"`' include_expsyms_CXX='`$ECHO "$include_expsyms_CXX" | $SED "$delay_single_quote_subst"`' prelink_cmds_CXX='`$ECHO "$prelink_cmds_CXX" | $SED "$delay_single_quote_subst"`' postlink_cmds_CXX='`$ECHO "$postlink_cmds_CXX" | $SED "$delay_single_quote_subst"`' file_list_spec_CXX='`$ECHO "$file_list_spec_CXX" | $SED "$delay_single_quote_subst"`' hardcode_action_CXX='`$ECHO "$hardcode_action_CXX" | $SED "$delay_single_quote_subst"`' compiler_lib_search_dirs_CXX='`$ECHO "$compiler_lib_search_dirs_CXX" | $SED "$delay_single_quote_subst"`' predep_objects_CXX='`$ECHO "$predep_objects_CXX" | $SED "$delay_single_quote_subst"`' postdep_objects_CXX='`$ECHO "$postdep_objects_CXX" | $SED "$delay_single_quote_subst"`' predeps_CXX='`$ECHO "$predeps_CXX" | $SED "$delay_single_quote_subst"`' postdeps_CXX='`$ECHO "$postdeps_CXX" | $SED "$delay_single_quote_subst"`' compiler_lib_search_path_CXX='`$ECHO "$compiler_lib_search_path_CXX" | $SED "$delay_single_quote_subst"`' LTCC='$LTCC' LTCFLAGS='$LTCFLAGS' compiler='$compiler_DEFAULT' # A function that is used when there is no print builtin or printf. func_fallback_echo () { eval 'cat <<_LTECHO_EOF \$1 _LTECHO_EOF' } # Quote evaled strings. 
for var in SHELL \ ECHO \ PATH_SEPARATOR \ SED \ GREP \ EGREP \ FGREP \ LD \ NM \ LN_S \ lt_SP2NL \ lt_NL2SP \ reload_flag \ OBJDUMP \ deplibs_check_method \ file_magic_cmd \ file_magic_glob \ want_nocaseglob \ DLLTOOL \ sharedlib_from_linklib_cmd \ AR \ AR_FLAGS \ archiver_list_spec \ STRIP \ RANLIB \ CC \ CFLAGS \ compiler \ lt_cv_sys_global_symbol_pipe \ lt_cv_sys_global_symbol_to_cdecl \ lt_cv_sys_global_symbol_to_c_name_address \ lt_cv_sys_global_symbol_to_c_name_address_lib_prefix \ nm_file_list_spec \ lt_prog_compiler_no_builtin_flag \ lt_prog_compiler_pic \ lt_prog_compiler_wl \ lt_prog_compiler_static \ lt_cv_prog_compiler_c_o \ need_locks \ MANIFEST_TOOL \ DSYMUTIL \ NMEDIT \ LIPO \ OTOOL \ OTOOL64 \ shrext_cmds \ export_dynamic_flag_spec \ whole_archive_flag_spec \ compiler_needs_object \ with_gnu_ld \ allow_undefined_flag \ no_undefined_flag \ hardcode_libdir_flag_spec \ hardcode_libdir_separator \ exclude_expsyms \ include_expsyms \ file_list_spec \ variables_saved_for_relink \ libname_spec \ library_names_spec \ soname_spec \ install_override_mode \ finish_eval \ old_striplib \ striplib \ compiler_lib_search_dirs \ predep_objects \ postdep_objects \ predeps \ postdeps \ compiler_lib_search_path \ LD_CXX \ reload_flag_CXX \ compiler_CXX \ lt_prog_compiler_no_builtin_flag_CXX \ lt_prog_compiler_pic_CXX \ lt_prog_compiler_wl_CXX \ lt_prog_compiler_static_CXX \ lt_cv_prog_compiler_c_o_CXX \ export_dynamic_flag_spec_CXX \ whole_archive_flag_spec_CXX \ compiler_needs_object_CXX \ with_gnu_ld_CXX \ allow_undefined_flag_CXX \ no_undefined_flag_CXX \ hardcode_libdir_flag_spec_CXX \ hardcode_libdir_separator_CXX \ exclude_expsyms_CXX \ include_expsyms_CXX \ file_list_spec_CXX \ compiler_lib_search_dirs_CXX \ predep_objects_CXX \ postdep_objects_CXX \ predeps_CXX \ postdeps_CXX \ compiler_lib_search_path_CXX; do case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in *[\\\\\\\`\\"\\\$]*) eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED \\"\\\$sed_quote_subst\\"\\\`\\\\\\"" ;; *) eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" ;; esac done # Double-quote double-evaled strings. for var in reload_cmds \ old_postinstall_cmds \ old_postuninstall_cmds \ old_archive_cmds \ extract_expsyms_cmds \ old_archive_from_new_cmds \ old_archive_from_expsyms_cmds \ archive_cmds \ archive_expsym_cmds \ module_cmds \ module_expsym_cmds \ export_symbols_cmds \ prelink_cmds \ postlink_cmds \ postinstall_cmds \ postuninstall_cmds \ finish_cmds \ sys_lib_search_path_spec \ sys_lib_dlsearch_path_spec \ reload_cmds_CXX \ old_archive_cmds_CXX \ old_archive_from_new_cmds_CXX \ old_archive_from_expsyms_cmds_CXX \ archive_cmds_CXX \ archive_expsym_cmds_CXX \ module_cmds_CXX \ module_expsym_cmds_CXX \ export_symbols_cmds_CXX \ prelink_cmds_CXX \ postlink_cmds_CXX; do case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in *[\\\\\\\`\\"\\\$]*) eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\"" ;; *) eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" ;; esac done ac_aux_dir='$ac_aux_dir' xsi_shell='$xsi_shell' lt_shell_append='$lt_shell_append' # See if we are running on zsh, and set the options which allow our # commands through without removal of \ escapes INIT. 
if test -n "\${ZSH_VERSION+set}" ; then setopt NO_GLOB_SUBST fi PACKAGE='$PACKAGE' VERSION='$VERSION' TIMESTAMP='$TIMESTAMP' RM='$RM' ofile='$ofile' # Capture the value of obsolete ALL_LINGUAS because we need it to compute # POFILES, UPDATEPOFILES, DUMMYPOFILES, GMOFILES, CATALOGS. But hide it # from automake. eval 'OBSOLETE_ALL_LINGUAS''="$ALL_LINGUAS"' # Capture the value of LINGUAS because we need it to compute CATALOGS. LINGUAS="${LINGUAS-%UNSET%}" _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # Handling of arguments. for ac_config_target in $ac_config_targets do case $ac_config_target in "src/config.h") CONFIG_HEADERS="$CONFIG_HEADERS src/config.h:autoconf/config.h.in" ;; "libtool") CONFIG_COMMANDS="$CONFIG_COMMANDS libtool" ;; "default-1") CONFIG_COMMANDS="$CONFIG_COMMANDS default-1" ;; "autoconf/Make.common") CONFIG_FILES="$CONFIG_FILES autoconf/Make.common" ;; "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;; "manpages/Makefile") CONFIG_FILES="$CONFIG_FILES manpages/Makefile" ;; "scripts/btraceback") CONFIG_FILES="$CONFIG_FILES scripts/btraceback" ;; "scripts/bconsole") CONFIG_FILES="$CONFIG_FILES scripts/bconsole" ;; "scripts/bacula") CONFIG_FILES="$CONFIG_FILES scripts/bacula" ;; "scripts/bacula-ctl-dir") CONFIG_FILES="$CONFIG_FILES scripts/bacula-ctl-dir" ;; "scripts/bacula-ctl-fd") CONFIG_FILES="$CONFIG_FILES scripts/bacula-ctl-fd" ;; "scripts/bacula-ctl-sd") CONFIG_FILES="$CONFIG_FILES scripts/bacula-ctl-sd" ;; "scripts/devel_bacula") CONFIG_FILES="$CONFIG_FILES scripts/devel_bacula" ;; "scripts/Makefile") CONFIG_FILES="$CONFIG_FILES scripts/Makefile" ;; "scripts/logrotate") CONFIG_FILES="$CONFIG_FILES scripts/logrotate" ;; "scripts/mtx-changer") CONFIG_FILES="$CONFIG_FILES scripts/mtx-changer" ;; "scripts/disk-changer") CONFIG_FILES="$CONFIG_FILES scripts/disk-changer" ;; "scripts/key-manager.py") CONFIG_FILES="$CONFIG_FILES scripts/key-manager.py" ;; "scripts/install-key-manager.sh") CONFIG_FILES="$CONFIG_FILES scripts/install-key-manager.sh" ;; "scripts/logwatch/Makefile") CONFIG_FILES="$CONFIG_FILES scripts/logwatch/Makefile" ;; "scripts/logwatch/logfile.bacula.conf") CONFIG_FILES="$CONFIG_FILES scripts/logwatch/logfile.bacula.conf" ;; "scripts/bat.desktop") CONFIG_FILES="$CONFIG_FILES scripts/bat.desktop" ;; "scripts/bat.desktop.xsu") CONFIG_FILES="$CONFIG_FILES scripts/bat.desktop.xsu" ;; "scripts/bat.desktop.consolehelper") CONFIG_FILES="$CONFIG_FILES scripts/bat.desktop.consolehelper" ;; "scripts/bat.console_apps") CONFIG_FILES="$CONFIG_FILES scripts/bat.console_apps" ;; "scripts/aws_cloud_driver") CONFIG_FILES="$CONFIG_FILES scripts/aws_cloud_driver" ;; "scripts/bacula-tray-monitor.desktop") CONFIG_FILES="$CONFIG_FILES scripts/bacula-tray-monitor.desktop" ;; "scripts/kubernetes-bacula-backup/Dockerfile") CONFIG_FILES="$CONFIG_FILES scripts/kubernetes-bacula-backup/Dockerfile" ;; "src/Makefile") CONFIG_FILES="$CONFIG_FILES src/Makefile" ;; "src/host.h") CONFIG_FILES="$CONFIG_FILES src/host.h" ;; "src/console/Makefile") CONFIG_FILES="$CONFIG_FILES src/console/Makefile" ;; "src/console/bconsole.conf") CONFIG_FILES="$CONFIG_FILES src/console/bconsole.conf" ;; "src/qt-console/bat.conf") CONFIG_FILES="$CONFIG_FILES src/qt-console/bat.conf" ;; "src/qt-console/bat.pro") CONFIG_FILES="$CONFIG_FILES src/qt-console/bat.pro" ;; "src/qt-console/bat.pro.mingw32") CONFIG_FILES="$CONFIG_FILES src/qt-console/bat.pro.mingw32" ;; "src/qt-console/bat.pro.mingw64") CONFIG_FILES="$CONFIG_FILES src/qt-console/bat.pro.mingw64" ;; "src/qt-console/install_conf_file") 
CONFIG_FILES="$CONFIG_FILES src/qt-console/install_conf_file" ;; "src/qt-console/tray-monitor/tray-monitor.conf") CONFIG_FILES="$CONFIG_FILES src/qt-console/tray-monitor/tray-monitor.conf" ;; "src/qt-console/tray-monitor/bacula-tray-monitor.conf") CONFIG_FILES="$CONFIG_FILES src/qt-console/tray-monitor/bacula-tray-monitor.conf" ;; "src/qt-console/tray-monitor/tray-monitor.pro") CONFIG_FILES="$CONFIG_FILES src/qt-console/tray-monitor/tray-monitor.pro" ;; "src/qt-console/tray-monitor/tray-monitor.pro.mingw32") CONFIG_FILES="$CONFIG_FILES src/qt-console/tray-monitor/tray-monitor.pro.mingw32" ;; "src/qt-console/tray-monitor/tray-monitor.pro.mingw64") CONFIG_FILES="$CONFIG_FILES src/qt-console/tray-monitor/tray-monitor.pro.mingw64" ;; "src/qt-console/tray-monitor/install_conf_file") CONFIG_FILES="$CONFIG_FILES src/qt-console/tray-monitor/install_conf_file" ;; "src/dird/Makefile") CONFIG_FILES="$CONFIG_FILES src/dird/Makefile" ;; "src/dird/bacula-dir.conf") CONFIG_FILES="$CONFIG_FILES src/dird/bacula-dir.conf" ;; "src/lib/Makefile") CONFIG_FILES="$CONFIG_FILES src/lib/Makefile" ;; "src/stored/Makefile") CONFIG_FILES="$CONFIG_FILES src/stored/Makefile" ;; "src/stored/bacula-sd.conf") CONFIG_FILES="$CONFIG_FILES src/stored/bacula-sd.conf" ;; "src/filed/Makefile") CONFIG_FILES="$CONFIG_FILES src/filed/Makefile" ;; "src/filed/bacula-fd.conf") CONFIG_FILES="$CONFIG_FILES src/filed/bacula-fd.conf" ;; "src/cats/Makefile") CONFIG_FILES="$CONFIG_FILES src/cats/Makefile" ;; "src/cats/make_catalog_backup.pl") CONFIG_FILES="$CONFIG_FILES src/cats/make_catalog_backup.pl" ;; "src/cats/make_catalog_backup") CONFIG_FILES="$CONFIG_FILES src/cats/make_catalog_backup" ;; "src/cats/delete_catalog_backup") CONFIG_FILES="$CONFIG_FILES src/cats/delete_catalog_backup" ;; "src/cats/create_postgresql_database") CONFIG_FILES="$CONFIG_FILES src/cats/create_postgresql_database" ;; "src/cats/update_postgresql_tables") CONFIG_FILES="$CONFIG_FILES src/cats/update_postgresql_tables" ;; "src/cats/make_postgresql_tables") CONFIG_FILES="$CONFIG_FILES src/cats/make_postgresql_tables" ;; "src/cats/grant_postgresql_privileges") CONFIG_FILES="$CONFIG_FILES src/cats/grant_postgresql_privileges" ;; "src/cats/drop_postgresql_tables") CONFIG_FILES="$CONFIG_FILES src/cats/drop_postgresql_tables" ;; "src/cats/drop_postgresql_database") CONFIG_FILES="$CONFIG_FILES src/cats/drop_postgresql_database" ;; "src/cats/create_mysql_database") CONFIG_FILES="$CONFIG_FILES src/cats/create_mysql_database" ;; "src/cats/update_mysql_tables") CONFIG_FILES="$CONFIG_FILES src/cats/update_mysql_tables" ;; "src/cats/make_mysql_tables") CONFIG_FILES="$CONFIG_FILES src/cats/make_mysql_tables" ;; "src/cats/grant_mysql_privileges") CONFIG_FILES="$CONFIG_FILES src/cats/grant_mysql_privileges" ;; "src/cats/drop_mysql_tables") CONFIG_FILES="$CONFIG_FILES src/cats/drop_mysql_tables" ;; "src/cats/drop_mysql_database") CONFIG_FILES="$CONFIG_FILES src/cats/drop_mysql_database" ;; "src/cats/create_sqlite3_database") CONFIG_FILES="$CONFIG_FILES src/cats/create_sqlite3_database" ;; "src/cats/update_sqlite3_tables") CONFIG_FILES="$CONFIG_FILES src/cats/update_sqlite3_tables" ;; "src/cats/make_sqlite3_tables") CONFIG_FILES="$CONFIG_FILES src/cats/make_sqlite3_tables" ;; "src/cats/grant_sqlite3_privileges") CONFIG_FILES="$CONFIG_FILES src/cats/grant_sqlite3_privileges" ;; "src/cats/drop_sqlite3_tables") CONFIG_FILES="$CONFIG_FILES src/cats/drop_sqlite3_tables" ;; "src/cats/drop_sqlite3_database") CONFIG_FILES="$CONFIG_FILES src/cats/drop_sqlite3_database" ;; "src/cats/sqlite") 
CONFIG_FILES="$CONFIG_FILES src/cats/sqlite" ;; "src/cats/mysql") CONFIG_FILES="$CONFIG_FILES src/cats/mysql" ;; "src/cats/create_bacula_database") CONFIG_FILES="$CONFIG_FILES src/cats/create_bacula_database" ;; "src/cats/update_bacula_tables") CONFIG_FILES="$CONFIG_FILES src/cats/update_bacula_tables" ;; "src/cats/grant_bacula_privileges") CONFIG_FILES="$CONFIG_FILES src/cats/grant_bacula_privileges" ;; "src/cats/make_bacula_tables") CONFIG_FILES="$CONFIG_FILES src/cats/make_bacula_tables" ;; "src/cats/drop_bacula_tables") CONFIG_FILES="$CONFIG_FILES src/cats/drop_bacula_tables" ;; "src/cats/drop_bacula_database") CONFIG_FILES="$CONFIG_FILES src/cats/drop_bacula_database" ;; "src/cats/install-default-backend") CONFIG_FILES="$CONFIG_FILES src/cats/install-default-backend" ;; "src/findlib/Makefile") CONFIG_FILES="$CONFIG_FILES src/findlib/Makefile" ;; "src/tools/Makefile") CONFIG_FILES="$CONFIG_FILES src/tools/Makefile" ;; "src/tools/cdp-client/Makefile") CONFIG_FILES="$CONFIG_FILES src/tools/cdp-client/Makefile" ;; "src/plugins/fd/Makefile.inc") CONFIG_FILES="$CONFIG_FILES src/plugins/fd/Makefile.inc" ;; "src/plugins/sd/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/sd/Makefile" ;; "src/plugins/dir/Makefile.inc") CONFIG_FILES="$CONFIG_FILES src/plugins/dir/Makefile.inc" ;; "src/plugins/fd/kubernetes-backend/baculak8s/plugins/k8sbackend/baculabackupimage.py") CONFIG_FILES="$CONFIG_FILES src/plugins/fd/kubernetes-backend/baculak8s/plugins/k8sbackend/baculabackupimage.py" ;; "po/Makefile.in") CONFIG_FILES="$CONFIG_FILES po/Makefile.in" ;; "updatedb/update_mysql_tables") CONFIG_FILES="$CONFIG_FILES updatedb/update_mysql_tables" ;; "updatedb/update_sqlite3_tables") CONFIG_FILES="$CONFIG_FILES updatedb/update_sqlite3_tables" ;; "updatedb/update_postgresql_tables") CONFIG_FILES="$CONFIG_FILES updatedb/update_postgresql_tables" ;; "updatedb/update_mysql_tables_9_to_10") CONFIG_FILES="$CONFIG_FILES updatedb/update_mysql_tables_9_to_10" ;; "updatedb/update_sqlite3_tables_9_to_10") CONFIG_FILES="$CONFIG_FILES updatedb/update_sqlite3_tables_9_to_10" ;; "updatedb/update_postgresql_tables_9_to_10") CONFIG_FILES="$CONFIG_FILES updatedb/update_postgresql_tables_9_to_10" ;; "updatedb/update_mysql_tables_10_to_11") CONFIG_FILES="$CONFIG_FILES updatedb/update_mysql_tables_10_to_11" ;; "updatedb/update_sqlite3_tables_10_to_11") CONFIG_FILES="$CONFIG_FILES updatedb/update_sqlite3_tables_10_to_11" ;; "updatedb/update_postgresql_tables_10_to_11") CONFIG_FILES="$CONFIG_FILES updatedb/update_postgresql_tables_10_to_11" ;; "updatedb/update_mysql_tables_11_to_12") CONFIG_FILES="$CONFIG_FILES updatedb/update_mysql_tables_11_to_12" ;; "updatedb/update_sqlite3_tables_11_to_12") CONFIG_FILES="$CONFIG_FILES updatedb/update_sqlite3_tables_11_to_12" ;; "updatedb/update_postgresql_tables_11_to_12") CONFIG_FILES="$CONFIG_FILES updatedb/update_postgresql_tables_11_to_12" ;; "updatedb/update_postgresql_tables_1021_to_1022") CONFIG_FILES="$CONFIG_FILES updatedb/update_postgresql_tables_1021_to_1022" ;; "updatedb/update_postgresql_tables_1022_to_1023") CONFIG_FILES="$CONFIG_FILES updatedb/update_postgresql_tables_1022_to_1023" ;; "updatedb/update_postgresql_tables_1023_to_1024") CONFIG_FILES="$CONFIG_FILES updatedb/update_postgresql_tables_1023_to_1024" ;; "updatedb/update_postgresql_tables_1024_to_1025") CONFIG_FILES="$CONFIG_FILES updatedb/update_postgresql_tables_1024_to_1025" ;; "updatedb/update_postgresql_tables_1025_to_1026") CONFIG_FILES="$CONFIG_FILES updatedb/update_postgresql_tables_1025_to_1026" ;; 
"updatedb/update_mysql_tables_1020_to_1021") CONFIG_FILES="$CONFIG_FILES updatedb/update_mysql_tables_1020_to_1021" ;; "updatedb/update_mysql_tables_1021_to_1022") CONFIG_FILES="$CONFIG_FILES updatedb/update_mysql_tables_1021_to_1022" ;; "updatedb/update_mysql_tables_1022_to_1023") CONFIG_FILES="$CONFIG_FILES updatedb/update_mysql_tables_1022_to_1023" ;; "updatedb/update_mysql_tables_1023_to_1024") CONFIG_FILES="$CONFIG_FILES updatedb/update_mysql_tables_1023_to_1024" ;; "updatedb/update_mysql_tables_1024_to_1025") CONFIG_FILES="$CONFIG_FILES updatedb/update_mysql_tables_1024_to_1025" ;; "updatedb/update_mysql_tables_1025_to_1026") CONFIG_FILES="$CONFIG_FILES updatedb/update_mysql_tables_1025_to_1026" ;; "examples/nagios/check_bacula/Makefile") CONFIG_FILES="$CONFIG_FILES examples/nagios/check_bacula/Makefile" ;; "platforms/rpms/redhat/bacula.spec") CONFIG_FILES="$CONFIG_FILES platforms/rpms/redhat/bacula.spec" ;; "platforms/rpms/redhat/bacula-bat.spec") CONFIG_FILES="$CONFIG_FILES platforms/rpms/redhat/bacula-bat.spec" ;; "platforms/rpms/redhat/bacula-docs.spec") CONFIG_FILES="$CONFIG_FILES platforms/rpms/redhat/bacula-docs.spec" ;; "platforms/rpms/redhat/bacula-mtx.spec") CONFIG_FILES="$CONFIG_FILES platforms/rpms/redhat/bacula-mtx.spec" ;; "platforms/rpms/suse/bacula.spec") CONFIG_FILES="$CONFIG_FILES platforms/rpms/suse/bacula.spec" ;; "platforms/rpms/suse/bacula-bat.spec") CONFIG_FILES="$CONFIG_FILES platforms/rpms/suse/bacula-bat.spec" ;; "platforms/rpms/suse/bacula-docs.spec") CONFIG_FILES="$CONFIG_FILES platforms/rpms/suse/bacula-docs.spec" ;; "platforms/rpms/suse/bacula-mtx.spec") CONFIG_FILES="$CONFIG_FILES platforms/rpms/suse/bacula-mtx.spec" ;; "$PFILES") CONFIG_FILES="$CONFIG_FILES $PFILES" ;; *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;; esac done # If the user did not use the arguments to specify the items to instantiate, # then the envvar interface is used. Set only those that are not. # We use the long form for the default assignment because of an extremely # bizarre bug on SunOS 4.1.3. if $ac_need_defaults; then test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers test "${CONFIG_COMMANDS+set}" = set || CONFIG_COMMANDS=$config_commands fi # Have a temporary directory for convenience. Make it in the build tree # simply because there is no reason against having it here, and in addition, # creating and moving files from /tmp can sometimes cause problems. # Hook for its removal unless debugging. # Note that there is a small window in which the directory will not be cleaned: # after its creation but before its name has been assigned to `$tmp'. $debug || { tmp= ac_tmp= trap 'exit_status=$? : "${ac_tmp:=$tmp}" { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status ' 0 trap 'as_fn_exit 1' 1 2 13 15 } # Create a (secure) tmp directory for tmp files. { tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && test -d "$tmp" } || { tmp=./conf$$-$RANDOM (umask 077 && mkdir "$tmp") } || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5 ac_tmp=$tmp # Set up the scripts for CONFIG_FILES section. # No need to generate them if there are no CONFIG_FILES. # This happens for instance with `./config.status config.h'. 
if test -n "$CONFIG_FILES"; then if $AWK 'BEGIN { getline <"/dev/null" }' /dev/null; then ac_cs_awk_getline=: ac_cs_awk_pipe_init= ac_cs_awk_read_file=' while ((getline aline < (F[key])) > 0) print(aline) close(F[key])' ac_cs_awk_pipe_fini= else ac_cs_awk_getline=false ac_cs_awk_pipe_init="print \"cat <<'|#_!!_#|' &&\"" ac_cs_awk_read_file=' print "|#_!!_#|" print "cat " F[key] " &&" '$ac_cs_awk_pipe_init # The final `:' finishes the AND list. ac_cs_awk_pipe_fini='END { print "|#_!!_#|"; print ":" }' fi ac_cr=`echo X | tr X '\015'` # On cygwin, bash can eat \r inside `` if the user requested igncr. # But we know of no other shell where ac_cr would be empty at this # point, so we can use a bashism as a fallback. if test "x$ac_cr" = x; then eval ac_cr=\$\'\\r\' fi ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null` if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then ac_cs_awk_cr='\\r' else ac_cs_awk_cr=$ac_cr fi echo 'BEGIN {' >"$ac_tmp/subs1.awk" && _ACEOF # Create commands to substitute file output variables. { echo "cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1" && echo 'cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK &&' && echo "$ac_subst_files" | sed 's/.*/F["&"]="$&"/' && echo "_ACAWK" && echo "_ACEOF" } >conf$$files.sh && . ./conf$$files.sh || as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 rm -f conf$$files.sh { echo "cat >conf$$subs.awk <<_ACEOF" && echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' && echo "_ACEOF" } >conf$$subs.sh || as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'` ac_delim='%!_!# ' for ac_last_try in false false false false false :; do . ./conf$$subs.sh || as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X` if test $ac_delim_n = $ac_delim_num; then break elif $ac_last_try; then as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 else ac_delim="$ac_delim!$ac_delim _$ac_delim!! " fi done rm -f conf$$subs.sh cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK && _ACEOF sed -n ' h s/^/S["/; s/!.*/"]=/ p g s/^[^!]*!// :repl t repl s/'"$ac_delim"'$// t delim :nl h s/\(.\{148\}\)..*/\1/ t more1 s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/ p n b repl :more1 s/["\\]/\\&/g; s/^/"/; s/$/"\\/ p g s/.\{148\}// t nl :delim h s/\(.\{148\}\)..*/\1/ t more2 s/["\\]/\\&/g; s/^/"/; s/$/"/ p b :more2 s/["\\]/\\&/g; s/^/"/; s/$/"\\/ p g s/.\{148\}// t delim ' >$CONFIG_STATUS || ac_write_fail=1 rm -f conf$$subs.awk cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 _ACAWK cat >>"\$ac_tmp/subs1.awk" <<_ACAWK && for (key in S) S_is_set[key] = 1 FS = "" \$ac_cs_awk_pipe_init } { line = $ 0 nfields = split(line, field, "@") substed = 0 len = length(field[1]) for (i = 2; i < nfields; i++) { key = field[i] keylen = length(key) if (S_is_set[key]) { value = S[key] line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3) len += length(value) + length(field[++i]) substed = 1 } else len += 1 + keylen } if (nfields == 3 && !substed) { key = field[2] if (F[key] != "" && line ~ /^[ ]*@.*@[ ]*$/) { \$ac_cs_awk_read_file next } } print line } \$ac_cs_awk_pipe_fini _ACAWK _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" else cat fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \ || as_fn_error $? 
"could not setup config files machinery" "$LINENO" 5 _ACEOF # VPATH may cause trouble with some makes, so we remove sole $(srcdir), # ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and # trailing colons and then remove the whole line if VPATH becomes empty # (actually we leave an empty line to preserve line numbers). if test "x$srcdir" = x.; then ac_vpsub='/^[ ]*VPATH[ ]*=[ ]*/{ h s/// s/^/:/ s/[ ]*$/:/ s/:\$(srcdir):/:/g s/:\${srcdir}:/:/g s/:@srcdir@:/:/g s/^:*// s/:*$// x s/\(=[ ]*\).*/\1/ G s/\n// s/^[^=]*=[ ]*$// }' fi cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 fi # test -n "$CONFIG_FILES" # Set up the scripts for CONFIG_HEADERS section. # No need to generate them if there are no CONFIG_HEADERS. # This happens for instance with `./config.status Makefile'. if test -n "$CONFIG_HEADERS"; then cat >"$ac_tmp/defines.awk" <<\_ACAWK || BEGIN { _ACEOF # Transform confdefs.h into an awk script `defines.awk', embedded as # here-document in config.status, that substitutes the proper values into # config.h.in to produce config.h. # Create a delimiter string that does not exist in confdefs.h, to ease # handling of long lines. ac_delim='%!_!# ' for ac_last_try in false false :; do ac_tt=`sed -n "/$ac_delim/p" confdefs.h` if test -z "$ac_tt"; then break elif $ac_last_try; then as_fn_error $? "could not make $CONFIG_HEADERS" "$LINENO" 5 else ac_delim="$ac_delim!$ac_delim _$ac_delim!! " fi done # For the awk script, D is an array of macro values keyed by name, # likewise P contains macro parameters if any. Preserve backslash # newline sequences. ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]* sed -n ' s/.\{148\}/&'"$ac_delim"'/g t rset :rset s/^[ ]*#[ ]*define[ ][ ]*/ / t def d :def s/\\$// t bsnl s/["\\]/\\&/g s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ D["\1"]=" \3"/p s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2"/p d :bsnl s/["\\]/\\&/g s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ D["\1"]=" \3\\\\\\n"\\/p t cont s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p t cont d :cont n s/.\{148\}/&'"$ac_delim"'/g t clear :clear s/\\$// t bsnlc s/["\\]/\\&/g; s/^/"/; s/$/"/p d :bsnlc s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p b cont ' >$CONFIG_STATUS || ac_write_fail=1 cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 for (key in D) D_is_set[key] = 1 FS = "" } /^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ { line = \$ 0 split(line, arg, " ") if (arg[1] == "#") { defundef = arg[2] mac1 = arg[3] } else { defundef = substr(arg[1], 2) mac1 = arg[2] } split(mac1, mac2, "(") #) macro = mac2[1] prefix = substr(line, 1, index(line, defundef) - 1) if (D_is_set[macro]) { # Preserve the white space surrounding the "#". print prefix "define", macro P[macro] D[macro] next } else { # Replace #undef with comments. This is necessary, for example, # in the case of _POSIX_SOURCE, which is predefined and required # on some systems where configure will not decide to define it. if (defundef == "undef") { print "/*", prefix defundef, macro, "*/" next } } } { print } _ACAWK _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 as_fn_error $? "could not setup config headers machinery" "$LINENO" 5 fi # test -n "$CONFIG_HEADERS" eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS :C $CONFIG_COMMANDS" shift for ac_tag do case $ac_tag in :[FHLC]) ac_mode=$ac_tag; continue;; esac case $ac_mode$ac_tag in :[FHL]*:*);; :L* | :C*:*) as_fn_error $? 
"invalid tag \`$ac_tag'" "$LINENO" 5;; :[FH]-) ac_tag=-:-;; :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; esac ac_save_IFS=$IFS IFS=: set x $ac_tag IFS=$ac_save_IFS shift ac_file=$1 shift case $ac_mode in :L) ac_source=$1;; :[FH]) ac_file_inputs= for ac_f do case $ac_f in -) ac_f="$ac_tmp/stdin";; *) # Look for the file first in the build tree, then in the source tree # (if the path is not absolute). The absolute path cannot be DOS-style, # because $ac_f cannot contain `:'. test -f "$ac_f" || case $ac_f in [\\/$]*) false;; *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; esac || as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;; esac case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac as_fn_append ac_file_inputs " '$ac_f'" done # Let's still pretend it is `configure' which instantiates (i.e., don't # use $as_me), people would be surprised to read: # /* config.h. Generated by config.status. */ configure_input='Generated from '` $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' `' by configure.' if test x"$ac_file" != x-; then configure_input="$ac_file. $configure_input" { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 $as_echo "$as_me: creating $ac_file" >&6;} fi # Neutralize special characters interpreted by sed in replacement strings. case $configure_input in #( *\&* | *\|* | *\\* ) ac_sed_conf_input=`$as_echo "$configure_input" | sed 's/[\\\\&|]/\\\\&/g'`;; #( *) ac_sed_conf_input=$configure_input;; esac case $ac_tag in *:-:* | *:-) cat >"$ac_tmp/stdin" \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; esac ;; esac ac_dir=`$as_dirname -- "$ac_file" || $as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$ac_file" : 'X\(//\)[^/]' \| \ X"$ac_file" : 'X\(//\)$' \| \ X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$ac_file" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` as_dir="$ac_dir"; as_fn_mkdir_p ac_builddir=. case "$ac_dir" in .) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` # A ".." for each directory in $ac_dir_suffix. ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` case $ac_top_builddir_sub in "") ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; esac ;; esac ac_abs_top_builddir=$ac_pwd ac_abs_builddir=$ac_pwd$ac_dir_suffix # for backward compatibility: ac_top_builddir=$ac_top_build_prefix case $srcdir in .) # We are building in place. ac_srcdir=. ac_top_srcdir=$ac_top_builddir_sub ac_abs_top_srcdir=$ac_pwd ;; [\\/]* | ?:[\\/]* ) # Absolute name. ac_srcdir=$srcdir$ac_dir_suffix; ac_top_srcdir=$srcdir ac_abs_top_srcdir=$srcdir ;; *) # Relative name. ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix ac_top_srcdir=$ac_top_build_prefix$srcdir ac_abs_top_srcdir=$ac_pwd/$srcdir ;; esac ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix case $ac_mode in :F) # # CONFIG_FILE # case $INSTALL in [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;; *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;; esac _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # If the template does not know about datarootdir, expand it. # FIXME: This hack should be removed a few years after 2.60. 
ac_datarootdir_hack=; ac_datarootdir_seen= ac_sed_dataroot=' /datarootdir/ { p q } /@datadir@/p /@docdir@/p /@infodir@/p /@localedir@/p /@mandir@/p' case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in *datarootdir*) ac_datarootdir_seen=yes;; *@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 $as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_datarootdir_hack=' s&@datadir@&$datadir&g s&@docdir@&$docdir&g s&@infodir@&$infodir&g s&@localedir@&$localedir&g s&@mandir@&$mandir&g s&\\\${datarootdir}&$datarootdir&g' ;; esac _ACEOF # Neutralize VPATH when `$srcdir' = `.'. # Shell code in configure.ac might set extrasub. # FIXME: do we really want to maintain this feature? cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_sed_extra="$ac_vpsub $extrasub _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 :t /@[a-zA-Z_][a-zA-Z_0-9]*@/!b s|@configure_input@|$ac_sed_conf_input|;t t s&@top_builddir@&$ac_top_builddir_sub&;t t s&@top_build_prefix@&$ac_top_build_prefix&;t t s&@srcdir@&$ac_srcdir&;t t s&@abs_srcdir@&$ac_abs_srcdir&;t t s&@top_srcdir@&$ac_top_srcdir&;t t s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t s&@builddir@&$ac_builddir&;t t s&@abs_builddir@&$ac_abs_builddir&;t t s&@abs_top_builddir@&$ac_abs_top_builddir&;t t s&@INSTALL@&$ac_INSTALL&;t t $ac_datarootdir_hack " eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | if $ac_cs_awk_getline; then $AWK -f "$ac_tmp/subs.awk" else $AWK -f "$ac_tmp/subs.awk" | $SHELL fi \ >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5 test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } && { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \ "$ac_tmp/out"`; test -z "$ac_out"; } && { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined" >&5 $as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined" >&2;} rm -f "$ac_tmp/stdin" case $ac_file in -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";; *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";; esac \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; :H) # # CONFIG_HEADER # if test x"$ac_file" != x-; then { $as_echo "/* $configure_input */" \ && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" } >"$ac_tmp/config.h" \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5 $as_echo "$as_me: $ac_file is unchanged" >&6;} else rm -f "$ac_file" mv "$ac_tmp/config.h" "$ac_file" \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 fi else $as_echo "/* $configure_input */" \ && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \ || as_fn_error $? "could not create -" "$LINENO" 5 fi ;; :C) { $as_echo "$as_me:${as_lineno-$LINENO}: executing $ac_file commands" >&5 $as_echo "$as_me: executing $ac_file commands" >&6;} ;; esac case $ac_file$ac_mode in "libtool":C) # See if we are running on zsh, and set the options which allow our # commands through without removal of \ escapes. 
if test -n "${ZSH_VERSION+set}" ; then setopt NO_GLOB_SUBST fi cfgfile="${ofile}T" trap "$RM \"$cfgfile\"; exit 1" 1 2 15 $RM "$cfgfile" cat <<_LT_EOF >> "$cfgfile" #! $SHELL # `$ECHO "$ofile" | sed 's%^.*/%%'` - Provide generalized library-building support services. # Generated automatically by $as_me ($PACKAGE$TIMESTAMP) $VERSION # Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: # NOTE: Changes made to this file will be lost: look at ltmain.sh. # # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, # 2006, 2007, 2008, 2009, 2010, 2011 Free Software # Foundation, Inc. # Written by Gordon Matzigkeit, 1996 # # This file is part of GNU Libtool. # # GNU Libtool is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of # the License, or (at your option) any later version. # # As a special exception to the GNU General Public License, # if you distribute this file as part of a program or library that # is built using GNU Libtool, you may include this file under the # same distribution terms that you use for the rest of that program. # # GNU Libtool is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Libtool; see the file COPYING. If not, a copy # can be downloaded from http://www.gnu.org/licenses/gpl.html, or # obtained by writing to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # The names of the tagged configurations supported by this script. available_tags="CXX " # ### BEGIN LIBTOOL CONFIG # Which release of libtool.m4 was used? macro_version=$macro_version macro_revision=$macro_revision # Whether or not to build shared libraries. build_libtool_libs=$enable_shared # Whether or not to build static libraries. build_old_libs=$enable_static # What type of objects to build. pic_mode=$pic_mode # Whether or not to optimize for fast installation. fast_install=$enable_fast_install # Shell to use when invoking shell scripts. SHELL=$lt_SHELL # An echo program that protects backslashes. ECHO=$lt_ECHO # The PATH separator for the build system. PATH_SEPARATOR=$lt_PATH_SEPARATOR # The host system. host_alias=$host_alias host=$host host_os=$host_os # The build system. build_alias=$build_alias build=$build build_os=$build_os # A sed program that does not truncate output. SED=$lt_SED # Sed that helps us avoid accidentally triggering echo(1) options like -n. Xsed="\$SED -e 1s/^X//" # A grep program that handles long lines. GREP=$lt_GREP # An ERE matcher. EGREP=$lt_EGREP # A literal string matcher. FGREP=$lt_FGREP # A BSD- or MS-compatible name lister. NM=$lt_NM # Whether we need soft or hard links. LN_S=$lt_LN_S # What is the maximum length of a command? max_cmd_len=$max_cmd_len # Object file suffix (normally "o"). objext=$ac_objext # Executable file suffix (normally ""). exeext=$exeext # whether the shell understands "unset". lt_unset=$lt_unset # turn spaces into newlines. SP2NL=$lt_lt_SP2NL # turn newlines into spaces. NL2SP=$lt_lt_NL2SP # convert \$build file names to \$host format. to_host_file_cmd=$lt_cv_to_host_file_cmd # convert \$build files to toolchain format. to_tool_file_cmd=$lt_cv_to_tool_file_cmd # An object symbol dumper. 
OBJDUMP=$lt_OBJDUMP # Method to check whether dependent libraries are shared objects. deplibs_check_method=$lt_deplibs_check_method # Command to use when deplibs_check_method = "file_magic". file_magic_cmd=$lt_file_magic_cmd # How to find potential files when deplibs_check_method = "file_magic". file_magic_glob=$lt_file_magic_glob # Find potential files using nocaseglob when deplibs_check_method = "file_magic". want_nocaseglob=$lt_want_nocaseglob # DLL creation program. DLLTOOL=$lt_DLLTOOL # Command to associate shared and link libraries. sharedlib_from_linklib_cmd=$lt_sharedlib_from_linklib_cmd # The archiver. AR=$lt_AR # Flags to create an archive. AR_FLAGS=$lt_AR_FLAGS # How to feed a file listing to the archiver. archiver_list_spec=$lt_archiver_list_spec # A symbol stripping program. STRIP=$lt_STRIP # Commands used to install an old-style archive. RANLIB=$lt_RANLIB old_postinstall_cmds=$lt_old_postinstall_cmds old_postuninstall_cmds=$lt_old_postuninstall_cmds # Whether to use a lock for old archive extraction. lock_old_archive_extraction=$lock_old_archive_extraction # A C compiler. LTCC=$lt_CC # LTCC compiler flags. LTCFLAGS=$lt_CFLAGS # Take the output of nm and produce a listing of raw symbols and C names. global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe # Transform the output of nm in a proper C declaration. global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl # Transform the output of nm in a C name address pair. global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address # Transform the output of nm in a C name address pair when lib prefix is needed. global_symbol_to_c_name_address_lib_prefix=$lt_lt_cv_sys_global_symbol_to_c_name_address_lib_prefix # Specify filename containing input files for \$NM. nm_file_list_spec=$lt_nm_file_list_spec # The root where to search for dependent libraries,and in which our libraries should be installed. lt_sysroot=$lt_sysroot # The name of the directory that contains temporary libtool files. objdir=$objdir # Used to examine libraries when file_magic_cmd begins with "file". MAGIC_CMD=$MAGIC_CMD # Must we lock files when doing compilation? need_locks=$lt_need_locks # Manifest tool. MANIFEST_TOOL=$lt_MANIFEST_TOOL # Tool to manipulate archived DWARF debug symbol files on Mac OS X. DSYMUTIL=$lt_DSYMUTIL # Tool to change global to local symbols on Mac OS X. NMEDIT=$lt_NMEDIT # Tool to manipulate fat objects and archives on Mac OS X. LIPO=$lt_LIPO # ldd/readelf like tool for Mach-O binaries on Mac OS X. OTOOL=$lt_OTOOL # ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4. OTOOL64=$lt_OTOOL64 # Old archive suffix (normally "a"). libext=$libext # Shared library suffix (normally ".so"). shrext_cmds=$lt_shrext_cmds # The commands to extract the exported symbol list from a shared archive. extract_expsyms_cmds=$lt_extract_expsyms_cmds # Variables whose values should be saved in libtool wrapper scripts and # restored at link time. variables_saved_for_relink=$lt_variables_saved_for_relink # Do we need the "lib" prefix for modules? need_lib_prefix=$need_lib_prefix # Do we need a version for libraries? need_version=$need_version # Library versioning type. version_type=$version_type # Shared library runtime path variable. runpath_var=$runpath_var # Shared library path variable. shlibpath_var=$shlibpath_var # Is shlibpath searched before the hard-coded library search path? shlibpath_overrides_runpath=$shlibpath_overrides_runpath # Format of library name prefix. libname_spec=$lt_libname_spec # List of archive names. 
First name is the real one, the rest are links. # The last name is the one that the linker finds with -lNAME library_names_spec=$lt_library_names_spec # The coded name of the library, if different from the real name. soname_spec=$lt_soname_spec # Permission mode override for installation of shared libraries. install_override_mode=$lt_install_override_mode # Command to use after installation of a shared archive. postinstall_cmds=$lt_postinstall_cmds # Command to use after uninstallation of a shared archive. postuninstall_cmds=$lt_postuninstall_cmds # Commands used to finish a libtool library installation in a directory. finish_cmds=$lt_finish_cmds # As "finish_cmds", except a single script fragment to be evaled but # not shown. finish_eval=$lt_finish_eval # Whether we should hardcode library paths into libraries. hardcode_into_libs=$hardcode_into_libs # Compile-time system search path for libraries. sys_lib_search_path_spec=$lt_sys_lib_search_path_spec # Run-time system search path for libraries. sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec # Whether dlopen is supported. dlopen_support=$enable_dlopen # Whether dlopen of programs is supported. dlopen_self=$enable_dlopen_self # Whether dlopen of statically linked programs is supported. dlopen_self_static=$enable_dlopen_self_static # Commands to strip libraries. old_striplib=$lt_old_striplib striplib=$lt_striplib # The linker used to build libraries. LD=$lt_LD # How to create reloadable object files. reload_flag=$lt_reload_flag reload_cmds=$lt_reload_cmds # Commands used to build an old-style archive. old_archive_cmds=$lt_old_archive_cmds # A language specific compiler. CC=$lt_compiler # Is the compiler the GNU compiler? with_gcc=$GCC # Compiler flag to turn off builtin functions. no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag # Additional compiler flags for building library objects. pic_flag=$lt_lt_prog_compiler_pic # How to pass a linker flag through the compiler. wl=$lt_lt_prog_compiler_wl # Compiler flag to prevent dynamic linking. link_static_flag=$lt_lt_prog_compiler_static # Does compiler simultaneously support -c and -o options? compiler_c_o=$lt_lt_cv_prog_compiler_c_o # Whether or not to add -lc for building shared libraries. build_libtool_need_lc=$archive_cmds_need_lc # Whether or not to disallow shared libs when runtime libs are static. allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes # Compiler flag to allow reflexive dlopens. export_dynamic_flag_spec=$lt_export_dynamic_flag_spec # Compiler flag to generate shared objects directly from archives. whole_archive_flag_spec=$lt_whole_archive_flag_spec # Whether the compiler copes with passing no objects directly. compiler_needs_object=$lt_compiler_needs_object # Create an old-style archive from a shared archive. old_archive_from_new_cmds=$lt_old_archive_from_new_cmds # Create a temporary old-style archive to link instead of a shared archive. old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds # Commands used to build a shared archive. archive_cmds=$lt_archive_cmds archive_expsym_cmds=$lt_archive_expsym_cmds # Commands used to build a loadable module if different from building # a shared archive. module_cmds=$lt_module_cmds module_expsym_cmds=$lt_module_expsym_cmds # Whether we are building with GNU ld or not. with_gnu_ld=$lt_with_gnu_ld # Flag that allows shared libraries with undefined symbols to be built. allow_undefined_flag=$lt_allow_undefined_flag # Flag that enforces no undefined symbols. 
no_undefined_flag=$lt_no_undefined_flag # Flag to hardcode \$libdir into a binary during linking. # This must work even if \$libdir does not exist hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec # Whether we need a single "-rpath" flag with a separated argument. hardcode_libdir_separator=$lt_hardcode_libdir_separator # Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes # DIR into the resulting binary. hardcode_direct=$hardcode_direct # Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes # DIR into the resulting binary and the resulting library dependency is # "absolute",i.e impossible to change by setting \${shlibpath_var} if the # library is relocated. hardcode_direct_absolute=$hardcode_direct_absolute # Set to "yes" if using the -LDIR flag during linking hardcodes DIR # into the resulting binary. hardcode_minus_L=$hardcode_minus_L # Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR # into the resulting binary. hardcode_shlibpath_var=$hardcode_shlibpath_var # Set to "yes" if building a shared library automatically hardcodes DIR # into the library and all subsequent libraries and executables linked # against it. hardcode_automatic=$hardcode_automatic # Set to yes if linker adds runtime paths of dependent libraries # to runtime path list. inherit_rpath=$inherit_rpath # Whether libtool must link a program against all its dependency libraries. link_all_deplibs=$link_all_deplibs # Set to "yes" if exported symbols are required. always_export_symbols=$always_export_symbols # The commands to list exported symbols. export_symbols_cmds=$lt_export_symbols_cmds # Symbols that should not be listed in the preloaded symbols. exclude_expsyms=$lt_exclude_expsyms # Symbols that must always be exported. include_expsyms=$lt_include_expsyms # Commands necessary for linking programs (against libraries) with templates. prelink_cmds=$lt_prelink_cmds # Commands necessary for finishing linking programs. postlink_cmds=$lt_postlink_cmds # Specify filename containing input files. file_list_spec=$lt_file_list_spec # How to hardcode a shared library path into an executable. hardcode_action=$hardcode_action # The directories searched by this compiler when creating a shared library. compiler_lib_search_dirs=$lt_compiler_lib_search_dirs # Dependencies to place before and after the objects being linked to # create a shared library. predep_objects=$lt_predep_objects postdep_objects=$lt_postdep_objects predeps=$lt_predeps postdeps=$lt_postdeps # The library search path used internally by the compiler when linking # a shared library. compiler_lib_search_path=$lt_compiler_lib_search_path # ### END LIBTOOL CONFIG _LT_EOF case $host_os in aix3*) cat <<\_LT_EOF >> "$cfgfile" # AIX sometimes has problems with the GCC collect2 program. For some # reason, if we set the COLLECT_NAMES environment variable, the problems # vanish in a puff of smoke. if test "X${COLLECT_NAMES+set}" != Xset; then COLLECT_NAMES= export COLLECT_NAMES fi _LT_EOF ;; esac ltmain="$ac_aux_dir/ltmain.sh" # We use sed instead of cat because bash on DJGPP gets confused if # if finds mixed CR/LF and LF-only lines. Since sed operates in # text mode, it properly converts lines to CR/LF. This bash problem # is reportedly fixed, but why not run on old versions too? 
sed '$q' "$ltmain" >> "$cfgfile" \ || (rm -f "$cfgfile"; exit 1) if test x"$xsi_shell" = xyes; then sed -e '/^func_dirname ()$/,/^} # func_dirname /c\ func_dirname ()\ {\ \ case ${1} in\ \ */*) func_dirname_result="${1%/*}${2}" ;;\ \ * ) func_dirname_result="${3}" ;;\ \ esac\ } # Extended-shell func_dirname implementation' "$cfgfile" > $cfgfile.tmp \ && mv -f "$cfgfile.tmp" "$cfgfile" \ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") test 0 -eq $? || _lt_function_replace_fail=: sed -e '/^func_basename ()$/,/^} # func_basename /c\ func_basename ()\ {\ \ func_basename_result="${1##*/}"\ } # Extended-shell func_basename implementation' "$cfgfile" > $cfgfile.tmp \ && mv -f "$cfgfile.tmp" "$cfgfile" \ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") test 0 -eq $? || _lt_function_replace_fail=: sed -e '/^func_dirname_and_basename ()$/,/^} # func_dirname_and_basename /c\ func_dirname_and_basename ()\ {\ \ case ${1} in\ \ */*) func_dirname_result="${1%/*}${2}" ;;\ \ * ) func_dirname_result="${3}" ;;\ \ esac\ \ func_basename_result="${1##*/}"\ } # Extended-shell func_dirname_and_basename implementation' "$cfgfile" > $cfgfile.tmp \ && mv -f "$cfgfile.tmp" "$cfgfile" \ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") test 0 -eq $? || _lt_function_replace_fail=: sed -e '/^func_stripname ()$/,/^} # func_stripname /c\ func_stripname ()\ {\ \ # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are\ \ # positional parameters, so assign one to ordinary parameter first.\ \ func_stripname_result=${3}\ \ func_stripname_result=${func_stripname_result#"${1}"}\ \ func_stripname_result=${func_stripname_result%"${2}"}\ } # Extended-shell func_stripname implementation' "$cfgfile" > $cfgfile.tmp \ && mv -f "$cfgfile.tmp" "$cfgfile" \ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") test 0 -eq $? || _lt_function_replace_fail=: sed -e '/^func_split_long_opt ()$/,/^} # func_split_long_opt /c\ func_split_long_opt ()\ {\ \ func_split_long_opt_name=${1%%=*}\ \ func_split_long_opt_arg=${1#*=}\ } # Extended-shell func_split_long_opt implementation' "$cfgfile" > $cfgfile.tmp \ && mv -f "$cfgfile.tmp" "$cfgfile" \ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") test 0 -eq $? || _lt_function_replace_fail=: sed -e '/^func_split_short_opt ()$/,/^} # func_split_short_opt /c\ func_split_short_opt ()\ {\ \ func_split_short_opt_arg=${1#??}\ \ func_split_short_opt_name=${1%"$func_split_short_opt_arg"}\ } # Extended-shell func_split_short_opt implementation' "$cfgfile" > $cfgfile.tmp \ && mv -f "$cfgfile.tmp" "$cfgfile" \ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") test 0 -eq $? || _lt_function_replace_fail=: sed -e '/^func_lo2o ()$/,/^} # func_lo2o /c\ func_lo2o ()\ {\ \ case ${1} in\ \ *.lo) func_lo2o_result=${1%.lo}.${objext} ;;\ \ *) func_lo2o_result=${1} ;;\ \ esac\ } # Extended-shell func_lo2o implementation' "$cfgfile" > $cfgfile.tmp \ && mv -f "$cfgfile.tmp" "$cfgfile" \ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") test 0 -eq $? || _lt_function_replace_fail=: sed -e '/^func_xform ()$/,/^} # func_xform /c\ func_xform ()\ {\ func_xform_result=${1%.*}.lo\ } # Extended-shell func_xform implementation' "$cfgfile" > $cfgfile.tmp \ && mv -f "$cfgfile.tmp" "$cfgfile" \ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") test 0 -eq $? 
|| _lt_function_replace_fail=: sed -e '/^func_arith ()$/,/^} # func_arith /c\ func_arith ()\ {\ func_arith_result=$(( $* ))\ } # Extended-shell func_arith implementation' "$cfgfile" > $cfgfile.tmp \ && mv -f "$cfgfile.tmp" "$cfgfile" \ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") test 0 -eq $? || _lt_function_replace_fail=: sed -e '/^func_len ()$/,/^} # func_len /c\ func_len ()\ {\ func_len_result=${#1}\ } # Extended-shell func_len implementation' "$cfgfile" > $cfgfile.tmp \ && mv -f "$cfgfile.tmp" "$cfgfile" \ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") test 0 -eq $? || _lt_function_replace_fail=: fi if test x"$lt_shell_append" = xyes; then sed -e '/^func_append ()$/,/^} # func_append /c\ func_append ()\ {\ eval "${1}+=\\${2}"\ } # Extended-shell func_append implementation' "$cfgfile" > $cfgfile.tmp \ && mv -f "$cfgfile.tmp" "$cfgfile" \ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") test 0 -eq $? || _lt_function_replace_fail=: sed -e '/^func_append_quoted ()$/,/^} # func_append_quoted /c\ func_append_quoted ()\ {\ \ func_quote_for_eval "${2}"\ \ eval "${1}+=\\\\ \\$func_quote_for_eval_result"\ } # Extended-shell func_append_quoted implementation' "$cfgfile" > $cfgfile.tmp \ && mv -f "$cfgfile.tmp" "$cfgfile" \ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") test 0 -eq $? || _lt_function_replace_fail=: # Save a `func_append' function call where possible by direct use of '+=' sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1+="%g' $cfgfile > $cfgfile.tmp \ && mv -f "$cfgfile.tmp" "$cfgfile" \ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") test 0 -eq $? || _lt_function_replace_fail=: else # Save a `func_append' function call even when '+=' is not available sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1="$\1%g' $cfgfile > $cfgfile.tmp \ && mv -f "$cfgfile.tmp" "$cfgfile" \ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") test 0 -eq $? || _lt_function_replace_fail=: fi if test x"$_lt_function_replace_fail" = x":"; then { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Unable to substitute extended shell functions in $ofile" >&5 $as_echo "$as_me: WARNING: Unable to substitute extended shell functions in $ofile" >&2;} fi mv -f "$cfgfile" "$ofile" || (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") chmod +x "$ofile" cat <<_LT_EOF >> "$ofile" # ### BEGIN LIBTOOL TAG CONFIG: CXX # The linker used to build libraries. LD=$lt_LD_CXX # How to create reloadable object files. reload_flag=$lt_reload_flag_CXX reload_cmds=$lt_reload_cmds_CXX # Commands used to build an old-style archive. old_archive_cmds=$lt_old_archive_cmds_CXX # A language specific compiler. CC=$lt_compiler_CXX # Is the compiler the GNU compiler? with_gcc=$GCC_CXX # Compiler flag to turn off builtin functions. no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag_CXX # Additional compiler flags for building library objects. pic_flag=$lt_lt_prog_compiler_pic_CXX # How to pass a linker flag through the compiler. wl=$lt_lt_prog_compiler_wl_CXX # Compiler flag to prevent dynamic linking. link_static_flag=$lt_lt_prog_compiler_static_CXX # Does compiler simultaneously support -c and -o options? compiler_c_o=$lt_lt_cv_prog_compiler_c_o_CXX # Whether or not to add -lc for building shared libraries. build_libtool_need_lc=$archive_cmds_need_lc_CXX # Whether or not to disallow shared libs when runtime libs are static. 
allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes_CXX # Compiler flag to allow reflexive dlopens. export_dynamic_flag_spec=$lt_export_dynamic_flag_spec_CXX # Compiler flag to generate shared objects directly from archives. whole_archive_flag_spec=$lt_whole_archive_flag_spec_CXX # Whether the compiler copes with passing no objects directly. compiler_needs_object=$lt_compiler_needs_object_CXX # Create an old-style archive from a shared archive. old_archive_from_new_cmds=$lt_old_archive_from_new_cmds_CXX # Create a temporary old-style archive to link instead of a shared archive. old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds_CXX # Commands used to build a shared archive. archive_cmds=$lt_archive_cmds_CXX archive_expsym_cmds=$lt_archive_expsym_cmds_CXX # Commands used to build a loadable module if different from building # a shared archive. module_cmds=$lt_module_cmds_CXX module_expsym_cmds=$lt_module_expsym_cmds_CXX # Whether we are building with GNU ld or not. with_gnu_ld=$lt_with_gnu_ld_CXX # Flag that allows shared libraries with undefined symbols to be built. allow_undefined_flag=$lt_allow_undefined_flag_CXX # Flag that enforces no undefined symbols. no_undefined_flag=$lt_no_undefined_flag_CXX # Flag to hardcode \$libdir into a binary during linking. # This must work even if \$libdir does not exist hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec_CXX # Whether we need a single "-rpath" flag with a separated argument. hardcode_libdir_separator=$lt_hardcode_libdir_separator_CXX # Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes # DIR into the resulting binary. hardcode_direct=$hardcode_direct_CXX # Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes # DIR into the resulting binary and the resulting library dependency is # "absolute",i.e impossible to change by setting \${shlibpath_var} if the # library is relocated. hardcode_direct_absolute=$hardcode_direct_absolute_CXX # Set to "yes" if using the -LDIR flag during linking hardcodes DIR # into the resulting binary. hardcode_minus_L=$hardcode_minus_L_CXX # Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR # into the resulting binary. hardcode_shlibpath_var=$hardcode_shlibpath_var_CXX # Set to "yes" if building a shared library automatically hardcodes DIR # into the library and all subsequent libraries and executables linked # against it. hardcode_automatic=$hardcode_automatic_CXX # Set to yes if linker adds runtime paths of dependent libraries # to runtime path list. inherit_rpath=$inherit_rpath_CXX # Whether libtool must link a program against all its dependency libraries. link_all_deplibs=$link_all_deplibs_CXX # Set to "yes" if exported symbols are required. always_export_symbols=$always_export_symbols_CXX # The commands to list exported symbols. export_symbols_cmds=$lt_export_symbols_cmds_CXX # Symbols that should not be listed in the preloaded symbols. exclude_expsyms=$lt_exclude_expsyms_CXX # Symbols that must always be exported. include_expsyms=$lt_include_expsyms_CXX # Commands necessary for linking programs (against libraries) with templates. prelink_cmds=$lt_prelink_cmds_CXX # Commands necessary for finishing linking programs. postlink_cmds=$lt_postlink_cmds_CXX # Specify filename containing input files. file_list_spec=$lt_file_list_spec_CXX # How to hardcode a shared library path into an executable. hardcode_action=$hardcode_action_CXX # The directories searched by this compiler when creating a shared library. 
compiler_lib_search_dirs=$lt_compiler_lib_search_dirs_CXX # Dependencies to place before and after the objects being linked to # create a shared library. predep_objects=$lt_predep_objects_CXX postdep_objects=$lt_postdep_objects_CXX predeps=$lt_predeps_CXX postdeps=$lt_postdeps_CXX # The library search path used internally by the compiler when linking # a shared library. compiler_lib_search_path=$lt_compiler_lib_search_path_CXX # ### END LIBTOOL TAG CONFIG: CXX _LT_EOF ;; "default-1":C) for ac_file in $CONFIG_FILES; do # Support "outfile[:infile[:infile...]]" case "$ac_file" in *:*) ac_file=`echo "$ac_file"|sed 's%:.*%%'` ;; esac # PO directories have a Makefile.in generated from Makefile.in.in. case "$ac_file" in */Makefile.in) # Adjust a relative srcdir. ac_dir=`echo "$ac_file"|sed 's%/[^/][^/]*$%%'` ac_dir_suffix="/`echo "$ac_dir"|sed 's%^\./%%'`" ac_dots=`echo "$ac_dir_suffix"|sed 's%/[^/]*%../%g'` # In autoconf-2.13 it is called $ac_given_srcdir. # In autoconf-2.50 it is called $srcdir. test -n "$ac_given_srcdir" || ac_given_srcdir="$srcdir" case "$ac_given_srcdir" in .) top_srcdir=`echo $ac_dots|sed 's%/$%%'` ;; /*) top_srcdir="$ac_given_srcdir" ;; *) top_srcdir="$ac_dots$ac_given_srcdir" ;; esac # Treat a directory as a PO directory if and only if it has a # POTFILES.in file. This allows packages to have multiple PO # directories under different names or in different locations. if test -f "$ac_given_srcdir/$ac_dir/POTFILES.in"; then rm -f "$ac_dir/POTFILES" test -n "$as_me" && echo "$as_me: creating $ac_dir/POTFILES" || echo "creating $ac_dir/POTFILES" cat "$ac_given_srcdir/$ac_dir/POTFILES.in" | sed -e "/^#/d" -e "/^[ ]*\$/d" -e "s,.*, $top_srcdir/& \\\\," | sed -e "\$s/\(.*\) \\\\/\1/" > "$ac_dir/POTFILES" POMAKEFILEDEPS="POTFILES.in" # ALL_LINGUAS, POFILES, UPDATEPOFILES, DUMMYPOFILES, GMOFILES depend # on $ac_dir but don't depend on user-specified configuration # parameters. if test -f "$ac_given_srcdir/$ac_dir/LINGUAS"; then # The LINGUAS file contains the set of available languages. if test -n "$OBSOLETE_ALL_LINGUAS"; then test -n "$as_me" && echo "$as_me: setting ALL_LINGUAS in configure.in is obsolete" || echo "setting ALL_LINGUAS in configure.in is obsolete" fi ALL_LINGUAS_=`sed -e "/^#/d" -e "s/#.*//" "$ac_given_srcdir/$ac_dir/LINGUAS"` # Hide the ALL_LINGUAS assigment from automake. eval 'ALL_LINGUAS''=$ALL_LINGUAS_' POMAKEFILEDEPS="$POMAKEFILEDEPS LINGUAS" else # The set of available languages was given in configure.in. eval 'ALL_LINGUAS''=$OBSOLETE_ALL_LINGUAS' fi # Compute POFILES # as $(foreach lang, $(ALL_LINGUAS), $(srcdir)/$(lang).po) # Compute UPDATEPOFILES # as $(foreach lang, $(ALL_LINGUAS), $(lang).po-update) # Compute DUMMYPOFILES # as $(foreach lang, $(ALL_LINGUAS), $(lang).nop) # Compute GMOFILES # as $(foreach lang, $(ALL_LINGUAS), $(srcdir)/$(lang).gmo) case "$ac_given_srcdir" in .) srcdirpre= ;; *) srcdirpre='$(srcdir)/' ;; esac POFILES= UPDATEPOFILES= DUMMYPOFILES= GMOFILES= for lang in $ALL_LINGUAS; do POFILES="$POFILES $srcdirpre$lang.po" UPDATEPOFILES="$UPDATEPOFILES $lang.po-update" DUMMYPOFILES="$DUMMYPOFILES $lang.nop" GMOFILES="$GMOFILES $srcdirpre$lang.gmo" done # CATALOGS depends on both $ac_dir and the user's LINGUAS # environment variable. INST_LINGUAS= if test -n "$ALL_LINGUAS"; then for presentlang in $ALL_LINGUAS; do useit=no if test "%UNSET%" != "$LINGUAS"; then desiredlanguages="$LINGUAS" else desiredlanguages="$ALL_LINGUAS" fi for desiredlang in $desiredlanguages; do # Use the presentlang catalog if desiredlang is # a. 
equal to presentlang, or # b. a variant of presentlang (because in this case, # presentlang can be used as a fallback for messages # which are not translated in the desiredlang catalog). case "$desiredlang" in "$presentlang"*) useit=yes;; esac done if test $useit = yes; then INST_LINGUAS="$INST_LINGUAS $presentlang" fi done fi CATALOGS= if test -n "$INST_LINGUAS"; then for lang in $INST_LINGUAS; do CATALOGS="$CATALOGS $lang.gmo" done fi test -n "$as_me" && echo "$as_me: creating $ac_dir/Makefile" || echo "creating $ac_dir/Makefile" sed -e "/^POTFILES =/r $ac_dir/POTFILES" -e "/^# Makevars/r $ac_given_srcdir/$ac_dir/Makevars" -e "s|@POFILES@|$POFILES|g" -e "s|@UPDATEPOFILES@|$UPDATEPOFILES|g" -e "s|@DUMMYPOFILES@|$DUMMYPOFILES|g" -e "s|@GMOFILES@|$GMOFILES|g" -e "s|@CATALOGS@|$CATALOGS|g" -e "s|@POMAKEFILEDEPS@|$POMAKEFILEDEPS|g" "$ac_dir/Makefile.in" > "$ac_dir/Makefile" for f in "$ac_given_srcdir/$ac_dir"/Rules-*; do if test -f "$f"; then case "$f" in *.orig | *.bak | *~) ;; *) cat "$f" >> "$ac_dir/Makefile" ;; esac fi done fi ;; esac done ;; esac done # for ac_tag as_fn_exit 0 _ACEOF ac_clean_files=$ac_clean_files_save test $ac_write_fail = 0 || as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5 # configure is writing to config.log, and then calls config.status. # config.status does its own redirection, appending to config.log. # Unfortunately, on DOS this fails, as config.log is still kept open # by configure, so config.status won't be able to write to it; its # output is simply discarded. So we exec the FD to /dev/null, # effectively closing config.log, so it can be properly (re)opened and # appended to by config.status. When coming back to configure, we # need to make the FD available again. if test "$no_create" != yes; then ac_cs_success=: ac_config_status_args= test "$silent" = yes && ac_config_status_args="$ac_config_status_args --quiet" exec 5>/dev/null $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false exec 5>>config.log # Use ||, not &&, to avoid exiting from the if with $? = 1, which # would make configure fail if this is the last instruction. $ac_cs_success || as_fn_exit 1 fi if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} fi if test "${support_bat}" = "yes" ; then if test "x$QMAKE" = "xnone"; then as_fn_error $? "Could not find qmake $PATH. Check your Qt installation" "$LINENO" 5 fi cd src/qt-console echo "Creating bat Makefile" touch bat chmod 755 bat rm -f Makefile rm -rf moc32 obj32 moc64 obj64 ui32 ui64 $QMAKE ${MAKE:-make} clean cd tray-monitor echo "Creating tray-monitor Makefile" rm -f Makefile rm -rf moc32 obj32 moc64 obj64 ui32 ui64 $QMAKE ${MAKE:-make} clean $QMAKE ${MAKE:-make} clean cd ${BUILD_DIR} fi if test X"$GCC" = "Xyes" ; then echo "Doing make of dependencies" ${MAKE:-make} depend fi cd src/qt-console chmod 755 install_conf_file build-depkgs-qt-console cd tray-monitor chmod 755 install_conf_file cd ${BUILD_DIR} cd scripts chmod 755 bacula btraceback mtx-changer chmod 755 bconsole disk-changer devel_bacula logrotate chmod 755 key-manager.py install-key-manager.sh cd .. 
c=updatedb chmod 755 $c/update_mysql_tables_10_to_11 $c/update_sqlite3_tables_10_to_11 chmod 755 $c/update_postgresql_tables_10_to_11 chmod 755 $c/update_mysql_tables_11_to_12 $c/update_sqlite3_tables_11_to_12 chmod 755 $c/update_postgresql_tables_11_to_12 c=src/cats chmod 755 $c/create_bacula_database $c/update_bacula_tables $c/make_bacula_tables chmod 755 $c/grant_bacula_privileges $c/drop_bacula_tables $c/drop_bacula_database chmod 755 $c/create_mysql_database $c/update_mysql_tables $c/make_mysql_tables chmod 755 $c/grant_mysql_privileges $c/drop_mysql_tables $c/drop_mysql_database chmod 755 $c/create_sqlite3_database $c/update_sqlite3_tables $c/make_sqlite3_tables chmod 755 $c/grant_sqlite3_privileges $c/drop_sqlite3_tables $c/drop_sqlite3_database chmod 755 $c/create_postgresql_database $c/update_postgresql_tables $c/make_postgresql_tables chmod 755 $c/grant_postgresql_privileges $c/drop_postgresql_tables $c/drop_postgresql_database chmod 755 $c/make_catalog_backup $c/delete_catalog_backup $c/make_catalog_backup.pl chmod 755 $c/sqlite chmod 755 $c/mysql chmod 755 $c/install-default-backend chmod 755 src/win32/build-depkgs-mingw32 src/win32/build-depkgs-mingw-w64 if test "x$ac_cv_sys_largefile_CFLAGS" != "xno" ; then largefile_support="yes" fi if test X"$GCC" = "Xyes" ; then CCVERSION=`${CC} --version | tr '\n' ' ' | cut -f 3 -d ' '` if test "x${CCVERSION}" = "x" ; then CCVERSION=`${CC} --version | tr '\n' ' ' | cut -f 1 -d ' '` fi CXXVERSION=`${CXX} --version | tr '\n' ' ' | cut -f 3 -d ' '` if test x"${CXXVERSION}" = x ; then CXXVERSION=`${CXX} --version | tr '\n' ' ' | cut -f 1 -d ' '` fi fi # clean up any old junk echo " " echo "Cleaning up" echo " " ${MAKE:-make} clean echo " Configuration on `date`: Host: ${host}${post_host} -- ${DISTNAME} ${DISTVER} Bacula version: ${BACULA} ${VERSION} (${DATE}) Source code location: ${srcdir} Install binaries: ${sbindir} Install libraries: ${libdir} Install config files: ${sysconfdir} Scripts directory: ${scriptdir} Archive directory: ${archivedir} Working directory: ${working_dir} PID directory: ${piddir} Subsys directory: ${subsysdir} Man directory: ${mandir} Data directory: ${datarootdir} Plugin directory: ${plugindir} C Compiler: ${CC} ${CCVERSION} C++ Compiler: ${CXX} ${CXXVERSION} Compiler flags: ${WCFLAGS} ${CFLAGS} Linker flags: ${WLDFLAGS} ${LDFLAGS} Libraries: ${LIBS} Statically Linked Tools: ${support_static_tools} Statically Linked FD: ${support_static_fd} Statically Linked SD: ${support_static_sd} Statically Linked DIR: ${support_static_dir} Statically Linked CONS: ${support_static_cons} Database backends: ${db_backends} Database port: ${db_port} Database name: ${db_name} Database user: ${db_user} Database SSL options: ${db_ssl_options} Job Output Email: ${job_email} Traceback Email: ${dump_email} SMTP Host Address: ${smtp_host} Director Port: ${dir_port} File daemon Port: ${fd_port} Storage daemon Port: ${sd_port} Director User: ${dir_user} Director Group: ${dir_group} Storage Daemon User: ${sd_user} Storage DaemonGroup: ${sd_group} File Daemon User: ${fd_user} File Daemon Group: ${fd_group} Large file support: $largefile_support Bacula conio support: ${got_conio} ${CONS_LIBS} readline support: ${got_readline} ${PRTREADLINE_SRC} TCP Wrappers support: ${TCPW_MSG} ${WRAPLIBS} TLS support: ${support_tls} Encryption support: ${support_crypto} ZLIB support: ${have_zlib} LZO support: ${have_lzo} S3 support: ${have_libs3} LIBCAP support: ${have_libcap} ZSTD support: ${support_zstd} enable-smartalloc: ${support_smartalloc} enable-lockmgr: 
${support_lockmgr} bat support: ${support_bat} client-only: ${build_client_only} build-dird: ${build_dird} build-stored: ${build_stored} Plugin support: ${have_plugins} LDAP support: ${ldap_support} LDAP StartTLS support: ${ldap_start_tls_support} AFS support: ${have_afs} ACL support: ${have_acl} XATTR support: ${have_xattr} GPFS support: ${have_gpfs} ${GPFSLIB} systemd support: ${support_systemd} ${SYSTEMD_UNITDIR} Batch insert enabled: ${batch_insert_db_backends} Plugins: - Docker: ${support_docker} - Kubernetes: ${support_kubernetes} - LDAP BPAM: ${support_ldap_bpam} - TOTP BPAM: ${support_totp_bpam} - CDP: ${support_cdp} - Antivirus: ${support_antivirus} - aws: ${have_aws} " > config.out # create a small shell script useful for support with # configure options and config.out info cat > scripts/bacula_config << EOF #!/bin/sh cat << __EOC__ $ $0 $ac_configure_args EOF cat config.out >> scripts/bacula_config echo __EOC__ >> scripts/bacula_config chmod 755 scripts/bacula_config cat config.out bacula-15.0.3/manpages/0000755000175000017500000000000014771010173014455 5ustar bsbuildbsbuildbacula-15.0.3/manpages/bconsole.80000644000175000017500000000237714771010173016363 0ustar bsbuildbsbuild.\" Hey, EMACS: -*- nroff -*- .\" First parameter, NAME, should be all caps .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection .\" other parameters are allowed: see man(7), man(1) .TH BCONSOLE 8 "4 December 2009" "Kern Sibbald" "Network backup, recovery and verification" .\" Please adjust this date whenever revising the manpage. .\" .SH NAME bconsole \- Bacula's management Console .SH SYNOPSIS .B bconsole .RI [options] .br .SH DESCRIPTION This manual page documents briefly the .B bconsole command. .PP .SH OPTIONS .TP .BI \-D\ dir Select a Director. .TP .BI \-l List defined Directors. .TP .BI \-c\ config Specify configuration file. .TP .BI \-d\ nn Set debug level to \fInn\fP. .TP .BI \-dt Print timestamp in debug output. .TP .B \-n No conio (for scripting). .TP .B \-s No signals (for debugging). .TP .B \-u\ nn Set command execution timeout to \fInn\fP seconds. .TP .B \-t Test the configuration file and report errors. .TP .B \-? Show version and usage of program. .SH SEE ALSO .BR bacula\-dir (8), .BR bls (1), .BR bextract (1). .br .SH AUTHOR This manual page was written by Jose Luis Tallon .nh . .SH COPYRIGHT This man page document is released under the BSD 2-Clause license. bacula-15.0.3/manpages/bdirjson.80000644000175000017500000000371514771010173016366 0ustar bsbuildbsbuild.\" Hey, EMACS: -*- nroff -*- .\" First parameter, NAME, should be all caps .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection .\" other parameters are allowed: see man(7), man(1) .TH BDIRJSON 8 "12 September 2023" "Eric Bollengier" "Network backup, utilities" .\" Please adjust this date whenever revising the manpage. .\" .\" Some roff macros, for reference: .\" .nh disable hyphenation .\" .hy enable hyphenation .\" .ad l left justify .\" .ad b justify to both left and right margins .\" .nf disable filling .\" .fi enable filling .\" .br insert line break .\" .sp insert n+1 empty lines .\" for manpage-specific macros, see man(7) .SH NAME bdirjson \- Bacula's JSON configuration export tool .SH SYNOPSIS .B bdirjson .RI [ options ] .SH DESCRIPTION This manual page documents briefly the .B bdirjson command. .br This is a simple program that will allow you to dump the Bacula Director configuration in JSON format. 
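.PP
As a rough sketch of a typical invocation (the configuration path and the resource name shown here are examples only, not built-in defaults), a single Job resource can be dumped with:
.br
.B bdirjson \-c /opt/bacula/etc/bacula\-dir.conf \-r Job \-n BackupClient1
.br
using the \-c, \-r and \-n options described below.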
.PP .\" TeX users may be more comfortable with the \fB\fP and .\" \fI\fP escape sequences to invoke bold face and italics, .\" respectively. .SH OPTIONS A summary of options is included below. .TP .B \-? Show version and usage of program. .TP .BI \-d\ nn Set debug level to \fInn\fP. .TP .BI \-dt Print timestamp in debug output .TP .BI \-r\ Get resource type .TP .BI \-n\ Get resource name .TP .BI \-l\ Get only directives matching dirs (use with \-r) .TP .BI \-D Get only data .TP .BI \-R Do not apply JobDefs to Job .TP .BI \-c\ Set configuration file to file .TP .BI \-d\ Set debug level to .TP .BI \-dt Print timestamp in debug output .TP .BI \-t Read configuration and exit .TP .BI \-s Output in show text format .TP .BI \-v Verbose user messages .br .SH AUTHOR This manual page was written by Eric Bollengier .nh . This man page document is released under the BSD 2-Clause license. bacula-15.0.3/manpages/bacula-sd.80000644000175000017500000000313414771010173016402 0ustar bsbuildbsbuild.\" Hey, EMACS: -*- nroff -*- .\" First parameter, NAME, should be all caps .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection .\" other parameters are allowed: see man(7), man(1) .TH BACULA\-SD 8 "28 October 2017" "Kern Sibbald" "Network backup, recovery & verification" .\" Please adjust this date whenever revising the manpage. .\" .SH NAME .B bacula\-sd \- Bacula's Storage Daemon .SH SYNOPSIS .B bacula\-sd .RI [ options ] .br .SH DESCRIPTION This manual page documents briefly the .B bacula\-sd command. .br Bacula's Storage Daemon acts as the interface between the Bacula network backup system and a tape drive/autochanger or filesystem where the backups will be stored. .SH OPTIONS .TP .BI \-c\ file Specify the configuration file to use. .TP .BI \-d\ nn Set debug level to \fInn\fP. .TP .BI \-dt Print timestamp in debug output. .TP .BI \-T Send debug messages to the trace file. .TP .BI \-f Run in foreground (for debugging). .TP .BI \-g\ group Set the group/gid to run as. .TP .BI \-P Do not create a PID file. .TP .BI \-p Proceed in spite of I/O errors .TP .BI \-m Print kaboom output (for debugging) .TP .BI \-s No signals (for debugging). .TP .B \-t Test the configuration file and report errors. .TP .BI \-u\ user Set the username/uid to run as. .TP .BI \-v Set verbose mode. .TP .B \-? Show version and usage of program. .SH SEE ALSO .BR bacula\-dir (8), .BR bacula\-fd (8). .br .SH AUTHOR This manual page was written by Jose Luis Tallon .nh . .SH COPYRIGHT This man page document is released under the BSD 2-Clause license. bacula-15.0.3/manpages/bcopy.80000644000175000017500000000300114771010173015654 0ustar bsbuildbsbuild.\" Hey, EMACS: -*- nroff -*- .\" First parameter, NAME, should be all caps .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection .\" other parameters are allowed: see man(7), man(1) .TH BCOPY 8 "26 November 2009" "Kern Sibbald" "Network backup, recovery and verification" .\" Please adjust this date whenever revising the manpage. .\" .SH NAME bcopy \- Bacula's 'Copy from tape' .SH SYNOPSIS .B bcopy .RI [ options ] .I input-archive .I output-archive .br .SH DESCRIPTION This manual page documents briefly the .B bcopy command. .PP .\" TeX users may be more comfortable with the \fB\fP and .\" \fI\fP escape sequences to invoke bold face and italics, .\" respectively. .SH OPTIONS A summary of options is included below. .TP .B \-? Show version and usage of program. .TP .BI \-b\ bootstrap Specify a bootstrap file. .TP .BI \-c\ config Specify configuration file. 
.TP .BI \-d\ nn Set debug level to \fInn\fP. .TP .BI \-dt Print timestamp in debug output. .TP .BI \-i\ input Specify input Volume names (separated by '|') .TP .BI \-o\ output Specify output Volume names (separated by '|') .TP .BI \-p Proceed in spite of I/O errors. .TP .BI \-w\ directory Specify working directory (default \fI/tmp\fP). .TP .B \-v Set verbose mode. .SH SEE ALSO .BR bls (1), .BR bextract (1). .br .SH AUTHOR This manual page was written by Jose Luis Tallon .nh . .SH COPYRIGHT This man page document is released under the BSD 2-Clause license. bacula-15.0.3/manpages/bscan.80000644000175000017500000000523314771010173015637 0ustar bsbuildbsbuild.\" Hey, EMACS: -*- nroff -*- .\" First parameter, NAME, should be all caps .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection .\" other parameters are allowed: see man(7), man(1) .TH BSCAN 8 "26 November 2009" "Kern Sibbald" "Network backup, recovery and verification" .\" Please adjust this date whenever revising the manpage. .\" .SH NAME bscan \- Bacula's 'Scan tape' .SH SYNOPSIS .B bscan .RI [ options ] .I bacula-archive .br .SH DESCRIPTION .LP The purpose of bscan is to read (scan) a Bacula Volume and to recreate or update the database contents with the information found on the Volume. This is done in a non-destructive way. This permits restoring database entries that have been lost by pruning, purging, deleting, or a database corruption problem. .LP Normally, it should not be necessary to run the bscan command because the database is self maintaining, and most corrupted databases can be repaired by the tools provided by the database vendors. In addition, if you have maintained bootstrap files during backups, you should be able to recover all your data from the bootstrap file without needed an up to date catalog. .B bscan command. .PP .\" TeX users may be more comfortable with the \fB\fP and .\" \fI\fP escape sequences to invoke bold face and italics, .\" respectively. .SH OPTIONS A summary of options is included below. .TP .B \-? Show version and usage of program. .TP .BI \-b\ bootstrap Specify a bootstrap file. .TP .BI \-c\ config Specify configuration file. .TP .BI \-d\ nn Set debug level to \fInn\fP. .TP .BI \-dt Print timestamp in debug output. .TP .B \-m Update media info in database. .TP .B \-D Specify the driver database name (default: \fINULL\fP) .TP .BI \-n\ name Specify the database name (default: \fIbacula\fP) .TP .BI \-u\ username Specify database username (default: \fIbacula\fP) .TP .BI \-P\ password Specify database password (default: \fInone\fP) .TP .BI \-h\ host Specify database host (default: \fINULL\fP) .TP .BI \-t\ port Specify database port (default: 0) .TP .B \-p Proceed in spite of I/O errors. .TP .B \-r List records. .TP .B \-s Synchronize or store in Database. .TP .B \-S Show scan progress periodically. .TP .B \-v Verbose output mode. .TP .BI \-V\ volume Specify volume names (separated by '|') .TP .BI \-w\ dir Specify working directory (default from conf file) .SH SEE ALSO .BR bls (8), .BR bextract (8). .br .SH AUTHOR This manual page was written by Jose Luis Tallon .nh , for the Debian GNU/Linux system (but may be used by others). .SH COPYRIGHT This man page document is released under the BSD 2-Clause license. 
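.SH EXAMPLE
The following invocation is only a sketch: the configuration file path,
the Volume name and the archive device name are placeholders and must be
adapted to your installation. It scans a single Volume verbosely (\-v),
updates the media information (\-m) and stores the recovered records in
the catalog (\-s), using the options documented above:
.LP
.sp
.RS
.nf
# the path, Volume name and device below are placeholders
bscan \-v \-s \-m \-c /opt/bacula/etc/bacula\-sd.conf \-V Vol001 /dev/nst0
.fi
.RE
For file based Volumes, the last argument is the archive device (the
directory holding the Volume files) rather than a tape device.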
bacula-15.0.3/manpages/bls.80000644000175000017500000000361714771010173015335 0ustar bsbuildbsbuild.\" Hey, EMACS: -*- nroff -*- .\" First parameter, NAME, should be all caps .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection .\" other parameters are allowed: see man(7), man(1) .TH BLS 8 "26 November 2009" "Kern Sibbald" "Network backup, recovery and verification" .\" Please adjust this date whenever revising the manpage. .\" .\" Some roff macros, for reference: .\" .nh disable hyphenation .\" .hy enable hyphenation .\" .ad l left justify .\" .ad b justify to both left and right margins .\" .nf disable filling .\" .fi enable filling .\" .br insert line break .\" .sp insert n+1 empty lines .\" for manpage-specific macros, see man(7) .SH NAME bls \- Bacula's 'Tape LS' .SH SYNOPSIS .B bls .RI [ options ] .I .br .SH DESCRIPTION This manual page documents briefly the .B bls command. .PP .\" TeX users may be more comfortable with the \fB\fP and .\" \fI\fP escape sequences to invoke bold face and italics, .\" respectively. .SH OPTIONS A summary of options is included below. .TP .B \-? Show version and usage of program. .TP .BI \-b\ bootstrap Specify a bootstrap file. .TP .BI \-c\ config Specify configuration file. .TP .BI \-d\ nn Set debug level to \fInn\fP. .TP .BI \-dt Print timestamp in debug output .TP .BI \-e\ Specify exclude list file. .TP .BI \-i\ Specify include list file. .TP .BI \-j List jobs. .TP .BI \-k List blocks. .TP .I (no \-j or \-k option) List saved files .TP .BI -L Dump label. .TP .BI \-p Proceed in spite of errors. .TP .BI \-V\ volumes Specify volume names (separated by '|'). .TP .B \-v Set verbose mode. .SH SEE ALSO .BR bscan (8), .BR bextract (8). .br .SH AUTHOR This manual page was written by Jose Luis Tallon .nh . .SH COPYRIGHT This man page document is released under the BSD 2-Clause license. bacula-15.0.3/manpages/bbconsjson.80000644000175000017500000000372414771010173016714 0ustar bsbuildbsbuild.\" Hey, EMACS: -*- nroff -*- .\" First parameter, NAME, should be all caps .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection .\" other parameters are allowed: see man(7), man(1) .TH BBCONSJSON 8 "12 September 2023" "Eric Bollengier" "Network backup, utilities" .\" Please adjust this date whenever revising the manpage. .\" .\" Some roff macros, for reference: .\" .nh disable hyphenation .\" .hy enable hyphenation .\" .ad l left justify .\" .ad b justify to both left and right margins .\" .nf disable filling .\" .fi enable filling .\" .br insert line break .\" .sp insert n+1 empty lines .\" for manpage-specific macros, see man(7) .SH NAME bbconsjson \- Bacula's JSON configuration export tool .SH SYNOPSIS .B bbconsjson .RI [ options ] .SH DESCRIPTION This manual page documents briefly the .B bbconsjson command. .br This is a simple program that will allow you to dump the Bacula Console configuration in JSON format. .PP .\" TeX users may be more comfortable with the \fB\fP and .\" \fI\fP escape sequences to invoke bold face and italics, .\" respectively. .SH OPTIONS A summary of options is included below. .TP .B \-? Show version and usage of program. .TP .BI \-d\ nn Set debug level to \fInn\fP. 
.TP .BI \-dt Print timestamp in debug output .TP .BI \-r\ Get resource type .TP .BI \-n\ Get resource name .TP .BI \-l\ Get only directives matching dirs (use with \-r) .TP .BI \-D Get only data .TP .BI \-R Do not apply JobDefs to Job .TP .BI \-c\ Set configuration file to file .TP .BI \-d\ Set debug level to .TP .BI \-dt Print timestamp in debug output .TP .BI \-t Read configuration and exit .TP .BI \-s Output in show text format .TP .BI \-v Verbose user messages .br .SH AUTHOR This manual page was written by Eric Bollengier .nh . This man page document is released under the BSD 2-Clause license. bacula-15.0.3/manpages/bacula-tray-monitor.10000644000175000017500000000230014771010173020423 0ustar bsbuildbsbuild.\" Hey, EMACS: -*- nroff -*- .\" First parameter, NAME, should be all caps .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection .\" other parameters are allowed: see man(7), man(1) .TH BACULA-TRAY-MONITOR 1 "May 10, 2006" "Kern Sibbald" "Network backup, recovery and verification" .\" Please adjust this date whenever revising the manpage. .\" .SH NAME bacula-tray-monitor \- Bacula's 'System Tray' monitor .SH SYNOPSIS .B bacula-tray-monitor .RI [options] .br .SH DESCRIPTION This manual page documents briefly the .B bacula-tray-monitor command, a simple monitor for the 'system tray' in KDE/GNOME .PP .SH OPTIONS bacula-tray-monitor [\-c config_file] [\-d debug_level] [\-t] .TP .B \-c Specify configuration file. .TP .B \-d Set debug level to \fInn\fP. .TP .B \-dt Print timestamp in debug output. .TP .B \-t Test config mode: read configuration and exit. .TP .B \-? Show version and usage of program. .SH SEE ALSO .BR bacula-dir (8), .BR bls (1), .BR bextract (1). .br .SH AUTHOR This manual page was written by Jose Luis Tallon .nh . .SH COPYRIGHT This man page document is released under the BSD 2-Clause license. bacula-15.0.3/manpages/bat.10000644000175000017500000000234214771010173015306 0ustar bsbuildbsbuild.\" Hey, EMACS: -*- nroff -*- .\" First parameter, NAME, should be all caps .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection .\" other parameters are allowed: see man(7), man(1) .TH BAT 1 "26 September 2009" "Kern Sibbald" "Network backup, recovery and verification" .\" Please adjust this date whenever revising the manpage. .\" .SH NAME bat \- Bacula Administration Tool Console .SH SYNOPSIS .B bat .RI [options] .br .SH DESCRIPTION This manual page documents briefly the .B bat command, the Qt4 version of the Bacula Administration Tool console. This is a GUI full featured program similar the bconsole program, but it is graphical oriented and more features. .PP .SH OPTIONS bat [\-s] [\-c config_file] [\-d debug_level] [\-t] .TP .B \-c Specify configuration file. Default is bat.conf. .TP .B \-d Set debug level to \fInn\fP. .TP .B \-s No signals. Used in debugging only. .TP .B \-t Test config mode: read configuration and exit. .TP .B \-? Show version and usage of program. .SH SEE ALSO .BR bacula-dir (8), .BR bls (1), .BR bextract (1). .br .SH AUTHOR This manual page was written by Kern Sibbald. .SH COPYRIGHT This man page document is released under the BSD 2-Clause license. 
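.SH EXAMPLE
As a sketch only (the configuration file path below is a placeholder for
your site's bat.conf location), first check the configuration, then start
bat with it:
.LP
.sp
.RS
.nf
# placeholder path; adapt to your installation
bat \-t \-c /opt/bacula/etc/bat.conf
bat \-c /opt/bacula/etc/bat.conf
.fi
.RE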
bacula-15.0.3/manpages/bacula.80000644000175000017500000001123414771010173015776 0ustar bsbuildbsbuild.\" manual page [] for Bacula .\" SH section heading .\" SS subsection heading .\" LP paragraph .\" IP indented paragraph .\" TP hanging label .TH Bacula 8 "The Network Backup Solution" .SH NAME Bacula \- The Network Backup Solution .SH SYNOPSIS .B bacula-dir \- Director .br .B bacula-fd \- File daemon or Client .br .B bacula-sd \- Storage daemon .br .B bconsole \- Console to control Bacula .br .SH DESCRIPTION .LP Bacula is a set of computer programs that permits you (or the system administrator) to manage backup, recovery, and verification of computer data across a network of computers of different kinds. In technical terms, it is a network Client/Server based backup program. Bacula is relatively easy to use and efficient, while offering many advanced storage management features that make it easy to find and recover lost or damaged files. Due to its modular design, Bacula is scalable from small single computer systems to systems consisting of hundreds of computers located over a large network. .LP Bacula Director service consists of the program that supervises all the backup, restore, verify and archive operations. The system administrator uses the Bacula Director to schedule backups and to recover files. For more details see the Director Services Daemon Design Document in the Bacula Developer's Guild. The Director runs as a daemon or a service (i.e. in the background). .LP Bacula Console services is the program that allows the administrator or user to communicate with the Bacula Director (see above). Currently, the Bacula Console is available in two versions. The first and simplest is to run the Console program in a shell window (i.e. TTY interface). Most system administrators will find this completely adequate. The second version is a Qt 4.2 GUI interface named bat that has more features than the bconsole program. .LP Bacula File services (or Client program) is the software program that is installed on the machine to be backed up. It is specific to the operating system on which it runs and is responsible for providing the file attributes and data when requested by the Director. The File services are also responsible for the file system dependent part of restoring the file attributes and data during a recovery operation. For more details see the File Services Daemon Design Document in the Bacula Developer's Guide. This program runs as a daemon on the machine to be backed up, and in some of the documentation, the File daemon is referred to as the Client (for example in Bacula's configuration file). In addition to Unix/Linux File daemons, there is a Windows File daemon (normally distributed in binary format). The Windows File daemon runs on all currently known Windows versions (2K, 2003, XP, and Vista). .LP Bacula Storage services consist of the software programs that perform the storage and recovery of the file attributes and data to the physical backup media or volumes. In other words, the Storage daemon is responsible for reading and writing your tapes (or other storage media, e.g. files). For more details see the Storage Services Daemon Design Document in the Bacula Developer's Guide. The Storage services runs as a daemon on the machine that has the backup device (usually a tape drive). .LP Catalog services are comprised of the software programs responsible for maintaining the file indexes and volume databases for all files backed up. 
The Catalog services permit the System Administrator or user to quickly locate and restore any desired file. The Catalog services sets Bacula apart from simple backup programs like tar and bru, because the catalog maintains a record of all Volumes used, all Jobs run, and all Files saved, permitting efficient restoration and Volume management. Bacula currently supports three different databases, MySQL, PostgreSQL, and SQLite3, one of which must be chosen when building Bacula. .SH OPTIONS See the HTML/PDF documentation at: .br .br for details of the command line options. .SH CONFIGURATION Each daemon has its own configuration file which must be tailored for each particular installation. Please see the HTML/PDF documentation for the details. .SH SEE ALSO The HTML manual installed on your system (typically found in .br /usr/share/doc/bacula-) or the online manual at: .br .SH BUGS See .SH AUTHOR Kern Sibbald .SS Current maintainer Kern Sibbald .SS Contributors An enormous list of past and former persons who have devoted their time and energy to this project -- thanks. See the AUTHORS file in the main Bacula source directory. .SH COPYRIGHT This man page document is released under the BSD 2-Clause license. bacula-15.0.3/manpages/bacula-fd.80000644000175000017500000000330614771010173016366 0ustar bsbuildbsbuild.\" Hey, EMACS: -*- nroff -*- .\" First parameter, NAME, should be all caps .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection .\" other parameters are allowed: see man(7), man(1) .TH BACULA\-SD 8 "28 October 2017" "Kern Sibbald" "Network backup, recovery & verification" .\" Please adjust this date whenever revising the manpage. .\" .SH NAME .B bacula\-fd \- Bacula's File Daemon .SH SYNOPSIS .B bacula\-fd .RI [ options ] .br .SH DESCRIPTION This manual page documents briefly the .B bacula command. .br Bacula's File Daemon acts as the interface between the Bacula network backup system and the filesystems to be backed up: it is responsible for reading/writing/verifying the files to be backup'd/verified/restored. Network transfer can optionally be compressed. .SH OPTIONS .TP .BI \-c\ file Specify the configuration file to use. .TP .BI \-d\ nn Set debug level to \fInn\fP. .TP .BI \-dt Print timestamp in debug output. .TP .BI \-T Send debug messages to the trace file. .TP .BI \-f Run in foreground (for debugging). .TP .BI \-g\ group Set the group/gid to run as. .TP .BI \-k Keep readall permission when dropping privileges. .TP .BI \-m Print kaboom output (for debugging). .TP .BI \-P Do not create a PID file. .TP .BI \-s No signals (for debugging). .TP .B \-t Test the configuration file and report errors. .TP .BI \-u\ user Set the username/uid to run as. .TP .BI \-v Set verbose mode. .TP .B \-? Show version and usage of program. .SH SEE ALSO .BR bacula\-dir (8), .BR bacula\-sd (8). .br .SH AUTHOR This manual page was written by Jose Luis Tallon . .SH COPYRIGHT This man page document is released under the BSD 2-Clause license. bacula-15.0.3/manpages/bextract.80000644000175000017500000000301514771010173016361 0ustar bsbuildbsbuild.\" Hey, EMACS: -*- nroff -*- .\" First parameter, NAME, should be all caps .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection .\" other parameters are allowed: see man(7), man(1) .TH BEXTRACT 8 "26 November 2009" "Kern Sibbald" "Network backup, recovery and verification" .\" Please adjust this date whenever revising the manpage. 
.\" .SH NAME bextract \- Bacula's 'Extract from tape' .SH SYNOPSIS .B bextract .RI [ options ] .I bacula-archive-device-name .I output-directory .br .SH DESCRIPTION This manual page documents briefly the .B bextract command. .PP .\" TeX users may be more comfortable with the \fB\fP and .\" \fI\fP escape sequences to invoke bold face and italics, .\" respectively. .SH OPTIONS A summary of options is included below. .TP .B \-? Show version and usage of program. .TP .BI \-b\ bootstrap Specify a bootstrap file. .TP .BI \-c\ config Specify configuration file. .TP .BI \-d\ nn Set debug level to \fInn\fP. .TP .BI \-dt Print timestamp in debug output. .TP .BI \-e\ file Specify exclude list. .TP .BI \-i\ file Specify include list. .TP .BI \-p Proceed in spite of I/O errors. .TP .BI \-t\ Read data from volume, do not write anything .TP .B \-v Set verbose mode. .TP .BI \-V\ volume-name Specify volume names. .SH SEE ALSO .BR bls (1), .BR bextract (1). .br .SH AUTHOR This manual page was written by Jose Luis Tallon .nh . .SH COPYRIGHT This man page document is released under the BSD 2-Clause license. bacula-15.0.3/manpages/bsdjson.80000644000175000017500000000371714771010173016220 0ustar bsbuildbsbuild.\" Hey, EMACS: -*- nroff -*- .\" First parameter, NAME, should be all caps .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection .\" other parameters are allowed: see man(7), man(1) .TH BSDJSON 8 "12 September 2023" "Eric Bollengier" "Network backup, utilities" .\" Please adjust this date whenever revising the manpage. .\" .\" Some roff macros, for reference: .\" .nh disable hyphenation .\" .hy enable hyphenation .\" .ad l left justify .\" .ad b justify to both left and right margins .\" .nf disable filling .\" .fi enable filling .\" .br insert line break .\" .sp insert n+1 empty lines .\" for manpage-specific macros, see man(7) .SH NAME bsdjson \- Bacula's JSON configuration export tool .SH SYNOPSIS .B bsdjson .RI [ options ] .SH DESCRIPTION This manual page documents briefly the .B bsdjson command. .br This is a simple program that will allow you to dump the Bacula Storage Daemon configuration in JSON format. .PP .\" TeX users may be more comfortable with the \fB\fP and .\" \fI\fP escape sequences to invoke bold face and italics, .\" respectively. .SH OPTIONS A summary of options is included below. .TP .B \-? Show version and usage of program. .TP .BI \-d\ nn Set debug level to \fInn\fP. .TP .BI \-dt Print timestamp in debug output .TP .BI \-r\ Get resource type .TP .BI \-n\ Get resource name .TP .BI \-l\ Get only directives matching dirs (use with \-r) .TP .BI \-D Get only data .TP .BI \-R Do not apply JobDefs to Job .TP .BI \-c\ Set configuration file to file .TP .BI \-d\ Set debug level to .TP .BI \-dt Print timestamp in debug output .TP .BI \-t Read configuration and exit .TP .BI \-s Output in show text format .TP .BI \-v Verbose user messages .br .SH AUTHOR This manual page was written by Eric Bollengier .nh . This man page document is released under the BSD 2-Clause license. bacula-15.0.3/manpages/bpluginfo.80000644000175000017500000000615714771010173016544 0ustar bsbuildbsbuild.\" Hey, EMACS: -*- nroff -*- .\" First parameter, NAME, should be all caps .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection .\" other parameters are allowed: see man(7), man(1) .TH bpluginfo "8" "July 2012" "bpluginfo" "Network backup, recovery and verification" .\" Please adjust this date whenever revising the manpage. 
.\" .SH NAME bpluginfo \- Bacula Plugin information utility .SH SYNOPSIS .B bplufinfo .RI [ options ] .I plugin_filename.so .br .SH DESCRIPTION .LP The main purpose of .B bpluginfo is to display different information about Bacula plugin. You can use it to check a plugin name, author, license and short description. You can use '-f' option to display API implemented by the plugin. Some plugins may require additional '-a' option for validating a Bacula Daemons API. In most cases it is not required. .PP ./ Bacula is a set of programs for performing a ./ .PP ./ - ./ .BR bpluginfo .PP .SH OPTIONS A summary of options is included below. .TP .B \-h Show usage of the program. .TP .BI \-v Verbose information printing all available data from the plugin, including plugin header and implemented API. .TP .BI \-i Display short information from plugin header only. This is a default option. Option incompatible with .B -f option. .TP .BI \-f Display information about implemented API functions. .TP .BI \-a\ You can supply the plugin initialization function with a particular Bacula API number. Without this option a default API number is '1'. Option require a numeric argument. .SH RETURN CODE .BR bpluginfo returns 0 on success, and non-zero on error. .TP You can check return code to find what was a cause of the error. * 0 - success * 1 - cannot load a plugin * 2 - cannot find a loadPlugin function * 3 - cannot find an unloadPlugin function * 10 - not enough memory .SH EXAMPLE USAGE This is an example of bplufinfo usage with verbose option (-v) and default plugin. .LP .sp .RS .nf \fB$ bpluginfo -v bpipe-fd.so Plugin type: File Daemon plugin Plugin magic: *FDPluginData* Plugin version: 1 Plugin release date: January 2008 Plugin author: Kern Sibbald Plugin license: Bacula or Bacula Enterprise Plugin description: Bacula Pipe File Daemon Plugin Plugin API version: 6 Plugin functions: newPlugin() freePlugin() getPluginValue() setPluginValue() handlePluginEvent() startBackupFile() endBackupFile() startRestoreFile() endRestoreFile() pluginIO() createFile() setFileAttributes() .fi .RE .SH AUTHOR Written by Radoslaw Korzeniewski (c) Inteos Sp. z o.o. .SH BUGS Does not handle all required bacula functions callbacks which can lead to utility crash. .\".SH TODO" .PP .PP .SH "REPORTING BUGS" Report bugs to . .SH COPYRIGHT Copyright \(co 2012 Free Software Foundation Europe e.V. .br This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. .SH "SEE ALSO" .BR bacula-dir, .BR bacula-sd, .BR bacula-fd, .BR "Bacula Plugins API" This man page document is released under the BSD 2-Clause license. bacula-15.0.3/manpages/bfdjson.80000644000175000017500000000371414771010173016200 0ustar bsbuildbsbuild.\" Hey, EMACS: -*- nroff -*- .\" First parameter, NAME, should be all caps .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection .\" other parameters are allowed: see man(7), man(1) .TH BFDJSON 8 "12 September 2023" "Eric Bollengier" "Network backup, utilities" .\" Please adjust this date whenever revising the manpage. 
.\" .\" Some roff macros, for reference: .\" .nh disable hyphenation .\" .hy enable hyphenation .\" .ad l left justify .\" .ad b justify to both left and right margins .\" .nf disable filling .\" .fi enable filling .\" .br insert line break .\" .sp insert n+1 empty lines .\" for manpage-specific macros, see man(7) .SH NAME bfdjson \- Bacula's JSON configuration export tool .SH SYNOPSIS .B bfdjson .RI [ options ] .SH DESCRIPTION This manual page documents briefly the .B bfdjson command. .br This is a simple program that will allow you to dump the Bacula File Daemon configuration in JSON format. .PP .\" TeX users may be more comfortable with the \fB\fP and .\" \fI\fP escape sequences to invoke bold face and italics, .\" respectively. .SH OPTIONS A summary of options is included below. .TP .B \-? Show version and usage of program. .TP .BI \-d\ nn Set debug level to \fInn\fP. .TP .BI \-dt Print timestamp in debug output .TP .BI \-r\ Get resource type .TP .BI \-n\ Get resource name .TP .BI \-l\ Get only directives matching dirs (use with \-r) .TP .BI \-D Get only data .TP .BI \-R Do not apply JobDefs to Job .TP .BI \-c\ Set configuration file to file .TP .BI \-d\ Set debug level to .TP .BI \-dt Print timestamp in debug output .TP .BI \-t Read configuration and exit .TP .BI \-s Output in show text format .TP .BI \-v Verbose user messages .br .SH AUTHOR This manual page was written by Eric Bollengier .nh . This man page document is released under the BSD 2-Clause license. bacula-15.0.3/manpages/btraceback.80000644000175000017500000000445414771010173016636 0ustar bsbuildbsbuild.\" Hey, EMACS: -*- nroff -*- .\" First parameter, NAME, should be all caps .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection .\" other parameters are allowed: see man(7), man(1) .TH BTRACEBACK 1 "6 December 2009" "Kern Sibbald" "Network backup, recovery and verification" .\" Please adjust this date whenever revising the manpage. .\" .SH NAME btraceback \- wrapper script around gdb and bsmtp .SH SYNOPSIS .B btraceback .I /path/to/binary .I pid .SH DESCRIPTION \fBbtraceback\fR is a wrapper shell script around the \fBgdb\fR debugger (or \fBdbx\fR on Solaris systems) and \fBbsmtp\fR, provided for debugging purposes. .SH USAGE \fBbtraceback\fR is called by the exception handlers of the Bacula daemons during a crash. It can also be called interactively to view the current state of the threads belonging to a process, but this is not recommended unless you are trying to debug a problem (see below). .SH NOTES In order to work properly, debugging symbols must be available to the debugger on the system, and gdb, or dbx (on Solaris systems) must be available in the \fB$PATH\fR. If the Director or Storage daemon runs under a non-root uid, you will probably need to be modify the \fBbtraceback\fR script to elevate privileges for the call to \fBgdb\fR/\fBdbx\fR, to ensure it has the proper permissions to debug when called by the daemon. Although Bacula's use of \fBbtraceback\fR within its exception handlers is always safe, manual or interactive use of \fBbtraceback\fR is subject to the same risks than live debugging of any program, which means it could cause Bacula to crash under rare and abnormal circumstances. Consequently we do not recommend manual use of \fBbtraceback\fR in production environments unless it is required for debugging a problem. .SH ENVIRONMENT \fBbtracback\fR relies on \fB$PATH\fR to find the debugger. .SH FILES .TP .I /usr/lib/bacula/btraceback .RS The script itself. 
.RE .TP .I /usr/sbin/btraceback .RS symbolic link to \fI/usr/lib/bacula/btraceback\fR .RE .TP .I /etc/bacula/scripts/btraceback.gdb .RS the GDB command batch used to output a stack trace .RE .SH AUTHOR This manual page was written by Lucas B. Cohen .nh .SH SEE ALSO .BR "bsmtp" "(1) " This man page document is released under the BSD 2-Clause license. bacula-15.0.3/manpages/bacula-dir.80000644000175000017500000000447314771010173016561 0ustar bsbuildbsbuild.\" Hey, EMACS: -*- nroff -*- .\" First parameter, NAME, should be all caps .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection .\" other parameters are allowed: see man(7), man(1) .TH BACULA\-DIR 8 "28 October 2017" "Kern Sibbald" "Network backup, recovery&verification" .\" Please adjust this date whenever revising the manpage. .\" .SH NAME .B bacula\-dir \- Bacula Director .SH SYNOPSIS .B bacula\-dir .RI [ options ] .br .SH DESCRIPTION This manual page documents briefly the .B bacula\-dir command. .br Bacula's Director Daemon acts as the controller of the network backup system: it is responsible for scheduling and coordinating backups across the network. .SH OPTIONS .TP .BI \-c\ file Specify the configuration file to use. .TP .BI \-d\ nn Set debug level to \fInn\fP. .TP .BI \-dt Print timestamp in debug output. .TP .BI \-T Send debug messages to the trace file. .TP .BI \-f Run in foreground (for debugging). .TP .BI \-g\ group Set the group/gid to run as. .TP .BI \-m Print kaboom output (for debugging). .TP .BI \-P Do not create a PID file. .TP .BI \-r\ job Run . .TP .BI \-s No signals (for debugging). .TP .B \-t Test the configuration file and report errors. .TP .BI \-u\ user Set the username/uid to run as. .TP .BI \-v Set verbose mode. .TP .BI \-? Show version and usage of program. .SH TCP-WRAPPERS CONFIGURATION Tcpwrappers looks for the service name of the bacula daemons in .I hosts.allow , and the service names of these daemons is configured to be different from the binary. The service names are configured to be .B %hostname%-%component% rather than .B bacula-dir (As defined in the bacula-dir.conf.in file) So the hosts.allow entry has to match .B %hostname%-%component% (servername-dir for example) instead of bacula-%component% .B WARNING: This means that if the hosts.allow file has the entry: .B bacula-dir: ALL you will not be able to run bconsole to connect to the local director! The entry would have to read: .B server-dir: ALL and this will allow the console to connect to the director. (The process running is bacula-dir.) .SH SEE ALSO .BR bacula-fd (8), .BR bacula-sd (8). .SH AUTHOR This manual page was written by Jose Luis Tallon .nh . .SH COPYRIGHT This man page document is released under the BSD 2-Clause license. bacula-15.0.3/manpages/bsmtp.10000644000175000017500000000750714771010173015675 0ustar bsbuildbsbuild.\" Hey, EMACS: -*- nroff -*- .\" First parameter, NAME, should be all caps .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection .\" other parameters are allowed: see man(7), man(1) .TH BSMTP 1 "6 December 2009" "Kern Sibbald" "Network backup, recovery and verification" .\" Please adjust this date whenever revising the manpage. .\" .SH NAME bsmtp \- Bacula's SMTP client (mail submission program) .SH SYNOPSIS .B bsmtp .RI [ options ] .I <...> .SH DESCRIPTION .B bsmtp is a simple mail user agent designed to permit more flexibility than the standard mail programs typically found on Unix systems, and to ease portability. It can even run on Windows machines. 
It is used by the Director daemon to send notifications and requests to the operator. .SH OPTIONS .TP .B \-8 Encode the mail in UTF-8. .TP .B \-c Set the \fBCc:\fR header. .TP .BI \-d\ nn Set debug level to \fInn\fP. .TP .BI \-dt Print timestamp in debug output. .TP .B \-f Set the \fBFrom:\fR header. If not specified, .B bsmtp will try to use your username. .TP .BI \-h\ mailhost:port Use mailhost:port as the SMTP server. (default port: 25) .TP .B \-s Set the \fBSubject:\fR header. .TP .B \-r Set the \fBReply-To:\fR: header. .TP .B \-l Set the maximum number of lines to be sent. (default: unlimited) .TP .B \-? Show version and usage of program. .SH USAGE \fIrecipients\fR is a space separated list of email addresses. The body of the email message is read from standard input. Message is ended by sending the EOF character (Ctrl-D on many systems) on the start of a new line, much like many 'mail' commands. The actual, automated behavior of \fBbsmtp\fR will depend on the mail-related configuration of the Director in the \fIMessages\fR resource of \fIbacula-dir.conf\fR. Interactive use of \fBbsmtp\fR is pertinent to manually test and ensure these configuration bits are valid. This is highly recommended. .SH CONFIGURATION These commands should each appear on a single line in the configuration file. Messages { Name = Standard mailcommand = "/home/bacula/bin/bsmtp \-h mail.domain.com \-f \\"\\(Bacula\\) \\<%r\\>\\" \-s \\"Bacula: %t %e of %c %l\\" %r" operatorcommand = "/home/bacula/bin/bsmtp \-h mail.domain.com \-f \\"\\(Bacula\\) \\<%r\\>\\" \-s \\"Bacula: Intervention needed for %j\\" %r" mail = sysadmin@site.domain.com = all, !skipped operator = sysop@site.domain.com = mount console = all, !skipped, !saved } \fIhome/bacula/bin\fR is replaced with the path to the Bacula binary directory, and mail.domain.com is replaced with the fully qualified name of an SMTP server, which usually listen on port 25. .SH ENVIRONMENT If the \fB-h\fR option is not specified, \fBbsmtp\fR will use environment variable \fBSMTPSERVER\fR, or 'localhost' if not set. .SH NOTES Since \fBbsmtp\fR always uses a TCP connection rather than writing to a spool file, you may find that your \fBFrom:\fR address is being rejected because it does not contain a valid domain, or because your message has gotten caught in spam filtering rules. Generally, you should specify a fully qualified domain name in the from field, and depending on whether your SMTP gateway is Exim or Sendmail, you may need to modify the syntax of the from part of the message. If \fBbsmtp\fR cannot connect to the specified mail host, it will retry to connect to \fBlocalhost\fR. .SH BUGS If you are getting incorrect dates (e.g. 1970) and you are running with a non-English locale, you might try setting the \fBLANG="en_US"\fR environment variable. .SH AUTHOR This manual page was written by Jose Luis Tallon .nh , revised and edited by Lucas B. Cohen .nh . .SH SEE ALSO .BR "bacula-dir" "(8) " .SH COPYRIGHT This man page document is released under the BSD 2-Clause license. 
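.SH EXAMPLE
Because interactive testing of the mail setup is recommended (see USAGE
above), the sketch below sends a short test message with the body taken
from standard input. The SMTP host and the recipient are the placeholder
values used in the CONFIGURATION section; the sender address is likewise
a placeholder:
.LP
.sp
.RS
.nf
# mail host and all addresses are placeholders
echo "bsmtp delivery test" | bsmtp \-h mail.domain.com:25 \-f bacula@site.domain.com \-s "Bacula: manual test" sysadmin@site.domain.com
.fi
.RE
If the message does not arrive, re-run with \-d 100 for debug output and
check that the \fBFrom:\fR address uses a fully qualified domain name, as
discussed in the NOTES section.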
bacula-15.0.3/manpages/Makefile.in0000644000175000017500000000204114771010173016517 0ustar bsbuildbsbuild# # Copyright (C) 2000-2023 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # @MCOMMON@ .PHONY: dummy MAN8 = bacula.8 bacula-dir.8 bacula-fd.8 bacula-sd.8 \ bconsole.8 bcopy.8 bextract.8 bls.8 bscan.8 btape.8 \ btraceback.8 dbcheck.8 bwild.8 bregex.8 bbconsjson.8 \ bdirjson.8 bsdjson.8 bfdjson.8 MAN1 = bsmtp.1 bat.1 all: nothing: depend: install: $(MKDIR) $(DESTDIR)/$(mandir)/man8 for I in ${MAN8}; \ do ($(RMF) $$I.gz; gzip -c $$I >$$I.gz; \ $(INSTALL_DATA) $$I.gz $(DESTDIR)$(mandir)/man8/$$I.gz; \ rm -f $$I.gz); \ done $(MKDIR) $(DESTDIR)/$(mandir)/man1 for I in ${MAN1}; \ do ($(RMF) $$I.gz; gzip -c $$I >$$I.gz; \ $(INSTALL_DATA) $$I.gz $(DESTDIR)$(mandir)/man1/$$I.gz; \ rm -f $$I.gz); \ done uninstall: for I in ${MAN8}; \ do (rm -f $(DESTDIR)$(mandir)/man8/$$I.gz); \ done for I in ${MAN1}; \ do (rm -f $(DESTDIR)$(mandir)/man1/$$I.gz); \ done clean: @$(RMF) *~ 1 2 3 *.bak @find . -name .#* -exec $(RMF) {} \; depend: distclean: clean $(RMF) Makefile bacula-15.0.3/manpages/dbcheck.80000644000175000017500000001745514771010173016145 0ustar bsbuildbsbuild.\" Hey, EMACS: -*- nroff -*- .\" First parameter, NAME, should be all caps .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection .\" other parameters are allowed: see man(7), man(1) .TH DBCHECK 8 "26 September 2009" "Kern Sibbald" "Network backup, recovery and verification" .\" Please adjust this date whenever revising the manpage. .\" .SH NAME dbcheck \- Bacula's Catalog Database Check/Clean program .SH SYNOPSIS .B dbcheck .RI [ options ] .I working-directory .I bacula-database .I user .I password .RI [ dbhost ] .RI [ dbport ] .br .SH DESCRIPTION This manual page documents briefly the .B dbcheck command. .PP dbcheck will not repair your database if it is broken. Please see your vendor's instructions for fixing broken database. dbcheck is a simple program that will search for logical inconsistencies in the Bacula tables in your database, and optionally fix them. It is a database maintenance routine, in the sense that it can detect and remove unused rows, but it is not a database repair routine. To repair a database, see the tools furnished by the database vendor. Normally dbcheck should never need to be run, but if Bacula has crashed or you have a lot of Clients, Pools, or Jobs that you have removed, it could be useful. .SH OPTIONS A summary of options is included below. .TP .B \-? Show version and usage of program. .TP .BI \-b If specified, dbcheck will run in batch mode, and it will proceed to examine and fix (if \-f is set) all programmed inconsistency checks. By default, dbcheck will enter interactive mode (see below). .TP .BI \-C\ catalog catalog name in the director conf file. .TP .BI \-c\ config If the \-c option is given with the Director's conf file, there is no need to enter any of the command line arguments, in particular the working directory as dbcheck will read them from the file. .TP .BI \-B print catalog configuration and exit. .TP .BI -d\ nn set debug level to \fInn\fP. .TP .BI \-dt print timestamp in debug output. .TP .BI \-f If specified, dbcheck will repair (fix) the inconsistencies it finds. Otherwise, it will report only. .TP .BI \-v Set verbose mode. .SH INTERACTIVE MODE In interactive mode dbcheck will prompt with the following: .PP Hello, this is the database check/correct program. Please select the function you want to perform. 
1) Toggle modify database flag 2) Toggle verbose flag 3) Repair bad Filename records 4) Repair bad Path records 5) Eliminate duplicate Filename records 6) Eliminate duplicate Path records 7) Eliminate orphaned Jobmedia records 8) Eliminate orphaned File records 9) Eliminate orphaned Path records 10) Eliminate orphaned Filename records 11) Eliminate orphaned FileSet records 12) Eliminate orphaned Client records 13) Eliminate orphaned Job records 14) Eliminate all Admin records 15) Eliminate all Restore records 16) All (3-15) 17) Quit Select function number: By entering 1 or 2, you can toggle the modify database flag (\-f option) and the verbose flag (\-v). It can be helpful and reassuring to turn off the modify database flag, then select one or more of the consistency checks (items 3 through 9) to see what will be done, then toggle the modify flag on and re-run the check. The inconsistencies examined are the following: .BR Duplicate filename records. This can happen if you accidentally run two copies of Bacula at the same time, and they are both adding filenames simultaneously. It is a rare occurrence, but will create an inconsistent database. If this is the case, you will receive error messages during Jobs warning of duplicate database records. If you are not getting these error messages, there is no reason to run this check. .BR Repair bad Filename records. This checks and corrects filenames that have a trailing slash. They should not. .BR Repair bad Path records. This checks and corrects path names that do not have a trailing slash. They should. .BR Duplicate path records. This can happen if you accidentally run two copies of Bacula at the same time, and they are both adding filenames simultaneously. It is a rare occurrence, but will create an inconsistent database. See the item above for why this occurs and how you know it is happening. .BR Orphaned JobMedia records. This happens when a Job record is deleted (perhaps by a user issued SQL statement), but the corresponding JobMedia record (one for each Volume used in the Job) was not deleted. Normally, this should not happen, and even if it does, these records generally do not take much space in your database. However, by running this check, you can eliminate any such orphans. .BR Orphaned File records. This happens when a Job record is deleted (perhaps by a user issued SQL statement), but the corresponding File record (one for each Volume used in the Job) was not deleted. Note, searching for these records can be very time consuming (i.e. it may take hours) for a large database. Normally this should not happen as Bacula takes care to prevent it. Just the same, this check can remove any orphaned File records. It is recommended that you run this once a year since orphaned File records can take a large amount of space in your database. You might want to ensure that you have indexes on JobId, FilenameId, and PathId for the File table in your catalog before running this command. .BR Orphaned Path records. This condition happens any time a directory is deleted from your system and all associated Job records have been purged. During standard purging (or pruning) of Job records, Bacula does not check for orphaned Path records. As a consequence, over a period of time, old unused Path records will tend to accumulate and use space in your database. This check will eliminate them. It is recommended that you run this check at least once a year. .BR Orphaned Filename records. 
This condition happens any time a file is deleted from your system and all associated Job records have been purged. This can happen quite frequently as there are quite a large number of files that are created and then deleted. In addition, if you do a system update or delete an entire directory, there can be a very large number of Filename records that remain in the catalog but are no longer used. During standard purging (or pruning) of Job records, Bacula does not check for orphaned Filename records. As a consequence, over a period of time, old unused Filename records will accumulate and use space in your database. This check will eliminate them. It is strongly recommended that you run this check at least once a year, and for large database (more than 200 Megabytes), it is probably better to run this once every 6 months. .BR Orphaned Client records. These records can remain in the database long after you have removed a client. .BR Orphaned Job records. If no client is defined for a job or you do not run a job for a long time, you can accumulate old job records. This option allow you to remove jobs that are not attached to any client (and thus useless). .BR All Admin records. This command will remove all Admin records, regardless of their age. .BR All Restore records. This command will remove all Restore records, regardless of their age. By the way, I personally run dbcheck only where I have messed up my database due to a bug in developing Bacula code, so normally you should never need to run dbcheck inspite of the recommendations given above, which are given so that users don't waste their time running dbcheck too often. .SH SEE ALSO .BR bls (1), .BR bextract (1). .br .SH AUTHOR This manual page was written by Jose Luis Tallon .nh . .SH COPYRIGHT This man page document is released under the BSD 2-Clause license. bacula-15.0.3/manpages/bregex.80000644000175000017500000000375414771010173016033 0ustar bsbuildbsbuild.\" Hey, EMACS: -*- nroff -*- .\" First parameter, NAME, should be all caps .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection .\" other parameters are allowed: see man(7), man(1) .TH BREGEX 8 "30 October 2011" "Kern Sibbald" "Network backup, utilities" .\" Please adjust this date whenever revising the manpage. .\" .\" Some roff macros, for reference: .\" .nh disable hyphenation .\" .hy enable hyphenation .\" .ad l left justify .\" .ad b justify to both left and right margins .\" .nf disable filling .\" .fi enable filling .\" .br insert line break .\" .sp insert n+1 empty lines .\" for manpage-specific macros, see man(7) .SH NAME bregex \- Bacula's 'regex' engine .SH SYNOPSIS .B bregex .RI [ options ] .I -f .br .SH DESCRIPTION This manual page documents briefly the .B bregex command. .br This program can be useful for testing regex expressions to be applied against a list of filenames. .PP .\" TeX users may be more comfortable with the \fB\fP and .\" \fI\fP escape sequences to invoke bold face and italics, .\" respectively. .SH OPTIONS A summary of options is included below. .TP .B \-? Show version and usage of program. .TP .BI \-d\ nn Set debug level to \fInn\fP. .TP .BI \-dt Print timestamp in debug output .TP .BI \-f\ The data-file is a filename that contains lines of data to be matched (or not) against one or more patterns. When the program is run, it will prompt you for a regular expression pattern, then apply it one line at a time against the data in the file. Each line that matches will be printed preceded by its line number. 
You will then be prompted again for another pattern. .TP .BI \-n Print lines that do not match .TP .BI \-l Suppress lines numbers. .SH SEE ALSO .BR regex(7) .br .SH AUTHOR This manual page was written by Bruno Friedmann .nh . This man page document is released under the BSD 2-Clause license. bacula-15.0.3/manpages/btape.80000644000175000017500000000454414771010173015650 0ustar bsbuildbsbuild.\" Hey, EMACS: -*- nroff -*- .\" First parameter, NAME, should be all caps .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection .\" other parameters are allowed: see man(7), man(1) .TH BTAPE 8 "26 November 2009" "Kern Sibbald" "Network backup, recovery and verification" .\" Please adjust this date whenever revising the manpage. .\" .SH NAME btape \- Bacula's Tape interface test program .SH SYNOPSIS .B btape .RI [ options ] .I device-name .br .SH DESCRIPTION This manual page documents briefly the .B btape command. .PP .\" TeX users may be more comfortable with the \fB\fP and .\" \fI\fP escape sequences to invoke bold face and italics, .\" respectively. .SH OPTIONS A summary of options is included below. .TP .B \-? Show summary of options and commands. .TP .BI \-b\ bootstrap Specify a bootstrap file. .TP .BI \-c\ config Specify configuration file. .TP .BI \-d\ nn Set debug level to \fInn\fP. .TP .BI \-p Proceed inspite of I/O errors. .TP .B \-s No signals (for debugging). .TP .B \-v Set verbose mode. .sp 3 .SH COMMANDS .TP .B autochanger test autochanger .TP .B bsf backspace file .TP .B bsr backspace record .TP .B cap list device capabilities .TP .B clear clear tape errors .TP .B eod go to end of Bacula data for append .TP .B eom go to the physical end of medium .TP .B fill fill tape, write onto second volume .TP .B unfill read filled tape .TP .B fsf forward space a file .TP .B fsr forward space a record .TP .B help print this reference .TP .B label write a Bacula label to the tape .TP .B load load a tape .TP .B quit quit btape .TP .B rawfill use write() to fill tape .TP .B readlabel read and print the Bacula tape label .TP .B rectest test record handling functions .TP .B rewind rewind the tape .TP .B scan read() tape block by block to EOT and report .TP .B scanblocks Bacula read block by block to EOT and report .TP .B status print tape status .TP .B test General test Bacula tape functions .TP .B weof write an EOF on the tape .TP .B wr write a single Bacula block .TP .B rr read a single record .TP .B rb read a single bacula block .TP .B qfill quick fill command .br .SH SEE ALSO .BR bscan (1), .BR bextract (1). .br .SH AUTHOR This manual page was written by Jose Luis Tallon .nh . .SH COPYRIGHT This man page document is released under the BSD 2-Clause license. bacula-15.0.3/manpages/bwild.80000644000175000017500000000431614771010173015653 0ustar bsbuildbsbuild.\" Hey, EMACS: -*- nroff -*- .\" First parameter, NAME, should be all caps .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection .\" other parameters are allowed: see man(7), man(1) .TH BWILD 8 "30 October 2011" "Kern Sibbald" "Network backup, utilities" .\" Please adjust this date whenever revising the manpage. 
.\" .\" Some roff macros, for reference: .\" .nh disable hyphenation .\" .hy enable hyphenation .\" .ad l left justify .\" .ad b justify to both left and right margins .\" .nf disable filling .\" .fi enable filling .\" .br insert line break .\" .sp insert n+1 empty lines .\" for manpage-specific macros, see man(7) .SH NAME bwild \- Bacula's 'wildcard' engine .SH SYNOPSIS .B bwild .RI [ options ] .I -f .br .SH DESCRIPTION This manual page documents briefly the .B bwild command. .br This is a simple program that will allow you to test wild-card expressions against a file of data. .PP .\" TeX users may be more comfortable with the \fB\fP and .\" \fI\fP escape sequences to invoke bold face and italics, .\" respectively. .SH OPTIONS A summary of options is included below. .TP .B \-? Show version and usage of program. .TP .BI \-d\ nn Set debug level to \fInn\fP. .TP .BI \-dt Print timestamp in debug output .TP .BI \-f\ The data-file is a filename that contains lines of data to be matched (or not) against one or more patterns. When the program is run, it will prompt you for a wild-card pattern, then apply it one line at a time against the data in the file. Each line that matches will be printed preceded by its line number. You will then be prompted again for another pattern. .br Enter an empty line for a pattern to terminate the program. You can print only lines that do not match by using the \-n option, and you can suppress printing of line numbers with the \-l option. .TP .BI \-n Print lines that do not match .TP .BI \-l Suppress lines numbers. .TP .BI \-i use case insensitive match. .SH SEE ALSO .BR fnmatch(3) .br .SH AUTHOR This manual page was written by Bruno Friedmann .nh . This man page document is released under the BSD 2-Clause license. bacula-15.0.3/README.dynlog.txt0000644000175000017500000001271214771010173015656 0ustar bsbuildbsbuildBacula's Dynamic logging with gdb's dprintf It is possible to add some logging with the help of GDB and without modifying the binary. The logging goes into the trace file with other logging from Dmsg() functions Be sure that gdb can attach to any process $ cat /proc/sys/kernel/yama/ptrace_scope 0 If not zero, run the following command $ echo 0 | sudo tee /proc/sys/kernel/yama/ptrace_scope or modify /etc/sysctl.d/10-ptrace.conf to get the change persistent The you must tweak the following script to add your own logging -------------------------------8<------------------------------- #!/bin/sh # DAEMON can be "fd", "sd" or "dir" depending the Bacula's # component you want to trace DAEMON=fd PID=`pgrep -x bacula-$DAEMON` filename=`mktemp --suffix $PID gdb.XXX` trap "rm $filename" EXIT cat >> $filename <8------------------------------- See the line dprintf at the end, you can modify it and add some more. The arguments can be split in 3 groups - the location where the logging must be done in the source code - the message format - a list of variables that you want to print and that match your format string "restore.c:487" is the location in the sources where the message must be printed "restore.c:487-%u file_index=%ld\n" is the format string surrounded with double quote. To match the Bacula's messages we repeat the location and add %u to print the jobid. Then come the useful information that you want to display. The conversion specifier are the same as the one used inside Bacula in Dmsg(), Mmsg() or bsnprintf(), they are a little bit different of the one used standard "printf" function. 
Unfortunately, when using compiler optimization (gcc -O2), as we do for our
production binaries, some variables are "optimized out". This means that some
variables share the same location, and the value depends on where you are in
the function. If a variable "A" is only used at the beginning of a function
and a variable "B" only at the end, there is a chance that they will share the
same location: if you add a "dprintf" at the beginning you can probably print
the "A" value but not the "B" one, and it is the opposite at the end of the
function. The optimization can vary from one compiler version to another, so
if you test your "dprintf" on a Debian system for a customer that runs a
Red Hat system with a different gcc, the variable may be optimized in a
different way :-(.

The best way to know which variables can be printed at a given place is to use
the same binaries as the target, set up a breakpoint where you need the
information, wait for the program to stop there, and use the command
"info locals" to show which variables are available.

Real Dmsg() calls do not suffer from this problem, because the compiler
optimizes the variables around the requirements of the program and the Dmsg
themselves.

Some information about the instructions in the script:

set target-async on
set non-stop on

The two lines above tell gdb not to stop when it reaches the location that we
want to monitor.

attach $PID

These two first lines must be executed before attaching to a process, which is
why we attach the process afterwards.

handle SIGUSR2 nostop

Bacula uses USR2 internally and we do not want to interrupt gdb when these
signals are used.

set overload-resolution off

Our "gdb_dprintf()" below expects a variable number of arguments and this is
the only way for gdb to accept the function without complaining.

set dprintf-style call
set dprintf-function gdb_dprintf

This is where we tell GDB to use our function "gdb_dprintf()" instead of the
standard "printf()".

dprintf restore.c:487, "restore.c:487-%u file_index=%ld\n", get_jobid_from_tsd(), file_index

We can add as many dprintf as we want...

cont

Then tell gdb to continue running the program after the setup.

To enable the extra logging:

1 - edit the script above
    a - choose the component you want to monitor, the "fd", "sd" or "dir"
    b - edit or add more dprintf lines
2 - enable logging on the component using the setdebug command, for example:
    * setdebug level=100 trace=1 client
3 - run the script above
    # sh gdbdynlog.sh
4 - when you are done, interrupt gdb with CTRL-C, then quit gdb with the
    "quit" command and answer yes to the last question
5 - stop the extra logging using the setdebug command
    * setdebug level=0 trace=0

Before sending a script and these instructions to a customer, verify that your
dprintf works on the binaries used by the customer!

For the example above, you should see something like this in your trace file:

-------------------------------8<-------------------------------
...
bac-fd: authenticatebase.cc:561-32 TLSPSK Start PSK
bac-fd: restore.c:487-32 file_index=1
...
bac-fd: restore.c:487-32 file_index=3013
bac-fd: restore.c:1147-32 End Do Restore. Files=3013 Bytes=195396973
bac-fd: job.c:3484-32 end_restore_cmd
------------------------------->8-------------------------------

When you are done you must interrupt gdb with CTRL-C, then quit gdb with the
quit command and answer yes to the last question

CTRL-C
(gdb) quit
A debugging session is active.

  Inferior 1 [process 19286] will be detached.

Quit anyway?
(y or n) y Detaching from program: /home/bac/workspace2/bee/regress/bin/bacula-fd, process 19286 bacula-15.0.3/ReleaseNotes0000644000175000017500000050246114771010173015206 0ustar bsbuildbsbuild Release Notes for Bacula 15.0 This is a major release with many new features and a number of changes. Please take care to test this code carefully before putting it into production. Although the new features have been tested, they have not run in a production environment. Compatibility: -------------- As always, both the Director and Storage daemon(s) must be upgraded at the same time. Any File daemon running on the same machine as a Director or Storage daemon must be of the same version. Older File Daemons should be compatible with the 15.0 Director and Storage daemons. There should be no need to upgrade older File Daemons. In 15.0, we have upgraded the volume format from BB02 to BB03 to support options such as the Volume encryption. Old volumes can still be used by the 15.0 Storage Daemon, however, new 15.0 BB03 volumes cannot be used by old Storage Daemons. New Catalog format in version 15.0.0 and greater ------------------------------------------------ This release of Bacula uses a new catalog format. We provide a set of scripts that permit conversion from 9.x and earlier versions to the new 15.0 format (1026). Normally the conversion/upgrade is automatic, though there is a big change from 9.x to 11.0 that takes longer than usual, the upgrade process will require about twice the disk space of the actual database. The database upgrade introduced in 11.0 should significantly increase performance when inserting a large number of Jobs with a lot of Files into the database catalog. If you start from scratch, you don't need to run the update_bacula_tables script because the create_bacula_tables script automatically creates the new table format. However, if you are using a version of Bacula older than 5.0.0 (e.g. 3.0.3) then you need to run the update_bacula_tables and the grant_bacula_privileges scripts that will be found in the /src/cats directory after you run the ./configure command. As mentioned above, before running this script, please backup your catalog database, be sure to shutdown Bacula and be aware that running the script can take some time depending on your database size. Release 15.0.3 / 25 March 2025 ---------------------------------------------------------------- 15.0.3 is a minor bug fix release. 
- Use the CLOEXEC function in sockets and all file descriptors - BSOCK improve POLL to detect and report errors - Check for backquote in check_for_invalid_chars() function - Detect unsolvable volume cycle in split_bsr_loop() - Enforce malware database download from Abuse.ch - Fix #10947 time output without century for locale that use multi bytes utf8 - Fix #10985 Report the FD/SD Encryption in the Job record and the job output - Fix #11048 About LastBackedUpTo StorageGroup policy not correctly set - Fix #11058 About fsync error reported for tape driver - Fix #11197 About error when using bextract on ZSTD compressed data - Fix #11251 About bcopy not mounting correctly volumes provided with -i option - Fix Client/Uname field not always updated after a status client - Fix compilation issue with zstd and without lzo - Fix compilation variable in var.c and expand.c - Fix org#2714 Fails to take TLS Allowed CN into account - Fix org#2738 About error message with generated fileset - Fix org#2748 About compilation error with ZSTD and not LZO enabled - Update bsmtp copyright information - baculum: Add cloud storage status to SD status endpoint - baculum: Add enable and disable client, storage, job and schedule endpoints - baculum: Add name and sort parameters to filesets filter - baculum: Add new query parameters in M365EmailList endpoint - baculum: Fix #2722 port from Bacularis fix for displaying schedule list - baculum: Fix compatibility with very old PHP 5.4 - baculum: Fix missing scopes on supported OAuth2 scope list - baculum: Fix sorting in filesets endpoint if unique filesets parameter is used - baculum: Update API documentation - bpipe: Fix org#2737 About segfault with bpipe - k8s: Add Ingress integration backup/restore - k8s: Add dockerfile to create an image to compile k8s plugin in any debian/ubuntu distribution - k8s: Add more options to debug - k8s: Add new level(In pvc annotations) in selection of backup mode - k8s: Add parallel job in same namespace - k8s: Add pvc annotation takes precedence without pod annotation - k8s: Add pvc clean up from old backup jobs - k8s: Avoid pvc data when pvc is in Pending status. 
- k8s: Fix redoing backup when pvcdata is 0 bytes and the backup mode is standard
- k8s: Avoid pvcs backup when the pvc status is Terminating
- k8s: Fix #0010901 - Problem when restore service clusterIPs
- k8s: Fix #0011005: ModuleNotFoundError: No module named 'baculak8s.plugins.k8sbackend.ingress'
- k8s: Fix get provisioner permissions
- k8s: Fix restore problem where a pod requires the pvc data when it starts
- k8s: Fix show pvcdata message when you don't use it
- k8s: Get images from repositories with auth
- show minimal backtrace if gdb is not installed
- win32: Fix unwanted debug messages in the Windows File Daemon

Bugs fixed/closed since last release: 10291 10901 10947 10985 11005 11048 11058 11197 11251 2722

----------------------------------------------------------------
Release 15.0.2 / 21 March 2024
----------------------------------------------------------------

Security:
- Director TOTP Console authentication plugin
- Better restricted console support
- Add Storage Daemon Volume encryption support
- Add support for Immutable filesystem flag for volumes
- Add support for Append Only filesystem flag for volumes
- ClamAV Antivirus plugin
- Malware detection code (via Abuse.ch)
- Add AllowedBackupDirectories FileDaemon's directive
- Add AllowedScriptDirectories FileDaemon's directive
- Add ExcludeBackupDirectories FileDaemon's directive
- Add AllowedRestoreDirectories FileDaemon's directive

Management:
- New FreeSpace and LastBackedUpTo storage group policy
- New ZSTD fileset compression support
- Add Kubernetes CSI Volume Snapshot support
- Add Amazon Cloud driver (replacing the libS3 cloud driver)
- Switch Storage Daemon volume format from BB02 to BB03
- New Bacula Installation Manager (BIM) to ease the installation
- Add runscript "AtJobCompletion" execution option

Catalog changes:
- FileSet content description in the FileSet table
- Add Job/RealStartTime catalog field
- Add Job/Encrypted catalog field
- Add Media Protected and UseProtect fields
- Add Media VolEncrypted field
- Add FileEvent table and "list fileevent" to track malware and viruses
- Plugins list available in the Client table
- Store verified jobid into the catalog PriorJobId Job field

Console changes:
- Add JSON output to various commands (.jlist, .api 2 api_opts=j)
- .help enhancement with description of commands
- help command restricted to the available commands
- Add .search bconsole command
- Add bconsole "list joblog jobid=x pattern=xxx" option
- Add fileindex=jobid,fidx option in .bvfs_restore
- Add VolType to .bvfs_versions
- Add "update volumeprotect storage=xxx" bconsole command
- Add "status dir novolume" to avoid computing volumes in the status director output
- Limit the "status dir" schedule output to 50 jobs.
Can be managed via "limit=x offset=y" parameters - Add new error codes to job messages - Update timestamp of the pid file after a reload command - Add ".status dir client=xxxx" filter - Add "list jobs reviewed=<1|0>" command - Add Runscript to control the run queue (RunsWhen=Queue) - Add ".ls dironly" bconsole command - Add new Job statuses when the Job is waiting on SD/FD - Add new PriorJobId and PriorJobName to volume label format variables - Progress Status for Copy/Migration Jobs in "status director" output - Add "list fileevent" bconsole command Baculum and Rest API changes: - Add joberrors parameter to jobs endpoint - Add fileset parameter to objects endpoint - Add filename and path properties to fileevent endpoints - Add sorting parameters to clients endpoint - Add running jobs property to clients endpoint - Add documentation for os and version filters in clients endpoint - Add os and version parameters to clients endpoint - Add os, version properties and overview parameter to clients endpoint - Add endpoint to check disk archive device prformance on storage - Add endpoint to list files and dirs on storage daemon host - Add delete pool endpoint - Add delete object endpoint - Use new delete module in volume and job endpoints - Add module for delete command - Add client name parameter to clients endpoint - Add file events API endpoint - Improve extended name validator - Speed up dashboard page loading - Fix parsing director time in time endpoint - Add objecttype parameter to object categories endpoint - Add second dimensional sorting and use it for sorting jobstatus in - Add modify default object sorting in object overview endpoint - Fix support for PHP 5.4 in web interface layer - Fix content field in job record - Improve support for newer PostgreSQL versions - Add fileset content property to jobs endpoint - Add object categories endpoint - Add support for ALL action in console ACL - Add objecttype filter to objects names endpoint - Add default sorting by endtime to objects overview endpoint - Add sorting parameters to volumes overview endpoint - Add second dimension of sorting in jobs objects endpoint - Add offset and limit parameters to director status endpoint - Add sorting by endtime and add endtime property to objects overview - Add group_order_by and group_order_direction parameters to documentation - Add notes about object type filters in objects overview endpoint - Fix storing ACL config actions for very old PHP versions - Add objectsize property to objects overview endpoint - Add job type property to objects overview endpoint - Add path property to objects overview endpoint - Fix offset and limit parameter in jobs objects endpoint - Add objectname parameter to jobs objects endpoint - Add objectsource property to objects overview endpoint - New API config ACLs - Add group_order_by and group_order_direction parameters to objects - Add to grup function sorting group capability - Add sorting by joberrors if sorted by jobstatus first - Improve using unique_objects parameter in object endpoint - Add group_offset and unique_objects parameters to objects endpoint - Add volume names endpoint - Add object names endpoint - Add object types endpoint - Add documentation for client parameter in objects endpoint - Add job status filter to objects endpoint - Add joberrors filter to sources endpoint - Add server parameter to list vsphere datastores endpoint - Add job level property to sources endpoint - Add pool resnames endpoint - Add storage resnames endpoint - Add director time endpoint - 
Change M365 tenants endpoint output to contain tenant names
- Update documentation
- Add object overview endpoint
- Add client plugin list endpoint
- Add content parameter to filesets endpoint
- Add directive filter to config endpoints
- Add enabled filter to clients show endpoint
- Add endpoint to list AWS cloud buckets
- Split client overview endpoint into reachable and unreachable clients
- Adapt storage file ls command parameters to new form
- Add cancel jobs running on storage endpoint
- Add delete client endpoint
- Add endpoint to create directory on storage daemon host
- Add endpoint to get device disk usage on storage daemon host
- Add endpoint to list SCSI tape devices on storage daemon host
- Add fileset filter to objects overview endpoint
- Add job name and fileset to status client endpoint
- Add job type parameter to objects overview endpoint
- Add jobdefs list endpoint
- Add jobstatus filter to objects overview endpoint
- Add name parameter to storages endpoint
- Add option to interpret Bacula error codes by API
- Add parser for diskperf command output
- Add regex operator support in queries
- Add restricting resources in objects overview endpoint
- Add type parameter to clients endpoint
- Add usage of multiple content values in filesets endpoint
- Add volume statistics endpoint
- List only reachable/unreachable clients in clients endpoint

Misc:
- Add XXHASH to FileSet signature option
- Add plugins for Verify jobs
- Display mtime instead of ctime in estimate listing output
- Add specific jobstatus when executing Runscripts
- New man pages
- Add %i (jobid) to edit_device_codes(), can be used in storage daemon scripts
- Pass comment field to copy/migration jobs from the control job
- Add JobTimestamp variable for volume label format
- Improve BSR cycle detection and resolution

Release 15.0.1 / 13 February 2024
----------------------------------------------------------------
15.0.1 is a minor bug fix beta release.

- cloud: Fix #10525 Add device name to the transfer fields
- Fix #10163 Add %i (jobid) to edit_device_codes()
- Fix #10365 Pass comment field to copy/migration jobs from the control job
- Fix #10524 About adding JobTimestamp variable for volume format
- Fix #10401 About issue when truncating immutable volume
- Fix #10453 volume with a wrong label
- Fix #10513 About show command issue with incorrect storage configuration
- Fix #10631 remove unauthorized Jmsg() in BSOCK::recv()
- Fix #2699 About SQLite update script
- Fix #2701 compilation of bjoblist
- Fix JSON output in .status dir running
- Fix openssl 3.x not tolerating EVP_CipherFinal_ex() being called twice
- Fix org#2440 Improve Makefiles to use relative paths
- Fix org#2561 Convert text from ISO-8859 to UTF8
- Fix org#2698 about error with osx platform
- Fix org#2704 about old FD compatibility
- Fix org#2705 about issue with accurate checking of new file signature attributes
- Fix restore issue when compression is enabled but not available
- Fix warning about BSOCK::send()
- Fix zlib compression being disabled in FD
- Fix: #0010535.
Problem with k8s snapshot version - baculum: Add application version endpoint - baculum: Add client plugin list endpoint - baculum: Add content parameter to filesets endpoint - baculum: Add directive filter to config endpoints - baculum: Add enabled filter to clients show endpoint - baculum: Add endpoint to list AWS cloud buckets - baculum :Split client overview endpoint into reachable and unreachable clients - baculum: Adapt storage file ls command parameters to new form - baculum: Add cancel jobs running on storage endpoint - baculum: Add delete client endpoint - baculum: Add endpoint to create directory on storage daemon host - baculum: Add endpoint to get device disk usage on storage daemon host - baculum: Add endpoint to list SCSI tape devices on storage daemon host - baculum: Add fileset filter to objects overview endpoint - baculum: Add job name and fileset to status client endpoint - baculum: Add job type parameter to objects overview endpoint - baculum: Add jobdefs list endpoint - baculum: Add jobstatus filter to objects overview endpoint - baculum: Add name parameter to storages endpoint - baculum: Add option to interpret Bacula error codes by API - baculum: Add parser for diskperf command output - baculum: Add regex operator support in queries - baculum: Add restricting resources in objects overview endpoint - baculum: Add type parameter to clients endpoint - baculum: Add using multiple content values in filesets endpoint - baculum: Add volume statistics endpoint - baculum: Fix content property in sources endpoint - baculum: Fix count property in volume overview endpoint - baculum: Fix name parameter in jobs objects endpoint - baculum: Fix using error module - baculum: List only reachable/unreachable clients in clients endpoint - baculum: Mask sensitive AWS data in debug log - baculum: Update API documentation - cloud: Fix #10291 Assume that driver ls can return an error when scanning an unexistant cloud volume and loosen the conditions that handle this case - cloud: Fix #10685 TruncateCache at endofjob was not processed due to wrong transfer status verification - cloud: proof guard truncation - cloud: test compare upload to AWS with 4 different methods, including bacula post-upload - k8s: Fix compilation problem - k8s: Fix csi compatibility - k8s: Fix problem when it restores a 'namespace' - k8s: Fix pvc naming error in csi snapshots - rpms: Fix cloud spec file for redhat8 Bugs fixed/closed since last release: 10163 10291 10365 10401 10453 10513 10524 10525 10535 10591 10604 10631 10685 2699 2701 ---------------------------------------------------------------- Release 13.0.3 / 02 May 2023 ---------------------------------------------------------------- 13.0.3 is a minor bug fix release with several new features and a number of bug fixes. 
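
As a quick illustration of several console additions listed under the
15.0.2 "Console changes" above, the following hedged example drives them
non-interactively; the bconsole path, jobid 1234 and the storage name
File1 are placeholders, and .jlist is assumed to accept the same
arguments as the regular list commands it mirrors:

  #!/bin/sh
  /opt/bacula/bin/bconsole -c /opt/bacula/etc/bconsole.conf <<END_OF_DATA
  .api 2 api_opts=j
  .jlist jobs
  list joblog jobid=1234 pattern=Error
  list fileevent
  status dir novolume
  status dir limit=10 offset=0
  update volumeprotect storage=File1
  quit
  END_OF_DATA
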
- Fix #10030 About small issue while canceling the restore command - Fix #9968 Enhance restricted Console support - Fix #10032 Allow restore menu 1 to users without sqlquery command ACL - Fix #10033 Add extra Client and FileSet ACL checks to the estimate command - Fix #9907 About Director crash with Runscript Console - Fix #9968 Adapt restore menu and add RBCLIENT/BCLIENT in some ACL SQL checking - Fix small memory leak with setbandwidth command - Adapt delete, prune, purge commands to work with restricted consoles - Check ClientACL in acl_access_jobid_ok() - Check Pool specific ACL in select_media_dbr() - Disable Bootstrap manual selection in restore for Restricted Console - Do not display specific SQL errors to restricted consoles - Include BackupClient in list jobs, list jobmedia, list joblog - Restrict the use of local files during the restore file selection process when using a Restricted Console - Take the first valid FileSet for the restore Job with restricted consoles - baculum: Add checking errors in output from vsphere plugin servers, hosts and datastores commands - baculum: Add new m365 plugin mailbox list endpoint - baculum: Add VMware vSphere datastore list endpoint - baculum: Add VMware vSphere host list endpoint - baculum: Add VMware vSphere restore host list endpoint - baculum: Add VMware vSphere server list endpoint - baculum: Add a new endpoint to list jobs together with objects - baculum: Add client filter to objects endpoint - baculum: Add client property to object and objects endpoint - baculum: Add client resnames endpoint - baculum: Add client, pool, fileset and fileset content properties to jobs objects endpoint - baculum: Add displaying bconsole command output if command is multiline - baculum: Add enabled flag filter to volumes endpoint - baculum: Add endpoint to list m365 jobs by email - baculum: Add endtime property and filters to source list endpoint - baculum: Add estimated job values endpoint that uses job historical data for estimation - baculum: Add fileindex parameter to bvfs restore endpoint - baculum: Add fileset and filesetid filters to jobs objects endpoint - baculum: Add fileset content property to sources endpoint output - baculum: Add job errors property to objects endpoint - baculum: Add job errors property to sources endpoint - baculum: Add job status property to objects endpoint - baculum: Add job type to sources endpoint - baculum: Add joberrors filter to jobs objects endpoint - baculum: Add joberrors filter to objects endpoint - baculum: Add mediaid to volume overview endpoint - baculum: Add method to execute SQL queries - baculum: Add more detailed output to restore endpoint - baculum: Add objectid parameter to Bvfs restore endpoint - baculum: Add objecttype parameter to jobs objects endpoint - baculum: Add offset and limit parameters to bvfs versions endpoint - baculum: Add offset parameter to filesets endpoint - baculum: Add offset parameter to m365 plugin email list endpoint - baculum: Add offset parameter to sources endpoint - baculum: Add options to configure preserving table settings - baculum: Add order_by and order_direction parameters to sources endpoint - baculum: Add order_by and order_direction parameters to volumes endpoint - baculum: Add order_by and order_direction params to jobs objects endpoint - baculum: Add output parameter to run job endpoint - baculum: Add output parameter to run restore endpoint - baculum: Add overview parameter to job list endpoint - baculum: Add overview parameter to objects endpoint - baculum: Add pool 
filter to volumes endpoint - baculum: Add priorjobname parameter to jobs endpoint - baculum: Add priorjobname property to jobs endpoint - baculum: Add restore_host parameter to vsphere plugin datastores API endpoint - baculum: Add server parameter to vsphere plugin hosts API endpoint - baculum: Add sorting by jobstatus to objects endpoint - baculum: Add starttime property to objects endpoint - baculum: Add storage filter to volumes endpoint - baculum: Add support for cloud storage commands - baculum: Add support for plugin filter in client list endpoint - baculum: Add tenant indentifier list endpoint - baculum: Add to documentation missing type parameter to job resnames endpoint - baculum: Add to estimated job values endpoint average number of backed up objects - baculum: Add to jobs endpoint time filters in date/time format - baculum: Add to jobs objects endpoint time filters in date/time format - baculum: Add to objects endpoint capability to sort by client name - baculum: Add to objects endpoint time filters in date/time format - baculum: Add volerrors property to volumes overview endpoint - baculum: Add volstatsu property to volumes overview endpoint - baculum: Add volstatus filter to volumes endpoint - baculum: Add voltype parameter to volumes endpoint - baculum: Add voltype property support in bvfs output parser - baculum: Add volumename filter to volumes endpoint - baculum: Add volumes overview endpoint - baculum: Add when parameter to run job endpoint - baculum: Change a way of executing SQL queries - baculum: Change a way of preparing overview with counters in objects endpoint - baculum: Change overview behaviour in objects endpoint if used together with groupby parameter - baculum: Disable querying API for seeing which authentication methods are supported - baculum: Enhance validation in time period control - baculum: Extend object name validation pattern - baculum: Fix compatibility with PHP 5.4 - baculum: Fix documentation about jobids parameter in bvfs restore endpoint - baculum: Fix documentation for date parameters - baculum: Fix example values in OpenAPI documentation - baculum: Fix listing restore job in job endpoints - baculum: Fix losing autochanger directive value in storage resource in director configuration - baculum: Fix m365 user list endpoint - baculum: Fix offset and limit parameters for case when storage in catalog is inconsistent with configuration - baculum: Fix problem with double jobids in jobs objects endpoint - baculum: Fix support for PHP 5 - baculum: Improve identifier validator - baculum: Improve job statuses for job overview purpose - baculum: Improve jobs objects endpoint working - baculum: Improve precision in show command output parser - baculum: Make show command output parser more accurate - baculum: Remove overview, order_by, order_direction and object_limit parameters from jobs objects endpoint - baculum: Rework and improve sources endpoint - baculum: Update documentation - baculum: add offset parameter support in new job objects endpoint - rpms: Add rhel9 target to spec file Bugs fixed/closed since last release: 10030 10032 10033 9907 9968 ---------------------------------------------------------------- Release 13.0.2 / 16 February 2023 ---------------------------------------------------------------- 13.0.2 is a minor bug fix release. 
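
Several of the 13.0.3 items above tighten how restricted Consoles are
handled (delete/prune/purge, the restore menu, Client and Pool ACL
checks). For context, a hedged sketch of what such a restricted Console
resource typically looks like in bacula-dir.conf; every name below is a
placeholder:

  Console {
    Name = ops-restricted
    Password = "use-a-strong-password"
    # Limit what this console may see and do; the fixes listed above
    # concern how these ACLs are enforced.
    JobACL     = "BackupClient1"
    ClientACL  = "client1-fd"
    FileSetACL = "Full Set"
    PoolACL    = "File"
    StorageACL = "File1"
    CatalogACL = "MyCatalog"
    CommandACL = "run", "restore", "list", "llist", "status"
    WhereACL   = "/tmp/bacula-restores"
  }
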
- Fix #9535 avoid "Will not descend from / to /good_dir" - Fix #9568 About "cancel inactive" command Storage Daemon selection - Fix #9614 Re-create Jobs with bscan only if the bootstrap is matching - Fix #9686 Grant PROCESS privilege to bacula user to allow catalog backup - Fix #9876 Update information printed during file restore error - Fix #9882 About tapealert script issue on rhel8 - Fix Cython detection on python >= 3.8 - Fix bconsole command issue after a first error - Fix errors in update_bacula_tables - Fix org#2577 Remove -f option from MySQL update scripts to detect errors properly - Fix org#2628 About improving the update_bacula_tables script on up to date catalogs - Fix org#2665 About memory leak on FreeBSD with extended attributes - Fix org#2666 About fixing getaddrinfo check in ./configure - baculum: Add afterjobid parameter to job list endpoint - baculum: Add jobids parameter to objects endpoint - baculum: Add plugin column - baculum: Add age parameter to jobs and objects endpoints - baculum: Add capability to restore using plugin - baculum: Add dedupengine output type to status storage - baculum: Add documentation for component actions - baculum: Add documentation to new job sort parameters - baculum: Add documentation to new jobids parameter in job list endpoint - baculum: Add event list and single event record endpoints - baculum: Add group_limit, order_by and order_direction parameters to objects endpoint - baculum: Add groupby parameter to object list endpoint - baculum: Add job sum statistics endpoint - baculum: Add jobids parameter to Bvfs update endpoint - baculum: Add missing objectid parameter to API documentation - baculum: Add multiple jobids filter to jobs endpoint - baculum: Add new fileindex property to objects - baculum: Add new filters to object category sum endpoint - baculum: Add new job, fileset and media properties support - baculum: Add object category stats endpoint - baculum: Add object category status endpoint - baculum: Add object size statistics endpoint - baculum: Add object versions endpoint - baculum: Add offset parameter to event and pool list endpoint - baculum: Add offset parameter to jobs, objects and volumes endpoints - baculum: Add offset parameter to messages endpoint - baculum: Add offset parameter to storage and client list endpoint - baculum: Add option to enable/disable audit log - baculum: Add patch for offset parameter support in SQL queries - baculum: Add query command support, object endpoint and m365 user list endpoint - baculum: Add restore plugin option fields endpoint - baculum: Add restore plugin options endpoint - baculum: Add search Bacula items endpoint - baculum: Add single object record endpoint - baculum: Add sources endpoint - baculum: Add time range parameters to jobs endpoint - baculum: Add time range parameters to objects endpoint - baculum: Add to jobs endpoint parameters to sort property and sort order - baculum: Fix OFFSET parameter in PHP framework - baculum: Fix sources endpoint double results - baculum: Fix time range filter for job and object endpoints - baculum: Fix using multiple job statuses in list jobs jobstatus filter - baculum: Fix using operators for SQL queries - cloud: Fix #8351 Catalog part number correction notification goes debug - cloud: Fix #9508 transfer remove dcr use for JobId - cloud: Fix #9606 Rearange POOLMEM usage in cb functions - k8s: Add support for Python3.10 - win32: Fix org#2667 enable sockaddress_storage for windows - win32: Switch to openssl 1.1.1t Bugs fixed/closed since last 
release: 8351 9508 9535 9568 9606 9614 9686 9876 9882 Release 13.0.1 / 05 August 2022 13.0.1 is a minor bug fix release. - Fix org#2594 About compilation warning on VolRead/WriteTime - Fix org#2644 Add support for binary files to bacula md5sum - Fix org#2655 About incorrect definition of MAX_FOPTS - Fix org#2656 About incorrect error message on TLS CA Certificate - Fix org#2657 About startup problem for bacula-sd - baculum: Adapt code to use PSR-4 autoloader - baculum: Fix #2653 PHP warning about wrong array_key_exists() parameter in session record - baculum: Improve logging and add audit log - osx: Fix #9309 about extended attribute backup error on macOS - Rework MacOS package - rpms: Add kubernetes spec file - rpms: Add spec file for k8s tools - rpms: Fix cloud package - rpms: more work on docker and docker tools - win32: update openssl to 1.1.1q Bugs fixed/closed since last release: 2653 9309 Release 13.0.0 04 July 2022 13.0.0 is a major release. New Features: ------------- - Job 'Storage Group' support - Kubernetes plugin - New Accurate option to save only file's ACL and metadata - Windows CSV (Cluster Shared Volumes) support - More logging for daemon<->daemon connections in Job log output - Tag support on catalog objects - Support for SHA256 and SHA512 signatures in FileSet - External LDAP Console authentication Misc Features: - Windows installer 'Silent Mode' options - Add PriorJob to bconsole 'llist job' output - Check for IP SANs when verifying TLS certs - Clarify SD vbackup Device error message - Remove deprecated sbrk in MacOS and Windows - Add bconsole .jlist command to get JSON output from regular list commands - Ensure that the Director will reject catalog updates from the FD - Add variable for PreviousJobId in mail messages - Respect the 'nodump' flag in more OSes than just BSD - Add debug/trace/tags information to .status header - Handle lin_tape end of device with the new 'Use Lintape=yes' Device directive - Add MaximumJobErrorCount FileDaemon directive - bsmtp: Add the possibility to add emails separated with a comma as recipient list - SDPacketCheck FileDaemon used to control the network flow - Add bconsole .bvfs_lsfiles allfiles command Main Fixes: - Fix org#2188 About the presence of FileSet and Pool directives in the Job - Fix Director crash for Client Initiated Backup - Fix Director crash for Migration Job - Fix incorrect output for the .status client command - Skip XATTR larger than MaximumNetworkBuffer - Fix deadlock when starting the Director with an improperly configured catalog - Fix Director crash caused by BAT - Fix org#2627 About Director crashing for Copy Jobs and resource rename - Move the delete volume event just before the actual deletion - Fix mail variables not working after a conf reload - Fix OpenBSD chio-changer script - Fix SQL query generated with ACLs - Fix heartbeat segfault when the Job is terminated very quickly - Fix About wrong backup Client displayed to the user when the original Client doesn't exist - Fix org#2605 About incorrect message in restore command - cdp: open the inotify stream using the CLOEXEC (close on exec) flag - docker: Check the presence of the docker tools during loadPlugins() - Fix reload issue when a Job doesn't have a Pool defined - Fix Copy Job with SelectionType=PoolUncopiedJobs selecting Jobs from wrong Pool - Fix about checking for Storage being used for Job restart/resume - Fix about incorrect variable substitution with the query command - Fix org#2579 About incorrect JSON generated from empty Messages resource - Fix 
#9116: copy job missuses the client->FdStorageAddress directive - Fix org#2658 About segfault with bsdjson with incorrect parameters - Skip storage daemon detection if the information is not available in the BSR - alist: Fix for memory overflow access - Fix org#2659 Install dbcheck and bsmtp in 755 - Fix org#2662 About SQLite migration script issue - snapshot: Adapt for BTRFS 5.17 - snapshot: Fix snapshot delete/prune command - snapshot: Fix #9143 About snapshot not properly stored in the catalog - snapshot: Add support for new LVM 2.03.15 - win32: Update to OpenSSL 1.1.1q - rpms: Fix org#2633 about log directory not created on Centos7 GUI: - baculum: Fix clearing OAuth2 properties after testing API connection on security page - baculum: Fix directing to default page after log in for users with non-admin roles - baculum: Fix #2667 keep original fileset options order - baculum: Add to install wizard pre-defined b*json tool paths for FreeBSD and older Debian/Ubuntu - baculum: Fix #2661 required parameter PHP error on PHP 8.0 - baculum: Fix error calling method_exists() with non-objects on PHP 8 - baculum: Fix clearing OAuth2 properties after testing API connection on security page - baculum: Fix directing to default page after log in for users with non-admin roles - baculum: Add to install wizard pre-defined b*json tool paths for FreeBSD and older Debian/Ubuntu - baculum: Fix #2661 required parameter PHP error on PHP 8.0 - baculum: Fix error calling method_exists() with non-objects on PHP 8 ---------------------------------------------------------------- Release 11.0.6 10 March 2022 11.0.6 is an important bug fix and security fix release. We advise all 11.0.x users to upgrade to this version. - Adjust sample-query.sql file for new catalog schema - Fix #2654 About compilation issue on Alpine Linux - Fix #2656 About segfault in XATTR code for FreeBSD - Fix #7776 About FD error not correctly reported in the Job log - Fix #7998 About Director crashing for client initated backup - Fix #8126 About strange output for the .status client command - Fix MySQL default connection in the grant_mysql_privileges script - Fix db_get_accurate_jobids() with concurrent queries on the same Jobs - Fix issue with MySQL 8 in src/cats/grant_mysql_privileges - Fix detection of PSK - Fix org#2622 About incorrect behavior of the MaxDiffInterval directive - Fix org#2623 About .ls/estimate command not printing files correctly - Fix org#2627 About Director crashing for Copy Jobs and resource rename - win32: Upgrade OpenSSL to 1.1.1m - Got regression testing working correctly on FreeBSD - Update depkgs version to use latest libs3 - baculum: Add API endpoints for basic user management - baculum: Add JSON output parameter to show client(s), show job(s), show pool(s) API endpoints - baculum: Add capability to assign dedicated bconsole config file to API basic users - baculum: Add capability to close modal windows on clicking gray shadow - baculum: Add capability to provide translated directive documentation file - baculum: Add capability to use pre-defined paths in API config wizard - idea proposed by Heitor Faria - baculum: Add console page to configure consoles - baculum: Add copy resource function to enable duplicating resources - baculum: Add director show API endpoint - baculum: Add documentation for directives - baculum: Add interface to manage basic users API from Web component side - baculum: Add jump to previous/next error navigation in messages window - baculum: Add new columns to job list page - idea proposed by 
Sergey Zhidkov - baculum: Add option to enable/disable messages log window - idea proposed by Bill Arlofski - baculum: Add password generator added to password fields - baculum: Add time range filters to job history page - idea proposed by Heitor Faria - baculum: Add to API deleting volume from the catalog endpoint - baculum: Add to config API endpoint parameter to apply jobdefs in results - baculum: Add to directive controls option to hide reset button and remove button - baculum: Add warning to running job status if job needs media - baculum: Apply PRADO framework patches to support PHP 8 - baculum: Backup job wizard improvements - baculum: Change buttons on dashboard page - reported by Sergey Zhidkov - baculum: Do not require using some job resource values to ease using jobdefs - idea proposed by Heitor Faria - baculum: Enlarge boxes with resource count in status director - reported by Sergey Zhidkov - baculum: Fix #2642 add tool to re-assigning volumes from one pool to another - baculum: Fix #2646 apply new user permissions immediately instead of after logging out and logging in - baculum: Fix #2647 PHP warning about headers already sent on storage view page - baculum: Fix #2653 create new resource by copying configuration from other resource - baculum: Fix auto-scrolling in windows with configuration - baculum: Fix component autochanger schemas in OpenAPI documentation - baculum: Fix displaying directive sections in resouce configuration - baculum: Fix displaying documentation for jobdefs directives - baculum: Fix displaying issue in restore browser - reported by Sergey Zhidkov - baculum: Fix error about expected port number when writing component main resource - baculum: Fix legend in job status pie chart on job view page - baculum: Fix loading dashboard page if job status is created but not yet running - baculum: Fix missing texts in translation files - reported by Sergey Zhidkov - baculum: Fix opening job details in job table on main dashboard page - reported by Sergey Zhidkov - baculum: Fix problem with listing directories in restore wizard - reported by Tomasz Swiderski - baculum: Fix remove storage resource if autochanger directive is set - baculum: Fix required fields in jobdefs forms - baculum: Fix running job number on some pages - baculum: Fix table width on schedule list page - baculum: Fix undefined index error if user did not use Bacula configuration function - baculum: Improve checking director in status director API endpoint - baculum: Improve sun icon for displaying job status weather - idea proposed by Heitor Faria - baculum: Improve wizards view and responsivity - baculum: Loading pages optimization - baculum: Make job status pie chart clickable and direct to job history page with filtered results - idea proposed by Bill Arlofski - baculum: Make job status pie chart smaller - idea proposed by Sergey Zhidkov - baculum: Make table texts translatable - reported by Sergey Zhidkov - baculum: Misc visual improvements - baculum: Move all external dependencies to vendor directory - baculum: Move resource monitor and error message box to separate modules - baculum: New advanced schedule settings - baculum: New copy job wizard - baculum: New delete volumes bulk action on volume list page - baculum: New director page with graphical/text status and with configure director resources - baculum: New migrate job wizard - baculum: Reduce free space between interface elements - idea proposed by Sergey Zhidkov - baculum: Reduce size of icons in run job window and on dashboard page - 
baculum: Remove old configure page - baculum: Remove redundant statistics pages - baculum: Reorganize dasboard page - idea proposed by Sergey Zhidkov - baculum: Set responsive priority for job list table - baculum: Unify buttons view - baculum: Unset default API host setting if default API host is no longer assigned to user - baculum: Update API documentation - baculum: Update Polish translations - baculum: Update Portuguese translations - baculum: Update Russian translations - baculum: Visual improvements in interface - rpms: Disable tcp_wrapper for rhel8 in bacula.spec.in - rpms: Do not build with tcp_wrapper on Fedora 31 - rpms: Fix #2599 - bacula-postgresql conflicts with bacula-mysql - rpms: Fix #2615 - Missing bacula-sd-cloud-s3-driver-.so - rpms: Fix libs3 installation path - rpms: Fix mysql devel package dependency for rhel/centos 7 - rpms: Remove tcp_wrappers for cloud-storage rpm Bugs fixed/closed since last release: 2599 2615 2622 2623 2627 2642 2646 2647 2653 2654 2656 7776 7998 8126 Release 11.0.5 03 June 2021 11.0.5 is a minor bug fix release. - Fix compilation - Fix org#2427 About incorrect handling of empty files with Accurate=yes on Windows - Update MySQL update procedure for 5.6 Bugs fixed/closed since last release: 2427 Release 11.0.4 28 May 2021 11.0.4 is a minor bug fix release. - baculum: Update script version - Fix org#2618 Disable fix on bvfs_get_jobids() temporarily - Improve MySQL upgrade procedure Bugs fixed/closed since last release: 2618 Release 11.0.3 21 May 2021 11.0.3 is a minor bug fix release. - Check if char **jobid parameter is NULL before modifying it in bvfs_parse_arg_version() - Enhance the update_mysql_tables script - Fix compilation of check_bacula.c reported by Dan - Fix org#2442 About the check of the Control Device during startup - Fix org#2500 .bvfs_get_jobids jobid=X must return X in the list - Fix org#2604 About column alignment of 'Terminated Jobs' section - Fix org#2605 About incorrect messages in restore command - Fix stored/Makefile.in to install cloud driver object with cloud targets - Fix various default permissions - baculum: Add autochanger management section and improve few other texts - baculum: Add component action (start/stop/restart) buttons to client and storage pages - baculum: Add example working directory path in API install wizard - baculum: Add new device interface definition to Baculum OpenAPI documentation - baculum: Fix #2592 logout button on Safari web browser - baculum: Fix double device error code number - baculum: Fix opening update slots window reported by Hector Barrera - baculum: Fix sub-tabs on client and on storage pages - baculum: Implement autochanger management - baculum: Implement support for assigning multiple API hosts to one user - baculum: Restore wizard improvements - baculum: Update Portuguese translations - baculum: Update Russian translations - baculum: Update documentation chapter and screenshots - baculum: Use catalog access in changer listall endpoint only if it is configured on API host - docs: Add information about the git branch used with Bacula - docs: Fix #7657 Enhance the FSType description - docs: Fix #7659 About EnhancedWild fileset directive documentation - docs: Fix org#2578 About missing "restore directory=xxx" keyword documentation Bugs fixed/closed since last release: 2442 2500 2578 2592 2604 2605 7657 7659 Release 11.0.2 26 March 2021 11.0.2 is a minor bug fix release. 
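
Several 11.0.x fixes above touch the Bvfs console interface (for example
org#2500, which makes ".bvfs_get_jobids jobid=X" include X in its
result). As a hedged reminder, the typical Bvfs browsing sequence looks
like the following; jobid 123, the paths and the bconsole location are
placeholders:

  #!/bin/sh
  /opt/bacula/bin/bconsole <<END_OF_DATA
  .bvfs_update jobid=123
  .bvfs_get_jobids jobid=123
  .bvfs_lsdirs jobid=123 path=/
  .bvfs_lsfiles jobid=123 path=/etc/
  quit
  END_OF_DATA
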
- Add functions to unittests library - Add support for store_alist_str() in plugin configuration items - Enhance bdelete_and_free() macro - Update baculabackupreport script - Fix #7286 DIR segfault when doing a "dir" command in a restore - Fix #7321 About issue when stopping jobs waiting for resources - Fix #7396 GRANT command error in granting privileges script for MySQL - Fix #7449 About incorrect JSON output with 'TLS Allowed CN' directive - Fix #7451 About deleted files incorrectly kept in Virtual Full - Fix S3 compilation - Fix Verify job issue with offset stream and compressed blocks - Fix bug #2498 - Wrong mode for /etc/logrotate.d/bacula - Fix check_bacula.c to ignore daemon events - Fix possible memory corruption in the label process - Fix reload issue when a Job doesn't have a Pool defined - Possible fix for SD high memory usage problem - Remove suspicious debug line on setdebug() - baculum: Add Craig Holyoak to AUTHORS - baculum: Fix #2597 LDAP login with LDAPS option - baculum: Fix cancel button in new job wizard - baculum: Fix displaying warning messages in messages window - baculum: Fix undefined property error in run job API endpoint if level value is not provided - baculum: Implement API version 2 - baculum: Improve updating asset files after upgrade - baculum: Unify jobs/{jobid}/files endpoint output for detailed and normal modes - baculum: Update Polish translations - baculum: Update Portuguese translations - baculum: Update Russian translations - baculum: Use new APIv2 status client request on job history view page - rpms: Fix bacula.spec for Fedora 31 - rpms: Fix bacula.spec for rhel8 / centos 8 - rpms: Fix missing query.sql - rpms: Update bacula.spec for rhel8 - win32: Fill the version information as CFLAGS - win32: Fix #7373 binaries are tagged with correct resource - win32: Fix error message when labeling volume on Windows SD - win32: Update openssl version to 1.1.1k Bugs fixed/closed since last release: 2498 2597 7286 7321 7373 7396 7449 7451 Release 11.0.1 04 February 2021 11.0.1 is a minor bug fix release. - Add PGSQL detection for macOS and MacPorts. - BEE Backport bacula/src/tools/dbcheck.c - Fix #7079 About a segfault in a copyjob when the fileset is no longer defined - Fix #7168 About incorrect start time displayed for canceled jobs not yet running - Fix #7207 About 'dbcheck -n' not working properly - Fix #7214 Adapt mtx-changer.conf for GNU cpio mt version - Fix #7247 About incorrect variable substitution with the query command - Fix MySQL update procedure with incorrect handling of the FileIndex type - Fix compilation warnings with Solaris Studio - Fix copy/migration job selection - Fix org#2579 About incorrect JSON generated from empty Messages resource - Fix org#2587 Improve btraceback output - Fix org#2588 About incorrect Object/ObjectId type in update_postgresql_tables - Fix update_sqlite3_tables to upgrade from 9.6 to 11.0 - Initialize StartTime in db_create_job_record() - Remove deprecated sbrk in macOS and Windows. - Use PKG_PROG_PKG_CONFIG macro to search for pkg-config. It is cross-compile safe. - alist: Fix for memory overflow access. 
- baculum: Add capability to create filedaemon console and schedule on new resource page
- baculum: Add console messages log API endpoint
- baculum: Add console messages log envelope
- baculum: Add job status weather on job list page
- baculum: Add new icons for job status weather purpose
- baculum: Add to Bvfs lsdirs and lsfiles API endpoints pathid parameter
- baculum: Browse paths in restore browser using pathid
- baculum: Fix #2560 restore wizard display of names encoded in non-UTF encoding
- baculum: Fix finding jobs by filename in restore wizard if filename contains whitespace characters
- baculum: Fix problem with setting hourly schedule - reported by Elias Pereira
- baculum: Remove excanvas.js dependency
- baculum: Update debian files to latest version
- baculum: Update spec files
- win32: Backport signing procedures to 11.0
- win32: Fix #7094 bypass random pwd generation when pwd is provided
- win32: Fix conditional #define's
- win32: Fix #7256 Update Windows version detection with latest versions (Windows 10).

Bugs fixed/closed since last release: 2560 2579 2587 2588 7079 7094 7168 7207 7214 7247 7256

----------------------------------------------------------------
Release 11.0.0 12 December 2020

11.0.0 is a major release.

New Features:
-------------
- New catalog format
- Automatic TLS PSK encrypted communication
- Support for Client behind NAT
- Continuous Data Protection (CDP) Plugin
- Global Director Autoprune flag
- Events/Audit features
- New Baculum features
- Support for GPFS

Misc:
-----
- New Prune Command Option
- Dynamic Client Address Directive
- Ability to disable Volume Retention
- Ask to mount/create volume when the disk space is low
- Simplification of the Windows FileSet with File=/
- Use of QT5 for Bat on Windows
- Support for Windows files with non-UTF16 names
- Windows Snapshot management has been improved
- Support for the system.cifs_acl extended attribute backup with Linux CIFS
- Built-in Client Scheduler
- Reload command improvements (Keep IP address, Maximum Concurrent value, ...)
- Support for GLOB pattern in Console ACL directives
- Faster CRC32 algorithm

Please see the New Features chapter of the manual for documentation on the new features.

----------------------------------------------------------------
Release Notes for Bacula 9.6.7

Release 9.6.7 10Dec20
This is a minor bug fix release that corrects, among other things, the MySQL/MariaDB schemas. This is the last release of the 9.6.x series.
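
One of the 11.0.0 Misc items above is the simplification of the Windows
FileSet with File=/. As a hedged illustration (the resource name and
options are placeholders, not a recommended configuration), such a
FileSet can be written as:

  FileSet {
    Name = "WindowsAll"
    Enable VSS = yes
    Include {
      Options {
        signature = MD5
        compression = GZIP
      }
      # With 11.0, "File = /" on a Windows client selects all fixed
      # drives instead of requiring each drive letter to be listed.
      File = /
    }
  }
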
03Dec20 - Restore custom permission of symlink on FreeBSD and MacOS - Fix #2582 bextract is broken for sparse gzip and compressed streams - Fix org#2567 device capabilities overwritten - baculum: Update script version - Fix org#2573 About Syntax error in granting privileges script with MySQL if --with-db-password parameter is used - Fix org#2471 About deleted files are listed as being present in an accurate backup by various sample queries - Fix org#2571 About errors in es_AR.po file - Fix org#2568 About compilation issue on gcc10 - Fix org#2584 About inconsitancies in the make_mysql_tables script - baculum: Fix English text noticed by Peter McDonald - baculum: Update Russian translations - baculum: Update Portuguese translations - baculum: Implement graphical status storage - baculum: Add gauge.js library - baculum: Add json output option to show storages and show single storage endpoints - baculum: Add path field to restore wizard to narrow down search file results to specific path - idea proposed by Wanderlei Huttel - baculum: Add path parameter to job files API endpoint - baculum: Fix #2565 status icon overlaps action buttons in API wizard - baculum: Add Sergey Zhidkov to AUTHORS - baculum: Add Russian translations - baculum: Fix access to job resources if no job assigned to user - baculum: Update Portuguese translations - baculum: Ajax queue improvement in framework - baculum: Fix renaming config resources - baculum: Add graphs to job view page - baculum: Fix using offset in job file list query with MySQL catalog database - baculum: Show more detailed job file list - idea proposed by Wanderlei Huttel - baculum: Rework job file list API endpoint - baculum: Add searching jobs by filename in restore wizard - idea proposed by Wanderlei Huttel - baculum: Add job files API endpoint - Add code to retry a MySQL query after a deadlock - baculum: Update Portuguese translations - baculum: Add default days and limit values to status schedules API endpoint - baculum: Fix displaying multitextbox and multicombobox directive controls - baculum: Fix date filter on status schedule page - baculum: Fix #2570 fileset excludes settings with PHP 7.4 - baculum: New features and improvements to multi-user interface and restricted access ================================================================== Release 9.6.6 20Sep20 This is a minor bug fix release. Unless there is some new major bug found, this will be the last of the 9.6.x releases. The next release major release (a really big one) is currently scheduled for December. More about this in a subsequent status report ... 18Sep20 - baculum: Fix displaying date and time on volume view page - baculum: Fix #2564 changing volume status to Read-Only - baculum: Fix saving multicombobox control values - baculum: Fix multicombobox and multitextbox controls to work with PHP 7.4 - baculum: Fix #2562 displaying date and time in tables on Safari - Update copyright year - Clarify SD vbackup device error message - Fix MT2554 :update upload_cache call in dircmd.c - zero malloced memory when SMARTALLOC not enabled - baculum: Fix #2558 saving day or day range in schedule resource - reported by Jose Alberto - Fix MT2554 :upgrade upload_cache interface. 
- Eliminate compiler errors when smartalloc is turned off - baculum: Fix date formatter to work with PHP 7.4 - baculum: Fix configure Bacula hosts page to work with PHP 7.4 - baculum: Update Portuguese translations - baculum: Change colours in table headers and borders - baculum: Add remove runscript button - Fix #6366 About an issue with verify job level=DiskToCatalog - baculum: Close modal windows on pressing ESC key - baculum: Fix logout button on authorization failed page - baculum: Add local user authentication method support - baculum: Add date and time formatters - idea proposed by Wanderlei Huttel - baculum: Enable re-running jobs in async way and visual improvements - baculum: Change cursor over selectable table - Fix build script copyright detection - release: add code to detect Bacula Systems copyrights and fail release - Fix compilation of bsnapshot on Fedora Bugs fixed/closed since last release: 2558 6366 =================================================================== Release 9.6.4 This is a major security and bug fix release. We suggest everyone to upgrade as soon as possible. One significant improvement is for the AWS S3 cloud driver. First the code base has been brought much closer to the Enterprise version (still a long ways to go). Second is that the community code now uses the latest version of libs3 as maintained by Bacula Systems. The libs3 code is available as a tar file for Bacula version 9.6.4 at: www.bacula.org/downloads/libs3-20200523.tar.gz Note: Version 9.6.4 must be compiled with the above libs3 version or later. To build libs3: - Remove any libs3 package loaded by your OS - Download above link - tar xvfz libs3-20200523.tar.gz - cd libs3-20200523 - make # should have no errors - sudo make install Then when you do your Bacula ./configure it should automatically detect and use the libs3. The output from the ./configure will show whether or not libs3 was found during the configuration. E.g. S3 support: yes in the output from ./configure. 08Jun20 - Add configure variables to baculabackupreport. Patch from bug #2538 - Fix orphaned buffers in cloud by adding truncate argument to end_of_job() - Improve clarity of Cloud part mismatches and make it an INFO message rather than a WARNING since it corrects the catalog. - Small trivial change to check_parts - Backport more Enterprise cloud parts changes - Backport cloud upload code from Enterprise - Update s3_driver.c to new libs3 API calling sequence - Fix tray-monitor installation - Recompile ./configure - Add ./configure code to check for and enable/disable S3 support - win32: Fix org#2547 About possible NULL pointer dereference in get_memory_info - Ensure cloud driver loaded when listing cloud volumes - baculum: Request #2546 support for full restore when file records for backup job are pruned - baculum: Fix problem with authorization error after upgrade - baculum: Add UPGRADE file - baculum: Fix returning value in TStyleDiff - generated notice with PHP 7.4 - baculum: Remove execute bit for framework scripts - baculum: Fix displaying empty column button in table column visibility menu - reported by Wanderlei Huttel - baculum: Update Polish translations - baculum: Update Portuguese translations - baculum: New user management. LDAP support. Role-based access control. 
- Fix new compiler warnings + always use bstrncpy not strncpy to ensure EOS at end of string - Return smartalloc buffers zeroed -- future performance improvement - Improve scanning data/time, fixes bug #2472 - Make ABORT mention segfault to clarify non-bug #2528 - Make reading a short block a warning rather than error - baculum: Fix validators in run job modal window - Remove unused -t option in dbcheck.c -- fixes bug #2453 - Fix bug 2523 -- spurious extra linking - Fix bug #2534 possible double free in error case - Fix possible sscanf overflows - Fix overflow from malicious FD reported by Pasi Saarinen - baculum: Add option to show time in job log - idea proposed by Wanderlei Huttel - baculum: Add show log time parameter to job log endpoint - baculum: Add tip about using table row selection - Fix bug #2525 seg fault when doing estimate with accurate and MD5 - baculum: Fix issues with SELinux support reported by Neil MacGregor - Correct some copyrights - Add Docker plugin rpm spec files Bugs fixed/closed since last release: 2453 2472 2525 2528 2534 2538 2546 2547 =================================================================== Release Notes for Bacula 9.6.3 This is a minor bug fix (mostly fixing incorrect copyrights) to Release-9.6.2. 09Mar20 - Eliminate false error when droping postgres table MAC - Apply Carsten's character set fix for the docs. Many thanks! - Fix logic error in clearing bit on Windows - baculum: Update Portuguese translations - baculum: Update Polish translations - baculum: Add patch to PRADO framework 4.0.1 for supporting PostgreSQL 12 catalog database - baculum: Add support for PostgreSQL 12 catalog database - Enhance failed bpipe to changer error message - Clean up some incorrect copyrights - Correct spelling errors in messages - Add to plugins links - baculum: Add bulk actions for job history and volume tables - baculum: Update DataTables and its plugins - docker: Update copyright headers. - Update BSD copyright on *.conf.in files - docker: Remove unneeded tar binary. - Fix workaround for Sun C++ recommended by Phil Stracchino - baculum: Update Polish translations - baculum: Update Portuguese translations Bugs fixed/closed since last release: None ==================================================== Release 9.6.2 This is a minor new release with several new features and a number of bug fixes. The catalog datbase format remains unchanged from the 9.4.4 release Note: Release-9.6.0 had a build error when using readline, and Release-9.6.1 had an inappropriate file size for the readline history file, so both releases have been withdrawn. Major Baculum New Features: - SELinux support - New graph types - Graphical client status - Graphical running job status - Capability to start, stop and restart components - Support for commands that can take a long time (label, estimate...) - List job files tab on the job history page - Bandwidth limit setting for client and for job - New statistics configuration page - Improvements to responsive interface - Option to show size unit values as decimal or binary bytes - Support for new directives - New Web controls (password, speed, multiple textbox) - New API functions - Job history list on job page Bacula New features: - Docker plugin. Documentation for this plugin is not yet ready, but will be forthcoming within a few weeks. 
- Statistics Collector for Dir, FD, and SD (interface to Graphite) - New Statistics resource Documentation for this feature is in the New Features section of the main manual - Support for MacOS suspend in File daemon - SD SyncOnClose directive in Device resource 26Feb20 - Apply fix for history size from Martin Simmons - Fix missing part of patch 8135b9d21d -- readline truncate fix - baculum: Fix using bconsole with sudo on Fedora if SELinux is enabled - baculum: Add copy, CSV and column visibility buttons to tables - baculum: Add buttons and colvis DataTables plugins - baculum: Add additional values to job and volume API endpoints - docker: Replace realloc_pm() for check_size(). - Update pluglib. - Fix Docker Plugin for accurate backup. - Docker: add baculatar docker image. - Win32: update Windows build to including needed collector files - Get Branch-9.4 ReleaseNotes - Update po POTFILES.in and version - Get ChangeLog from Branch-9.4 - baculum: Update Polish translations for API and Web - baculum: Update Portuguese translations for API and Web - Fix cats-test.c compile - baculum: Upgrade W3.CSS from version 4.10 to 4.13 - Add Docker Plugin for FileDaemon. - Add pluglib fd plugin support utilities. - Redesigning PM management add missing files. - baculum: Fix delete job button visibility - baculum: Add to BVFS lsdirs, lsfiles and versions endpoints new output=raw/json parameter - baculum: Update spec and deb files - baculum: Do not try to switch to new user in API and Web install wizards - baculum: Improve Polish translations - baculum: Fix maximum length for basic auth password fields - baculum: Start storing basic auth passwords in APR MD5 format - baculum: Fix button to reopen change user password setting - baculum: Fix sorting clients in fileset browser window - baculum: Improve showing create new resource messages - baculum: Improve texts in restore wizard - baculum: Use new icons in restore and fileset browsers - baculum: Fix PHP error on storage view page with autochanger comming from host different than main (reported by Jose Alberto) - baculum: Fix internal error on restore page if MySQL catalog database is used - baculum: Avoid doing redundant API calls by job monitor - baculum: Add support to restore from copy jobs - baculum: Add API changes to support restore from copy jobs - baculum: Add script for checking if Baculum files are installed correctly - baculum: Refactor authentication, authorization and exceptions - Fix #5708 about "cancel all" command issue - baculum: Bandwidth limit window improvements - baculum: Add auto-refreshing job tables - baculum: Add progress bar to restore jobs on status client - baculum: Add job name parameter to monitor - baculum: Add table filters - baculum: Add restore progress bar - baculum: In job status avoid calling client if job isn't running - Fix compilation on Solaris - baculum: Extend max length for client secret field to maximum allowed secret size - baculum: Improvements to messages resource - baculum: Implement swipe event and use it to hide main menu on mobile devices - baculum: Group directives into sections in config directive list - baculum: Fix problem with shaking spinning icons on Firefox - baculum: Add confirm window to delete job action - baculum: Add job history list on job page - baculum: Fix loading first job setting in run job window on window open - baculum: Add in API wizard example sudo configuration for bconsole and JSON tools - baculum: Make resource config page buttons always available - baculum: Fix language setting 
in config wizard during first run - baculum: Fix refreshing job status only when actions tab is open - baculum: Miscellaneous improvements to schedule configuration - baculum: Extend combobox control to support associative arrays as data source - Fix for #0005391: show negative values. - Fix #5546 about incorrect level for job resumed - baculum: Move refresh job button and log order button to job log tab - baculum: Set job values on job selection in run job window - baculum: Turn application mode to normal - baculum: Add jsmin-php as framework dependency - baculum: Fix small issues with old not using svg icons - Fix error on .ls when plugin name without ':' - baculum: Add list job files to job history view page - baculum: Add list job files API endpoint - tray-monitor: fix potential memory corruption - Fix #5461 #5513 #4717 About WroteVol non-zero message - Update Docker Plugin build procedure. - docker: Add Docker Plugin DKID unittest. - Add Docker Plugin regression tests - more files. - Add Docker Plugin regression tests. - build: Add Makefile update to fd plugin builds. - baculum: Fix PHP error on running job status page and client status page - baculum: Fix support for UTF-8 currency symbols in paths - reported by Frédéric F. - Fix running job count in status output - baculum: Fix directing to default API page when API settings has not been created yet - baculum: Set default refresh interval for status client - baculum: Improve selecting storage value in run job window - baculum: Fix saving to config empty runscript subresources - baculum: Add graphical running job status on running job page - baculum: Add progress bars to backup jobs displaying on status client page - baculum: Fix displaying SqlQuery value in selection type job directive - baculum: Add to client status modal window to set bandwidth limit for job - baculum: Add API endpoint to set job bandwidth limit - baculum: Add to client status modal window to set bandwidth limit for client - baculum: Add API endpoint to set client bandwidth limit - baculum: Add UnitType and AllowRemove parameters to speed type directive control - baculum: Don not use data description and data dependencies modules initialization - baculum: Improve checking dependencies - baculum: Fix running job twice when job is running by run job window - baculum: Changes to proper working list type controls on page load - baculum: Change Font Awesome SVG icons into web fonts icons - Add new psk-enable-test - baculum: Fix removing fileset options subresources - baculum: Make loading configuration controls easier - baculum: Enable adding multiple file set file browser controls on the same page - baculum: Fix using nested directives in repeater control - baculum: Misc changes to keep backward compatibility in API endpoints - baculum: Fix scrolling to new runscript subresource on add new runscript action - baculum: Change way of working directive renderer - baculum: Split API panel, oauth, api and page parts into separate services - baculum: Add component start/stop/restart actions to Web - baculum: Update API SELinux module for new component action policies - baculum: Implement component start/stop/restart actions in API - baculum: Show error message on status client request if client is not available - baculum: Implement graphical status client - baculum: Update Font Awesome icons to version 5.9.0 - baculum: Add output=raw/json parameter to show client API endpoint - baculum: Add status client API endpoint - baculum: Remove deprecated and not used directives - 
baculum: On restore wizard job list add link to job history for specific jobid - baculum: Fix showing job size value on jobs on volume page - baculum: Minor fixes and improvements in fileset file browser - baculum: Allow dollar character in bconsole commands (used for paths) - baculum: Enable restoring data from locations included in paths defined in FileSet - baculum: Make 'Run job' and 'Perform restore' buttons clickable at whole theirs area - baculum: Miscellaneous improvements to use restore file browsers on different screen sizes and mobile devices - baculum: Fix removing items from selected file browser in restore wizard - baculum: Unify config module menus look - baculum: Create multiple combobox control and use it in console ACL directives - baculum: Add timeout to first refresh job log to have log output earlier - baculum: Do not show unknown job level for admin job type - baculum: Add new directives and new resources support - baculum: Fix setting selected items in list directive controls - baculum: Add version number to API and Web - idea proposed by Wanderlei Huttel - baculum: Disable emulation prepared statements for MySQL to solve problem getting every value as string - baculum: Apply framework patch that fixes SQL error when native MySQL prepare statements are used - baculum: Add new graph types - baculum: Add statistics resource support - baculum: Add option to show size unit values as decimal or binary bytes - Add a Bacula statistics collection routine. - Fix incorrect ASSERTD(). - baculum: Fix showing unit for size and time period directive types - baculum: Add missing speed type control to support speed type directives - reported by Wanderlei Huttel - Fix comment - baculum: Fix #2477 escaping backslashes in config in text directive types - Fix bug 2476 -- copy/migration jobs fail when waiting for a new Volume - Add copy-jobspan-label-wait-test to do_all - Add two new regression tests submitted by Martin Simmons for bug 2476 - Add bsmtp Message-Id/MIME-Version/Content-Type headers. - baculum: Fix #2474 error 404 if document root path uses link with ending slash - fix suggested by vondi - Add Michael Narigon as author for Mac heap implementation - Remove bacula32.def and bacula64.def. - Redesigning PM management and add support for macOS. 
- baculum: Add password field control and use it for password directives - baculum: Add SELinux modules for Web and API - baculum: Remove php database extensions dependency from web requirements - Add tests/restart-jobmedia-test to do_all - baculum: Update API documentation to job estimate endpoint - baculum: Use in run job window estimate command in background to avoid HTTP timeout - baculum: Move running job estimation to background in API part - baculum: Prevent selecting in restore file browser directories placed in locations that are outside paths defined in fileset - reported by Wanderlei Huttel - baculum: Fix removing path items selected to restore in restore wizard - baculum: Fix clearing restore path field after selecting backup in restore wizard - baculum: Update API documentation - baculum: Add API endpoints to update slots with and without barcodes and use them in on web interface side - baculum: Use on web interface side new API endpoints to label volume with and without barcodes - baculum: Add API endpoints to label volume with and without barcodes - baculum: Update run job API endpoint in API documentation - baculum: Request #2469 add start and cancel buttons on job history list page - baculum: Add filesetid parameter to run job API endpoint - baculum: Set column visibility priorities for status schedule tables in responsive mode - baculum: Add status schedule endpoint to API documentation - baculum: Fix showing graphs if exists finished job with empty start time value - baculum: Fix TPhpFatalErrorException exception on job view page with PHP version lower than 5.5 - baculum: Extend log parser to support restore client, job name and volume names - Rework fsync patch for win32 - Fix Windows SD compilation - Add Cython detection - Add SyncOnClose Storage Device directive - Add db_get_jobmedia_record() function - Check JobMedia validity after an incomplete job Bugs fixed/closed since last release: 0005391 2469 2474 2477 5461 5546 5708 ======================================================================== Release 9.4.4 This is a bug fix release to 9.4.3. It includes some fixes that fix bad data records in Copy/Migration jobs or problems doing restores of Copy/Migration jobs. 28May19 - rpm: Fix mysql dependency for bacula-postgresql - Fix bug 2476 -- copy/migration jobs fail when waiting for a new Volume - Add copy-jobspan-label-wait-test to do_all - Add two new regression tests submitted by Martin Simmons for bug 2476 - Remove bacula32.def and bacula64.def. - Add Michael Narigon as author for Mac heap implementation - Add tests/restart-jobmedia-test to do_all - Allow to hangup/blowup inside a file for tests slash - fix suggested by vondi - Add db_get_jobmedia_record() function - Check JobMedia validity after an incomplete job - baculum: Fix #2477 escaping backslashes in config in text directive types - baculum: Fix #2474 error 404 if document root path uses link with ending - baculum: Remove php database extensions dependency from web requirements - baculum: Fix removing path items selected to restore in restore wizard - baculum: Fix clearing restore path field after selecting backup in restore wizard - baculum: Fix TPhpFatalErrorException exception on job view page with PHP version lower than 5.5 Bugs fixed/closed since last release: 2474 2476 2477 ======================================================================== Release 9.4.3 This is a bug fix release for version 9.4.2. It includes a number of bug fixes and patches. 
Baculum: there have been significant additions and changes to Baculum. If you want a web gui please check it out. S3 driver: If you are trying to build the S3 drivers, please remember to use the community supplied (from Bacula Enterprise) version of libs3.so found at: https://www.bacula.org/downloads/libs3-20181010.tar.gz As usual the binaries that correspond to this release will follow in a week or two. If there are no additional major bugs, this will be the last of the 9.4.x releases. The next release will have a number of new features, and will require a major database upgrade (don't worry it will be easy -- just run update_bacula_tables) 02May19 - Fix Window bpipe-fd strncpy programming error - Change mysql my_bool to bool as it was removed from mysql - Improve assert message - examples: move backup-to-cdwriter.txt to move-backup-to-usb.txt - fix memory leak in DIR for copy-job - Skip empty lines when generating the FileSet from a command - Fix creation of bad JobMedia records in Incomplete Job - Add messages for Incomplete Jobs - Fix misplaced cancel check reported by Alain - Change round() to bround() to avoid library definition conflict - rpms: Fix bacula-cloud spec file - rpms: Add bacula-cloud spec file - rpms: Add missing isworm script - Use more appropriate computation for VolIndex when creating restore .bsr - Fix Daemon message "Message repeated X times" count - Fix Carsten's names - Make diff.pl adapt to different install locations - Fix #4598 Display JobIds used in the restore job log - Add smartalloc function to print the owner of a buffer - Avoid to use the same variable name for two different things in the cmd_parser class - Fix #4433 about 'UPDATE File SET MD5='...' WHERE FileId=0' error when using SpoolAttributes=no - Fix MaxVolumeBytes accounting after a mount request - Fix verify volume jobs with sparse files - Fix small memory leak with Console runscripts - Add 'prune jobs/files all' command - fix #4383 Sometime SD hangs when TLS and DEDUP are used together - baculum: Add status schedule page - baculum: Add status schedule API endpoint - baculum: Fix schedule directives setting - baculum: Update Portuguese translation file - baculum: Fix #2466 add plugin directive support in fileset resource - baculum: Fix updating whole Bacula config at once - baculum: Fix showing validation error if new config is incorrect - baculum: Fix setting multiple config resources at once - baculum: Fix renaming resources - baculum: Add links to resources in job log output - baculum: Update example web server config files and spec file - baculum: Fix list type directives on configure hosts page - baculum: Fix showing messages resource configuration - baculum: Add parent node property to directive list types - baculum: Add capability to define multiple drivetype and fstype directives in fileset resource - baculum: Fix showing runscript subresource on job pages - baculum: Update new texts in Portuguese translation file - baculum: Add missing texts to translation files reported by Wanderlei Huttel - baculum: Update Portuguese translations - baculum: Fix showing schedule resource configuration on job view and job history view pages - baculum: Fix returning one line output from bconsole - baculum: Fix restore wizard error when no fileset available for normal user with limited access - baculum: Add capability to use many ACL Console directives in one config resource - baculum: Add client ls command to openapi file - baculum: Add text box list control to support directives that can be defined 
multiple times in one resource - baculum: Add cancel button to last step new job wizard - baculum: Change PoolType field from text box into combo box - baculum: Fix adding new schedule run directives - baculum: Add support to multiple schedule run directives - baculum: Fix in API part saving job runscript config if RunsWhen=Always - baculum: Fix adding paths to empty include block - baculum: Fix saving job runscript config if RunsWhen=Always - baculum: New create backup job wizard - baculum: Add API endpoint to list files/dirs on client Bugs fixed/closed since last release: 2466 4383 4433 4598 ======================================================================= Release 9.4.2 This is a bug fix release for version 9.4.1. It includes a number of bug fixes and patches. Thanks to the community for your participation. 9 bug reports were closed. This version should fix virtually all the problems found on FreeBSD. If you are trying to build the S3 drivers, please remember to use the community supplied (from Bacula Enterprise) version of libs3.so found at: https://www.bacula.org/downloads/libs3-20181010.tar.gz 04Feb19 - Update Windows .def files - Change create_postgresql_database.in script to be more flexible - Implement eliminate verify records in dbcheck bug #2434 - Enhance verify-voltocat-test to detect comparing deleted files - Fix bug #2452 VerifyToCatalog reports deleted files as being new - Use correct quoting for a character -- fixes previous patch - Recompile configure.in - Apply Carsten's multiarch patch fixes bug #2437 - Apply Carsten's patch for adding CPPFLAGS to tools/gigaslam.c compile - Allow . to terminate sql queries prompts - baculum: Update Baculum API OpenAPI documentation - Fix rwlock_test unittest bug #2449 Only call thr_setconcurrency if it's available. Fix order of linking and installation. - FixFix spelling errors found by lintian by Carston in bug #2436 - Apply chmods from Leo in bug #2445 - Add license files LICENSE and LICENSE-FOSS to the regression directory - Display daemon pid in .apiV2 status output - Attempt to ensure that ctest job output gets uploaded - Apply varargs patch from Martin for bug 2443 - Apply recv() hide patch from Martin - Fix lz4.c register compilation from bug #2443 - dbcheck: Improve error message when trying to prune Path records with BVFS is used. - Update cdash for version 9.4 - Fix bug #2448 bregex and bwild do not accept -l command line option - Partial update copyright year - Fix struct transfer_manager to be class transfer_manager - Print Device xxx requested by DIR disabled only if verbose is enabled in SD - Add migrate-job-no-resource-test to all-disk-tests - Remove unused berrno call + return - Remove mention of Beta release from ReleaseNotes - Fix #3225 about Migration issue when the Job resource is no longer defined - baculum: Fix restore paths with apostrophe - baculum: Fix data level - Change endblock edit to unsigned -- suggested by Martin Simmons - Update DEPKGS_VERSION - baculum: Adapt Apache configs to version 2.4 Bugs fixed/closed since last release: 2434 2436 2437 2443 2445 2448 2449 2452 3225 ==================================================================== Release 9.4.1 This is a minor bug fix release for 9.4.0. It should fix a few of the warning messages, but not all, on FreeBSD and Solaris. More importantly The ./configure process now properly detects that libs3 is installed on your system. If you do not want to use the Amazon S3 driver, this update is not required. 
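As a rough illustration, a build against the community libs3 on a Linux host might look like the sketch below. This is only an approximation: the libs3 build and install commands are assumptions (check the README shipped inside the tarball), and installation paths vary by distribution.

   # Fetch and build the community libs3 (build/install steps are assumptions;
   # see the README inside the tarball for the authoritative procedure).
   wget https://www.bacula.org/downloads/libs3-20181010.tar.gz
   tar xzf libs3-20181010.tar.gz
   (cd libs3-* && make && sudo make install && sudo ldconfig)

   # With libs3.so installed where the linker can find it, ./configure in
   # 9.4.1 and later should detect it automatically and enable the S3 driver.
   cd bacula-9.4.1
   ./configure
   make && sudo make install

If configure does not report S3 support, the most likely causes are a libs3 installed outside the linker search path or missing libs3 development headers.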
In addition to this release, I have posted the current source code with patches for libs3 to bacula.org. This package is needed if you wish to build the S3 driver. You may download it from the following location: https://www.bacula.org/downloads/libs3-20181010.tar.gz 21Dec18 - Remove register attribute on variables as it is not supported by newer C++ compilers - Fix regression from 9.2 when backporting Enterprise code in bsock code - Add missing default flag so that configure looks for libs3 ===================================================================== Release 9.4.0 This is a major release comprised of more than 13,000 lines of differences since version 9.2.2. It has updates to Baculum and a small number of bug fixes and backports from Bacula Systems Enterprise since version 9.2.2, but primarily it has two new features ... The main new feature is the addition of support for using Amazon S3 (and other *identical* S3 providers), and WORM tape cassettes. Note: Azure, Oracle S3, and Google S3 are not compatible with Amazon S3. 16Dec18 - Add copyright and correct name on stop-restart-test - Fix #4449 about an incorrect pool selected with the restart command - Fix #4386 about incorrect permission on directories after a restore with replace=ifnewer - Fix bug #4379 certain fields of Media record not reset after Truncate command - Revert "Update bdirjson.c" - Improve volume truncation error messages - Free ids buffer - Update PO files - Initial version and date update - Initial cut of ChangeLog and ReleaseNotes - Add use_dcr_only in cloud_dev.c so that manual truncate works - More Enterprise backports - More Enterprise backports + changes to the backporting - Minor backport from Enterprise + my own changes - Update bdirjson.c - Add pseudo WORM support for vtape - worm: Fix multiple display of the WORM Recycle message - Add first cut cloud drivers - Use bfopen in place of fopen - Fix #3574 Add "clients" option to the "help list" output - Add makedir() in fd_common.h - Add bfile is_plugin_data() API - Fix issue between FO_PORTABLE and FO_PORTABLE_DATA to api - Fix NOATTR detection - Implement worm cassette support - Make detection of duplicate M_SECURITY messages work - Remove unused prototype recv(len) - Add new security monitoring test - Implement new message numbers in stored/block.c - Fix incorrectly indicating: malformed message - Fix bugs #2335 and #2349 Volume messages printed many times - Add new test for bug printing many multiple Max Volume jobs= info - Add warning message about failure to update volume info - Improve error messages when JobMedia errors occur - Fix compiler warning due to unused subroutine variable - Fix bug #2334 seg fault when releasing globals - Security: sleep(5) on error + aggregating identical messages - Update sellist unittests. - Update unittests for lockmgr.c and fix memory leak. - Update unittests for ConfigFile/ini.c. - Update 'rm -f' for libtool $(RMF). - Correct libs/Makefile.in separator. - Update htable unittests. - Update sha1 unittests. - Add fnmatch unittests. - Update unit tests and add regression tests for it. - Fix escaping special characters in bvfs restore for sqlite catalog - Add new manual test - baculum: Do not store any main oauth2 client nor main http basic user in api config - Fix tls_bsock_shutdown() compilation when no TLS available. - Fix bsock compilation warning. - Fix bsock compilation problem in *BSD. - Permit negative FileIndex values in the catalog - Fix format string is not a string literal (potentially insecure). 
- baculum: Update Japanese translation files - baculum: Fix availability web config wizard when there is problem with access - baculum: Add new size directive control - baculum: Fix basic auth user setting in API install wizard - baculum: Fix undefined index error on web config wizard page - baculum: Fix #2418 creating or updating new resource - baculum: Fix size unit formatters in restore browser reported by Wanderlei Huttel - baculum: Fix logging output if it is not possible to decode to json - baculum: Improve error handling in web part - baculum: Fix formatted size and time values on the volume details page - baculum: Fix saving logs when an error occurs - baculum: API panel and wizard improvements - baculum: Add name field to api client parameters Bugs fixed/closed since last release: 2334 2335 2418 3574 4379 4386 4449 ====================== Release 9.2.2 This is a minor bug fix release (6,143 lines of diff). The main fixes in this version are: eliminate most messages that are repeatedly printed, eliminate malformed message output, and fix an error when compiling without TLS, ... Note: if you are running MySQL and have not recently executed src/cats/update_bacula_tables, please do so. It will not change your database version but it will fix some potential MySQL problems (for more details see the release notes for version 9.2.1). 06Nov18 - Fix bug #2421 by Adam about quoting Windows paths in CreateChildProcess() - Update po files - Implement new message numbers in stored/block.c - Fix incorrectly indicating: malformed message - Fix bugs #2335 and #2349 Volume messages printed many times - Add new test for bug printing many multiple Max Volume jobs= info - Fix compiler warning due to unused subroutine variable - Fix bug #2334 seg fault when releasing globals - Fix escaping special characters in bvfs restore for sqlite catalog - Fix tls_bsock_shutdown() compilation when no TLS available. - Fix bsock compilation warning. - Fix bsock compilation problem in *BSD. - Add new manual test - rpm: Fix mysql dependency for bacula-postgresql - baculum: Fix basic auth user setting in API install wizard - baculum: Improve error handling in web part - baculum: Fix formatted size and time values on the volume details page - baculum: Fix undefined index error on web config wizard page - baculum: Fix #2418 creating or updating new resource - baculum: Fix size unit formatters in restore browser reported by Wanderlei Huttel - baculum: Do not store any main oauth2 client nor main http basic user in api config - baculum: Update Japanese translation files - baculum: Fix availability web config wizard when there is problem with access to api - baculum: Add new size directive control - baculum: Fix logging output if it is not possible to decode to json - baculum: Fix saving logs when an error occurs - baculum: API panel and wizard improvements - baculum: Add name field to api client parameters Bugs fixed/closed since last release: 2334 2335 2418 2421 ======================================================================= Release 9.2.1 This is a bug fix release. It also contains some refactoring. That said, there are 10,909 lines of diff between release 9.2.0 and this release. One major improvement is that this release should eliminate the persistent problem we have seen with MySQL being unhappy with zero DATETIME fields. If you have problems with that, please simply execute the script update_bacula_tables found in the /src/cats library. 
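For example, a run might look like the sketch below (the paths, database name, and credentials are assumptions that depend on how Bacula was installed; the script may also live in the installed scripts directory rather than the source tree):

   # Optional but prudent: dump the catalog before touching the schema.
   # (Database and user names below are illustrative defaults.)
   mysqldump -u bacula -p bacula > bacula-catalog-before-update.sql

   # Run the updater from the source tree.
   cd bacula-source/src/cats
   ./update_bacula_tables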
It will modify the table default values for DATETIME fields to be friendly to the whims of MySQL and MariaDB. 12Aug18 - baculum: Fix saving directives in messages resource - Refactoring of BSOCK and introducing BSOCKCORE. - baculum: Update API documentation - baculum: Add status endpoint to available scopes endpoints - Make print_ls_output identify delete files more clearly - Backport stored/vbackup.c - baculum: Add status director and status storage endpoints - baculum: Add type and level filters to jobs endpoint - baculum: Add support for .api 2 command in bconsole module - Implement a keepalive on bpipe sockets fixes bug #2347 - Backport bpipe enhancements - Permit catalog to contain negative FileIndexes - Fix bug #2319 wrong port value stored in bsock giving incorrect error messages - baculum: Add to jobs endpoint filtering by client and clientid - Fix bug #2410 bdirjson output incorrect for day greater than 24 - Attempt to avoid MySQL complaints about not allowing zero or empty in DATETIME - Add M_SECURITY when connection is bad + fix bug where invalid probes sent to Dir - baculum: Fix schedule single day value setting - Fix bug #2286 copied jobs always have level=Incremental - baculum: Fix add slot parameter to label command - baculum: Fix restoring backup from deleted clients - baculum: Fix click action on remove config resource button - baculum: Fix framework validation for active list type controls - baculum: Implement ideas from Wanderlei Huttel - Fix bug 2395 problem with man dir - baculum: Fix saving subresources in config - Start work on HAVE_CLIENT_ONLY install - Switch to using /lib/systemd/system to install service files - Install Bacula systemd files in /etc/systemd/system - baculum: Update Portuguese translations - baculum: Fix group most recent backups option in restore wizard for mysql - Fix bug #2404 uninstall systemd service - Fix warning during compilations of mainwin.cpp - baculum: Implement second part ideas and fixes proposed by Wanderlei Huttel - Update catalog update scripts in updatedb directory - Fix bug #2340. Display of db_driver - Add warning messages for bad or old self-signed certificates - baculum: Fix #2403 error while writing diraddress directive in Bacula config - baculum: Implement ideas and fixes proposed by Wanderlei Huttel - baculum: Update Portuguese translations - baculum: Fix pool does not exist error on pool details page - baculum: Fix create directive base method - rpm: Fix MySQL dependency on bacula-postgresql package Bugs fixed/closed since last release: 2410 2389 2286 2319 2340 2347 2357 2403 2404 2405 2395 2392 ===================================================================== Release 9.2.0 This is one of the biggest Bacula release ever made. It has almost 540,000 lines of diff output between Release 9.0.8 and this release. This is a major new release with a new version number. It has been very thoroughly tested, but as always, please backup any previous version and test this version prior to putting it into production. For the most part the changes were contributed to the Bacula project by Bacula Systems SA and myself, but there were a number of other contributors that I thank. Database Update --------------- There are no changes required to the catalog database. Compatibility: -------------- As always, both the Community Director and Storage daemon(s) must be upgraded at the same time. Any File daemon running on the same machine as a Director or Storage daemon must be of the same version. 
Older File Daemons should be compatible with the 9.2.0 Director and Storage daemons. There should be no need to upgrade older File Daemons. 20Jul18 - Separate dequeuing msgs and dequeuing daemon msgs - Replace uint with uint32_t - Reset default status schedule limit to 30 - Comment out use of uint that breaks Windows build - Update win32 .def files - Fix concurrent acquire/release of device - Correct copyright - Fix compiler warning generated by prior commit 1aad2088d21a3 - Backport Enterprise src/findlib - Backport Enterprise src/filed - Backport Enterprise src/lib - Add debug code for bug #2356 - Fix bug #2385 -- compiler bug IMO - fix #3945: Add "ocfs2" to list of filesystems known by "FsType" directive - Backport parts of src/dird to community - Use bstrcmp in place of strcmp - Recompile configure - Update config.guess and config.sub - Fix #3615 about bconsole Socket errors reported in the bacula log file - Fix permissions of mtx-changer.conf - Use /dev/sg0 rather than /dev/sg1 so vtape devices work - Make out of freespace non-fatal for removable devices -- i.e. behaves like tape - Pull latest tls*.pem from BEE - Fix #3854 missing tls library initialization in bdirjson, bfdjson, bsdjson and bbconsjson - Fix bug #2212 where restore jobid=nn file=xxx restores the files twice - Apply patch from Wandlei Huttel to add Run Time and suffix to Restored bytes - Fix bug #2343 where truncate of explicit Volume name truncates non-purged volumes - Fix some file execute permissions. Fixes bug #2389 - Fix license problems in Bug #2382 - Apply patch from Leo to fix bug 2192 - Fix bad placement of jcr->pool reference as pointed out by Martin Simmons - rpm: Add OpenSuse Leap 42.3 - rpm: Update bacula.spec for Fedora 27 - Fix #3824 about incorrect setdebug command description - Fix Solaris 10 compilation error on BXATTR when no linkat(2) found. - win32: Fix backup issue with path > 250 char - Fix #3672 about bdirjson issue with the Autochanger directive - Enable build of Windows 64 bit tray monitor - Fix build of Windows tray-monitor - Some changes to configure.in - Update some old copyrights - Update some old copyrights - Fix showing PkiCipher and PkiDigest values in bfdjson output - Fix buffer overrun at BXATTR_Solaris::os_get_xattr_names. - Bring Branch-9.1 up to date with Branch-9.0 - Fix #3745 update the client SQL record after a reload - Fix 'grep -m' when '-m' option is not available. - Update the build for ACL/XATTR support. - Add some debugging information to bacl_solaris. - Fix backup ACL/XATTR when fatal error and not only error. - Fix Solaris XATTR support on Solaris 11. - Fix compile error on !HAVE_EXTENDED_ACL - Add some debugging messages. - Fix compilation warning on FreeBSD. - Add command to change the pool of a job and the associated volumes - Fix #3593 VirtualFull will select jobs to consolidate using Job name in addition to Client/FileSet - Do not increment the device num_writers if the call to dir_update_volume_info() fails - Add prune option to select volumes from a/all pool(s) - rpm: Add Fedora26-64 platform - Add the RestoreClient directive for Restore job. - Implementaion of .ls command for Plugins. - Use correct SQL table encoding for Postgresql 10.x - Fix Where/Replace parameter displayed in the Restore job summary - use pthread_kill() instead of pthread_cancel() to terminate SD_msg_chan - Recompile configure.in - Recompile configure.in - Correction of my_name_is() function using realpath() - Add a detection of realpath() function to configure. 
- Fix tray-monitor compilation - Use breaddir() in the tray monitor - file_dev.c: replace readdir_r() wit new breaddir() - new breaddir() function to replace readdir_r() + core update - Fix #3098 Add debug tag 'record' for traces about records in the SD - Fix #1826 Add Job Where and Replace variables to the Restore job summary - Remove tests about "NULL Volume name. This shouldn't happen!!!* options to api restore - Port missing RestoreObject Plugin Config code from BEE. - Enhance "status schedule" function to allow multiple job= and client= filters - Add next_name() function to scan string lists - Fix #1170. Enhance "status schedule" command. Display ordered output, add Client and FileSet filters. - bvfs: Add clients= option to .bvfs_get_jobids to handle clusters - Add delete client bconsole command - Fix #2910 about a problem in the "status network" command when the client is not reachable - Fix #1108 Enhance setdebug help command and console completion - baculum: Fix SQL grouping error in restore wizard reported by Rasmus Linden - baculum: Fix cancel button in web config wizard - baculum: Web interface password is no longer stored in settings.conf - baculum: Fix path validator for UTF-8 characters - baculum: Add capability to set maximum numer of jobs visible in tables - baculum: Add prune and purge actions to volume view page - baculum: Fix compatibility with old api for prune and purge actions - baculum: Update Portuguese translations - baculum: Fix catching API exceptions - baculum: Clean up theme Baculum-v1 - baculum: Fix initializing new resource page - baculum: Add button to set job log order - baculum: Add manual loading CSS files with versioning - baculum: Move API panel CSS files to separate directory - baculum: Move Web CSS files to separate directory - baculum: Fix not showing 'gui on' command in bconsole output - baculum: Loading table data performance improvements - baculum: Fix sending path load request by enter key - baculum: Add patch to fix gettext class file in framework - baculum: Add htaccess file to framework directory - baculum: Update rpm and deb templates with apache and lighttpd config files - baculum: Update example api endpoints - baculum: Adapt Web and API to new framework version - baculum: Updated PRADO framework to version 4.0.1 - baculum: Highlight main menu items for subpages - baculum: API v1 documentation as open API file - baculum: Update Web requests form for the new API v1 - baculum: New improved version API v1 - baculum: Fix link to job history page - baculum: Fix previous step button in restore wizard - baculum: Enable debug for first config wizard run - baculum: Fix directing to wizard if application config doesn't exist - baculum: Fix opening configuration tabs bug reported by Heitor Faria - baculum: Set curl connection timeout - baculum: Show error message after connection to api test - baculum: Update LICENSE file - baculum: Solve old browser cache problem for javascript after upgrade - baculum: New redesigned web interface - baculum: Changes in api for the redesigned web interface - baculum: Fix saving boolean values in schedule Run directive - baculum: Add link to go back from job configuration window - baculum: Add new volumes required api endpoint - baculum: Add listbox control and use it for base and device directives - baculum: Fix showing verify job fields in job run configuration window - baculum: Revert back volume pool name in volume list window - baculum: Fix error message about disabled bconsole - baculum: API endpoints code 
refactor - baculum: Add state, number, boolean and id validators - baculum: Return bconsole disabled error if bconsole support isn't enabled - baculum: Remove unused api endpoints - baculum: Fix oauth2 client working in the web part - baculum: Fix auth setting radio buttons alignment - baculum: Enlarge interface height to 100% - baculum: Add more information to cURL error - baculum: Stop using hidden fields to store item identifiers - baculum: Fix redundant loading users portlet - baculum: Add required config fields asterisk mark - baculum: New reworked restore wizard - baculum: Wizards view improvements - baculum: Add restore hardlinks support in api - baculum: Add strip_prefix, add_prefix, add_suffix and regex_where restore options to api restore - baculum: Fix link to job history page - baculum: Fix previous step button in restore wizard - baculum: Enable debug for first config wizard run - baculum: Fix directing to wizard if application config doesn't exist - baculum: Fix opening configuration tabs bug reported by Heitor Faria - baculum: Set curl connection timeout - baculum: Show error message after connection to api test - baculum: Update LICENSE file - baculum: Solve old browser cache problem for javascript after upgrade - baculum: New redesigned web interface - baculum: Changes in api for the redesigned web interface Bugs fixed/closed since last release: 1108 1170 1826 2212 2343 2356 2382 2385 2389 2910 3098 3593 3615 3672 3745 3824 3854 3945 ======================================================================= Release 9.0.8 This is a minor release that fixes a couple of bugs and corrects some copyright notices. 28May18 - Fix bug #2212 where restore jobid=nn file=xxx restores the files twice - Pull regression truncate-test from Branch-9.1 - Apply patch from Wanderlei Huttel to add Run Time and suffix to Restored bytes - Fix bug #2343 where truncate of explicit Volume name truncates non-purged volumes - Fix some file execute permissions. Fixes bug #2389 - Fix license problems in Bug #2382 - Apply patch from Leo to fix bug 2192 - Fix bad placement of jcr->pool reference as pointed out by Martin Simmons - rpm: Add OpenSuse Leap 42.3 - rpm: Update bacula.spec for Fedora 27 - baculum: Fix SQL grouping error in restore wizard reported by Rasmus Linden - Update some old copyrights - baculum: Update Portuguese translations - Remove old Bacula Systems notices Bugs fixed/closed since last release: 2212 2320 2349 2354 2379 2382 2383 2330 2054 2343 2369 2194 2359 2151 2366 2353 2381 2378 ======================================================= Release 9.0.7 This is a significant release because it now has the Windows code reintegrated and updated to work with this version. Other than Baculum updates and the new Windows update, there is no significant change to the other code. If you wish to use the Windows 9.0.7 File daemon binaries with your existing 9.0.x Bacula Director and Storage daemon it should work fine, but this has not been tested. The 64 bit version of the Windows binaries has been installed and very quickly tested; as a consequence, please test it carefully before putting it into production. There seem to be some minor installation errors that are probably related to .conf files. Also the Windows binaries do not yet contain the tray-monitor or the old Exchange plugin. Both currently fail to build. 
18Apr18 - Remove NSIS debug - baculum: Fix opening configuration tabs bug reported by Heitor Faria - Restore win32 dir from Branch-5.2 and update it - Add Phil Stracchino's fix for Qt5 - baculum: Fix saving boolean values in schedule Run directive - rpm: Add Fedora26-64 platform - baculum: Add link to go back from job configuration window - Use correct SQL table encoding for Postgresql 10.x - baculum: Add listbox control and use it for base and device directives - baculum: Fix showing verify job fields in job run configuration window - baculum: Revert back volume pool name in volume list window - baculum: Fix error message about disabled bconsole - baculum: API endpoints code refactor - baculum: Add state, number, boolean and id validators - baculum: Return bconsole disabled error if bconsole support isn't enabled - baculum: Remove unused api endpoints - baculum: Fix oauth2 client working in the web part - baculum: Fix auth setting radio buttons alignement - baculum: Enlarge interface height to 100% - baculum: Add more information to cURL error - baculum: New reworked restore wizard - baculum: Wizards view improvements - baculum: Add restore hardlinks support in api - baculum: Add strip_prefix, add_prefix, add_suffix and regex_where restore options to api restore - Port missing RestoreObject Plugin Config code from BEE. - baculum: Stop using hidden fields to store item identifiers - baculum: Fix redundant loading users portlet - baculum: Add required config fields asterisk mark Bugs fixed/closed since last release: None ============================================================== Release 9.0.6 This is a bug fix and enhancement release. The two major enhancements are support for Qt5 in bat and the tray monitor, and support for OpenSSL-1.1. There were also a number of nice bug fixes. Thanks to the users who supplied patches for the enhancements and bug fixes. They are much appreciated. 
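As a sketch only, picking up the Qt5-capable bat on a Debian-style system could look like this (the package name and configure option are assumptions; adjust them for your distribution and local build options):

   # Qt5 development packages must be present before configure runs
   # (package name is a Debian/Ubuntu assumption).
   sudo apt-get install qtbase5-dev

   # Build Bacula with the GUI console enabled; as of 9.0.6 bat and the
   # tray monitor build against Qt5 (Qt4 still works).
   ./configure --enable-bat
   make && sudo make install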
19Nov17 - Update AUTHORS for recent commits - Remove incorrecly placed openssl-compat.h - Add openssl-compat.h which went in wrong directory - baculum: Add removing single resource - baculum: Add module to check resource dependencies - baculum: Fix saving names with spaces inside schedule Run directive - baculum: Fix saving entire config by api request - Backout vol size tests in previous attempt to fix bug #2349 - Fix compiler warning in previous patch - Apply patches from bugs #2325 and #2326 to fix FIFO bugs - Fix bug #2315 INTEGER misspelled in update_sqlite3_tables.in - Try to fix bug #2349 multiple recycle messages - Add support for items with comma in ini_store_alist_str() - Fix segfault after the reload of an incorrect configuration - Add temporary fix to avoid a deadlock after a reload command on an incorrect configuration - baculum: Throw 404 error if service not known - Fix race condition between setip and the access to CLIENT::address() - Fix #3284 about Client address not reloaded properly - baculum: Use home page url when an error is shown - Fix bug #2346 Dir blocks when max reloads reached - baculum: Send config to api server as json - Remove enterprise code that breaks Mac install -- fixes bug #2351 - Correct FS size calculation for FreeBSD, Solaris, and Windows - baculum: Enable Portuguese language support in makefile - baculum: Fix required directives in schedule resource configuration - baculum: Fix saving messages resource - baculum: Improve slow reloading config resource list - crypto: remove most of OpenSSL initcallbacks for 1.1 - Update ACL/XATTR code and define new ACL/XATTR API for Plugins. - baculum: Fix numeric password setting bug reported by Heitor Faria - crypto: convert EVP_PKEY access and remainings bits for OpenSSL 1.1 - crypto: convert EVP_MD_CTX + EVP_CIPHER_CTX to OpenSSL 1.1 - crypto: Use DEFINE_STACK_OF() - crypto: Add a tiny OpenSSL compat level - crypto: remove support for ancient openssl - fix #3269 obey the user choice of "Are you sure you want to delete X JobIds - Add restore wizard to the tray monitor. - Preparation fixes: remove some warning - Add ASSERTD() to track NULL Volume name error - Add "noautoparent" restore command option to disable the automatic parent directory selection - Make qt-console compatible to Qt5 (Qt4 still work) Bugs fixed/closed since last release: 2315 2325 2346 2349 2351 ====================================================================== Release 9.0.5 This is an important bug fix release. In particular it fixes the cases where Bacula would print a very large number of error messages. Additional backported code from Bacula Enterprise is included as well as updates to the rpm scripts. A number of minor Baculum issues have also been corrected. 01Nov17 - Use if exists on dropping MAC table in postgres. Fixes bug #2314 - Fix bdirjson display of Minutes. 
Fixes bug #2318 - baculum: Set default language if no language set - baculum: Fix language setting in api - baculum: Update generated .mo files for api - baculum: Add missing texts to translations - baculum: Fix add to translation static texts on the api default page - baculum: Fix missing session start - Make verify job log same as other logs -- fixes bug #2328 - Take a more conservative approach for setting killable true - Add extra safety for readdir buffer 31Oct17 - Retab systemd/Makefile.in - Don't require mount for @piddir@ - Use Debian systemd start/stop scripts supplied by Sven Hartge 29Oct17 - Fix bug #2316 add CacheRetention to Pool - Skip tape ioctls on FreeBSD when using a FIFO fixes bug #2324 - Fix bug #2338 to not truncate already truncated volumes - Remove some old C int code and use bool 28Oct17 - Remove unused lib/lz4.c.orig file - Update AUTHORS file - Mark Volume read-only only if no access rights or read-only partition - Add -P daemon option to suppress creating PID file - Fix too big copy to test FD plugin_ctx 26Oct17 - Backport Enterprise code 23Oct17 - When read-only volume found mark it in catalog -- fixes bug #2337 - Make out of space on partition fatal - Fix bug 2323 -- loop exit condition was backward and add error message - Add missing copy-plugin-confs for regress - Fix bug reported by jesper@schmitz.computer where bat hangs on FreeBSD 08Oct17 - baculum: Fix reading and writing schedule resource 15Sep17 - baculum: Fix undefined offset error during saving director config - baculum: Fix listing days of week in schedule setting 14Sep17 - baculum: Fix saving schedule run directive value 12Sep17 - rpm: Add missing script baculabackupreport and query.sql for Suse - rpm: Add missing libbacsd* file and tapealert script to Suse rpm spec file - rpm: Add missing libs bbconsjson, bdirjson and bsdjson to Suse rpm spec file - rpm: Add aligned plugin rpm spec file for Suse - rpm: Add bacula-tray-monitor.desktop launcher in scripts directory - rpm: Add Suse Linux ES 12.1 platform 11Sep17 - rpm: Add bacula-tray-monitor.desktop file in script dir Bugs fixed/closed since last release: 2314 2316 2318 2324 2328 2337 2338 ================================================================= Release 9.0.4 This is a minor bug fix release. The main fix in this release is to allow SQLite3 to work. Please note: SQLite3 has been deprecated for a long time. If the community will step forward (as it did in this case) and prepare the appropriate make_sqlite3_tables and update_sqlite3_tables files, we can continue to leave the SQLite3 code in Bacula. However, we strongly urge users to update to MySQL, MariaDB, and PostgreSQL, which are our supported SQL databases. 06Sep17 - Update po files - Fix SQLite3 upgrade tables script fixes bug #2306 - baculum: Fix language setting in config file - Upgrade to latest lz4.c to fix bug #2310 bus error on 64 bit Solaris - Recompile configure.in - Ensure systemd/bacula.conf is created by configure fixes bug #2307 - Fix compiler warning noted in bug #2309 - Fix SQLite3 Version bug #2305 - Remove unused variable to eliminate compiler warning - Recompile configure.in - Fix #2925 Do not try to stop non backup jobs (virtualfull, copy, migration, restore, etc...) 
- baculum: Fix broken symbolic links for lang files - don't use add_event() when flag "l" is not set - core: bwlimit measure bandwidth - core: bwlimit handle backlog and allow burst - Do not purge running jobs in autoprune Bugs fixed/closed since last release: 2305 2306 2307 2309 2310 2925 ================================================================== Release 9.0.3 This is a minor bug fix release. 08Aug17 - baculum: Fix access denied error on api install wizard page - baculum: Remove assigning to api host when user is deleted - baculum: Fix empty admin setting - baculum: Add ability to assign host to specific user - baculum: Fix bconsole test connection for new api host that works with new director - baculum: Fix sqlite db support - Fix bug #2301 Solaris Available space incorrectly reported by turning off the output for Solaris - Fix bug #2300 mount/unmount/release of single tape drive does not work - baculum: Fix bconsole connection test in config wizard - baculum: Fix writing config for schedule and message names with space - bpipe: Fix compiler warning - baculum: Fix drag & drop file version elements - baculum: Add fileset info endpoint and use it in restore wizard - baculum: Use client name instead of clientid and start using fileset to prepare restore tree - baculum: Remove fileset parameter from run restore - baculum: Fix lstat regex pattern - baculum: Get the most recent jobs by client and fileset or by clientid and filesetid - Fix: bug #3048: jobs are stuck in endless loop in reserve.c - Add total time to test.out file - baculum: Add restore job selection in restore job wizard - Enhance verify job report from bug 2249 Bugs fixed/closed since last release: 2300 2301 3048 =============================================================== Release 9.0.2 This is a minor bug fix release, but a few of the bugs are important. The main items fixed are: - PostgreSQL should now work with PostgreSQL versions prior to 9.0 Note: the SSL connection feature added in Bacula 9.0 is not available on PostgreSQL servers older than 9.0 (it needs the new connection API). - The issues with MariaDB (reconnect variable) are now fixed - The problem of the btape "test" command finding a wrong number of files in the append test was a bug. It is now fixed. It is unlikely that it affected anything but btape. - The bacula-tray-monitor.desktop file is released in the scripts directory. - We recommend that you build with libz and lzo library support (the developer packages must be installed when building, and the shared object libraries must be installed at run time). However we have modified the code so that Bacula *should* build and run with either or both libz or lzo absent. 23Jul17 - Use Bacula in place of Libz variables so we can build with/without libz and lzo - Apply ideas from bug #2255 prettier status slots output - Configure and install bacula-tray-monitor.desktop - Fix btape test which counted files incorrectly on EOT - Fix bug #2296 where Bacula would not compile with postgres 8 or older - Fix bug #2294 Bacula does not build with MariaDB 10.2 - baculum: Fix multiple directors support - baculum: Fix showing errors from the API Bugs fixed/closed since last release: 2255 2294 2296 ================================================================== Release 9.0.1 12Jul17: This is a minor bug fix release made mainly to include the new tray-monitor files that were omitted. The tray-monitor now builds and runs at least on Ubuntu Linux. 
12Jul17 - Remove two incorrect trailing commas in bsock.h - Fix bug #2293 bad big endian detection in lz4.c - Add new tray-monitor files that were omitted in the backport from Enterprise - bvfs: Do not insert deleted directories in PathVisibility table - Fix compilation for Debian Stretch with GCC 6.3 Bugs fixed/closed since last release: 2293 ======== This is either the biggest Bacula release ever made or one of the biggest ones. Even without the new Aligned Volumes source code, which is substantial, there are over 400,000 lines of diff output between Release 7.4.7 and the release of 9.0.0 This is a major new release with a new version number. It has been very thoroughly tested, but as always, please backup any previous version and test this version prior to putting it into production. For the most part the changes were contributed to the Bacula project by Bacula Systems SA and myself, but there were a number of other contributors that I thank. Database Update --------------- This version of Bacula requires a database update. So either you or the installation process must apply the update_bacula_tables script. As a precaution, please do a database dump or run your nightly database backup prior to running the update script. Compatibility: -------------- As always, both the Community Director and Storage daemon(s) must be upgraded at the same time. Any File daemon running on the same machine as a Director or Storage daemon must be of the same version. Older File Daemons should be compatible with the 9.0.0 Director and Storage daemons. There should be no need to upgrade older File Daemons. Packagers: --------- There are a good number of new binaries (e.g. bbconsjson, bdirjson, bfdjson, and bsdjson) to install; a new tapealert script file that should be installed; and some new shared objects (e.g. libbacsd). The dvd-handler script has been removed. Note also to run the update_bacula_tables script after having dumped the catalog to bring any existing catalog up to the new version needed for Bacula 9.0.0. New Features: ------------- Please see the New Features chapter of the manual for documentation on the new features. The new features are currently only in the New Features chapter and have not yet been integrated into the main chapters of the manual. New Features (summary): ----------------------- - Major rewrite of the Storage daemon to: put all drivers in class structures, provide better separation of core/driver code, add new drivers (aligned volumes, cloud), simplifies core code, allows loadable device drivers much like plugins but which are better integrated into the SD. - There are a number of new Bacula Systems whitepapers available on www.bacula.org, and a few more will be coming in the next few months. - New unique message id will be added to every message (designed but not yet implemented). Core Features: - Implement a drive usage counter to do round robin drive assignment - Enhance functionality of TapeAlert - Implement a "Perpetual Virtual Full" feature that creates a Virtual Full backup that is updated every day - Increase Director's default "Maximum Concurrent Jobs" setting from 1 to 20 - Add "PluginDirectory" by default in bacula-sd.conf and bacula-fd.conf - Add support for terabytes in sizes. Submitted by Wanderlei Huttel. 
- Restore mtime & atime for symlinks - New "status network" command to test the connection and the bandwidth between a Client and a Storage Daemon - New Tape Alert tracking - Loadable SD device drivers - PostgeSQL SSL connections permitted - JobStatistics improved - DB update required - Autochanger improvements to group Devices - Improved .estimate command - Comm line compression - Separate bxxjson programs for Console, Dir, FD, SD to output .conf contents in Json for easier reading with programs - Read Only storage devices Bconsole Features: - Add "ExpiresIn" field in list and llist media output - Add command to change the priority of a running job (update jobid=xxx priority=yyy) - Add level= and jobtype= parameters to the "list jobs" command - Add option to bconsole to list and select a specific Console - Add shortcut to RunScript console commands. Submitted by Wanderlei Huttel. - Display "IgnoreFileSetChanges" in show fileset command (#2107) - Display PrefixLinks in "show job" output - Display permission bits in .bvfs_decode - Display the Comment field in "llist job" command - Add "ActionOnPurge" field to "llist pool" command. Fix #2487 - Add "long" keyword to list command, ie "list long job". This is essentially an alias fo the "llist" command. - Modify the "setbandwidth" limit parameter to accept speed input. ex: limit=10kb/s - Modify the "setbandwidth" limit parameter so that the default is no longer kb/s but b/s. - Do not show disabled resources in selection list - Fix bconsole readline and "dumb" terminal handling of CTRL-C - Add the priority field to the .api 2 job listing output - Improved restricted consoles when accessing catalog. Misc Features: - New Tray Monitor program - Client Initiated Backups - Many performance enhancements - Bandwidth limitation timing improved - Global resource variables are not lost during a reload command - Add -w option to btape to specify a working directory - Enhance bls -D/-F help message - The "list" command now filters the results using the current Console ACLs - The WhereACL is now verified after the restore menu 02Jul17 - Skip verify-data-test if not running Linux - Skip lzo-test if lzo not in Bacula - Remove double define HAVE_LZO in config.h - Add documentation on baculabackupreport to delete_catalog_backup.in - Install baculabackupreport and ignore script without .in - Recompile configure.in - Add Bill's baculabackupreport script - Update po files - Fix error in FreeBSD during maxtime-test - Fix #2853 About character substitution for "virtual full" job level in RunAfterJob - Attempt to fix timing problem with console-dotcmd-test on FreeBSD - Ensure we have a DIR connection in dequeue_messages - Add more debug to regress for FreeBSD failures - Fix #2940 Allow specific Director job code in WriteBootstrap directive - Fix pragma pack to allow lz4.c work on Solaris and other machines - baculum: Fix working logout button - A more correct fix for lz4.c on Solaris 10 - Remove use of #pragma pack in lib/lz4.c for Solaris 10 - Recompile configure from configure.in - Detect Solaris 10 - Fix bug #2289 version 7.9 not compatible with old FDs -- comm compression - Make getmsg.c compatible with old FDs - Use one MAX_BLOCK_SIZE and set to 20M - rpm: Add Fedora 25 build platform - Remove vestiges of crc32_bad -- fixes Solaris build - Fix #2890 about segfault in .status command on Redhat 5 32bit - Add missing semi-colon in bsys.c - baculum: Fix incorrect table name error during restore start - Display the correct address in lockdump for db_lock() - Fix 
getmsg to handle additional forms of Progress messages - baculum: Fix double assets and runtime symbolic links in baculum-web deb package - baculum: Fix missing php-xml dependency in deb metafile - baculum: Improve errors handling in API restore pages - rpm: Remove libbacsd.la for both Red Hat and Suse - rpm: Add missing libs bbconsjson, bdirjson and bsdjson - rpm: Fix libstdc++ version in BAT spec file - Fix some problems noted by clang - baculum: Reorganize run job code - baculum: Reorganize estimate job code - baculum: Make get method definition not obligatory - Make file-span-vol-test portable - Attempt to fix deadlock in FreeBSD maxtime-test - Do not produce error if MySQL database exists in create_mysql_database - rpm: Add missing tapealert script - rpm: Add missing libbacsd - rpm: Remove dvd-handler script - Fix bvfs queries - Use FileId in place of Filename - Revert "Put FilenameId in .bvfs_lsfiles output" - Put FilenameId in .bvfs_lsfiles output - Add more debug in src/cats/bvfs.c - Fix bvfs_lsdirs and bvfs_lsfiles - baculum: Add Japanese language support in deb and rpm packages - Add DirectoryACL directive - baculum: New Baculum API and Baculum Web - Add forking info to LICENSE and LICENSE-FAQ - Minor improvement to error message - Fix race in steal_device_lock shown in truncate-concurrent-test - Apply Marcin's fix for 6th week of the month - Add new truncate test - Retab Makefile.in in platforms/systemd.in - Fix compiler warning - Add FD backwards compatibility - Fix regression minor scripting problems - Fix #2807 about an issue with the show command when using incorrectly JobToVerify directive - Fix #2806 about the director service started before the database with systemd - Update Dart control files - Massive (70,000+ lines) backport of code from Bacula Enterprise 8.8. See next line ... 
- Adapt update_bacula_tables scripts for catalog version 15 - Allow to use Base directive in a JobDefs - Add more debug to the bpipe plugin - Enhance error message when packets are too big - Add '.storage unique' bconsole command - Allow to use ".jobs type=!B" to display specific job type - Add lockdump storage daemon information - Fix #2698 Display loaded driver list in status storage output - Fix autochanger unload message that contains sometime an incorrect volume name - Fix issue with open_bpipe() function that may flush stdio buffer if the command is incorrect - Fix unload tape messages to print correct volume + improve output format - Fix unload/re-load same volume - Fix DIR get unexpected "Connection reset by peer" for FD - Fix #2548 about SQL connection leak with RunScript::Console commands - Fix #2588 about segfault in bdirjson with JobDefs/Base directive - Fix #2593 about incomplete jobs incorrectly rescheduled - Fix #2629 about pool argument not listed in the "help cloud" output - Fix #2632 about VolType not set correctly for Cloud volumes after a label problem - Fix #2640 about a reference to the source directory in query.sql file - Fix bug #2271 where poll interval causes tape mount message to repeat - Fix segfault in bdirjson with incorrect configuration files Bugs fixed/closed since last release: 2271 2548 2563 2567 2588 2593 2602 2624 2625 2627 2629 2632 2638 2640 2646 2698 2520 2559 2561 2582 2806 2807 2890 2289 2890 2853 2940 ====================================================================== ======================================================================= Release Version 7.4.7 This is a minor bug fix release, which hopefully corrects a seg fault on OpenBSD due to the new ACL/XATTR code, and it also fixes most build problems on Solaris 10 as well as EPROTO on OpenBSD. There is one minor new feature that allows you to specify the query item number on the bconsole query command line. 15Mar17 - Permit specifying query item number on bconsole query command line - Fix Solaris 10 problems reported by Phil Stracchino - Fix EPROTO on OpenBSD ===================================================== Release Version 7.4.6 This is a bug fix release, which hopefully corrects a seg fault on OpenBSD due to the new ACL/XATTR code, and it also fixes the large number of tape mount messages that are repeated at 5 minute intervals due to a bug in the poll code. Various small fixes for FreeBSD. Please note, the signature hash files (.sig) for the source code was previously SHA1. For this and future releases we have changed it to be SHA256. 10Mar17 - Fix bug #2271 where poll interval causes tape mount message to repeat - Attempt to fix IPV6 not configured - Possible fix for acl seg fault on OpenBSD where no acl code defined - Change release digest from SHA1 to SHA256 - Fix getnameinfo() for FreeBSD fixes bug #2083 Bugs fixed/closed since last release: 2083 2271 ===================================================== Release version 7.4.5 This is a minor bug fix plus a significant total rewrite of the ACL and XATTR code by Radoslaw Korzeniewski. 07Feb17 - Correct wrong word in message - Remove restriction on using the scratch pool that can cause restore failures - Remove debug code that breaks btape fill - Initialize freespace_mutex fixes bug 2207 - baculum: Update AUTHORS file - baculum: Enable Japanese language on web interface - baculum: Implement Japanese language support - XACL - refactoring an ACL and XATTR codes. 
- Revert "Warn of Storage Daemon version incompatibility if label fails. Bug #2193" - Make another attempt to resolve bug #2176 - Warn of Storage Daemon version incompatibility if label fails. Bug #2193 - Apply patch to list more pool info from bug #2202 - Fix status alignment output reported by Wanderlei Huttel Release version 7.4.4 This is a bug fix release. 20Sep16 - Fix #2085 About director segfault in cram-md5 function - Attempt to fix bug #2237 - Recompile configure.in - Fix systemd installation - If using readline reset terminal at bconsole exit - Fix compilation without SMARTALLOC - Fix #2060 about SQL false error message with "update volume fromallpools" command - Fix spurious MD5 update errors when nothing changed should fix bug #2237 and others - Fix small memory leak with the restart command - baculum: Update language files - Fix #335 Avoid backups going to the scratch pool - systemd: Give 3mins to the bacula-sd service to stop and close the dde - Minor modifications to Ubuntu packaging - Check if the ScratchPool points to the current Pool and print a warning message in such case - Fix #1968 print the ScratchPool name instead of just 'Scratch' - Display PrefixLinks in "show job" output - Add explicit LL to big integers to appease older compilers - Enable the plugin directory for the FileDaemon by default - Allow multiple mailcommand+operatorcommand in Messages. Fixes bug #2222 - Handle NULL pointers in smartdump() and asciidump() - Modify status to include Admin and Restore in Level field -- clearer - Ensure that zero JobMedias are written for labelling - Fix error message about the stream 26 (PLUGIN_NAME) in bextract Bugs fixed/closed since last release: 1968 2060 2085 2222 2237 335 Release version 7.4.3 This is a bug fix release. Most importantly, it fixes the new GCC 6.0 aggressive compiler behavior that elides (deletes) code written by the Bacula developers. There is no benefit to the new GCC agressive optimization and it breaks a lot of programs including Bacula. This problem showed up on ArchLinux and Fedora 24. 17Jul16 - Add LICENSE and LICENSE-FOSS files to the documentation - Add shortcut to RunScript console commands. Submitted by Wanderlei Huttel. Fixes bug #2224 - Fail when multiple mailcommand and other strings are specified in .conf. Fixes bug #2222 - Add support for terabytes in sizes. Submitted by Wanderlei Huttel. Fixes bug #2223 - Add error message for truncate command when actiononpurge not set. Fixes bug #2221 - Fix optimization error with GCC 6.1 - Fix compilation warnings with GCC 6.1 - Explicitly create MySQL user in grant_mysql_privileges.in Bugs fixed/closed since last release: 2221 2222 2223 2224 New feature: - There are two new Director directives that simplify doing console commands rather than using RunScripts. They are ConsoleRunBeforeJob = "console-command" ConsoleRunAfterJob = "console-command" =========================================================== Release version 7.4.2 This is an important bug fix release to version 7.4.1 mainly fixes detection of MySQL 5.7 (as found in Ubuntu 16.04). Certain bug fixes contributed by Bacula Systems. 
06Jul16 - Fix #1926 about wrong duplicate job detection with Copy/Migration and Backup jobs - Recompile configure after db.m4 change - Fix batch insert for MySQL 5.7 - Fix zero level debug output -- now at 100 - Fix #766 about Job logs displayed with unneeded linefeed - Fix #1902 about a segfault with the "cancel inactive" command - Fix bug where MySQL 5.7 is improperly linked on Ubuntu 16.04 Bugs fixed/closed since last release: 1902 1926 766 ================================================= Release version 7.4.1 This is a minor bug fix release to version 7.4.0. Most of the fixes have been kindly contributed by Bacula Systems SA. 31May16 - Fix bug #1849 MySQL does not accept 0 for DATETIME default - Modify the alist object to be reused after a destroy() - baculum: Fix setting invalid timezone value for PHP - Fix compilation for AIX - Fix the restore termination string in the job report to take in account JobErrors and SDErrors - baculum: Show jobs for client - Fix bconsole "llist job=" output - Fix #146 about update volume command line usage - bat: Fix #1066 about bad update pool command - Fix #1653 about make_catalog_backup default user name - baculum: Show jobs stored on volume - Fix update Volume=x Slot=nn when Slot > MaxVols - Set exit code for create_postgresql_database.in - Fix bug #2197 -- build failure with --disable-libtool - Fix bug #2204 -- superfluous END-OF-DATA in update_mysql_tables.in - Convert a Migration job with errors into a Copy job - Remove exporting add_mtab_item -- fixes bug #2198 - Fix possible problem of show multiple resources - Comment out tools/smtp-orig.c as it is for reference only Bugs fixed/closed since last release: 1066 146 1653 1849 2197 2198 2204 ======================= Release version 7.4.0 For the most part the changes were contributed to the Bacula project by Bacula Systems SA. This is a new release with a new version number. It has been very thoroughly tested, but as always, the new features may not always work as expected. The Catalog database format has not changed since version the prior release (7.2.0). Compatibility: -------------- As always, both the Community Director and Storage daemon(s) must be upgraded at the same time. Any File daemon running on the same machine as a Director or Storage daemon must be of the same version. Older File Daemons should be compatible with the 7.4.0 Director and Storage daemons. There should be no need to upgrade older File Daemons. New features and changes: Please see the New Features chapter of the manual for documentation on the new features. The new features are currently only in the New Features chapter and have not yet been integrated into the main chapters of the manual. New Features and changes summary: - Support for KFREEBSD OS - Improved support for Clang - Configure SSL connection to MySQL - New chio-changer-freebase in examples/autochangers New directives in bacula-dir.conf in Catalog{} resource for the MySQL backend (not currently implemented for Postgresql or SQLite). dbsslkey dbsslcert dbsslca dbsslcapath dbsslcipher - examples/autochangers/rc-chio-changer removed - examples/devices/DVD.conf removed - updated copyrights - Add "Expires in" to list and llist volumes - Implement a more efficient Volume selection algorithm between DIR and SD - Implement new list/llist command keywords: order=asc|ascending order=desc|descending limit=nn jobstatus= Client= JobErrors - Implement new bconsole @tall command that outputs input and output to console and terminal. 
Note, this also outputs bconsole input commands. - Implement MaxVirtualFullInterval - Implement VirtualFullPool override - Pool overrides work better - Automatic selection of catalog from client where possible. - Implement VerifyData level type for Verify jobs. More detailed changes: 14Jan16 - Implement MaxVirtualFullInterval - Update AUTHORS - Ensure relabel has latest vol info may fix bug #1412 - Change license as per agreement with FSFE - Apply Carsten's patch that fixes bug #2192 builds on kfreebsd - baculum: Enable Portuguese language on web interface - baculum: Implement Portuguese language support - baculum: Assign Baculum copyright to Kern Sibbald - baculum: Fix sorting in restore by group most recent backups - baculum: Fix restore group most recent backups for MySQL - Fix FD DisableCommands - baculum: Fix to change user password - Add ExpiresIn field in list and llist media output - Fix #1548 about Solaris SIGBUS with accurate mode backup - Backport more Enterprise code to sql_list.c - Add info message of #jobs consolidated in Virtual Full - baculum: Unify user validation - Add HasBase+Comment to llist Jobs - Fix seg fault in btape fixes bug #2180 - Fix slight error in autoprune -- should fix bug #2151 - baculum: Add first unit tests - Fix #1545 about fix in manual_prune.pl script with large number of volumes - Fix false status output. Fixes bug #2103 - Integrate patch into latest version, which fixes bug #1882 - Fix bug #2090 correct detection of GCC - Fix CLANG warning messages -- fixes bug #2090 - Add new chio-changer-freebase from bug #2115 - Applied modified patch from bug#2117 to fix bpipe end of stream - Apply patch from bug #2165 to fix the update sqlite3 script - Fix update MD5 failure bug reported by Peter Keller - baculum: Add dashboard panel - Patch to add MySQL ssl access - Manually apply patch in bug #2156 to allow building on KFreeBSD - Fix bug #2153 with patch submitted by Ana Arruda - baculum: Switch to started job status just after job start - baculum: Add possibility to open configuration windows from URL - Fix restore when storage specified on command line - Fix restore of Windows streams to non-Windows machines - Implement level=Data to the Verify job - Fix #1524 about bextract trace file location - Fix truncate bug free_volume problem - baculum: Remember sort order for data grids - baculum: Improve size formatter precision - baculum: Fix jobs count in job list - baculum: Add jobbytes and jobfiles columns in job list - baculum: Get system timezone for PHP if possible - baculum: Fix restore when a lot of jobids given - baculum: Set default job attributes (level, client, fileset, pool, storage, priority) in Run job panel - Fix truncate race bug #1382 - baculum: Fix update pool action when no volumes in pool - baculum: Split configuration windows into two tabs: actions and console - baculum: Change default elements limit to 500 elements - baculum: Add drive parameter to bconsole release command execution - Fix #1470 Fix setdebug command when all components are selected - baculum: Fix expectation failed error during restore - Add new JOB_DBR field - #ifdef out bpluginfo since it does not compile - Fix #1449 about a FileDaemon segfault with the fstype option - Remove vestiges of rechdr_queue hopefully fixes bug #2180 - Apply bconsole manpage patch from bug #2182 - Apply ppc64el configure detection patch from bug #2183 - Fix #1414 When the FD is down, status dir now prints "is waiting for Client xx-fd" - Implement new options in list command - Add @tall command to 
log both input/output in a log file - Fix #1360 about bextract -t not documented in the man page - Update spec file for latest OSX versions - Fix compilation on MacOS - Improve Jmsg in response(), display SIGNAL number when appropriate - Avoid segfault in dump_block() when the block_len is invalid - Fix #1368 about xattr error not displayed correctly at restore time - Fix bug 2173 QT tray monitor can not be built due to missing files in configure - Move plugin_free() in free_jcr() - Fix bug #2083 -- Fix sockaddr_to_ascii for FreeBSD - Fix fadvise bug found by Robert Heinzmann - Fix compilation without zlib and lzo - Fix compilation error with new fstype_cmp() function - Fix compilation problem with AFS - Fix compilation on Solaris/FreeBSD - Fix segfault in open_bpipe() when the program is empty - Modify find_next_volume_for_append() to not send the same volume twice - Avoid string displayed in restore menu - Do not update state file after a bacula-xxx -t - Fix #804 about misleading message with the purge command - Fix automount feature after a label command - Reinsert tabs in systemd Makefile.in - baculum: Provide LICENSE-FOSS file content in Baculum deb packages (copyright file) - Use Client Catalog resource in get_catalog_resource() if "client" is specified in command line - Fix #1131 about Job::Next Pool resource precedence over the Pool::Next pool directive - Fix #898 truncate volumes larger than 200 bytes Bugs fixed/closed since last release: 1131 1360 1362 1368 1382 1412 1414 1449 1470 1524 1545 1548 1882 2083 2090 2103 2115 2117 2151 2153 2156 2165 2180 2182 2183 2192 804 898 ================================================================ Release version 7.2.0 Bacula code: Total files = 733 Total lines = 303,426 The diff between Bacula 7.0.6 and Bacula 7.2.0 is 254,442 which represents very large change, for the most part contributed to the Bacula project by Bacula Systems SA. This is a major new release with many new features and a number of changes. Please take care to test this code carefully before putting it into production. Although the new features have been tested, they have not run in a production environment. ============== !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! =================== New Catalog format in version 7.2.0 and greater ----------------------------------------------- This release of Bacula uses a new catalog format. We provide a script (update_bacula_tables in bacula/src/cats and in bacula/updatedb) that will update from Bacula 3.x, 5.2, or 7.0 to version 7.2.0 format. The database upgrade is fast and simply. As always we strongly recommand that you make a dump of your database prior to doing the upgrade. NOTE: The upgrade will work only for PostgreSQL and MySQL. Upgrading is not (yet) supported for SQLite3. !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! For packagers, if you change options, naming, and the way we link our shared object files, as at least one of you does, you are creating a situation where the user may not be able to run multiple versions of Bacula on the same machine, which is often very useful, and in addition, you create a configuration that the project cannot properly support. Please note that the documentation has significantly changed. You will need additional packages to build it such as inkscape. Please see the README and README.pct files in the docs directory. The packages come with pre-build English pdf and html files, which are located in the docs/docs/manuals/en/pdf-and-html directory. 
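As a minimal sketch of the dump-then-upgrade sequence recommended in the catalog note above (the catalog name "bacula", the dump location and the invoking user are assumptions; only the advice to dump first and the update_bacula_tables script itself come from these notes):

# Take a dump of the existing catalog before touching it
# (PostgreSQL shown; use mysqldump for a MySQL catalog, SQLite3 upgrades are not supported)
pg_dump bacula > /var/tmp/bacula-catalog-pre-7.2.0.sql

# Then run the supplied update script; the notes above point to copies
# in bacula/src/cats and in bacula/updatedb
./update_bacula_tables

Keeping the dump from the first step makes it possible to restore the catalog and retry should the script fail part way through.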
Packagers: please note that the Bacula LICENSE has changed, it is still AGPLv3 and still open source. A new requirement has been added which requires other projects using the source to keep the acreditations. Packagers: please note that the docs license has changed. It is now licensed: Creative Commons Attribution-ShareAlike 4.0 International This is a common open source license. !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Compatibility: -------------- As always, both the Community Director and Storage daemon(s) must be upgraded at the same time. Any File daemon running on the same machine as a Director or Storage daemon must be of the same version. Older File Daemons should be compatible with the 7.2.0 Director and Storage daemons. There should be no need to upgrade older File Daemons. However, this has not been fully tested yet. Since we expect some problems, please test before putting it into production. New Features: Please see the New Features chapter of the manual for documentation on the new features. The new features are currently only in the New Features chapter and have not yet been integrated into the main chapters of the manual. Also, since there were so many new features, it is possible that a few that previously existed in version 7.0.x are documented a second time in the 7.2.0 new features section. More detailed changes: 12Aug15 - Put back missing close_msg(NULL) to flush daemon messages at job end - Add LICENSE-FOSS and update LICENSE for baculum - Backport from Bacula Enterprise 29Jul15 - Fix max vol size test accidently deleted - Remove gigaslam and grow on uninstall -- from bug report - Revert to Branch-8.3 fd_snapshot.c - Pull more recent changes from Branch-8.2 - Fix bvfs_lsdir pattern parameter setting - Remove CheckList nolonger used - Revert "Use db_lock()/unlock() around JobMedia creation transaction" - Fix #1099 about director crash with rescheduled jobs - Fix #1209 about bat segfault when clicking on Media - Qmsg(M_FATAL) set jcr->JobStatus to JS_FatalError immediately - snapshot: Abort the job by default if a snapshot creation fails - Revert to old SD-FD close session protocol - Remove drive reservation if no Jobs running - Remove filename patch - snapshot: Try to detect LVM when the filesystem is ext3 or XFS - Fix bad debug message in mac_sql.c - Fix restore-multi-session test by incrementing found files only on next file - Add -T description in man pages - Correct incorrect Fatal error message text in bsock - mysql: Add support for multiple instances binary backup in the same fileset - Fix compilation with new debug hook - mysql: Avoid warning with abort_on_job plugin option - Fix compilation after patch "prune volume yes" - Do not print message about retention when using "prune volume yes" command - Fix #536 about Copy/Migration/VF that should not use Client "Maximum Concurrent Jobs" - Fix potential segfault with unused ConfigFile objects - Fix #1108 Enhance setdebug help command and console completion - Add more JCR variables in lockdump procedure - Fix error in update_postgresql_tables.in caused by bad search and replace - Fix #1127 about the repositioning enhancement during restore - Correct try_reposition() return code after a seek() - Add position information in the block structure - Fix a number of acl and xattr bugs + give more understandable variable names - Make btraceback.dbx and .gdb use new sql engine name - Revert most of patch ef57e6c4 and replace with old cats code - Revert useless parts of patch 08d8e2d29 - Revert patch d7f71d2c94a 
and rewrite it using simpler public domain example - Fix batch mode detection for SQLite3 - Revert d9aa76fa and simplify - Revert patch 30388e447fa3 + fix bug #1948 - Use a more appropriate name for the acl context - Use class pointer rather than jcr in src/lib/jcr.c - Revert patch f294b276 - Change B_DB to BDB to correspond to naming convention - Add -T option in bacula-sd to use trace file - Force use of newer TLS protocols - Avoid problem with db_get_job_record() when SchedTime or RealEndTime is NULL - Update our regexec() to support NULL argument - Add function to copy a file in bsys.c - Fix bug 2141 fork before TLS initialization - Update LICENSE-FOSS - Change license on src/lib/crc32.c as agreed with the author, Joakim Tjernlund - Update po - More license updates - Fix compilation - Add read_control command between Plugin/FD and Storage Daemon - Add .bvfs_get_jobs and .bvfs_get_bootstrap functions - Fix compilation for Solaris9 - Fix Makefile.in tabs - Update Windows .def files - More copyright notices - Fix Windows plugin licenses - Change license copyright for updatedb and qt-console/tray-monitor - Change copyright for logwatch - Update more copyrights - Update copyrights in pebuilder - Update plugin licenses - Add copyrights + license to platforms - Update copyrights in po - More license clarifications - One more copyright in src/cats - Update src/cats .in file copyrights - Compute Job "Compression Ratio" using SDJobBytes instead of JobBytes - Get correct attributions for bsmtp.c - Switch from LGPLv3 for scripts to BSD 2-Clause - Fix segfault on dot commands used in RunScript::Console directive - Fix patch c0f0e6c01c7 to optimize retries only for autochangers - Fix #876 about SD reads too far with complex bootstrap - Correct unmount test in dev.c - Add debug JobId in next-vol-test script - Fix patch c59e5da29 to not orphan buffers - Fix bad implementation of enable/disable job,client,schedules + implement enable/disable storage devices - Implement enable/disable schedule and client - Optimize Volume protocol when Volume not InChanger - Do not trash existing record during label of new volume - During accurate restore unstrip as soon as possible - Better handline of no storage device found - Fix #1075 The replace=never flag was not properly handled when combined with database= option in mysql/postgresql plugin - display timestamp in X_msg() in one single pass to avoid double flush() - Update copyrights in scripts directory - Fix bug #1083 RT14512 - configure.in: new HAVE_FCNTL_LOCK detect fcntl() locking capability - Fix #1008 about status storage that displays "Writing" and "Reading" information for the same DCR - Add new %E job code to use non fatal job errors in scripts - Revert to old htable, but add 64 bit hash - Fix possible race condition in smartalloc - Refactor + optimize fstype.c + revert mntent_cache.c/h - snap: Fix small initialization problem with LVM backend - Fix compilation warning in bextract - lock the pid file using fcntl(F_SETLK) - bat: Fix segfault in client view when the Uname field is empty - bat: Fix #1047 about segfaults in Client, Media and Pool view - Revert patch 62ab7eb5 for filed/backup.c - Revert patch 62ab7eb5 for filed/verify.c - Refactor mount/unmount to use class calls - Add return status to DEVICE:close and report error at end of Job - Fix seg fault - fix a Dmsg in match_bsr.c:match_volume() - Fix #861 about bad help command on status schedule - Add new cats header file - Refactor DB engine to be class based - Remove regression cancel_test from 
do_all - Fix invalid .mod command in BAT during restore (bugfix #858) - Use B_ISXDIGIT() in rangescanner - Handle hex numbers in str_to_uint64() - Fix prune-migration-test -- wait in wrong place - fix MA 987 cannot copy/migrate jobs with a Level=VF in the job resource - Fix basejob error caused by patch on bug #965 - Allow to list restore jobs in llist jobid= command - Fix #940 about segfault in bat when doing an "update slots" - Fix #983 about segfault on win32 filedaemon when using bat to monitor the status - Fix #969 about a segfault while doing a cancel of a copy job - Fill errmsg after an error with FETCH query in db_big_sql_query() - Fix #965 about an empty error message after a problem when sending accurate file list - Fix #972 about segfault in show command used with multiple resources - Work bsnapshot for SLES12 and fix issue with ZFS - Fix small memory leak in cancel command with ujobid and job parameters - Ensure that client resource is not freed during setbandwidth command - fix errors in the use of a Mmsg() - Use a specific mutex for auth instead of jcr->mutex - update po - Add missing call to free_jcr() in previous patch - Lock the jcr when using sd_calls_client_bsock variable - Ensure that only one thread can use the auth code in the Storage - Fix #951 about SDCallsClient not used during restore jobs - snapshot: Get the creation date from the zfs list snapshot command - snapshot: Fix small issue with Name parameter in list snapshot - Fix bsnapshot to return status=0 on error - fix a mempool error at SD shutdown - snapshot: Call support() only if the device is in the fileset - snapshot: Avoid double / in path and files when volume is / - Fix segfault with Console runscript introduced by "Stop ua commands if comm line drops" - handle ctrl-C and SIGTERM the same way in SD - Startup scripts return proper exitcode for service restart action - Implement tables configuration - Add ReadBytes to FD status output - Accept 0/1 for @BOOL@ type in ConfigFile module - Set cmd_plugin only in pluginCreateFile if not SKIP/ERROR/CORE - Fix #13680 about systemd message "unknown lvalue" - Stop ua commands if comm line drops - Fix weird compilation problem on rhel5 - Display TLS information in status client/storage - Fix rpms where unix user were not properly defined - update extrajs package in debs/rpm package - Fix segfault with new filesetcmd - snapshot: Reset JobId in Snapshot table when deleting a job - snapshot: Add ability to list snapshots from the FD - snapshot: Add a confirmation message when pruning snapshots - Add RunScript AfterSnapshot event - Fix #431 About upon upgrade, RPMs resets group membership - snapshot: Display bsnapshot error message if possible - Fix jobmedia-bug3 - Set error code in return from run regress script - snapshot: More work on LVM backend and on list/sync commands - snapshot: Add EnableSnapshot directive in fileset - snapshot: Add errmsg and status to SNAPSHOT_DBR - snapshot: Send SnapshotRetention parameter to the Client and work on the prune command - Add bacula-snapshot.spec - Add disabled=yes/no in bsnapshot.conf - Fix #875 about bvfs repeats the same output many times - Revert "Storing the result in a local variable from sql_num_fields saves us a lot of callbacks." 
- Remove passing args to cats driver class initialization - Simplify cats by eliminating the B_DB_PRIV class - Convert more db funcs to class calls - Add Snapshot Engine to bacula core - Change more db calls into class calls - Add files missed in last commit - Convert db_lock/unlock to be called via class - Fix small memory leak - Remove more vestages of Ingres - Fix #843 about "show storage" option missing in the help command output - Use bzip2 for sles dependency - Avoid warning with uninitialized variables - update "help status" - Revert "Small fix to Eric great patch for readline commandcompletion so it also compiles on non gcc compilers." - Separate out definitions into new header - Remove bad restore.h - Revert "Move restore struct defintions to seperate include file. Small change to acl.h and xattr.h to use define inline with other header files." - Revert "Fix MediaView::getSelection" - Bat: ensure sufficient rows to display drives in storage display - new MmsgDx() macro that combine Mmsg(errmsg, fmt, ...) and Dmsg in once - add a ASEERTD() for DEVELOPPER - Fix wrong KiB value - Revert "Fix bug #1934 Wrong values at media list in BAT" - Change bplugin_list to b_plugin_list which is more appropriate - Remove Ingres related unused files - Simplify rwlock coding - Make subroutine names clearer - Back out useless patches - Put back old code prior to excessive edits - Remove over complicated acl/xattr code - Add license to files without any - Fix #805 about nextpool command line argument not properly used - Remove recursion from free_bsr() and free_bsr_item() to handle very large BSR - Avoid segfault in connect_to_file_daemon() when jcr->client is NULL - #776 Volume created in the catalog but not on disk and #464 SD can't read an existing volume - Add schedule to show command tab completion - Make global my_name hold 127 chars - Mark file volumes that are not accessible in Error in mount_next_vol - Fix #743 about bat permission conflict on /opt/bacula/etc - Add copyright to Makefiles - change in lockmgr.c to avoid the report of a memory leak in testls - lib: integrate SHA2 into bacula - Fix #747 about restore problem failing on "Unexpected cryptographic session data stream - Revert previous copyright accidentally changed - Fix btape fill command by removing some debug code in empty_block() - Add Accurate Fileset option "M" to compare ctime/mtime with the save_time like with normal Incremental - Add index on Job(JobTDate) to mysql catalog - Fix bad check on bopen_rsrc return status. 
bug #2106 - Do not stop the storage daemon startup if the File device is not yet accessible - Fix double free in btape - Fix failed mount request in btape fill test - Avoid ASSERT() when using btape with vtape driver - Possible fix for NULL client bug #2105 - Fix compilation of Nagios check_bacula - Add test for restict c99 in autoconf - Allow to use device= option in release/mount/unmount command - Fix #699 about duplicated job name when starting two jobs at the same time - Fix #701 about status schedule missing from tab completion and correct job filter - remove autoconf/configre - Fix #346 Add ipv6 support for Solaris - Fix #692 about compatibility issue with community FD - Fix new match_bsr patch - Fix #588 Improve SD bsr read performance - Fix ownership bug in html-manuals package - Add EFS in the client status flag list - Implement Win EFS Support - Fix QT windows build for 32bit - Add SLES113 to spec files - Add @encode and sp_decode functions for plugins - Fix tls-duplicate-job seg fault + harden pthread_kill() code - Update plugin version to ensure 8.0 will not load 6.6 plugins - Add JobBytes and ReadBytes to llist jobid= output - Rewrite store_drivetype and store_fstype to allow a comma separated list of items - Fix #633 about JobDefs MaximumBandwidth Job inheritance - Fix possible editing truncation due to 32 bit calculations - Remove non-portable -ne in echo - update po - Add Makefile for mssql-fd plugin - Improve error message of open_bpipe() on win32 - Add jobid= parameter in .status dir running command - Add worker states - Pull latest worker files from development branch - Add comment about incorrect scripting - Put Dsm_check() on reasonable debug level - Remove auto-generated tray-monitor.pro.mingwxx file - Display message about MaximumBlockSize default value only if a value was specified - fix solaris : replace be64toh() by unserial_uint64() - update SD <-> SD capabilities exchange - Handle RestoreObjects with Copy/Migration jobs - Add free list to worker class - Fix bad caps with SDcallsClient + debug + fix seg fault on connection error - Implement blowup=nn for FD and hangup+blowup for SD - Correct bat copyright - Change sizeof expressions to be more standard - Remove regress trap that causes sd-sd-test to fail - Dmsg was not handling tag anymore - Fix for SD seg fault while swapping volumes - Make bextract able to handle dedup streams - Remove unused file - Make sure mount_next_read_volume() will cancel the current job - Forbid llist command in runscript - Fix #295 about query file message - Add no_mount_request to DCR - Update Windows .def file - Add spec file for redhat/suse html manual package - Fix bug #2091 bad vtape device definitions - Fix bug #2089 compiler warning - Make sure level is tag free when printing debug message - fix tags in Dmsg - Regenerated configure script - Remove spaces at the end of lines in Bat file - Revert bat.pro.in file - Fix recursive echo bug #2088 - Add new fifo class flist.h/c - Allow to create temp DEVICE from DEVRES - For bat always use g++ - Make selection by Volume Name or MediaId a bit clearer - Optimize Dmsg() with tags by keeping current tags into a separate variable - Make message more understandable ========================================================================= Bugs fixed in this version: 1099 1209 536 1108 1127 876 1075 1083 1008 1047 861 858 965 940 983 969 965 972 951 13680 431 875 843 1934 805 776 743 ================= Old 7.0.x Release ==================================== Release version 7.0.5 This is an 
important bug fix release to version 7.0.4. Since it fixes several major problems. We recommend that everyone upgrade to this version. 28Jul14 - Fix #547 by adding .schedule command - Update AUTHORS - Fix bug #2079 with patch from Robert Oschwald - Fix orphaned file descriptors during errors - Yet another client==NULL - Improve FD and SD cancel - Jim Raney's TLS patch - Fix bug #1679 pool overrides not shown in manual run display - Attempt to avoid client==NULL - Fix for bug #2082 (hopefully) - Fix seg fault in jobq.c - make stop after first error - Increase status schedule days from 500 to 3000 - Remove bad cherry-pick - Fix compiler warning - Allow options create_postgresql_database from patch in bug #2075 by roos - Fix bug #2074 crashes when no conf file present - Set pthread id in jcr at beginning so the job can be canceled. - Fix possible heartbeat interval timing problems - Fix some errors reported by valgrind. May fix the problem with bsmtp command. - Ensure b_sterror() is using the correct size of input buffer - Fix possible seg fault - Fix segfault when trying to stop the bnet_server thread in terminate_stored() - Fix bad link bug #2076 - Fix compilation of bsock.c when TLS is not available - Correct L suffix to be LL - Fix bad copy/migrate data header - On termination shutdown thread server - baculum: Updated README file - baculum: Update English language texts - baculum: Saving auth file for web server HTTP Basic auth - baculum: Added directory for web server logs - baculum: Added example Lighttpd configuration for Baculum and sample web server auth file - Expanded auth error message - baculum: Support for web servers which do not provide direct info about HTTP Basic auth - Fix limit bandwidth calculation - Eliminate strcpy() from bsmtp - Fix for configuring sudo option for bconsole access - Display correct NextPool overrides + use Job NextPool in restore if available - Fix Bacula to work with newer Windows pthreads library - Fix bug #180 ERR=success in btape when tape error Bugs fixed/closed since last release: 1679 180 2074 2075 2076 2079 2082 547 Release version 7.0.4 This is a bug fix release to version 7.0.3. We recommend that everyone upgrade to this version. The main fixes are to make copy/migration to a second SD work, and to cleanup some of the inconsistencies in the cancel command which could confuse the user. 02Jun14 - Better error handling for cancel command - Fix compiler warning + simplify some #ifdefs - Fix copy/migration to second SD - Fix calls to sl.set_string() - Improve sellist code ============================================================= Release version 7.0.3 This is a bug fix release to version 7.0.2. We recommend that everyone using version 7.0.2 upgrade to this version. 
12May14 - Fix error handling in do_alist_prompt - Tighten error condition handling in sellist - Add new cancel test - Update LICENSE and LICENSE-FAQ - Also update autoconf/aclocal.m4 - Reschedule on error caused EndTime to be incorrect -- fixes bug #2029 - Flush console queued job messages -- should fix bug #2054 - Attempt to fix FreeBSD echo/printf, bug #2048 - Update to newer libtool + config.guess - Recompile configure - Apply fix supplied for acl.c in bug #2050 - Fix a SD seg fault that occurs with over committed drives - Clear bvfs cache and set debug options available only for admin - Moved auth params to curl opts - Filtred single results for restricted consoles - Removed unnecessary debug - Changed e-mail address in gettext file - Support for customized and restricted consoles - Misc changes for rpm building (made by Louis) - Updated requirements for Baculum - Apply fix for bug 2049: wrong drive selected - Fix #2047 about bthread_cond_wait_p not declared - Fix Bacula bug #2044 -- fix Makefile for bplugininfo linking - Fix Bacula bug #2046 -- sellist limited to 10000 - Fix Bacula bug #2045 -- multiply defined daemon_event - Fix Bacula bug #2020 overflow in btape -- Andreas Koch Bugs fixed/closed since last release: 2020 2029 2044 2045 2046 2047 2048 2050 2054 =================================================================== Release version 7.0.2 This is a minor update since version 7.0.1 that is mostly cleanup. However, there is one annoying bug concerning shell expansion of config directory names that is fixed, and there is at least one syntax error in building the full docs that appears on some systems that is also fixed. 02Apr14 - Remove more vestiges of libbacpy - Put back @PYTHON@ path in configure - Fix improper string in parser - Remove libbacpy from rpm spec files - Fix linking check_bacula - Fix new SD login in check_bacula - Tweak docs build process Release version 7.0.1 This is a minor update since version 7.0.0 that is mostly cleanup. 31Mar14 - Remove old plugin-test - Update po files - Enable installation of the bpluginfo utility - More tray-monitor updates - Add Simone Caronii to AUTHORS - Align command line switches in manpages. - Apply upgrade to config.guess - Remove bgnome-console and bwx-console leftovers. - Update tray-monitor header also for new bsock calls - Attempt to fix nagios to use new bsock calls - Update tray-monitor to new bsock calls ======================================== Release 7.0.0 Bacula code: Total files = 713 Total lines = 305,722 The diff between Bacula 5.2.13 and Bacula 7.0.0 is 622,577 lines, which represents very large change. This is a major new release with many new features and a number of changes. Please take care to test this code carefully before putting it into production. Although the new features have been tested, they have not run in a production environment. !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! For packagers, if you change options, naming, and the way we link our shared object files, as at least one of you does, you are creating a situation where the user may not be able to run multiple versions of Bacula on the same machine, which is often very useful, and in addition, you create a configuration that the project cannot properly support. Please note that the documentation has significantly changed. You will need additional packages to build it such as inkscape. Please see the README and README.pct files in the docs directory. 
The packages come with pre-build English pdf and html files, which are located in the docs/docs/manuals/en/pdf-and-html directory. Packagers: please note that the Bacula LICENSE has changed, it is still AGPLv3 and still open source. A new requirement has been added which requires other projects using the source to keep the acreditations. Packagers: please note that the docs license has changed. It is now licensed: Creative Commons Attribution-ShareAlike 4.0 International This is a common open source license. !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Compatibility: -------------- As always, both the Director and Storage daemon(s) must be upgraded at the same time. Any File daemon running on the same machine as a Director or Storage daemon must be of the same version. Older File Daemons should be compatible with the 7.0.0 Director and Storage daemons. There should be no need to upgrade older File Daemons. The following are new directives, commands and features: - New Baculum web GUI interface. See the gui/baculum directory. - Directive fdstorageaddress in Client - Directive SD Calls Client in Client - Directive Maximum Bandwidth per Job in Client - Directive FD Storage Address in Storage - Directive Maximum Spawned Jobs in Job - setbandwidth command in bconsole - Progress meter with FD in status dir - LastDay of month in schedule - sixth 6th week in month in schedule - Improvements in bconsole SQL calls - Allow list and ranges in cancel as well as the keyword: all - truncate command in bconsole - prune expired volumes? - New hardlink performance enhancements - restart command - restore optimizespeed=yes|no for hardlinks default yes - PkiCipher and PkiDigest in FD Client item Cipher aes128, aes192, aes256, blowfish Digest md5, sha1, sha256 - Maximum Bandwidth Per Job in FD Client resource - Maximum Bandwidth Per Job in FD Director Resource - .bvfs_decode_lstat - DisableCommand in FD Director resource - DisableCommand in FD Client resource - status scheduled bconsole command with the following options: days=nn (0-500 default 10); limit=nn (0-2000 default 100) time=YYYY-MM-DD HH:MM:SS schedule=xxx job=xxx - NextPool in Run override - Directive NextPool in Job Please see the New Features chapter of the manual for more details. 
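As a short, hedged illustration of a few of the bconsole additions listed above (an installed bconsole on the PATH, the configuration file path, the client name and the numeric values are assumptions; the days= and limit= options of status scheduled and the cancel keyword "all" come from the list, while the exact setbandwidth arguments follow common usage and should be checked against the New Features chapter):

# Drive some of the new commands through a non-interactive bconsole session
bconsole -c /opt/bacula/etc/bconsole.conf <<'END_OF_DATA'
setbandwidth limit=10000 client=myclient-fd
status scheduled days=30 limit=200
cancel all
END_OF_DATA

Feeding the commands through a here-document keeps the sketch repeatable and non-interactive; the same commands can of course be typed at the bconsole prompt.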
The following features or directives have been removed: - Win32 - tray-monitor - wx_console - Removed cats DBI driver - Python Detailed changes: ================= 24Mar14 - Add Josip Almasi to AUTHORS - [PATCH] Support for restricted consoles in BAT config - [PATCH] Fix for free director directive - [PATCH] Fix auto-select restricted console for director in bconsole - Realign output display - Update ua_output.c from Branch-6.7 - Add some missing Branch-6.7 updates - Added needed empty directories to Baculum - Fix for support PostgreSQL, MySQL and SQLite - Framework adjusting to Baculum database connections - Framework fix for lower case tables names in MySQL - Fix for Baculum SQLite support - Initial commit Baculum - Add Marcin to AUTHORS file - Strip trailing blanks - Update copyright year - Update LICENSE and header files - Remove old file - Add new header in misc files - Remove tray-monitor bwx-console manual installation - Remove FD python and examples - Fixup spec files - Remove pythonlib from lib - Update package-list - Fix SDCallsClient daemon synchronization - Add debug code + make 127.0.0.1 same as localhost for tls tests - Fix multiple DIRs in console - Make failure for bat to connect to DIR non-fatal - Fix bat style to one that works - Take disk-changer from Branch-6.7 - Simplify Version output - Fix FDVersion for SD Calls Client test - Update accurate test - Update differential test - Add new regress timing scripts - Improve plugin make clean - Implement regress FORCE_SDCALLS - Remove win32 tray-monitor and wx-console directories - Remove regress-config need only regress-config.in - Add configure archivedir - Improve SQL failure reporting - First cut backport BEE to community - Add copyright to mtx-changer.in bacula-15.0.3/VERIFYING0000644000175000017500000000270014771010173014146 0ustar bsbuildbsbuild All Bacula packages released on Source Forge after 8 June 2003 will be signed with the Bacula Distribution Verification Key. By obtaining a copy of the Bacula Distribution Verification Public key from either the home site (www.bacula.org) or from the Source Forge project page, (www.sourceforge.net/projects/bacula). you can verify that the code you have is complete, unaltered, and packaged by myself (Kern Sibbald) or D. Scott Barninger. Putting the Bacula Key in your Keyring: Once you download the Bacula public key, you must insert it in your keyring. The procedure will differ depending on whether you are using PGP or GPG. For GPG, assuming you have put the key in bacula.key, the procedure is: gpg --import bacula.key Verifying an RPM: The procedure for verification differs slightly if you are using rpms or tar.gz files. For rpms, the signature becomes part of the rpm package, and once the Bacula public key is in your keyring you check the package with: rpm --checksig bacula-1.xx.rpm Verifying tar files: Tar files are distributed as they always have been in the past, unchanged. However, for each xxxx.tar.gz file that is released there will be a second file released with the same name but with .sig appended, for example xxxx.tar.gz.sig. To verify the Bacula distribution, you download both the files and put them in the same directory, then for GPG, you use the following command to do the verification: gpg --verify xxxx.tar.gz.sig bacula-15.0.3/AUTHORS0000644000175000017500000000430014771010173013727 0ustar bsbuildbsbuild This file contains a list of the people who have contributed code, programs or documentation to the Bacula project. Thanks to all of you. 
Without your participation, Bacula would not be the success it is today. If you have made a contribution and do not find your name on this list, please send a note to license@bacula.org or to the person from whom you received the distribution. Contributors: Adam Thorton Adrew J. Millar Adrian Close Aitor Matilla Alan Brown Aleksandar Milivojevic Alexander Bergolth Alexandre Baron Alexandre Simon Allan Black Ana Emilia Arruda Andreas Piesk Andrew Ford Arno Lehmann Attila Fülöp Ben Walton Bernd Frick Bill Moran Bastian Friedrich Carlos A. Molina G Carsten Leonhardt Carsten Paeth Chris Lee Christian Masopust Christopher Hull Craig Miskell Dan Langille Daniele Eccher David Boyes David Duchscher Davide Franco D. Scott Barninger Devin Reade Dirk Bartley Eamon Brosnan Edwin Groothuis Eric Bollengier Erich Prinz Felix Schwarz Francisco Reyes Frank Kardel Frank Sweetser Graham Keeling Grzegorz Grabowski Howard Thomson Jaime Ventura James Harper Jan Kesten jgorig Jim Raney Joakim Tjernlund Joao Henrique Freitas John Goerzen John Kodis John Walker Jorj Bauer Jose Herrera Jose Luis Tallon Josip Almasi Jo Simoens Juan Luis Frances Juergen Lock Karl Cunningham Kern Sibbald Kjetil Torgrim Homme Landon Fuller Laurent Papier Lukas Nykryn Lorenz Schori Luca Berra Lucas B. Cohen Lucas Di Pentima Ludovic Strappazon Marc Cousin Marc Schiffbauer Marcin Haba Marco van Wieringen Marcus Weinhold Martin Schmid Martin Simmons Meno Abels Michael Narigon Michael Renner Michael -buk- Scherer Michael Stapelberg Michel Meyers Morgan Nic Bellamy Nicolas Boichat Olivier Lehmann Peter Buschmann Peter Eriksson Peter Much Philippe Chauvat Phil Stracchino Preben Guldberg Radoslaw Korzeniewski Riccardo Ghetta Richard Mortimer Robert Nelson Robert Oschwald Rudolf Cejka Russel Howe Scott Bailey Sebastian Andrzej Siewior Sergey Svishchev Simone Caronni Stefan Reddig Sven Hartge Thomas Franz Thomas Lohman Thomas Glatthor Thorsten Enge Tim Oberfoell Tomas Cameron Tullio Andreatta Ulrich Leodolter V. Novy Vitaliy Kosharskiy Victor Hugo dos Santos Wanderlei Huttel Wolfgang Denk Yuri Timofeev Yves Orton bacula-15.0.3/src/0000755000175000017500000000000014771010173013451 5ustar bsbuildbsbuildbacula-15.0.3/src/host.h.in0000644000175000017500000000161214771010173015204 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2023 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Define Host machine * */ #define HOST_OS "@host@@post_host@" #define BACULA "@BACULA@" #define DISTNAME "@DISTNAME@" #define DISTVER "@DISTVER@" #ifdef HAVE_WIN32 #define HELPDIR "c://Program Files//Bacula//help" #else #define HELPDIR "@htmldir@" #endif bacula-15.0.3/src/cats/0000755000175000017500000000000014771010173014403 5ustar bsbuildbsbuildbacula-15.0.3/src/cats/protos.h0000644000175000017500000004253514771010173016113 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. 
You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * * Database routines that are exported by the cats library for * use elsewhere in Bacula (mainly the Director). * * Note: the interface that is used by the core Bacula code outside * of the cats directory has names that are: * db_xxx(x, db, y, ...) * usually with a database pointer such as db as an argument. * This simplifies the vast bulk of the code and makes it easier to read. * These are translated into class calls on the db pointer by a #define * in this file. * * The actual class code is named bdb_xxx(x, y, ...) and is called with * the class pointer such as db->bdb_xxx(x, y, ...) The code in this * cats directory can use the db_xxx() calls or the db->bdb_xxx() calls. * In the Bacula core code we prefer using only the db_xxx() calls. * * Written by Kern Sibbald, MM */ #ifndef __SQL_PROTOS_H #define __SQL_PROTOS_H #include "cats.h" BDB *db_init_database(JCR *jcr, const char *db_driver, const char *db_name, const char *db_user, const char *db_password, const char *db_address, int db_port, const char *db_socket, const char *db_ssl_mode, const char *db_ssl_key, const char *db_ssl_cert, const char *db_ssl_ca, const char *db_ssl_capath, const char *db_ssl_cipher, bool mult_db_connections, bool disable_batch_insert); /* Database prototypes and defines */ /* Misc */ #define db_lock(mdb) \ mdb->bdb_lock(__FILE__, __LINE__) #define db_unlock(mdb) \ mdb->bdb_unlock() /* Virtual methods */ #define db_escape_string(jcr, mdb, snew, old, len) \ mdb->bdb_escape_string(jcr, snew, old, len) #define db_escape_object(jcr, mdb, old, len) \ mdb->bdb_escape_object(jcr, old, len) #define db_unescape_object(jcr, mdb, from, expected_len, dest, len) \ mdb->bdb_unescape_object(jcr, from, expected_len, dest, len) #define db_open_database(jcr, mdb) \ mdb->bdb_open_database(jcr) #define db_close_database(jcr, mdb) \ mdb->bdb_close_database(jcr) #define db_start_transaction(jcr, mdb) \ mdb->bdb_start_transaction(jcr) #define db_end_transaction(jcr, mdb) \ if (mdb) mdb->bdb_end_transaction(jcr) #define db_sql_query(mdb, query, result_handler, ctx) \ mdb->bdb_sql_query(query, result_handler, ctx) #define db_thread_cleanup(mdb) \ if (mdb) mdb->bdb_thread_cleanup() /* sql.c */ int db_jobids_handler(void *ctx, int num_fields, char **row); int db_mint64_handler(void *ctx, int num_fields, char **row); int db_int64_handler(void *ctx, int num_fields, char **row); int db_strtime_handler(void *ctx, int num_fields, char **row); int db_list_handler(void *ctx, int num_fields, char **row); int db_name_handler(void *ctx, int num_fields, char **row); int db_string_list_handler(void *ctx, int num_fields, char **row); int db_int_handler(void *ctx, int num_fields, char **row); void bdb_debug_print(JCR *jcr, FILE *fp); void db_free_restoreobject_record(JCR *jcr, ROBJECT_DBR *rr); #define db_open_batch_connection(jcr, mdb) \ mdb->bdb_open_batch_connection(jcr) #define db_strerror(mdb) \ mdb->bdb_strerror() #define db_debug_print(jcr, fp) \ bdb_debug_print(jcr, fp) #define db_check_max_connections(jcr, mdb, maxc) \ mdb->bdb_check_max_connections(jcr, maxc) /* sql_create.c */ bool bdb_write_batch_file_records(JCR *jcr); void bdb_disable_batch_insert(bool 
disable); /* sql_get.c */ void bdb_free_restoreobject_record(JCR *jcr, ROBJECT_DBR *rr); #define db_get_client_pool(jcr, mdb, results) \ mdb->bdb_get_client_pool(jcr, results) #define db_get_jobids(jcr, mdb, jobids, ret, append) \ mdb->bdb_get_jobids(jobids, ret, append) /* sql_create.c */ #define db_create_log_record(jcr, mdb, jobid, mtime, msg) \ mdb->bdb_create_log_record(jcr, jobid, mtime, msg) #define db_create_events_record(jcr, mdb, rec) \ mdb->bdb_create_events_record(jcr, rec) #define db_create_tag_record(jcr, mdb, rec) \ mdb->bdb_create_tag_record(jcr, rec) #define db_create_path_record(jcr, mdb, ar) \ mdb->bdb_create_path_record(jcr, ar) #define db_create_file_attributes_record(jcr, mdb, ar) \ mdb->bdb_create_file_attributes_record(jcr, ar) #define db_create_job_record(jcr, mdb, jr) \ mdb->bdb_create_job_record(jcr, jr) #define db_create_media_record(jcr, mdb, media_dbr) \ mdb->bdb_create_media_record(jcr, media_dbr) #define db_create_client_record(jcr, mdb, cr) \ mdb->bdb_create_client_record(jcr, cr) #define db_create_fileset_record(jcr, mdb, fsr) \ mdb->bdb_create_fileset_record(jcr, fsr) #define db_create_pool_record(jcr, mdb, pool_dbr) \ mdb->bdb_create_pool_record(jcr, pool_dbr) #define db_create_jobmedia_record(jcr, mdb, jr) \ mdb->bdb_create_jobmedia_record(jcr, jr) #define db_create_filemedia_record(jcr, mdb, fr) \ mdb->bdb_create_filemedia_record(jcr, fr) #define db_create_counter_record(jcr, mdb, cr) \ mdb->bdb_create_counter_record(jcr, cr) #define db_create_device_record(jcr, mdb, dr) \ mdb->bdb_create_device_record(jcr, dr) #define db_create_storage_record(jcr, mdb, sr) \ mdb->bdb_create_storage_record(jcr, sr) #define db_create_mediatype_record(jcr, mdb, mr) \ mdb->bdb_create_mediatype_record(jcr, mr) #define db_write_batch_file_records(jcr) \ bdb_write_batch_file_records(jcr) #define db_create_attributes_record(jcr, mdb, ar) \ mdb->bdb_create_attributes_record(jcr, ar) #define db_create_restore_object_record(jcr, mdb, ar) \ mdb->bdb_create_restore_object_record(jcr, ar) #define db_create_object_record(jcr, mdb, obj_r) \ mdb->bdb_create_object_record(jcr, obj_r) #define db_create_base_file_attributes_record(jcr, mdb, ar) \ mdb->bdb_create_base_file_attributes_record(jcr, ar) #define db_commit_base_file_attributes_record(jcr, mdb) \ mdb->bdb_commit_base_file_attributes_record(jcr) #define db_create_base_file_list(jcr, mdb, jobids) \ mdb->bdb_create_base_file_list(jcr, jobids) #define db_disable_batch_insert(disable) \ bdb_disable_batch_insert(disable) #define db_create_snapshot_record(jcr, mdb, sr) \ mdb->bdb_create_snapshot_record(jcr, sr) #define db_create_fileevent_record(jcr, mdb, ev) \ mdb->bdb_create_fileevent_record(jcr, ev) /* sql_delete.c */ #define db_delete_pool_record(jcr, mdb, pool_dbr) \ mdb->bdb_delete_pool_record(jcr, pool_dbr) #define db_delete_media_record(jcr, mdb, mr) \ mdb->bdb_delete_media_record(jcr, mr) #define db_purge_media_record(jcr, mdb, mr) \ mdb->bdb_purge_media_record(jcr, mr) #define db_delete_snapshot_record(jcr, mdb, sr) \ mdb->bdb_delete_snapshot_record(jcr, sr) #define db_delete_client_record(jcr, mdb, cr) \ mdb->bdb_delete_client_record(jcr, cr) #define db_delete_tag_record(jcr, mdb, tr) \ mdb->bdb_delete_tag_record(jcr, tr) /* sql_find.c */ #define db_find_last_job_end_time(jcr, mdb, jr, etime, job) \ mdb->bdb_find_last_job_end_time(jcr, jr, etime, job) #define db_find_last_job_start_time(jcr, mdb, jr, stime, job, JobLevel) \ mdb->bdb_find_last_job_start_time(jcr, jr, stime, job, JobLevel) #define db_find_job_start_time(jcr, 
mdb, jr, stime, job) \ mdb->bdb_find_job_start_time(jcr, jr, stime, job) #define db_find_last_jobid(jcr, mdb, Name, jr) \ mdb->bdb_find_last_jobid(jcr, Name, jr) #define db_find_next_volume(jcr, mdb, index, InChanger, mr) \ mdb->bdb_find_next_volume(jcr, index, InChanger, mr) #define db_find_failed_job_since(jcr, mdb, jr, stime, JobLevel) \ mdb->bdb_find_failed_job_since(jcr, jr, stime, JobLevel) /* sql_get.c */ #define db_get_file_record(jcr, mdb, jr, fr) \ mdb->bdb_get_file_record(jcr, jr, fr) #define db_get_volume_jobids(jcr, mdb, mr, lst) \ mdb->bdb_get_volume_jobids(jcr, mr, lst) #define db_get_client_jobids(jcr, mdb, cr, lst) \ mdb->bdb_get_client_jobids(jcr, cr, lst) #define db_get_base_file_list(jcr, mdb, use_md5, result_handler, ctx) \ mdb->bdb_get_base_file_list(jcr, use_md5, result_handler, ctx) #define db_get_path_record(jcr, mdb) \ mdb->bdb_get_path_record(jcr) #define db_get_pool_record(jcr, mdb, pdbr) \ mdb->bdb_get_pool_record(jcr, pdbr) #define db_get_pool_numvols(jcr, mdb, pdbr) \ mdb->bdb_get_pool_numvols(jcr, pdbr) #define db_get_client_record(jcr, mdb, cr) \ mdb->bdb_get_client_record(jcr, cr) #define db_get_jobmedia_record(jcr, mdb, jmr) \ mdb->bdb_get_jobmedia_record(jcr, jmr) #define db_get_job_record(jcr, mdb, jr) \ mdb->bdb_get_job_record(jcr, jr) #define db_get_job_volume_names(jcr, mdb, JobId, VolumeNames, LastVolumeName, maxlen) \ mdb->bdb_get_job_volume_names(jcr, JobId, VolumeNames, LastVolumeName, maxlen) #define db_get_file_attributes_record(jcr, mdb, fname, jr, fdbr) \ mdb->bdb_get_file_attributes_record(jcr, fname, jr, fdbr) #define db_get_fileset_record(jcr, mdb, fsr) \ mdb->bdb_get_fileset_record(jcr, fsr) #define db_get_media_record(jcr, mdb, mr) \ mdb->bdb_get_media_record(jcr, mr) #define db_get_num_media_records(jcr, mdb) \ mdb->bdb_get_num_media_records(jcr) #define db_get_num_pool_records(jcr, mdb) \ mdb->bdb_get_num_pool_records(jcr) #define db_get_pool_ids(jcr, mdb, num_ids, ids) \ mdb->bdb_get_pool_ids(jcr, num_ids, ids) #define db_get_client_ids(jcr, mdb, num_ids, ids) \ mdb->bdb_get_client_ids(jcr, num_ids, ids) #define db_get_media_ids(jcr, mdb, mr, num_ids, ids) \ mdb->bdb_get_media_ids(jcr, mr, num_ids, ids) #define db_get_job_volume_parameters(jcr, mdb, JobId, VolParams) \ mdb->bdb_get_job_volume_parameters(jcr, JobId, VolParams) #define db_get_counter_record(jcr, mdb, cr) \ mdb->bdb_get_counter_record(jcr, cr) #define db_get_query_dbids(jcr, mdb, query, ids) \ mdb->bdb_get_query_dbids(jcr, query, ids) #define db_get_prior_job(jcr, mdb, jobids, jr) \ mdb->bdb_get_prior_job(jcr, jobids, jr) #define db_get_file_list(jcr, mdb, jobids, opts, result_handler, ctx) \ mdb->bdb_get_file_list(jcr, jobids, opts, result_handler, ctx) #define db_get_base_jobid(jcr, mdb, jr, jobid) \ mdb->bdb_get_base_jobid(jcr, jr, jobid) #define db_get_accurate_jobids(jcr, mdb, jr, jobids) \ mdb->bdb_get_accurate_jobids(jcr, jr, 0, jobids) #define db_get_accurate_jobids_from_jobid(jcr, mdb, jr, jobid_hint, jobids) \ mdb->bdb_get_accurate_jobids(jcr, jr, jobid_hint, jobids) #define db_get_used_base_jobids(jcr, mdb, jobids, result) \ mdb->bdb_get_used_base_jobids(jcr, jobids, result) #define db_get_restoreobject_record(jcr, mdb, rr) \ mdb->bdb_get_restoreobject_record(jcr, rr) #define db_get_plugin_object_record(jcr, mdb, obj_r) \ mdb->bdb_get_plugin_object_record(jcr, obj_r) #define db_get_plugin_objects_ids(jcr, mdb, obj_r, ids) \ mdb->bdb_get_plugin_objects_ids(jcr, obj_r, ids) #define db_get_num_restoreobject_records(jcr, mdb, rr) \ 
mdb->bdb_get_num_restoreobject_records(jcr, rr) #define db_get_type_index(mdb) \ mdb->bdb_get_type_index() #define db_get_engine_name(mdb) \ mdb->bdb_get_engine_name() #define db_get_snapshot_record(jcr, mdb, sr) \ mdb->bdb_get_snapshot_record(jcr, sr) #define db_get_job_statistics(jcr, mdb, jr) \ mdb->bdb_get_job_statistics(jcr, jr) /* sql_list.c */ #define db_list_jobs_for_file(jcr, mdb, cli, fname, result_handler, ctx, type) \ mdb->bdb_list_jobs_for_file(jcr, cli, fname, result_handler, ctx, type) #define db_search_tag_records(jcr, mdb, tag, result_handler, ctx) \ mdb->bdb_search_tag_records(jcr, tag, result_handler, ctx) #define db_search_media_records(jcr, mdb, rec, result_handler, ctx) \ mdb->bdb_search_media_records(jcr, rec, result_handler, ctx) #define db_search_job_records(jcr, mdb, rec, result_handler, ctx) \ mdb->bdb_search_job_records(jcr, rec, result_handler, ctx) #define db_search_client_records(jcr, mdb, rec, result_handler, ctx) \ mdb->bdb_search_client_records(jcr, rec, result_handler, ctx) #define db_list_pool_records(jcr, mdb, pr, sendit, ctx, type) \ mdb->bdb_list_pool_records(jcr, pr, sendit, ctx, type) #define db_list_job_records(jcr, mdb, jr, sendit, ctx, type) \ mdb->bdb_list_job_records(jcr, jr, sendit, ctx, type) #define db_list_job_totals(jcr, mdb, jr, sendit, ctx) \ mdb->bdb_list_job_totals(jcr, jr, sendit, ctx) #define db_list_files_for_job(jcr, mdb, jobid, deleted, sendit, ctx) \ mdb->bdb_list_files_for_job(jcr, jobid, deleted, sendit, ctx) #define db_list_fileevents_for_job(jcr, mdb, jobid, etype, sendit, ctx, type) \ mdb->bdb_list_fileevents_for_job(jcr, jobid, etype, sendit, ctx, type) #define db_list_media_records(jcr, mdb, mdbr, sendit, ctx, type) \ mdb->bdb_list_media_records(jcr, mdbr, sendit, ctx, type) #define db_list_jobmedia_records(jcr, mdb, JobId, volume, sendit, ctx, type) \ mdb->bdb_list_jobmedia_records(jcr, JobId, volume, sendit, ctx, type) #define db_list_filemedia_records(jcr, mdb, JobId, FI, sendit, ctx, type) \ mdb->bdb_list_filemedia_records(jcr, JobId, FI, sendit, ctx, type) #define db_list_fileevents_records(jcr, mdb, event, sendit, ctx, type) \ mdb->bdb_list_fileevents_records(jcr, event, sendit, ctx, type) #define db_list_joblog_records(jcr, mdb, JobId, pattern, sendit, ctx, type) \ mdb->bdb_list_joblog_records(jcr, JobId, pattern, sendit, ctx, type) #define db_list_sql_query(jcr, mdb, title, query, sendit, ctx, verbose, type) \ mdb->bdb_list_sql_query(jcr, title, query, sendit, ctx, verbose, type) #define db_list_client_records(jcr, mdb, sendit, ctx, type) \ mdb->bdb_list_client_records(jcr, sendit, ctx, type) #define db_list_copies_records(jcr, mdb, limit, jobids, sendit, ctx, type) \ mdb->bdb_list_copies_records(jcr, limit, jobids, sendit, ctx, type) #define db_list_events_records(jcr, mdb, rec, sendit, ctx, type) \ mdb->bdb_list_events_records(jcr, rec, sendit, ctx, type) #define db_list_base_files_for_job(jcr, mdb, jobid, sendit, ctx) \ mdb->bdb_list_base_files_for_job(jcr, jobid, sendit, ctx) #define db_list_plugin_object_types(jcr, mdb, sendit, ctx, type) \ mdb->bdb_list_plugin_object_types(jcr, sendit, ctx, type) #define db_list_plugin_objects(jcr, mdb, obj_r, sendit, ctx, type) \ mdb->bdb_list_plugin_objects(jcr, obj_r, sendit, ctx, type) #define db_list_plugin_objects_ids(jcr, mdb, id_list, sendit, ctx, type) \ mdb->bdb_list_plugin_objects_ids(jcr, id_list, sendit, ctx, type); #define db_list_restore_objects(jcr, mdb, rr, sendit, ctx, type) \ mdb->bdb_list_restore_objects(jcr, rr, sendit, ctx, type) #define 
db_list_snapshot_records(jcr, mdb, snapdbr, sendit, ua, llist) \ mdb->bdb_list_snapshot_records(jcr, snapdbr, sendit, ua, llist) #define db_list_files(jcr, mdb, filedbr, sendit, ua) \ mdb->bdb_list_files(jcr, filedbr, sendit, ua); #define db_list_tag_records(jcr, mdb, tagdbr, sendit, ua, llist) \ mdb->bdb_list_tag_records(jcr, tagdbr, sendit, ua, llist); #define db_list_metadata_records(jcr, mdb, dbr, sendit, ua, llist) \ mdb->bdb_list_metadata_records(jcr, dbr, sendit, ua, llist); /* sql_update.c */ #define db_update_job_start_record(jcr, mdb, jr) \ mdb->bdb_update_job_start_record(jcr, jr) #define db_update_job_end_record(jcr, mdb, jr) \ mdb->bdb_update_job_end_record(jcr, jr) #define db_update_client_record(jcr, mdb, cr) \ mdb->bdb_update_client_record(jcr, cr) #define db_update_pool_record(jcr, mdb, pr) \ mdb->bdb_update_pool_record(jcr, pr) #define db_update_storage_record(jcr, mdb, sr) \ mdb->bdb_update_storage_record(jcr, sr) #define db_update_media_record(jcr, mdb, mr) \ mdb->bdb_update_media_record(jcr, mr) #define db_update_media_defaults(jcr, mdb, mr) \ mdb->bdb_update_media_defaults(jcr, mr) #define db_update_counter_record(jcr, mdb, cr) \ mdb->bdb_update_counter_record(jcr, cr) #define db_add_digest_to_file_record(jcr, mdb, FileId, digest, type) \ mdb->bdb_add_digest_to_file_record(jcr, FileId, digest, type) #define db_mark_file_record(jcr, mdb, FileId, JobId) \ mdb->bdb_mark_file_record(jcr, FileId, JobId) #define db_make_inchanger_unique(jcr, mdb, mr) \ mdb->bdb_make_inchanger_unique(jcr, mr) #define db_update_stats(jcr, mdb, age) \ mdb->bdb_update_stats(jcr, age) #define db_update_snapshot_record(jcr, mdb, sr) \ mdb->bdb_update_snapshot_record(jcr, sr) #endif /* __SQL_PROTOS_H */ bacula-15.0.3/src/cats/grant_privileges0000644000175000017500000000066114771010173017675 0ustar bsbuildbsbuilduse mysql grant all privileges on *.* to kern@localhost with grant option; grant all privileges on *.* to kern@"%" with grant option; grant all privileges on *.* to kelvin@localhost with grant option; grant all privileges on *.* to kelvin@"%" with grant option; grant all privileges on *.* to bacula@localhost with grant option; grant all privileges on *.* to bacula@"%" with grant option; select * from user; flush privileges; exit bacula-15.0.3/src/cats/bdb_sqlite.h0000644000175000017500000000477614771010173016702 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* SQLite driver specific definitions */ #ifdef __BDB_SQLITE_H_ class BDB_SQLITE: public BDB { private: struct sqlite3 *m_db_handle; char **m_result; /* sql_store_results() and sql_query() */ char **m_col_names; /* used to access fields when using db_sql_query() */ char *m_sqlite_errmsg; SQL_FIELD m_sql_field; /* used when using db_sql_query() and sql_fetch_field() */ public: BDB_SQLITE(); ~BDB_SQLITE(); /* Used internaly by sqlite.c to access fields in db_sql_query() */ void set_column_names(char **res, int nb) { m_col_names = res; m_num_fields = nb; m_field_number = 0; } /* Functions that we override */ bool bdb_open_database(JCR *jcr); void bdb_close_database(JCR *jcr); void bdb_thread_cleanup(void); void bdb_escape_string(JCR *jcr, char *snew, const char *old, int len); char *bdb_escape_object(JCR *jcr, char *old, int len); void bdb_unescape_object(JCR *jcr, char *from, int32_t expected_len, POOLMEM **dest, int32_t *len); void bdb_start_transaction(JCR *jcr); void bdb_end_transaction(JCR *jcr); bool bdb_sql_query(const char *query, DB_RESULT_HANDLER *result_handler, void *ctx); void sql_free_result(void); SQL_ROW sql_fetch_row(void); bool sql_query(const char *query, int flags=0); const char *sql_strerror(void); int sql_num_rows(void); void sql_data_seek(int row); int sql_affected_rows(void); uint64_t sql_insert_autokey_record(const char *query, const char *table_name); void sql_field_seek(int field); SQL_FIELD *sql_fetch_field(void); int sql_num_fields(void); bool sql_field_is_not_null(int field_type); bool sql_field_is_numeric(int field_type); bool sql_batch_start(JCR *jcr); bool sql_batch_end(JCR *jcr, const char *error); bool sql_batch_insert(JCR *jcr, ATTR_DBR *ar); }; #endif /* __BDB_SQLITE_H_ */ bacula-15.0.3/src/cats/sql_cmds.c0000644000175000017500000012460114771010173016360 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * This file contains all the SQL commands that are either issued by * the Director or which are database backend specific. * * Written by Kern Sibbald, July MMII */ /* * Note, PostgreSQL imposes some constraints on using DISTINCT and GROUP BY * for example, the following is illegal in PostgreSQL: * SELECT DISTINCT JobId FROM temp ORDER BY StartTime ASC; * because all the ORDER BY expressions must appear in the SELECT list! 
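 * Two accepted rewrites (illustrative only, not queries used below):
 *  SELECT DISTINCT JobId, StartTime FROM temp ORDER BY StartTime ASC;
 *  SELECT JobId FROM temp GROUP BY JobId ORDER BY MIN(StartTime) ASC;
 * i.e. either put the sort key in the SELECT list or aggregate it.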
*/ #include "bacula.h" const char *get_restore_objects = "SELECT JobId,ObjectLength,ObjectFullLength,ObjectIndex," "ObjectType,ObjectCompression,FileIndex,ObjectName," "RestoreObject,PluginName " "FROM RestoreObject " "WHERE JobId IN (%s) " "AND ObjectType = %d " "ORDER BY ObjectIndex ASC"; const char *get_created_running_job = "SELECT JobId, Job, JobStatus FROM Job WHERE JobStatus IN ('C', 'R')"; const char *cleanup_created_job = "UPDATE Job SET JobStatus='f', StartTime=SchedTime, EndTime=SchedTime " "WHERE JobStatus = 'C'"; const char *cleanup_running_job = "UPDATE Job SET JobStatus='f', EndTime=StartTime WHERE JobStatus = 'R'"; const char *sql_clear_malware_table_def = "TRUNCATE Malware%s"; const char *sql_clear_malware_table[] = { "DELETE * FROM Malware%s", // Truncate on Mysql doesn't work with transaction sql_clear_malware_table_def, sql_clear_malware_table_def }; const char *sync_jobhisto_def = "UPDATE JobHisto SET PurgedFiles=Job.PurgedFiles, HasCache=Job.HasCache,Reviewed=Job.Reviewed " "FROM Job WHERE Job.JobId=JobHisto.JobId and Job.Job = JobHisto.Job " "AND (Job.HasCache <> JobHisto.HasCache OR Job.PurgedFiles <> JobHisto.PurgedFiles OR Job.Reviewed <> JobHisto.Reviewed)"; const char *sync_jobhisto[] = { // MySQL "UPDATE JobHisto JOIN Job USING (JobId) SET JobHisto.PurgedFiles=Job.PurgedFiles, JobHisto.HasCache=Job.HasCache, JobHisto.Reviewed=Job.Reviewed " "WHERE Job.JobId=JobHisto.JobId and Job.Job = JobHisto.Job " "AND (Job.HasCache <> JobHisto.HasCache OR Job.PurgedFiles <> JobHisto.PurgedFiles OR Job.Reviewed <> JobHisto.Reviewed)", // PostgreSQL sync_jobhisto_def, // SQLite sync_jobhisto_def }; /* For sql_update.c db_update_stats */ const char *fill_jobhisto = "INSERT INTO JobHisto (JobId, Job, Name, Type, Level," "ClientId, JobStatus," "SchedTime, StartTime, EndTime, RealEndTime, RealStartTime, JobTDate," "VolSessionId, VolSessionTime, JobFiles, JobBytes, ReadBytes," "JobErrors, JobMissingFiles, PoolId, FileSetId, PriorJobId, PriorJob, " "PurgedFiles, HasBase, HasCache, Reviewed, Comment, FileTable, " "isVirtualFull,CompressRatio,Rate,LastReadStorageId,WriteStorageId,LastReadDevice,WriteDevice,StatusInfo,Encrypted)" "SELECT JobId, Job, Name, Type, Level, ClientId, JobStatus," "SchedTime, StartTime, EndTime, RealEndTime, RealStartTime, JobTDate," "VolSessionId, VolSessionTime, JobFiles, JobBytes, ReadBytes," "JobErrors, JobMissingFiles, PoolId, FileSetId, PriorJobId, PriorJob, " "PurgedFiles, HasBase, HasCache, Reviewed, Comment, FileTable, " "isVirtualFull,CompressRatio,Rate,LastReadStorageId,WriteStorageId,LastReadDevice,WriteDevice,StatusInfo,Encrypted " "FROM Job " "WHERE JobStatus IN ('T','W','f','A','E')" "AND NOT EXISTS " "(SELECT JobHisto.JobId " "FROM JobHisto WHERE JobHisto.Jobid=Job.JobId)" "AND JobTDate < %s "; /* For ua_update.c */ const char *list_pool = "SELECT * FROM Pool WHERE PoolId=%s"; /* For ua_dotcmds.c */ const char *client_backups = "SELECT DISTINCT Job.JobId,Client.Name as Client,Level,StartTime," "JobFiles,JobBytes,VolumeName,MediaType,FileSet,Media.Enabled as Enabled" " FROM Client,Job,JobMedia,Media,FileSet" " WHERE Client.Name='%s'" " AND FileSet='%s'" " AND Client.ClientId=Job.ClientId " " AND JobStatus IN ('T','W') AND Type='B' " " AND JobMedia.JobId=Job.JobId AND JobMedia.MediaId=Media.MediaId " " AND Job.FileSetId=FileSet.FileSetId" " ORDER BY Job.StartTime"; /* ====== ua_prune.c */ const char *sel_JobMedia = "SELECT DISTINCT JobMedia.JobId FROM JobMedia,Job" " WHERE MediaId=%s AND Job.JobId=JobMedia.JobId " " AND Job.JobTDate<%s AND 
Job.JobStatus NOT IN ('R', 'C') "; /* Delete temp tables and indexes */ const char *drop_deltabs[] = { "DROP TABLE IF EXISTS DelCandidates", NULL}; const char *create_delindex = "CREATE INDEX DelInx1 ON DelCandidates (JobId)"; /* ======= ua_restore.c */ const char *uar_count_files = "SELECT JobFiles FROM Job WHERE JobId=%s"; const char *uar_print_jobs = "SELECT DISTINCT JobId,Level,JobFiles,JobBytes,StartTime,VolumeName" " FROM Job JOIN JobMedia USING (JobId) JOIN Media USING (MediaId) " " WHERE JobId IN (%s) " " ORDER BY StartTime ASC"; /* * Find all files for a particular JobId and insert them into * the tree during a restore. */ const char *uar_sel_files = "SELECT Path.Path,File.Filename,FileIndex,JobId,LStat " "FROM File JOIN Path USING (PathId) " "WHERE File.JobId IN (%s)"; const char *uar_del_temp = "DROP TABLE IF EXISTS temp"; const char *uar_del_temp1 = "DROP TABLE IF EXISTS temp1"; const char *uar_last_full = "INSERT INTO temp1(JobId,JobTDate) SELECT Job.JobId,JobTdate " "FROM Client JOIN Job USING (ClientId) JOIN JobMedia USING (JobId) JOIN Media USING (MediaId) JOIN FileSet USING (FileSetId) " "WHERE Client.ClientId=%s " "AND Job.StartTime < '%s' " "AND Level='F' AND JobStatus IN ('T','W') AND Type='B' " "AND Media.Enabled=1 " "AND FileSet.FileSet='%s' " " %s " // Pool select " %s " // ACL restriction "ORDER BY Job.JobTDate DESC LIMIT 1"; const char *uar_full = "INSERT INTO temp (JobId,JobTDate,ClientId,Level,JobFiles,JobBytes,StartTime,VolumeName,StartFile,VolSessionId,VolSessionTime) " "SELECT Job.JobId,Job.JobTDate," "Job.ClientId,Job.Level,Job.JobFiles,Job.JobBytes," "StartTime,VolumeName,JobMedia.StartFile,VolSessionId,VolSessionTime " "FROM temp1,Job,JobMedia,Media WHERE temp1.JobId=Job.JobId " "AND Level='F' AND JobStatus IN ('T','W') AND Type='B' " "AND Media.Enabled=1 " "AND JobMedia.JobId=Job.JobId " "AND JobMedia.MediaId=Media.MediaId"; const char *uar_dif = "INSERT INTO temp (JobId,JobTDate,ClientId,Level,JobFiles,JobBytes,StartTime,VolumeName,StartFile,VolSessionId,VolSessionTime) " "SELECT Job.JobId,Job.JobTDate,Job.ClientId," "Job.Level,Job.JobFiles,Job.JobBytes," "Job.StartTime,Media.VolumeName,JobMedia.StartFile," "Job.VolSessionId,Job.VolSessionTime " "FROM Job JOIN JobMedia USING (JobId) JOIN Media USING (MediaId) JOIN FileSet USING (FileSetId) %s " "WHERE Job.JobTDate>%s AND Job.StartTime<'%s' " "AND Job.ClientId=%s " "AND Media.Enabled=1 " "AND Job.Level='D' AND JobStatus IN ('T','W') AND Type='B' " "AND FileSet.FileSet='%s' " " %s " // POOL select " %s " // ACL restriction "ORDER BY Job.JobTDate DESC LIMIT 1"; const char *uar_inc = "INSERT INTO temp (JobId,JobTDate,ClientId,Level,JobFiles,JobBytes,StartTime,VolumeName,StartFile,VolSessionId,VolSessionTime) " "SELECT Job.JobId,Job.JobTDate,Job.ClientId," "Job.Level,Job.JobFiles,Job.JobBytes," "Job.StartTime,Media.VolumeName,JobMedia.StartFile," "Job.VolSessionId,Job.VolSessionTime " "FROM Job JOIN JobMedia USING (JobId) JOIN Media USING (MediaId) JOIN FileSet USING (FileSetId) %s " "WHERE Job.JobTDate>%s AND Job.StartTime<'%s' " "AND Job.ClientId=%s " "AND Media.Enabled=1 " "AND Job.Level='I' AND JobStatus IN ('T','W') AND Type='B' " "AND FileSet.FileSet='%s' " " %s " // Pool select " %s " // ACL ; const char *uar_list_temp = "SELECT DISTINCT JobId,Level,JobFiles,JobBytes,StartTime,VolumeName" " FROM temp" " ORDER BY StartTime ASC"; const char *uar_sel_jobid_temp = "SELECT DISTINCT JobId,StartTime FROM temp ORDER BY StartTime ASC"; const char *uar_sel_all_temp1 = "SELECT * FROM temp1"; const char 
*uar_sel_all_temp = "SELECT * FROM temp"; /* Select FileSet names for this Client */ const char *uar_sel_fileset = "SELECT DISTINCT FileSet.FileSet FROM Job JOIN Client USING (ClientId) JOIN FileSet USING (FileSetId) " "WHERE Client.ClientId=%s %s " "ORDER BY FileSet.FileSet"; /* Select all different FileSet for this client * This query doesn't guarantee that the Id is the latest * version of the FileSet. Can be used with other queries that * use Ids to select the FileSet name. (like in accurate) */ const char *uar_sel_filesetid = "SELECT MAX(FileSet.FileSetId) " "FROM FileSet JOIN Job USING (FileSetId) " "WHERE Job.ClientId=%s " "GROUP BY FileSet"; /* * Find JobId, FileIndex for a given path/file and date * for use when inserting individual files into the tree. */ const char *uar_jobid_fileindex = "SELECT Job.JobId,File.FileIndex " "FROM Job JOIN File USING (JobId) JOIN Path USING (PathId) JOIN Client USING (ClientId) " "WHERE Job.StartTime<='%s' " "AND Path.Path='%s' " "AND File.Filename='%s' " "AND Client.Name='%s' " "AND JobStatus IN ('T','W') AND Type='B' " " %s " // ACL "ORDER BY Job.StartTime DESC LIMIT 1"; const char *uar_jobids_fileindex = "SELECT Job.JobId,File.FileIndex " "FROM Job JOIN File USING (JobId) JOIN Path USING (PathId) JOIN Client USING (ClientId) " "WHERE Job.JobId IN (%s) " "AND Job.StartTime<='%s' " "AND Path.Path='%s' " "AND File.Filename='%s' " "AND Client.Name='%s' " " %s " // ACL "ORDER BY Job.StartTime DESC LIMIT 1"; /* Query to get list of files from table -- presuably built by an external program * we expect filters on join and where */ const char *uar_jobid_fileindex_from_table = "SELECT JobId, FileIndex FROM %s %s %s ORDER BY JobId, FileIndex ASC"; /* Get the list of the last recent version per Delta with a given * jobid list. This is a tricky part because with SQL the result of: * * SELECT MAX(A), B, C, D FROM... GROUP BY (B,C) * * doesn't give the good result (for D). * * With PostgreSQL, we can use DISTINCT ON(), but with Mysql or Sqlite, * we need an extra join using JobTDate. 
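 * Schematically (the real queries follow below):
 *   -- PostgreSQL: one pass keeps the newest row per (PathId, Filename)
 *   SELECT DISTINCT ON (Filename, PathId) ... ORDER BY Filename, PathId, JobTDate DESC
 *   -- MySQL/SQLite: compute MAX(JobTDate) per (PathId, Filename) in a
 *   -- subquery, then join back to Job on JobTDate and to File on
 *   -- (PathId, Filename) to recover the remaining columns (FileId, LStat, MD5).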
*/ static const char *select_recent_version_with_basejob_default = "SELECT FileId, Job.JobId AS JobId, FileIndex, File.PathId AS PathId," "File.Filename AS Filename, LStat, MD5, DeltaSeq," "Job.JobTDate AS JobTDate " "FROM Job, File, ( " "SELECT MAX(JobTDate) AS JobTDate, PathId, Filename " "FROM ( " "SELECT JobTDate, PathId, Filename " /* Get all normal files */ "FROM File JOIN Job USING (JobId) " /* from selected backup */ "WHERE File.JobId IN (%s) " "UNION ALL " "SELECT JobTDate, PathId, Filename " /* Get all files from */ "FROM BaseFiles " /* BaseJob */ "JOIN File USING (FileId) " "JOIN Job ON (BaseJobId = Job.JobId) " "WHERE BaseFiles.JobId IN (%s) " /* Use Max(JobTDate) to find */ ") AS tmp " "GROUP BY PathId, Filename " /* the latest file version */ ") AS T1 " "WHERE (Job.JobId IN ( " /* Security, we force JobId to be valid */ "SELECT DISTINCT BaseJobId FROM BaseFiles WHERE JobId IN (%s)) " "OR Job.JobId IN (%s)) " "AND T1.JobTDate = Job.JobTDate " /* Join on JobTDate to get the orginal */ "AND Job.JobId = File.JobId " /* Job/File record */ "AND T1.PathId = File.PathId " "AND T1.Filename = File.Filename"; const char *select_recent_version_with_basejob[] = { /* MySQL */ select_recent_version_with_basejob_default, /* PostgreSQL */ /* The DISTINCT ON () permits to avoid extra join */ "SELECT DISTINCT ON (Filename, PathId) JobTDate, JobId, FileId, " "FileIndex, PathId, Filename, LStat, MD5, DeltaSeq " "FROM " "(SELECT FileId, JobId, PathId, Filename, FileIndex, LStat, MD5, DeltaSeq " "FROM File WHERE JobId IN (%s) " "UNION ALL " "SELECT File.FileId, File.JobId, PathId, Filename, " "File.FileIndex, LStat, MD5, DeltaSeq " "FROM BaseFiles JOIN File USING (FileId) " "WHERE BaseFiles.JobId IN (%s) " ") AS T JOIN Job USING (JobId) " "ORDER BY Filename, PathId, JobTDate DESC ", /* SQLite */ select_recent_version_with_basejob_default }; /* We do the same thing than the previous query, but we include * all delta parts. If the file has been deleted, we can have irrelevant * parts. * * The code that uses results should control the delta sequence with * the following rules: * First Delta = 0 * Delta = Previous Delta + 1 * * If we detect a gap, we can discard further pieces * If a file starts at 1 instead of 0, the file has been deleted, and further * pieces are useless. 
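 * Worked example: DeltaSeq 0,1,2 -> apply all three pieces;
 * DeltaSeq 0,1,3 -> gap after 1, discard 3 and anything later;
 * DeltaSeq 1,2   -> no initial 0, the file was deleted, discard everything.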
* This control should be reset for each new file */ static const char *select_recent_version_with_basejob_and_delta_default = "SELECT FileId, Job.JobId AS JobId, FileIndex, File.PathId AS PathId," "File.Filename AS Filename, LStat, MD5, File.DeltaSeq AS DeltaSeq," "Job.JobTDate AS JobTDate" " FROM Job, File, (" "SELECT MAX(JobTDate) AS JobTDate, PathId, Filename, DeltaSeq " "FROM (" "SELECT JobTDate, PathId, Filename, DeltaSeq " /*Get all normal files*/ "FROM File JOIN Job USING (JobId) " /* from selected backup */ "WHERE File.JobId IN (%s) " "UNION ALL " "SELECT JobTDate, PathId, Filename, DeltaSeq " /*Get all files from */ "FROM BaseFiles " /* BaseJob */ "JOIN File USING (FileId) " "JOIN Job ON (BaseJobId = Job.JobId) " "WHERE BaseFiles.JobId IN (%s) " /* Use Max(JobTDate) to find */ ") AS tmp " "GROUP BY PathId, Filename, DeltaSeq " /* the latest file version */ ") AS T1" " WHERE (Job.JobId IN ( " /* Security, we force JobId to be valid */ "SELECT DISTINCT BaseJobId FROM BaseFiles WHERE JobId IN (%s))" " OR Job.JobId IN (%s))" " AND T1.JobTDate = Job.JobTDate" /* Join on JobTDate to get the orginal */ " AND Job.JobId = File.JobId" /* Job/File record */ " AND T1.PathId = File.PathId" " AND T1.Filename = File.Filename"; const char *select_recent_version_with_basejob_and_delta[] = { /* MySQL */ select_recent_version_with_basejob_and_delta_default, /* Postgresql -- The DISTINCT ON () permits to avoid extra join */ "SELECT DISTINCT ON (Filename, PathId, DeltaSeq) JobTDate, JobId, FileId, " "FileIndex, PathId, Filename, LStat, MD5, DeltaSeq " "FROM " "(SELECT FileId, JobId, PathId, Filename, FileIndex, LStat, MD5,DeltaSeq " "FROM File WHERE JobId IN (%s) " "UNION ALL " "SELECT File.FileId, File.JobId, PathId, Filename, " "File.FileIndex, LStat, MD5, DeltaSeq " "FROM BaseFiles JOIN File USING (FileId) " "WHERE BaseFiles.JobId IN (%s) " ") AS T JOIN Job USING (JobId) " "ORDER BY Filename, PathId, DeltaSeq, JobTDate DESC ", /* SQLite */ select_recent_version_with_basejob_and_delta_default }; /* Get the list of the last recent version with a given BaseJob jobid list * We don't handle Delta with BaseJobs, they have only Full files */ static const char *select_recent_version_default = "SELECT j1.JobId AS JobId, f1.FileId AS FileId, f1.FileIndex AS FileIndex, " "f1.PathId AS PathId, f1.Filename AS Filename, " "f1.LStat AS LStat, f1.MD5 AS MD5, j1.JobTDate " "FROM ( " /* Choose the last version for each Path/Filename */ "SELECT max(JobTDate) AS JobTDate, PathId, Filename " "FROM File JOIN Job USING (JobId) " "WHERE File.JobId IN (%s) " "GROUP BY PathId, Filename " ") AS t1, Job AS j1, File AS f1 " "WHERE t1.JobTDate = j1.JobTDate " "AND j1.JobId IN (%s) " "AND t1.Filename = f1.Filename " "AND t1.PathId = f1.PathId " "AND j1.JobId = f1.JobId"; const char *select_recent_version[] = { /* MySQL */ select_recent_version_default, /* PostgreSQL */ "SELECT DISTINCT ON (Filename, PathId) JobTDate, JobId, FileId, " "FileIndex, PathId, Filename, LStat, MD5 " "FROM File JOIN Job USING (JobId) " "WHERE JobId IN (%s) " "ORDER BY Filename, PathId, JobTDate DESC ", /* SQLite */ select_recent_version_default }; /* We don't create this table as TEMPORARY because MySQL MyISAM 5.0 and 5.1 are unable to run further queries in this mode */ static const char *create_temp_accurate_jobids_default = "CREATE TABLE btemp3%s AS " "SELECT JobId, StartTime, EndTime, JobTDate, PurgedFiles, FileSetId " "FROM Job JOIN FileSet USING (FileSetId) " "WHERE ClientId = %s " "AND Level='F' AND JobStatus IN ('T','W') AND Type='B' " "AND 
StartTime<'%s' " "AND FileSet.FileSet=(SELECT FileSet FROM FileSet WHERE FileSetId = %s) " " %s " /* Any filter */ "ORDER BY Job.JobTDate DESC LIMIT 1"; static const char *create_temp_accurate_jobids_mysql = "CREATE TABLE btemp3%s /*PKEY (DummyPkey INTEGER AUTO_INCREMENT PRIMARY KEY)*/ AS (" "SELECT JobId, StartTime, EndTime, JobTDate, PurgedFiles, FileSetId " "FROM Job JOIN FileSet USING (FileSetId) " "WHERE ClientId = %s " "AND Level='F' AND JobStatus IN ('T','W') AND Type='B' " "AND StartTime<'%s' " "AND FileSet.FileSet=(SELECT FileSet FROM FileSet WHERE FileSetId = %s) " " %s " /* Any filter */ "ORDER BY Job.JobTDate DESC LIMIT 1)"; const char *create_temp_accurate_jobids[] = { /* MySQL */ create_temp_accurate_jobids_mysql, /* PostgreSQL */ create_temp_accurate_jobids_default, /* SQLite */ create_temp_accurate_jobids_default }; const char *create_temp_basefile[] = { /* MySQL */ "CREATE TEMPORARY TABLE basefile%lld" "(Path BLOB NOT NULL, Name BLOB NOT NULL, " "INDEX (Path(255), Name(255))" "/*PKEY, DummyPkey INTEGER AUTO_INCREMENT PRIMARY KEY*/)", /* PostgreSQL */ "CREATE TEMPORARY TABLE basefile%lld" "(Path TEXT, Name TEXT)", /* SQLite */ "CREATE TEMPORARY TABLE basefile%lld" "(Path TEXT, Name TEXT)" }; const char *create_temp_new_basefile[] = { /* MySQL */ "CREATE TEMPORARY TABLE new_basefile%lld /*PKEY(DummyPkey INTEGER AUTO_INCREMENT PRIMARY KEY)*/ AS " "SELECT Path.Path AS Path, Temp.Filename AS Name, Temp.FileIndex AS FileIndex," " Temp.JobId AS JobId, Temp.LStat AS LStat, Temp.FileId AS FileId," " Temp.MD5 AS MD5" " FROM (%s) AS Temp" " JOIN Path ON (Path.PathId = Temp.PathId)" " WHERE Temp.FileIndex > 0", /* PostgreSQL */ "CREATE TEMPORARY TABLE new_basefile%lld AS " "SELECT Path.Path AS Path, Temp.Filename AS Name, Temp.FileIndex AS FileIndex," " Temp.JobId AS JobId, Temp.LStat AS LStat, Temp.FileId AS FileId," " Temp.MD5 AS MD5" " FROM ( %s ) AS Temp" " JOIN Path ON (Path.PathId = Temp.PathId)" " WHERE Temp.FileIndex > 0", /* SQLite */ "CREATE TEMPORARY TABLE new_basefile%lld AS " "SELECT Path.Path AS Path, Temp.Filename AS Name, Temp.FileIndex AS FileIndex," " Temp.JobId AS JobId, Temp.LStat AS LStat, Temp.FileId AS FileId," " Temp.MD5 AS MD5" " FROM ( %s ) AS Temp" " JOIN Path ON (Path.PathId = Temp.PathId)" " WHERE Temp.FileIndex > 0" }; /* ====== ua_prune.c ====== */ /* List of SQL commands to create temp table and indicies */ const char *create_deltabs[] = { /* MySQL */ "CREATE TEMPORARY TABLE DelCandidates (" "JobId INTEGER UNSIGNED NOT NULL, " "PurgedFiles TINYINT, " "FileSetId INTEGER UNSIGNED, " "JobFiles INTEGER UNSIGNED, " "JobStatus BINARY(1)" "/*PKEY, DummyPkey INTEGER AUTO_INCREMENT PRIMARY KEY*/)", /* PostgreSQL */ "CREATE TEMPORARY TABLE DelCandidates ( " "JobId INTEGER NOT NULL, " "PurgedFiles SMALLINT, " "FileSetId INTEGER, " "JobFiles INTEGER, " "JobStatus char(1))", /* SQLite */ "CREATE TEMPORARY TABLE DelCandidates (" "JobId INTEGER UNSIGNED NOT NULL, " "PurgedFiles TINYINT, " "FileSetId INTEGER UNSIGNED, " "JobFiles INTEGER UNSIGNED, " "JobStatus CHAR)" }; /* ======= ua_purge.c ====== */ /* Select the first available Copy Job that must be upgraded * to a Backup job when the original backup job is expired. 
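 * (MIN(JobId) with GROUP BY PriorJobId below keeps, for each original job in
 *  the selection, the copy with the smallest JobId, i.e. its oldest copy.)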
*/ static const char *uap_upgrade_copies_oldest_job_default = "CREATE TEMPORARY TABLE cpy_tmp /*PKEY (DummyPkey INTEGER PRIMARY KEY DEFAULT 1)*/ AS " "SELECT MIN(JobId) AS JobId FROM Job " /* Choose the oldest job */ "WHERE Type='%c' " /* JT_JOB_COPY */ "AND ( PriorJobId IN (%s) " /* JobId selection */ "OR " " PriorJobId IN ( " "SELECT PriorJobId " "FROM Job " "WHERE JobId IN (%s) " /* JobId selection */ " AND Type='B' AND JobStatus IN ('T', 'W') " ") " ") " "GROUP BY PriorJobId "; /* one result per copy */ const char *uap_upgrade_copies_oldest_job[] = { /* MySQL */ uap_upgrade_copies_oldest_job_default, /* PostgreSQL */ uap_upgrade_copies_oldest_job_default, /* SQLite */ uap_upgrade_copies_oldest_job_default }; /* ======= ua_restore.c ====== */ const char *uar_create_temp[] = { /* MySQL */ "CREATE TEMPORARY TABLE temp (" "JobId INTEGER UNSIGNED NOT NULL," "JobTDate BIGINT UNSIGNED," "ClientId INTEGER UNSIGNED," "Level CHAR," "JobFiles INTEGER UNSIGNED," "JobBytes BIGINT UNSIGNED," "StartTime TEXT," "VolumeName TEXT," "StartFile INTEGER UNSIGNED," "VolSessionId INTEGER UNSIGNED," "VolSessionTime INTEGER UNSIGNED" "/*PKEY, DummyPKey INTEGER AUTO_INCREMENT PRIMARY KEY*/)", /* PostgreSQL */ "CREATE TEMPORARY TABLE temp (" "JobId INTEGER NOT NULL," "JobTDate BIGINT," "ClientId INTEGER," "Level CHAR," "JobFiles INTEGER," "JobBytes BIGINT," "StartTime TEXT," "VolumeName TEXT," "StartFile INTEGER," "VolSessionId INTEGER," "VolSessionTime INTEGER)", /* SQLite */ "CREATE TEMPORARY TABLE temp (" "JobId INTEGER UNSIGNED NOT NULL," "JobTDate BIGINT UNSIGNED," "ClientId INTEGER UNSIGNED," "Level CHAR," "JobFiles INTEGER UNSIGNED," "JobBytes BIGINT UNSIGNED," "StartTime TEXT," "VolumeName TEXT," "StartFile INTEGER UNSIGNED," "VolSessionId INTEGER UNSIGNED," "VolSessionTime INTEGER UNSIGNED)" }; const char *uar_create_temp1[] = { /* MySQL */ "CREATE TEMPORARY TABLE temp1 (" "JobId INTEGER UNSIGNED NOT NULL," "JobTDate BIGINT UNSIGNED" "/*PKEY, DummyPKey INTEGER AUTO_INCREMENT PRIMARY KEY*/)", /* PostgreSQL */ "CREATE TEMPORARY TABLE temp1 (" "JobId INTEGER NOT NULL," "JobTDate BIGINT)", /* SQLite */ "CREATE TEMPORARY TABLE temp1 (" "JobId INTEGER UNSIGNED NOT NULL," "JobTDate BIGINT UNSIGNED)" }; /* Query to get all files in a directory no recursing * Note, for PostgreSQL since it respects the "Single Value * rule", the results of the SELECT will be unoptimized. * I.e. the same file will be restored multiple times, once * for each time it was backed up. 
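 * (The MySQL and SQLite variants below collapse those duplicates with
 *  GROUP BY File.FileIndex; the PostgreSQL variant only sorts by StartTime.)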
*/ const char *uar_jobid_fileindex_from_dir[] = { /* MySQL */ "SELECT Job.JobId,File.FileIndex FROM Job JOIN File USING (JobId) JOIN Path USING (PathId) JOIN Client USING (ClientId) " "WHERE Job.JobId IN (%s) " "AND Path.Path='%s' " "AND Client.Name='%s' " " %s " "GROUP BY File.FileIndex ORDER BY Job.StartTime", /* PostgreSQL */ "SELECT Job.JobId,File.FileIndex FROM Job JOIN File USING (JobId) JOIN Path USING (PathId) JOIN Client USING (ClientId) " "WHERE Job.JobId IN (%s) " "AND Path.Path='%s' " "AND Client.Name='%s' " " %s " "ORDER BY Job.StartTime ", /* SQLite */ "SELECT Job.JobId,File.FileIndex FROM Job JOIN File USING (JobId) JOIN Path USING (PathId) JOIN Client USING (ClientId) " "WHERE Job.JobId IN (%s) " "AND Path.Path='%s' " "AND Client.Name='%s' " " %s " "GROUP BY File.FileIndex ORDER BY Job.StartTime " }; const char *sql_media_order_most_recently_written[] = { /* MySQL */ "ORDER BY LastWritten IS NULL,LastWritten DESC, MediaId", /* PostgreSQL */ "ORDER BY LastWritten IS NULL,LastWritten DESC, MediaId", /* SQLite */ "ORDER BY LastWritten IS NULL,LastWritten DESC, MediaId" }; const char *sql_get_max_connections[] = { /* MySQL */ "SHOW VARIABLES LIKE 'max_connections'", /* PostgreSQL */ "SHOW max_connections", /* SQLite */ "SELECT 0" }; /* * The Group By can return strange numbers when having multiple * version of a file in the same dataset. */ const char *default_sql_bvfs_select = "CREATE TABLE %s AS " "SELECT File.JobId, File.FileIndex, File.FileId " "FROM Job, File, ( " "SELECT MAX(JobTDate) AS JobTDate, PathId, Filename " "FROM btemp%s GROUP BY PathId, Filename " ") AS T1 " "WHERE T1.JobTDate = Job.JobTDate " "AND Job.JobId = File.JobId " "AND T1.PathId = File.PathId " "AND T1.Filename = File.Filename " "AND File.FileIndex > 0 " "AND Job.JobId IN (SELECT DISTINCT JobId FROM btemp%s) "; const char *default_sql_bvfs_select_mysql = "CREATE TABLE %s /*PKEY (DummyPkey INTEGER AUTO_INCREMENT PRIMARY KEY)*/ AS " "SELECT File.JobId, File.FileIndex, File.FileId " "FROM Job, File, ( " "SELECT MAX(JobTDate) AS JobTDate, PathId, Filename " "FROM btemp%s GROUP BY PathId, Filename " ") AS T1 " "WHERE T1.JobTDate = Job.JobTDate " "AND Job.JobId = File.JobId " "AND T1.PathId = File.PathId " "AND T1.Filename = File.Filename " "AND File.FileIndex > 0 " "AND Job.JobId IN (SELECT DISTINCT JobId FROM btemp%s) "; const char *sql_bvfs_select[] = { /* MySQL */ default_sql_bvfs_select_mysql, /* PostgreSQL */ "CREATE TABLE %s AS ( " "SELECT JobId, FileIndex, FileId " "FROM ( " "SELECT DISTINCT ON (PathId, Filename) " "JobId, FileIndex, FileId " "FROM btemp%s " "ORDER BY PathId, Filename, JobTDate DESC " ") AS T " "WHERE FileIndex > 0)", /* SQLite */ default_sql_bvfs_select }; static const char *sql_bvfs_list_all_files_default = "SELECT 'F', File.PathId, Path.Path || File.Filename, " "File.JobId, File.LStat, File.FileId " "FROM Job JOIN File USING (JobId) JOIN Path USING (PathId) " "WHERE File.Filename != '' " "AND File.FileIndex > 0 " " %s " /* AND Name LIKE '' */ "AND Job.JobId IN ( %s) ORDER BY File.FileId " // Needed to support LIMIT/OFFSET "LIMIT %lld OFFSET %lld"; const char *sql_bvfs_list_all_files[] = { /* MySQL */ "SELECT 'F', File.PathId, CONCAT(Path.Path, File.Filename), " "File.JobId, File.LStat, File.FileId " "FROM Job JOIN File USING (JobId) JOIN Path USING (PathId) " "WHERE File.Filename != '' " "AND File.FileIndex > 0 " " %s " /* AND Name LIKE '' */ "AND Job.JobId IN (%s) ORDER BY File.FileId " // Needed to support LIMIT/OFFSET "LIMIT %lld OFFSET %lld", /* PostgreSQL */ 
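   /* Both this entry and the SQLite one reuse sql_bvfs_list_all_files_default:
    * it concatenates with the standard || operator, which MySQL treats as a
    * logical OR by default, hence the CONCAT() form in the MySQL entry above. */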
sql_bvfs_list_all_files_default, /* SQLite */ sql_bvfs_list_all_files_default }; static const char *sql_bvfs_list_files_default = "SELECT 'F', T.PathId, T.Filename, " "File.JobId, File.LStat, File.FileId " "FROM Job, File, ( " "SELECT MAX(JobTDate) AS JobTDate, MAX(DeltaSeq) AS DeltaSeq, PathId, Filename " "FROM ( " "SELECT JobTDate, PathId, Filename, DeltaSeq " "FROM File JOIN Job USING (JobId) " "WHERE File.JobId IN (%s) AND PathId = %s " "UNION ALL " "SELECT JobTDate, PathId, Filename, DeltaSeq " "FROM BaseFiles " "JOIN File USING (FileId) " "JOIN Job ON (BaseJobId = Job.JobId) " "WHERE BaseFiles.JobId IN (%s) AND PathId = %s " ") AS tmp GROUP BY PathId, Filename " ") AS T " "WHERE T.JobTDate = Job.JobTDate " "AND T.DeltaSeq = File.DeltaSeq " "AND Job.JobId = File.JobId " "AND T.PathId = File.PathId " "AND T.Filename = File.Filename " "AND File.Filename != '' " "AND File.FileIndex > 0 " " %s " /* AND Name LIKE '' */ "AND (Job.JobId IN ( " "SELECT DISTINCT BaseJobId FROM BaseFiles WHERE JobId IN (%s)) " "OR Job.JobId IN (%s)) " "LIMIT %lld OFFSET %lld"; const char *sql_bvfs_list_files[] = { /* MySQL */ sql_bvfs_list_files_default, /* PostgreSQL */ "SELECT Type, PathId, Filename, JobId, LStat, A.FileId " "FROM (" "SELECT DISTINCT ON (Filename) 'F' as Type, PathId, FileId, " "T.Filename, JobId, LStat, FileIndex " "FROM " "(SELECT FileId, JobId, PathId, Filename, FileIndex, LStat, MD5, DeltaSeq " "FROM File WHERE JobId IN (%s) AND PathId = %s " "UNION ALL " "SELECT File.FileId, File.JobId, PathId, Filename, " "File.FileIndex, LStat, MD5, DeltaSeq " "FROM BaseFiles JOIN File USING (FileId) " "WHERE BaseFiles.JobId IN (%s) AND File.PathId = %s " ") AS T JOIN Job USING (JobId) " " WHERE T.Filename != '' " " %s " /* AND Filename LIKE '' */ "ORDER BY Filename, DeltaSeq DESC, StartTime DESC " ") AS A WHERE A.FileIndex > 0 " "LIMIT %lld OFFSET %lld ", /* SQLite */ sql_bvfs_list_files_default }; /* Basically the same thing than select_recent_version_with_basejob_and_delta_default, * but we specify a single file with Filename/PathId * * Input: * 1 JobId to look at * 2 Filename * 3 PathId * 4 JobId to look at * 5 Filename * 6 PathId * 7 Jobid * 8 JobId */ const char *bvfs_select_delta_version_with_basejob_and_delta_default = "SELECT FileId, Job.JobId AS JobId, FileIndex, File.PathId AS PathId, " "File.Filename AS Filename, LStat, MD5, File.DeltaSeq AS DeltaSeq, " "Job.JobTDate AS JobTDate " "FROM Job, File, ( " "SELECT MAX(JobTDate) AS JobTDate, PathId, Filename, DeltaSeq " "FROM ( " "SELECT JobTDate, PathId, Filename, DeltaSeq " /*Get all normal files*/ "FROM File JOIN Job USING (JobId) " /* from selected backup */ "WHERE File.JobId IN (%s) AND Filename = '%s' AND PathId = %s " "UNION ALL " "SELECT JobTDate, PathId, Filename, DeltaSeq " /*Get all files from */ "FROM BaseFiles " /* BaseJob */ "JOIN File USING (FileId) " "JOIN Job ON (BaseJobId = Job.JobId) " "WHERE BaseFiles.JobId IN (%s) " /* Use Max(JobTDate) to find */ " AND Filename = '%s' AND PathId = %s " ") AS tmp " "GROUP BY PathId, Filename, DeltaSeq " /* the latest file version */ ") AS T1 " "WHERE (Job.JobId IN ( " /* Security, we force JobId to be valid */ "SELECT DISTINCT BaseJobId FROM BaseFiles WHERE JobId IN (%s)) " "OR Job.JobId IN (%s)) " "AND T1.JobTDate = Job.JobTDate " /* Join on JobTDate to get the orginal */ "AND Job.JobId = File.JobId " /* Job/File record */ "AND T1.PathId = File.PathId " "AND T1.Filename = File.Filename"; const char *bvfs_select_delta_version_with_basejob_and_delta[] = { /* MySQL */ 
bvfs_select_delta_version_with_basejob_and_delta_default, /* PostgreSQL */ /* The DISTINCT ON () permits to avoid extra join */ "SELECT DISTINCT ON (Filename, PathId, DeltaSeq) JobTDate, JobId, FileId, " "FileIndex, PathId, Filename, LStat, MD5, DeltaSeq " "FROM " "(SELECT FileId, JobId, PathId, Filename, FileIndex, LStat, MD5,DeltaSeq " "FROM File WHERE JobId IN (%s) AND Filename = '%s' AND PathId = %s " "UNION ALL " "SELECT File.FileId, File.JobId, PathId, Filename, " "File.FileIndex, LStat, MD5, DeltaSeq " "FROM BaseFiles JOIN File USING (FileId) " "WHERE BaseFiles.JobId IN (%s) AND Filename = '%s' AND PathId = %s " ") AS T JOIN Job USING (JobId) " "ORDER BY Filename, PathId, DeltaSeq, JobTDate DESC ", /* SQLite */ bvfs_select_delta_version_with_basejob_and_delta_default }; const char *batch_lock_path_query[] = { /* MySQL */ "LOCK TABLES Path write,batch write,Path as p write", /* PostgreSQL */ "BEGIN; LOCK TABLE Path IN SHARE ROW EXCLUSIVE MODE ", /* SQLite */ "BEGIN " }; const char *batch_unlock_tables_query[] = { /* MySQL */ "UNLOCK TABLES", /* PostgreSQL */ "COMMIT", /* SQLite */ "COMMIT" }; const char *batch_fill_path_query[] = { /* MySQL */ "INSERT INTO Path (Path)" "SELECT a.Path FROM " "(SELECT DISTINCT Path FROM batch) AS a WHERE NOT EXISTS " "(SELECT Path FROM Path AS p WHERE p.Path = a.Path)", /* PostgreSQL */ "INSERT INTO Path (Path)" "SELECT a.Path FROM " "(SELECT DISTINCT Path FROM batch) AS a " "WHERE NOT EXISTS (SELECT Path FROM Path WHERE Path = a.Path) ", /* SQLite */ "INSERT INTO Path (Path)" "SELECT DISTINCT Path FROM batch " "EXCEPT SELECT Path FROM Path" }; const char *match_query[] = { /* MySQL */ "REGEXP", /* PostgreSQL */ "~", /* SQLite */ "LIKE" /* MATCH doesn't seems to work anymore... */ }; const char *sql_like[] = { /* MySQL */ "LIKE", /* PostgreSQL */ "ILIKE", /* SQLite */ "LIKE" }; static const char *insert_counter_values_default = "INSERT INTO Counters (Counter, MinValue, " "MaxValue, CurrentValue, WrapCounter) " "VALUES ('%s','%d','%d','%d','%s')"; const char *insert_counter_values[] = { /* MySQL */ "INSERT INTO Counters (Counter, Counters.MinValue, " "Counters.MaxValue, CurrentValue, WrapCounter) " "VALUES ('%s','%d','%d','%d','%s')", /* PostgreSQL */ insert_counter_values_default, /* SQLite */ insert_counter_values_default }; static const char *select_counter_values_default = "SELECT MinValue, MaxValue, CurrentValue, WrapCounter" " FROM Counters WHERE Counter='%s'"; const char *select_counter_values[] = { /* MySQL */ "SELECT Counters.MinValue, Counters.MaxValue, CurrentValue, WrapCounter" " FROM Counters WHERE Counter='%s'", /* PostgreSQL */ select_counter_values_default, /* SQLite */ select_counter_values_default }; static const char *update_counter_values_default = "UPDATE Counters SET MinValue=%d, MaxValue=%d, CurrentValue=%d," "WrapCounter='%s' WHERE Counter='%s'"; const char *update_counter_values[] = { /* MySQL */ "UPDATE Counters SET Counters.MinValue=%d, Counters.MaxValue=%d," "CurrentValue=%d, WrapCounter='%s' WHERE Counter='%s'", /* PostgreSQL */ update_counter_values_default, /* SQLite */ update_counter_values_default }; const char *sql_now[] = { /* MySQL */ " NOW() ", /* PostgreSQL */ " NOW() ", /* SQLite */ " strftime('%s', datetime('now', 'localtime')) " }; const char *prune_cache[] = { /* MySQL */ " (Media.LastWritten + Media.CacheRetention) < NOW() ", /* PostgreSQL */ " (Media.LastWritten + (interval '1 second' * Media.CacheRetention)) < NOW() ", /* SQLite */ " ( (strftime('%s', Media.LastWritten) + Media.CacheRetention - strftime('%s', 
datetime('now', 'localtime'))) < 0) " }; const char *expired_volumes[] = { /* MySQL */ "SELECT Media.VolumeName AS volumename," "Media.LastWritten AS lastwritten" " FROM Media" " WHERE VolStatus IN ('Full', 'Used')" " AND Media.VolRetention > 0 " " AND ( Media.LastWritten + Media.VolRetention ) < NOW()" " %s ", /* PostgreSQL */ "SELECT Media.VolumeName, Media.LastWritten " " FROM Media " " WHERE VolStatus IN ('Full', 'Used') " " AND Media.VolRetention > 0 " " AND ( Media.LastWritten + (interval '1 second' * Media.VolRetention ) < NOW()) " " %s ", /* SQLite */ "SELECT Media.VolumeName AS volumename," "Media.LastWritten AS lastwritten" " FROM Media" " WHERE VolStatus IN ('Full', 'Used')" " AND Media.VolRetention > 0 " " AND ((strftime('%%s', LastWritten) + Media.VolRetention - strftime('%%s', datetime('now', 'localtime'))) < 0) " " %s " }; const char *to_unix_timestamp[] = { /* MySQL */ "UNIX_TIMESTAMP(%s)", /* PostgreSQL */ "EXTRACT(EPOCH FROM %s)", /* SQLite */ "strftime('%%s', %s)" }; const char *expires_in[] = { /* MySQL */ "(GREATEST(0, CAST(UNIX_TIMESTAMP(LastWritten) + Media.VolRetention AS SIGNED) - UNIX_TIMESTAMP(NOW())))", /* PostgreSQL */ "GREATEST(0, (extract('epoch' from LastWritten + Media.VolRetention * interval '1second' - NOW())::bigint))", /* SQLite */ "MAX(0, (strftime('%s', LastWritten) + Media.VolRetention - strftime('%s', datetime('now', 'localtime'))))" }; const char *strip_restore[] = { /* MySQL */ "DELETE FROM %s WHERE FileId IN (SELECT * FROM (SELECT FileId FROM %s as B JOIN File USING (FileId) WHERE PathId IN (%s)) AS C)", /* PostgreSQL */ "DELETE FROM %s WHERE FileId IN (SELECT FileId FROM %s JOIN File USING (FileId) WHERE PathId IN (%s))", /* SQLite */ "DELETE FROM %s WHERE FileId IN (SELECT FileId FROM %s JOIN File USING (FileId) WHERE PathId IN (%s))" }; const char *poolbytes[] = { /* MySQL */ "COALESCE((SELECT SUM(VolBytes+VolABytes) FROM Media WHERE Media.PoolId=Pool.PoolId), 0)", /* PostgreSQL */ "COALESCE((SELECT SUM(VolBytes+VolABytes) FROM Media WHERE Media.PoolId=Pool.PoolId), 0)::bigint", /* SQLite */ "COALESCE((SELECT SUM(VolBytes+VolABytes) FROM Media WHERE Media.PoolId=Pool.PoolId), 0)" }; const char *count_all_jobs = "SELECT COUNT(1) FROM Job"; const char *count_success_jobs = "SELECT COUNT(1) FROM Job WHERE JobStatus IN ('T', 'I') AND JobErrors=0"; const char *count_success_jobids = "SELECT COUNT(1) FROM Job WHERE JobStatus IN ('T', 'I') AND JobErrors=0 and JobId in (%s)"; const char *count_error_jobs = "SELECT COUNT(1) FROM Job WHERE JobStatus IN ('E','f','A')"; const char *count_error_jobids = "SELECT COUNT(1) FROM Job WHERE JobStatus IN ('E','f','A') and JobId in (%s)"; const char *count_warning_jobs = "SELECT COUNT(1) FROM Job WHERE JobStatus IN ('T','I') AND NOT JobErrors=0"; const char *count_warning_jobids = "SELECT COUNT(1) FROM Job WHERE JobStatus IN ('T','I') AND NOT JobErrors=0 and JobId in (%s)"; const char *sum_jobs_bytes = "SELECT SUM(JobBytes) FROM Job"; const char *sum_jobids_bytes = "SELECT SUM(JobBytes) FROM Job WHERE JobId in (%s)"; const char *sum_jobs_files = "SELECT SUM(JobFiles) FROM Job"; const char *sum_jobids_files = "SELECT SUM(JobFiles) FROM Job WHERE JobId in (%s)"; const char *count_all_volumes = "SELECT COUNT(1) FROM Media"; const char *count_avl_volumes = "SELECT COUNT(1) FROM Media WHERE VolStatus='Append'"; const char *count_error_volumes = "SELECT COUNT(1) FROM Media WHERE VolStatus='Error'"; const char *count_full_volumes = "SELECT COUNT(1) FROM Media WHERE VolStatus='Full'"; const char *count_used_volumes = 
"SELECT COUNT(1) FROM Media WHERE VolStatus='Used'"; const char *sum_volumes_bytes = "SELECT SUM(VolBytes) FROM Media"; const char *get_volume_size = "SELECT VolBytes FROM Media WHERE VolumeName='%s'"; static const char *escape_char_value_default = "\\"; const char *escape_char_value[] = { /* MySQL */ "\\\\", /* PostgreSQL */ escape_char_value_default, /* SQLite */ escape_char_value_default }; static const char *regexp_value_default = "REGEXP"; const char *regexp_value[] = { /* MySQL */ regexp_value_default, /* PostgreSQL */ "~", /* SQLite */ regexp_value_default }; /* Here we choose the last Object for a list of Job, we check if the object points to a place * that still exists. We take the last version of them */ /* In the MySQL version, we cannot use DISTINCT ON(), so, when we have our MAX(JobTDate) (last record), we * need to find back the other values by joining back to Object. Of course, if we have multiple objects * with the same kind of attributes for the same job, it can cause a problem. */ const char *copy_object_default = "INSERT INTO Object (JobId, Path, Filename, PluginName, ObjectCategory, ObjectType, ObjectName, ObjectSource, ObjectUUID, " " ObjectSize, ObjectStatus, ObjectCount) " // If we rename a VM for example, the Path and the Filename will stay, but not the Name or maybe the PluginName "SELECT %lu, A.Path, A.Filename, Object.PluginName, A.ObjectCategory, A.ObjectType, Object.ObjectName, Object.ObjectSource, Object.ObjectUUID, Object.ObjectSize, Object.ObjectStatus, Object.ObjectCount " "FROM (SELECT O.Path, O.Filename, O.ObjectCategory, O.ObjectType, MAX(J.JobTDate) AS JobTDate " "FROM Object AS O JOIN Path USING (Path) JOIN Job AS J USING (JobId) WHERE J.JobId IN (%s) AND O.Path = Path.Path AND " "EXISTS (SELECT 1 FROM File WHERE O.JobId = File.JobId AND Path.PathId = File.PathId AND (O.Filename = File.Filename OR O.Filename = '')) " "GROUP BY O.Path, O.Filename, O.ObjectCategory, O.ObjectType) AS A " "JOIN Job USING (JobTDate) JOIN Object ON (Job.JobId = Object.JobId AND Object.Path = A.Path AND Object.Filename = A.Filename AND A.ObjectCategory = Object.ObjectCategory AND A.ObjectType = Object.ObjectType) WHERE Job.JobId IN (%s)"; const char *copy_object[] = { /* MySQL */ copy_object_default, /* PostgreSQL */ "INSERT INTO Object (JobId, Path, Filename, PluginName, ObjectCategory, ObjectType, ObjectName, ObjectSource, ObjectUUID, " " ObjectSize, ObjectStatus, ObjectCount) " // If we rename a VM for example, the Path and the Filename will stay, but not the Name or maybe the PluginName "SELECT DISTINCT ON (Object.Path, Object.Filename, ObjectCategory, ObjectType) " " %lu, Object.Path, Object.Filename, PluginName, ObjectCategory, ObjectType, ObjectName, ObjectSource, ObjectUUID, " " ObjectSize, ObjectStatus, ObjectCount " "FROM Object JOIN Path USING (Path) JOIN Job USING (JobId) " "WHERE Job.JobId IN (%s) AND Object.Path = Path.Path " // In some plugins, the filename is empty but the associated record in the DB doesn't exist "AND EXISTS (SELECT 1 FROM File WHERE Object.JobId = File.JobId AND Path.PathId = File.PathId AND (Object.Filename = File.Filename OR Object.Filename = '')) " "ORDER BY Object.Path, Object.Filename, ObjectCategory, ObjectType, JobTDate DESC", /* SQLite */ copy_object_default }; bacula-15.0.3/src/cats/create_sqlite3_database.in0000644000175000017500000000043314771010173021466 0ustar bsbuildbsbuild#!/bin/sh # # Copyright (C) 2000-2023 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # # shell script to create Bacula SQLite tables 
bindir=@SQLITE_BINDIR@ PATH=$bindir:$PATH cd @working_dir@ db_name=@db_name@ sqlite3 $* ${db_name}.db <inc_use_count(); db = mdb; /* need to inc ref count */ jobids = get_pool_memory(PM_NAME); prev_dir = get_pool_memory(PM_NAME); pattern = get_pool_memory(PM_NAME); filename = get_pool_memory(PM_NAME); tmp = get_pool_memory(PM_NAME); escaped_list = get_pool_memory(PM_NAME); *filename = *jobids = *prev_dir = *pattern = 0; pwd_id = offset = 0; see_copies = see_all_versions = false; compute_delta = true; limit = 1000; attr = new_attr(jcr); list_entries = result_handler; user_data = this; username = NULL; job_acl = client_acl = restoreclient_acl = NULL; pool_acl = fileset_acl = NULL; last_dir_acl = NULL; uid_acl = NULL; gid_acl = NULL; dir_acl = NULL; use_acl = false; } Bvfs::~Bvfs() { free_pool_memory(jobids); free_pool_memory(pattern); free_pool_memory(prev_dir); free_pool_memory(filename); free_pool_memory(tmp); free_pool_memory(escaped_list); if (username) { free(username); } free_attr(attr); jcr->dec_use_count(); if (dir_acl) { delete dir_acl; } if (uid_acl) { delete uid_acl; } if (gid_acl) { delete gid_acl; } if (client_acl) { delete client_acl; } } char *Bvfs::escape_list(alist *lst) { char *elt; int len; /* List is empty, reject everything */ if (!lst || lst->size() == 0) { Mmsg(escaped_list, "''"); return escaped_list; } *tmp = 0; *escaped_list = 0; foreach_alist(elt, lst) { if (elt && *elt) { len = strlen(elt); /* Escape + ' ' */ tmp = check_pool_memory_size(tmp, 2 * len + 2 + 2); tmp[0] = '\''; db->bdb_escape_string(jcr, tmp + 1 , elt, len); pm_strcat(tmp, "'"); if (*escaped_list) { pm_strcat(escaped_list, ","); } pm_strcat(escaped_list, tmp); } } return escaped_list; } /* Returns the number of jobids in the result */ int Bvfs::filter_jobid() { POOL_MEM query; POOL_MEM sub_join; POOLMEM *sub_where; /* No ACL, no username, no check */ if (!job_acl && !fileset_acl && !client_acl && !restoreclient_acl && !pool_acl && !username) { Dmsg0(dbglevel_sql, "No ACL\n"); /* Just count the number of items in the list */ int nb = (*jobids != 0) ? 
1 : 0; for (char *p=jobids; *p ; p++) { if (*p == ',') { nb++; } } return nb; } sub_where = get_pool_memory(PM_FNAME); *sub_where = 0; if (job_acl) { pm_strcat(sub_where, " AND "); db->BDB::escape_acl_list(jcr, "Job.Name", &sub_where, job_acl); } if (fileset_acl) { pm_strcat(sub_where, " AND "); db->BDB::escape_acl_list(jcr, "FileSet.FileSet", &sub_where, fileset_acl); pm_strcat(sub_join, " JOIN FileSet USING (FileSetId) "); } if (client_acl) { pm_strcat(sub_where, " AND "); db->BDB::escape_acl_list(jcr, "Client.Name", &sub_where, client_acl); } if (pool_acl) { pm_strcat(sub_where, " AND "); db->BDB::escape_acl_list(jcr, "Pool.Name", &sub_where, pool_acl); pm_strcat(sub_join, " JOIN Pool USING (PoolId) "); } if (username) { /* Query used by Bweb to filter clients, activated when using * set_username() */ Mmsg(query, "SELECT DISTINCT JobId FROM Job JOIN Client USING (ClientId) %s " "JOIN (SELECT ClientId FROM client_group_member " "JOIN client_group USING (client_group_id) " "JOIN bweb_client_group_acl USING (client_group_id) " "JOIN bweb_user USING (userid) " "WHERE bweb_user.username = '%s' " ") AS filter USING (ClientId) " " WHERE JobId IN (%s) %s", sub_join.c_str(), username, jobids, sub_where); } else { Mmsg(query, "SELECT DISTINCT JobId FROM Job JOIN Client USING (ClientId) %s " " WHERE JobId IN (%s) %s", sub_join.c_str(), jobids, sub_where); } db_list_ctx ctx; Dmsg1(dbglevel_sql, "q=%s\n", query.c_str()); db->bdb_sql_query(query.c_str(), db_list_handler, &ctx); pm_strcpy(jobids, ctx.list); free_pool_memory(sub_where); return ctx.count; } /* Return the number of jobids after the filter */ int Bvfs::set_jobid(JobId_t id) { Mmsg(jobids, "%lld", (uint64_t)id); return filter_jobid(); } /* Return the number of jobids after the filter */ int Bvfs::set_jobids(char *ids) { pm_strcpy(jobids, ids); return filter_jobid(); } /* * TODO: Find a way to let the user choose how he wants to display * files and directories */ /* * Working Object to store PathId already seen (avoid * database queries), equivalent to %cache_ppathid in perl */ #define NITEMS 50000 class pathid_cache { private: hlink *nodes; int nb_node; int max_node; alist *table_node; htable *cache_ppathid; public: pathid_cache() { hlink link; cache_ppathid = (htable *)malloc(sizeof(htable)); cache_ppathid->init(&link, &link, NITEMS); max_node = NITEMS; nodes = (hlink *) malloc(max_node * sizeof (hlink)); nb_node = 0; table_node = New(alist(5, owned_by_alist)); table_node->append(nodes); }; hlink *get_hlink() { if (++nb_node >= max_node) { nb_node = 0; nodes = (hlink *)malloc(max_node * sizeof(hlink)); table_node->append(nodes); } return nodes + nb_node; }; bool lookup(char *pathid) { bool ret = cache_ppathid->lookup(pathid) != NULL; return ret; }; void insert(char *pathid) { hlink *h = get_hlink(); cache_ppathid->insert(pathid, h); }; ~pathid_cache() { cache_ppathid->destroy(); free(cache_ppathid); delete table_node; }; private: pathid_cache(const pathid_cache &); /* prohibit pass by value */ pathid_cache &operator= (const pathid_cache &);/* prohibit class assignment*/ } ; /* Return the parent_dir with the trailing / (update the given string) * TODO: see in the rest of bacula if we don't have already this function * dir=/tmp/toto/ * dir=/tmp/ * dir=/ * dir= */ char *bvfs_parent_dir(char *path) { char *p = path; int len = strlen(path) - 1; /* windows directory / */ if (len == 2 && B_ISALPHA(path[0]) && path[1] == ':' && path[2] == '/') { len = 0; path[0] = '\0'; } if (len >= 0 && path[len] == '/') { /* if directory, skip last / */ 
path[len] = '\0'; } if (len > 0) { p += len; while (p > path && !IsPathSeparator(*p)) { p--; } p[1] = '\0'; } return path; } /* Return the basename of the with the trailing / * TODO: see in the rest of bacula if we don't have * this function already */ char *bvfs_basename_dir(char *path) { char *p = path; int len = strlen(path) - 1; if (path[len] == '/') { /* if directory, skip last / */ len -= 1; } if (len > 0) { p += len; while (p > path && !IsPathSeparator(*p)) { p--; } if (*p == '/') { p++; /* skip first / */ } } return p; } static void build_path_hierarchy(JCR *jcr, BDB *mdb, pathid_cache &ppathid_cache, char *org_pathid, char *path) { Dmsg1(dbglevel, "build_path_hierarchy(%s)\n", path); char pathid[50]; ATTR_DBR parent; char *bkp = mdb->path; bstrncpy(pathid, org_pathid, sizeof(pathid)); /* Does the ppathid exist for this ? we use a memory cache... In order to * avoid the full loop, we consider that if a dir is allready in the * PathHierarchy table, then there is no need to calculate all the * hierarchy */ while (path && *path) { if (!ppathid_cache.lookup(pathid)) { Mmsg(mdb->cmd, "SELECT PPathId FROM PathHierarchy WHERE PathId = %s", pathid); if (!mdb->QueryDB(jcr, mdb->cmd)) { goto bail_out; /* Query failed, just leave */ } /* Do we have a result ? */ if (mdb->sql_num_rows() > 0) { ppathid_cache.insert(pathid); /* This dir was in the db ... * It means we can leave, the tree has allready been built for * this dir */ goto bail_out; } else { /* search or create parent PathId in Path table */ mdb->path = bvfs_parent_dir(path); mdb->pnl = strlen(mdb->path); if (!mdb->bdb_create_path_record(jcr, &parent)) { goto bail_out; } ppathid_cache.insert(pathid); Mmsg(mdb->cmd, "INSERT INTO PathHierarchy (PathId, PPathId) " "VALUES (%s,%lld)", pathid, (uint64_t) parent.PathId); if (!mdb->InsertDB(jcr, mdb->cmd)) { goto bail_out; /* Can't insert the record, just leave */ } edit_uint64(parent.PathId, pathid); path = mdb->path; /* already done */ } } else { /* It's already in the cache. 
We can leave, no time to waste here, * all the parent dirs have allready been done */ goto bail_out; } } bail_out: mdb->path = bkp; mdb->fnl = 0; } /* * Internal function to update path_hierarchy cache with a shared pathid cache * return Error 0 * OK 1 */ static int update_path_hierarchy_cache(JCR *jcr, BDB *mdb, pathid_cache &ppathid_cache, JobId_t JobId) { Dmsg0(dbglevel, "update_path_hierarchy_cache()\n"); uint32_t ret=0; uint32_t num; char jobid[50]; edit_uint64(JobId, jobid); mdb->bdb_lock(); /* We don't really want to harm users with spurious messages, * everything is handled by transaction */ mdb->set_use_fatal_jmsg(false); mdb->bdb_start_transaction(jcr); Mmsg(mdb->cmd, "SELECT 1 FROM Job WHERE JobId = %s AND HasCache=1", jobid); if (!mdb->QueryDB(jcr, mdb->cmd) || mdb->sql_num_rows() > 0) { Dmsg1(dbglevel, "already computed %d\n", (uint32_t)JobId ); ret = 1; goto bail_out; } /* Inserting path records for JobId */ Mmsg(mdb->cmd, "INSERT INTO PathVisibility (PathId, JobId) " "SELECT DISTINCT PathId, JobId " "FROM (SELECT PathId, JobId FROM File WHERE JobId = %s AND FileIndex > 0 " "UNION " "SELECT PathId, BaseFiles.JobId " "FROM BaseFiles JOIN File AS F USING (FileId) " "WHERE BaseFiles.JobId = %s) AS B", jobid, jobid); if (!mdb->QueryDB(jcr, mdb->cmd)) { Dmsg1(dbglevel, "Can't fill PathVisibility %d\n", (uint32_t)JobId ); goto bail_out; } /* Now we have to do the directory recursion stuff to determine missing * visibility We try to avoid recursion, to be as fast as possible We also * only work on not allready hierarchised directories... */ Mmsg(mdb->cmd, "SELECT PathVisibility.PathId, Path " "FROM PathVisibility " "JOIN Path ON( PathVisibility.PathId = Path.PathId) " "LEFT JOIN PathHierarchy " "ON (PathVisibility.PathId = PathHierarchy.PathId) " "WHERE PathVisibility.JobId = %s " "AND PathHierarchy.PathId IS NULL " "ORDER BY Path", jobid); Dmsg1(dbglevel_sql, "q=%s\n", mdb->cmd); if (!mdb->QueryDB(jcr, mdb->cmd)) { Dmsg1(dbglevel, "Can't get new Path %d\n", (uint32_t)JobId ); goto bail_out; } /* TODO: I need to reuse the DB connection without emptying the result * So, now i'm copying the result in memory to be able to query the * catalog descriptor again. 
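 * (build_path_hierarchy() below issues its own queries on the same mdb
 *  handle, which would overwrite the still-unfetched result set, hence
 *  the in-memory copy.)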
*/ num = mdb->sql_num_rows(); if (num > 0) { char **result = (char **)malloc (num * 2 * sizeof(char *)); SQL_ROW row; int i=0; while((row = mdb->sql_fetch_row())) { result[i++] = bstrdup(row[0]); result[i++] = bstrdup(row[1]); } i=0; while (num > 0) { build_path_hierarchy(jcr, mdb, ppathid_cache, result[i], result[i+1]); free(result[i++]); free(result[i++]); num--; } free(result); } if (mdb->bdb_get_type_index() == SQL_TYPE_SQLITE3) { Mmsg(mdb->cmd, "INSERT INTO PathVisibility (PathId, JobId) " "SELECT DISTINCT h.PPathId AS PathId, %s " "FROM PathHierarchy AS h " "WHERE h.PathId IN (SELECT PathId FROM PathVisibility WHERE JobId=%s) " "AND h.PPathId NOT IN (SELECT PathId FROM PathVisibility WHERE JobId=%s)", jobid, jobid, jobid ); } else if (mdb->bdb_get_type_index() == SQL_TYPE_MYSQL) { Mmsg(mdb->cmd, "INSERT INTO PathVisibility (PathId, JobId) " "SELECT a.PathId,%s " "FROM ( " "SELECT DISTINCT h.PPathId AS PathId " "FROM PathHierarchy AS h " "JOIN PathVisibility AS p ON (h.PathId=p.PathId) " "WHERE p.JobId=%s) AS a " "LEFT JOIN PathVisibility AS b ON (b.JobId=%s and a.PathId = b.PathId) " "WHERE b.PathId IS NULL", jobid, jobid, jobid); } else { Mmsg(mdb->cmd, "INSERT INTO PathVisibility (PathId, JobId) " "SELECT a.PathId,%s " "FROM ( " "SELECT DISTINCT h.PPathId AS PathId " "FROM PathHierarchy AS h " "JOIN PathVisibility AS p ON (h.PathId=p.PathId) " "WHERE p.JobId=%s) AS a LEFT JOIN " "(SELECT PathId " "FROM PathVisibility " "WHERE JobId=%s) AS b ON (a.PathId = b.PathId) " "WHERE b.PathId IS NULL", jobid, jobid, jobid); } do { ret = mdb->QueryDB(jcr, mdb->cmd); } while (ret && mdb->sql_affected_rows() > 0); Mmsg(mdb->cmd, "UPDATE Job SET HasCache=1 WHERE JobId=%s", jobid); ret = mdb->UpdateDB(jcr, mdb->cmd, false); bail_out: mdb->bdb_end_transaction(jcr); if (!ret) { Mmsg(mdb->cmd, "SELECT HasCache FROM Job WHERE JobId=%s", jobid); mdb->bdb_sql_query(mdb->cmd, db_int_handler, &ret); } /* Enable back the FATAL message if something is wrong */ mdb->set_use_fatal_jmsg(true); mdb->bdb_unlock(); return ret; } /* Compute the cache for the bfileview compoment */ void Bvfs::fv_update_cache() { int64_t pathid; int64_t size=0, count=0; Dmsg0(dbglevel, "fv_update_cache()\n"); if (!*jobids) { return; /* Nothing to build */ } db->bdb_lock(); /* We don't really want to harm users with spurious messages, * everything is handled by transaction */ db->set_use_fatal_jmsg(false); db->bdb_start_transaction(jcr); pathid = get_root(); fv_compute_size_and_count(pathid, &size, &count); db->bdb_end_transaction(jcr); /* Enable back the FATAL message if something is wrong */ db->set_use_fatal_jmsg(true); db->bdb_unlock(); } /* Not yet working */ void Bvfs::fv_get_big_files(int64_t pathid, int64_t min_size, int32_t limit) { Mmsg(db->cmd, "SELECT S.Filename AS filename, S.size " "FROM ( " "SELECT Filename, base64_decode_lstat(8,LStat) AS size " "FROM File " "WHERE PathId = %lld " "AND JobId = %s " ") AS S " "WHERE S.size > %lld " "ORDER BY S.size DESC " "LIMIT %d ", pathid, jobids, min_size, limit); } /* Get the current path size and files count */ void Bvfs::fv_get_current_size_and_count(int64_t pathid, int64_t *size, int64_t *count) { SQL_ROW row; *size = *count = 0; Mmsg(db->cmd, "SELECT Size AS size, Files AS files " " FROM PathVisibility " " WHERE PathId = %lld " " AND JobId = %s ", pathid, jobids); if (!db->QueryDB(jcr, db->cmd)) { return; } if ((row = db->sql_fetch_row())) { *size = str_to_int64(row[0]); *count = str_to_int64(row[1]); } } /* Compute for the current path the size and files count */ void 
Bvfs::fv_get_size_and_count(int64_t pathid, int64_t *size, int64_t *count) { SQL_ROW row; *size = *count = 0; Mmsg(db->cmd, "SELECT sum(base64_decode_lstat(8,LStat)) AS size, count(1) AS files " " FROM File " " WHERE PathId = %lld " " AND JobId = %s ", pathid, jobids); if (!db->QueryDB(jcr, db->cmd)) { return; } if ((row = db->sql_fetch_row())) { *size = str_to_int64(row[0]); *count = str_to_int64(row[1]); } } void Bvfs::fv_compute_size_and_count(int64_t pathid, int64_t *size, int64_t *count) { Dmsg1(dbglevel, "fv_compute_size_and_count(%lld)\n", pathid); fv_get_current_size_and_count(pathid, size, count); if (*size > 0) { return; } /* Update stats for the current directory */ fv_get_size_and_count(pathid, size, count); /* Update stats for all sub directories */ Mmsg(db->cmd, " SELECT PathId " " FROM PathVisibility " " INNER JOIN PathHierarchy USING (PathId) " " WHERE PPathId = %lld " " AND JobId = %s ", pathid, jobids); db->QueryDB(jcr, db->cmd); int num = db->sql_num_rows(); if (num > 0) { int64_t *result = (int64_t *)malloc (num * sizeof(int64_t)); SQL_ROW row; int i=0; while((row = db->sql_fetch_row())) { result[i++] = str_to_int64(row[0]); /* PathId */ } i=0; while (num > 0) { int64_t c=0, s=0; fv_compute_size_and_count(result[i], &s, &c); *size += s; *count += c; i++; num--; } free(result); } fv_update_size_and_count(pathid, *size, *count); } void Bvfs::fv_update_size_and_count(int64_t pathid, int64_t size, int64_t count) { Mmsg(db->cmd, "UPDATE PathVisibility SET Files = %lld, Size = %lld " " WHERE JobId = %s " " AND PathId = %lld ", count, size, jobids, pathid); db->UpdateDB(jcr, db->cmd, false); } void bvfs_update_cache(JCR *jcr, BDB *mdb) { uint32_t nb=0; db_list_ctx jobids_list; mdb->bdb_lock(); #ifdef xxx /* TODO: Remove this code when updating make_bacula_table script */ Mmsg(mdb->cmd, "SELECT 1 FROM Job WHERE HasCache<>2 LIMIT 1"); if (!mdb->QueryDB(jcr, mdb->cmd)) { Dmsg0(dbglevel, "Creating cache table\n"); Mmsg(mdb->cmd, "ALTER TABLE Job ADD HasCache int DEFAULT 0"); mdb->QueryDB(jcr, mdb->cmd); Mmsg(mdb->cmd, "CREATE TABLE PathHierarchy ( " "PathId integer NOT NULL, " "PPathId integer NOT NULL, " "CONSTRAINT pathhierarchy_pkey " "PRIMARY KEY (PathId))"); mdb->QueryDB(jcr, mdb->cmd); Mmsg(mdb->cmd, "CREATE INDEX pathhierarchy_ppathid " "ON PathHierarchy (PPathId)"); mdb->QueryDB(jcr, mdb->cmd); Mmsg(mdb->cmd, "CREATE TABLE PathVisibility (" "PathId integer NOT NULL, " "JobId integer NOT NULL, " "Size int8 DEFAULT 0, " "Files int4 DEFAULT 0, " "CONSTRAINT pathvisibility_pkey " "PRIMARY KEY (JobId, PathId))"); mdb->QueryDB(jcr, mdb->cmd); Mmsg(mdb->cmd, "CREATE INDEX pathvisibility_jobid " "ON PathVisibility (JobId)"); mdb->QueryDB(jcr, mdb->cmd); } #endif Mmsg(mdb->cmd, "SELECT JobId from Job " "WHERE HasCache = 0 " "AND Type IN ('B') AND JobStatus IN ('T', 'f', 'A') " "ORDER BY JobId"); mdb->bdb_sql_query(mdb->cmd, db_list_handler, &jobids_list); bvfs_update_path_hierarchy_cache(jcr, mdb, jobids_list.list); mdb->bdb_start_transaction(jcr); Dmsg0(dbglevel, "Cleaning pathvisibility\n"); Mmsg(mdb->cmd, "DELETE FROM PathVisibility " "WHERE NOT EXISTS " "(SELECT 1 FROM Job WHERE JobId=PathVisibility.JobId)"); nb = mdb->DeleteDB(jcr, mdb->cmd); Dmsg1(dbglevel, "Affected row(s) = %d\n", nb); mdb->bdb_end_transaction(jcr); mdb->bdb_unlock(); } /* * Update the bvfs cache for given jobids (1,2,3,4) */ int bvfs_update_path_hierarchy_cache(JCR *jcr, BDB *mdb, char *jobids) { pathid_cache ppathid_cache; JobId_t JobId; char *p; int ret=1; for (p=jobids; ; ) { int stat = 
get_next_jobid_from_list(&p, &JobId); if (stat < 0) { ret = 0; break; } if (stat == 0) { break; } Dmsg1(dbglevel, "Updating cache for %lld\n", (uint64_t)JobId); if (!update_path_hierarchy_cache(jcr, mdb, ppathid_cache, JobId)) { ret = 0; } } return ret; } /* * Update the bvfs fileview for given jobids */ void bvfs_update_fv_cache(JCR *jcr, BDB *mdb, char *jobids) { char *p; JobId_t JobId; Bvfs bvfs(jcr, mdb); for (p=jobids; ; ) { int stat = get_next_jobid_from_list(&p, &JobId); if (stat < 0) { return; } if (stat == 0) { break; } Dmsg1(dbglevel, "Trying to create cache for %lld\n", (int64_t)JobId); bvfs.set_jobid(JobId); bvfs.fv_update_cache(); } } /* * Update the bvfs cache for current jobids */ void Bvfs::update_cache() { bvfs_update_path_hierarchy_cache(jcr, db, jobids); } bool Bvfs::ch_dir(DBId_t pathid) { reset_offset(); if (need_to_check_permissions()) { sellist sel; db_list_ctx ids; char ed1[50]; sel.set_string(edit_uint64(pathid, ed1), false); if (check_full_path_access(1, &sel, &ids) > 0) { Dmsg1(DT_BVFS, "Access denied for pathid %d\n", (int)pathid); pathid = 0; } } pwd_id = pathid; return pwd_id != 0; } /* Change the current directory, returns true if the path exists */ bool Bvfs::ch_dir(const char *path) { db->bdb_lock(); pm_strcpy(db->path, path); db->pnl = strlen(db->path); ch_dir(db->bdb_get_path_record(jcr)); db->bdb_unlock(); return pwd_id != 0; } /* * Get all file versions for a specified list of clients * TODO: Handle basejobs using different client */ void Bvfs::get_all_file_versions(DBId_t pathid, FileId_t fnid, alist *clients) { char ed1[50], ed2[50], *eclients; POOL_MEM fname, q, query; if (see_copies) { Mmsg(q, " AND Job.Type IN ('C', 'B') "); } else { Mmsg(q, " AND Job.Type = 'B' "); } if (*filename && fnid == 0) { Mmsg(fname, " '%s' ", filename); } else { Mmsg(fname, " (SELECT Filename FROM File AS F2 WHERE FileId = %s) ", edit_uint64(fnid, ed1)); } eclients = escape_list(clients); Dmsg3(dbglevel, "get_all_file_versions(%lld, %lld, %s)\n", (uint64_t)pathid, (uint64_t)fnid, eclients); Mmsg(query,// 1 2 3 "SELECT DISTINCT 'V', File.PathId, File.FileId, File.JobId, " // 4 5 6 "File.LStat, File.FileId, File.Md5, " // 7 8 9 "Media.VolumeName, Media.InChanger, Media.VolType " "FROM File, Job, Client, JobMedia, Media " "WHERE File.Filename = %s " "AND File.PathId = %s " "AND File.JobId = Job.JobId " "AND Job.JobId = JobMedia.JobId " "AND File.FileIndex >= JobMedia.FirstIndex " "AND File.FileIndex <= JobMedia.LastIndex " "AND JobMedia.MediaId = Media.MediaId " "AND Job.ClientId = Client.ClientId " "AND Client.Name IN (%s) " "%s ORDER BY FileId LIMIT %d OFFSET %d" ,fname.c_str(), edit_uint64(pathid, ed2), eclients, q.c_str(), limit, offset); Dmsg1(dbglevel_sql, "q=%s\n", query.c_str()); db->bdb_sql_query(query.c_str(), list_entries, user_data); } /* * Get all file versions for a specified client * TODO: Handle basejobs using different client */ bool Bvfs::get_delta(FileId_t fileid) { Dmsg1(dbglevel, "get_delta(%lld)\n", (uint64_t)fileid); char ed1[50]; int32_t num; SQL_ROW row; POOL_MEM q; POOL_MEM query; char *fn = NULL; bool ret = false; db->bdb_lock(); /* Check if some FileId have DeltaSeq > 0 * Foreach of them we need to get the accurate_job list, and compute * what are dependencies */ Mmsg(query, "SELECT F.JobId, F.Filename, F.PathId, F.DeltaSeq " "FROM File AS F WHERE FileId = %lld " "AND DeltaSeq > 0", fileid); if (!db->QueryDB(jcr, query.c_str())) { Dmsg1(dbglevel_sql, "Can't execute query=%s\n", query.c_str()); goto bail_out; } /* TODO: Use an other DB connection 
can avoid to copy the result of the * previous query into a temporary buffer */ num = db->sql_num_rows(); Dmsg2(dbglevel, "Found %d Delta parts q=%s\n", num, query.c_str()); if (num > 0 && (row = db->sql_fetch_row())) { JOB_DBR jr, jr2; db_list_ctx lst; memset(&jr, 0, sizeof(jr)); memset(&jr2, 0, sizeof(jr2)); fn = bstrdup(row[1]); /* Filename */ int64_t jid = str_to_int64(row[0]); /* JobId */ int64_t pid = str_to_int64(row[2]); /* PathId */ /* Need to limit the query to StartTime, Client/Fileset */ jr2.JobId = jid; if (!db->bdb_get_job_record(jcr, &jr2)) { Dmsg1(0, "Unable to get job record for jobid %d\n", jid); goto bail_out; } jr.JobId = jid; jr.ClientId = jr2.ClientId; jr.FileSetId = jr2.FileSetId; jr.JobLevel = L_INCREMENTAL; jr.StartTime = jr2.StartTime; /* Get accurate jobid list */ if (!db->bdb_get_accurate_jobids(jcr, &jr, jid, &lst)) { Dmsg1(0, "Unable to get Accurate list for jobid %d\n", jid); goto bail_out; } /* Escape filename */ db->fnl = strlen(fn); db->esc_name = check_pool_memory_size(db->esc_name, 2*db->fnl+2); db->bdb_escape_string(jcr, db->esc_name, fn, db->fnl); edit_int64(pid, ed1); /* pathid */ int id=db->bdb_get_type_index(); Mmsg(query, bvfs_select_delta_version_with_basejob_and_delta[id], lst.list, db->esc_name, ed1, lst.list, db->esc_name, ed1, lst.list, lst.list); Mmsg(db->cmd, // 0 1 2 3 4 5 6 7 "SELECT 'd', PathId, 0, JobId, LStat, FileId, DeltaSeq, JobTDate" " FROM (%s) AS F1 " "ORDER BY DeltaSeq ASC", query.c_str()); Dmsg1(dbglevel_sql, "q=%s\n", db->cmd); if (!db->bdb_sql_query(db->cmd, list_entries, user_data)) { Dmsg1(dbglevel_sql, "Can't exec q=%s\n", db->cmd); goto bail_out; } } ret = true; bail_out: if (fn) { free(fn); } db->bdb_unlock(); return ret; } /* * Get all volumes for a specific file */ void Bvfs::get_volumes(FileId_t fileid) { Dmsg1(dbglevel, "get_volumes(%lld)\n", (uint64_t)fileid); char ed1[50]; POOL_MEM query; Mmsg(query, // 7 8 9 "SELECT DISTINCT 'L',0,0,0,0,0,0, Media.VolumeName, Media.InChanger, Media.VolType " "FROM File JOIN JobMedia USING (JobId) JOIN Media USING (MediaId) " "WHERE File.FileId = %s " "AND File.FileIndex >= JobMedia.FirstIndex " "AND File.FileIndex <= JobMedia.LastIndex " " LIMIT %d OFFSET %d" ,edit_uint64(fileid, ed1), limit, offset); Dmsg1(dbglevel_sql, "q=%s\n", query.c_str()); db->bdb_sql_query(query.c_str(), list_entries, user_data); } DBId_t Bvfs::get_root() { int p; *db->path = 0; db->bdb_lock(); p = db->bdb_get_path_record(jcr); db->bdb_unlock(); return p; } static int path_handler(void *ctx, int fields, char **row) { Bvfs *fs = (Bvfs *) ctx; return fs->_handle_path(ctx, fields, row); } int Bvfs::_handle_path(void *ctx, int fields, char **row) { if (bvfs_is_dir(row)) { /* can have the same path 2 times */ if (strcmp(row[BVFS_PathId], prev_dir)) { pm_strcpy(prev_dir, row[BVFS_PathId]); if (strcmp(NPRTB(row[BVFS_FileIndex]), "") != 0 && /* Can be negative, empty, or positive */ str_to_int64(row[BVFS_FileIndex]) <= 0 && /* Was deleted */ strcmp(NPRTB(row[BVFS_FileId]), "0") != 0) /* We have a fileid */ { /* The directory was probably deleted */ return 0; } return list_entries(user_data, fields, row); } } return 0; } /* * Retrieve . and .. information */ void Bvfs::ls_special_dirs() { Dmsg1(dbglevel, "ls_special_dirs(%lld)\n", (uint64_t)pwd_id); char ed1[50]; if (*jobids == 0) { return; } /* Will fetch directories */ *prev_dir = 0; POOL_MEM query; Mmsg(query, "(SELECT PathHierarchy.PPathId AS PathId, '..' 
AS Path " "FROM PathHierarchy JOIN PathVisibility USING (PathId) " "WHERE PathHierarchy.PathId = %s " "AND PathVisibility.JobId IN (%s) " "UNION " "SELECT %s AS PathId, '.' AS Path)", edit_uint64(pwd_id, ed1), jobids, ed1); POOL_MEM query2; Mmsg(query2,// 1 2 3 4 5 6 "SELECT 'D', tmp.PathId, tmp.Path, JobId, LStat, FileId, FileIndex " "FROM %s AS tmp LEFT JOIN ( " // get attributes if any "SELECT File1.PathId AS PathId, File1.JobId AS JobId, " "File1.LStat AS LStat, File1.FileId AS FileId, " "File1.FileIndex AS FileIndex, " "Job1.JobTDate AS JobTDate " "FROM File AS File1 JOIN Job AS Job1 USING (JobId)" "WHERE File1.Filename = '' " "AND File1.JobId IN (%s)) AS listfile1 " "ON (tmp.PathId = listfile1.PathId) " "ORDER BY tmp.Path, JobTDate DESC ", query.c_str(), jobids); Dmsg1(dbglevel_sql, "q=%s\n", query2.c_str()); db->bdb_sql_query(query2.c_str(), path_handler, this); } /* Returns true if we have dirs to read */ bool Bvfs::ls_dirs() { Dmsg1(dbglevel, "ls_dirs(%lld)\n", (uint64_t)pwd_id); char ed1[50]; if (*jobids == 0) { return false; } POOL_MEM query; POOL_MEM filter; if (*pattern) { Mmsg(filter, " AND Path2.Path %s '%s' ", match_query[db->bdb_get_type_index()], pattern); } /* the sql query displays same directory multiple time, take the first one */ *prev_dir = 0; /* Let's retrieve the list of the visible dirs in this dir ... */ /* Then we get all the dir entries from File ... */ Mmsg(query, // 0 1 2 3 4 5 6 "SELECT 'D', PathId, Path, JobId, LStat, FileId, FileIndex FROM ( " "SELECT Path1.PathId AS PathId, Path1.Path AS Path, " "lower(Path1.Path) AS lpath, " "listfile1.JobId AS JobId, listfile1.LStat AS LStat, " "listfile1.FileId AS FileId, " "listfile1.JobTDate AS JobTDate, " "listfile1.FileIndex AS FileIndex " "FROM ( " "SELECT DISTINCT PathHierarchy1.PathId AS PathId " "FROM PathHierarchy AS PathHierarchy1 " "JOIN Path AS Path2 " "ON (PathHierarchy1.PathId = Path2.PathId) " "JOIN PathVisibility AS PathVisibility1 " "ON (PathHierarchy1.PathId = PathVisibility1.PathId) " "WHERE PathHierarchy1.PPathId = %s " "AND PathVisibility1.JobId IN (%s) " "%s " ") AS listpath1 " "JOIN Path AS Path1 ON (listpath1.PathId = Path1.PathId) " "LEFT JOIN ( " /* get attributes if any */ "SELECT File1.PathId AS PathId, File1.JobId AS JobId, " "File1.LStat AS LStat, File1.FileId AS FileId, " "File1.FileIndex, Job1.JobTDate AS JobTDate " "FROM File AS File1 JOIN Job AS Job1 USING (JobId) " "WHERE File1.Filename = '' " "AND File1.JobId IN (%s)) AS listfile1 " "ON (listpath1.PathId = listfile1.PathId) " ") AS A ORDER BY Path,JobTDate DESC LIMIT %d OFFSET %d", edit_uint64(pwd_id, ed1), jobids, filter.c_str(), jobids, limit, offset); Dmsg1(dbglevel_sql, "q=%s\n", query.c_str()); db->bdb_lock(); db->bdb_sql_query(query.c_str(), path_handler, this); nb_record = db->sql_num_rows(); db->bdb_unlock(); return nb_record == limit; } /* List all files from a set of jobs */ bool Bvfs::ls_all_files() { POOL_MEM query; POOL_MEM filter; if (*jobids == 0) { return false; } if (*pattern) { Mmsg(filter, " AND File.Filename %s '%s' ", match_query[db->bdb_get_type_index()], pattern); } else if (*filename) { Mmsg(filter, " AND File.Filename = '%s' ", filename); } Mmsg(query, sql_bvfs_list_all_files[db->bdb_get_type_index()], filter.c_str(), jobids, limit, offset); /* TODO: check all parent directories to know if we can * list the files here. 
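* Until that is done, the listing relies only on the jobids filter and the
* optional filename/pattern filter; the per-directory access check used in
* ch_dir() is not repeated for every parent directory here.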
*/ Dmsg1(dbglevel_sql, "q=%s\n", query.c_str()); db->bdb_lock(); db->bdb_sql_query(query.c_str(), list_entries, user_data); nb_record = db->sql_num_rows(); db->bdb_unlock(); return nb_record == limit; } static void build_ls_files_query(BDB *db, POOL_MEM &query, const char *JobId, const char *PathId, const char *filter, int64_t limit, int64_t offset) { if (db->bdb_get_type_index() == SQL_TYPE_POSTGRESQL) { Mmsg(query, sql_bvfs_list_files[db->bdb_get_type_index()], JobId, PathId, JobId, PathId, filter, limit, offset); } else { Mmsg(query, sql_bvfs_list_files[db->bdb_get_type_index()], JobId, PathId, JobId, PathId, filter, JobId, JobId, limit, offset); } } /* Returns true if we have files to read */ bool Bvfs::ls_files() { POOL_MEM query; POOL_MEM filter; char pathid[50]; Dmsg1(dbglevel, "ls_files(%lld)\n", (uint64_t)pwd_id); if (*jobids == 0) { return false; } if (!pwd_id) { if (!ch_dir(get_root())) { return false; } } edit_uint64(pwd_id, pathid); if (*pattern) { Mmsg(filter, " AND T.Filename %s '%s' ", match_query[db->bdb_get_type_index()], pattern); } else if (*filename) { Mmsg(filter, " AND T.Filename = '%s' ", filename); } build_ls_files_query(db, query, jobids, pathid, filter.c_str(), limit, offset); /* TODO: check all parent directories to know if we can * list the files here. */ Dmsg1(dbglevel_sql, "q=%s\n", query.c_str()); db->bdb_lock(); db->bdb_sql_query(query.c_str(), list_entries, user_data); nb_record = db->sql_num_rows(); db->bdb_unlock(); return nb_record == limit; } /* * Return next Id from comma separated list * * Returns: * 1 if next Id returned * 0 if no more Ids are in list * -1 there is an error * TODO: merge with get_next_jobid_from_list() and get_next_dbid_from_list() */ static int get_next_id_from_list(char **p, int64_t *Id) { const int maxlen = 30; char id[maxlen+1]; char *q = *p; id[0] = 0; for (int i=0; ibdb_sql_query("BEGIN", NULL, NULL); db->bdb_sql_query("UPDATE Job SET HasCache=0", NULL, NULL); if (db->bdb_get_type_index() == SQL_TYPE_SQLITE3) { // sqlite3 don't have the "TRUNCATE TABLE" command but optimize the // "DELETE3 one when the "WHERE" clause is not used db->bdb_sql_query("DELETE FROM PathHierarchy", NULL, NULL); db->bdb_sql_query("DELETE FROM PathVisibility", NULL, NULL); } else { db->bdb_sql_query("TRUNCATE PathHierarchy", NULL, NULL); db->bdb_sql_query("TRUNCATE PathVisibility", NULL, NULL); } db->bdb_sql_query("COMMIT", NULL, NULL); } bool Bvfs::drop_restore_list(char *output_table) { POOL_MEM query; if (check_temp(output_table)) { Mmsg(query, "DROP TABLE IF EXISTS %s", output_table); db->bdb_sql_query(query.c_str(), NULL, NULL); return true; } return false; } struct hardlink { hlink lnk; uint32_t jobid; int32_t fileindex; }; static int checkhardlinks_handler(void *ctx, int fields, char **row) { Bvfs *self = (Bvfs *)ctx; return self->checkhardlinks_cb(fields, row); } /* Callback to check hardlinks for a given file */ int Bvfs::checkhardlinks_cb(int fields, char **row) { int32_t LinkFI=-1; struct stat statp; memset(&statp, 0, sizeof(statp)); if (row[2] && row[2][0]) { decode_stat(row[2], &statp, sizeof(statp), &LinkFI); } if (statp.st_nlink > 1) { hardlink *hl = NULL; uint32_t jobid = str_to_uint64(row[1]); uint64_t key = (((uint64_t)jobid) << 32) | (uint64_t)LinkFI; /* Look in our list if the original file is here, or add it */ if (LinkFI == 0) { /* Keep the first item in our list */ hl = (hardlink *) hardlinks->hash_malloc(sizeof(hardlink)); } else if (LinkFI > 0) { if (hardlinks->lookup(key) == NULL) { hl = (hardlink 
*)hardlinks->hash_malloc(sizeof(hardlink)); hl->jobid = jobid; hl->fileindex = LinkFI; missing_hardlinks->append(hl); } } if (hl) { hardlinks->insert(key, hl); } } return 0; } #ifdef COMMUNITY bool Bvfs::can_use_insert_hardlinks_fast() { return false; } bool Bvfs::insert_hardlinks_fast(char *output_table) { return insert_hardlinks(output_table); } #endif bool Bvfs::insert_hardlinks(char *output_table) { /* Check hardlinks. We get LStat fields, and we check if we need to add a FileIndex */ bool ret=false, first=true; hardlink *hl = NULL; POOL_MEM query, tmp1, tmp2; int nb=0; hardlinks = New(htable(hl, &hl->lnk)); missing_hardlinks = New(alist(100, not_owned_by_alist)); Dmsg0(dbglevel, "Inserting hardlinks method=standard\n"); Mmsg(query, "SELECT T.FileId, T.JobId, File.LStat FROM %s AS T JOIN File USING (FileId) WHERE Filename <> '' ORDER By T.JobId, T.FileIndex ASC", output_table); if (!db->bdb_sql_query(query.c_str(), checkhardlinks_handler, this)) { Dmsg1(dbglevel, "Can't execute query=%s\n", query.c_str()); goto bail_out; } Dmsg1(dbglevel, "Inserting %d hardlink records\n", missing_hardlinks->size()); Mmsg(query, "CREATE TEMPORARY TABLE h%s (JobId INTEGER, FileIndex INTEGER" "/*PKEY, DummyPkey INTEGER AUTO_INCREMENT PRIMARY KEY*/)", output_table); Dmsg1(dbglevel, "q=%s\n", query.c_str()); if (!db->bdb_sql_query(query.c_str(), NULL, NULL)) { Dmsg1(dbglevel, "Can't execute query=%s\n", query.c_str()); goto bail_out; } foreach_alist(hl, missing_hardlinks) { if (first) { first=false; } else { pm_strcat(tmp2, ","); } Mmsg(tmp1, "(%ld, %ld)", hl->jobid, hl->fileindex); pm_strcat(tmp2, tmp1.c_str()); if (nb++ >= 500) { /* flush the current query */ Dmsg1(dbglevel, " Inserting %d hardlinks\n", nb - 1); Mmsg(query, "INSERT INTO h%s (JobId, FileIndex) VALUES %s", output_table, tmp2.c_str()); if (!db->bdb_sql_query(query.c_str(), NULL, NULL)) { Dmsg1(dbglevel, "Can't execute query=%s\n", query.c_str()); goto bail_out; } pm_strcpy(tmp2, ""); /* Start again the concat */ first=true; nb=0; } } if (!first) { /* To the last round of the insertion */ Mmsg(query, "INSERT INTO h%s (JobId, FileIndex) VALUES %s", output_table, tmp2.c_str()); if (!db->bdb_sql_query(query.c_str(), NULL, NULL)) { Dmsg1(dbglevel, "Can't execute query=%s\n", query.c_str()); goto bail_out; } } Dmsg0(dbglevel, " Finishing hardlink insertion\n"); Mmsg(query, "INSERT INTO %s (JobId, FileIndex, FileId) " "SELECT File.JobId, File.FileIndex, File.FileId FROM File JOIN h%s AS T ON (T.JobId = File.JobId AND T.FileIndex = File.FileIndex)", output_table, output_table); if (!db->bdb_sql_query(query.c_str(), NULL, NULL)) { Dmsg1(dbglevel, "Can't execute query=%s\n", query.c_str()); goto bail_out; } Mmsg(query, "DROP TABLE IF EXISTS h%s", output_table); if (!db->bdb_sql_query(query.c_str(), NULL, NULL)) { Dmsg1(dbglevel, "Can't execute query=%s\n", query.c_str()); goto bail_out; } ret = true; bail_out: delete missing_hardlinks; missing_hardlinks = NULL; delete hardlinks; hardlinks = NULL; return ret; } bool Bvfs::compute_restore_list(char *fileid, char *dirid, char *fileindex, char *output_table) { POOL_MEM query; POOL_MEM tmp, tmp2; int64_t id; uint32_t nb = 0; int num; bool init=false; bool ret=false; bool use_insert_hardlinks_fast = false; /* check args */ if ((fileid && *fileid && !is_a_number_list(fileid)) || (dirid && *dirid && !is_a_number_list(dirid)) || (fileindex && *fileindex && !is_a_number_list(fileindex)) || (!*fileid && !*dirid && (!fileindex || !*fileindex)) || (!output_table) ) { Dmsg0(dbglevel, "Invalid parameters\n"); 
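/* The test above rejects the call when a supplied fileid/dirid/fileindex is
 * not a comma separated list of numbers, when none of the three selectors is
 * given, or when no output_table was passed. The table name itself is
 * validated by check_temp() just below. */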
return false; } if (!check_temp(output_table)) { return false; } db->bdb_lock(); if (can_use_insert_hardlinks_fast()) { use_insert_hardlinks_fast = true; } /* Cleanup old tables first */ Mmsg(query, "DROP TABLE IF EXISTS btemp%s", output_table); db->bdb_sql_query(query.c_str()); Mmsg(query, "DROP TABLE IF EXISTS %s", output_table); db->bdb_sql_query(query.c_str()); /* Now start the transaction to protect ourself from other commands */ db->bdb_start_transaction(jcr); Mmsg(query, "CREATE TABLE btemp%s /*PKEY (DummyPkey INTEGER AUTO_INCREMENT PRIMARY KEY)*/ AS ", output_table); if (*fileid) { /* Select files with their direct id */ init=true; Mmsg(tmp,"SELECT Job.JobId, JobTDate, FileIndex, Filename, " "PathId, FileId " "FROM File JOIN Job USING (JobId) WHERE FileId IN (%s)", fileid); pm_strcat(query, tmp.c_str()); } if (fileindex && *fileindex) { /* Select files with their fileindex */ int64_t jobid, fidx; sellist sel; sel.set_string(fileindex, true); /* check the format */ foreach_sellist(jobid, &sel) { fidx = sel.next(); if (fidx <= 0) { goto bail_out; } if (init) { query.strcat(" UNION "); } Mmsg(tmp,"SELECT Job.JobId, JobTDate, FileIndex, Filename, " "PathId, FileId " "FROM File JOIN Job USING (JobId) " "WHERE JobId = %lld AND FileIndex = %lld AND JobId IN (%s) ", jobid, fidx, jobids); pm_strcat(query, tmp.c_str()); init=true; } } /* Add a directory content */ while (get_next_id_from_list(&dirid, &id) == 1) { Mmsg(tmp, "SELECT Path FROM Path WHERE PathId=%lld", id); if (!db->bdb_sql_query(tmp.c_str(), get_path_handler, (void *)&tmp2)) { Dmsg0(dbglevel, "Can't search for path\n"); /* print error */ goto bail_out; } if (!strcmp(tmp2.c_str(), "")) { /* path not found */ Dmsg3(dbglevel, "Path not found %lld q=%s s=%s\n", id, tmp.c_str(), tmp2.c_str()); break; } /* escape % and _ for LIKE search */ tmp.check_size((strlen(tmp2.c_str())+1) * 2); char *p = tmp.c_str(); for (char *s = tmp2.c_str(); *s ; s++) { if (*s == '%' || *s == '_' || *s == '\\') { *p = '\\'; p++; } *p = *s; p++; } *p = '\0'; tmp.strcat("%"); size_t len = strlen(tmp.c_str()); tmp2.check_size((len+1) * 2); db->bdb_escape_string(jcr, tmp2.c_str(), tmp.c_str(), len); if (init) { query.strcat(" UNION "); } Mmsg(tmp, "SELECT Job.JobId, JobTDate, File.FileIndex, File.Filename, " "File.PathId, FileId " "FROM Path JOIN File USING (PathId) JOIN Job USING (JobId) " "WHERE Path.Path LIKE '%s' ESCAPE '%s' AND File.JobId IN (%s) ", tmp2.c_str(), escape_char_value[db->bdb_get_type_index()], jobids); query.strcat(tmp.c_str()); init = true; query.strcat(" UNION "); /* A directory can have files from a BaseJob */ Mmsg(tmp, "SELECT File.JobId, JobTDate, BaseFiles.FileIndex, " "File.Filename, File.PathId, BaseFiles.FileId " "FROM BaseFiles " "JOIN File USING (FileId) " "JOIN Job ON (BaseFiles.JobId = Job.JobId) " "JOIN Path USING (PathId) " "WHERE Path.Path LIKE '%s' ESCAPE '%s' AND BaseFiles.JobId IN (%s) ", tmp2.c_str(), escape_char_value[db->bdb_get_type_index()], jobids); query.strcat(tmp.c_str()); } Dmsg1(dbglevel_sql, "query=%s\n", query.c_str()); if (!db->bdb_sql_query(query.c_str(), NULL, NULL)) { Dmsg1(dbglevel, "Can't execute query=%s\n", query.c_str()); goto bail_out; } Mmsg(query, sql_bvfs_select[db->bdb_get_type_index()], output_table, output_table, output_table); /* TODO: handle jobid filter */ Dmsg1(dbglevel_sql, "query=%s\n", query.c_str()); if (!db->bdb_sql_query(query.c_str(), NULL, NULL)) { Dmsg1(dbglevel, "Can't execute query=%s\n", query.c_str()); goto bail_out; } /* MySQL needs the index */ if (db->bdb_get_type_index() == 
SQL_TYPE_MYSQL) { Mmsg(query, "CREATE INDEX idx_%s ON %s (JobId)", output_table, output_table); Dmsg1(dbglevel_sql, "query=%s\n", query.c_str()); if (!db->bdb_sql_query(query.c_str(), NULL, NULL)) { Dmsg1(dbglevel, "Can't execute query=%s\n", query.c_str()); goto bail_out; } } /* SQLite needs extra indexes */ if (db->bdb_get_type_index() == SQL_TYPE_SQLITE3) { Mmsg(query, "CREATE INDEX idx1_%s ON %s (JobId)", output_table, output_table); Dmsg1(dbglevel_sql, "query=%s\n", query.c_str()); if (!db->bdb_sql_query(query.c_str(), NULL, NULL)) { Dmsg1(dbglevel, "Can't execute query=%s\n", query.c_str()); goto bail_out; } Mmsg(query, "CREATE INDEX idx2_%s ON %s (FileIndex)", output_table, output_table); Dmsg1(dbglevel_sql, "query=%s\n", query.c_str()); if (!db->bdb_sql_query(query.c_str(), NULL, NULL)) { Dmsg1(dbglevel, "Can't execute query=%s\n", query.c_str()); goto bail_out; } } if (compute_delta) { /* Check if some FileId have DeltaSeq > 0 * Foreach of them we need to get the accurate_job list, and compute * what are dependencies */ Mmsg(query, "SELECT F.FileId, F.JobId, F.Filename, F.PathId, F.DeltaSeq " "FROM File AS F JOIN Job USING (JobId) JOIN %s USING (FileId) " "WHERE DeltaSeq > 0", output_table); if (!db->QueryDB(jcr, query.c_str())) { Dmsg1(dbglevel_sql, "Can't execute query=%s\n", query.c_str()); } /* TODO: Use an other DB connection can avoid to copy the result of the * previous query into a temporary buffer */ num = db->sql_num_rows(); Dmsg2(dbglevel, "Found %d Delta parts in restore selection q=%s\n", num, query.c_str()); if (num > 0) { int64_t *result = (int64_t *)malloc (num * 4 * sizeof(int64_t)); SQL_ROW row; int i=0; while((row = db->sql_fetch_row())) { result[i++] = str_to_int64(row[0]); /* FileId */ result[i++] = str_to_int64(row[1]); /* JobId */ result[i++] = (intptr_t)bstrdup(row[2]); /* Filename TODO: use pool */ result[i++] = str_to_int64(row[3]); /* PathId */ } i=0; while (num > 0) { insert_missing_delta(output_table, result + i); free((char *)(result[i+2])); /* TODO: use pool */ i += 4; num--; } free(result); } } if (use_insert_hardlinks_fast) { if (!insert_hardlinks_fast(output_table)) { goto bail_out; } } else { if (!insert_hardlinks(output_table)) { goto bail_out; } } if (!check_permissions(output_table)) { goto bail_out; } /* Final check to see if we have at least one file to restore */ Mmsg(query, "SELECT 1 FROM %s LIMIT 1", output_table); if (!db->bdb_sql_query(query.c_str(), db_int_handler, &nb)) { Dmsg1(dbglevel, "Can't execute query=%s\n", query.c_str()); goto bail_out; } if (nb == 1) { ret = true; } bail_out: if (!ret) { Mmsg(query, "DROP TABLE IF EXISTS %s", output_table); db->bdb_sql_query(query.c_str()); } Mmsg(query, "DROP TABLE IF EXISTS btemp%s", output_table); db->bdb_sql_query(query.c_str()); db->bdb_end_transaction(jcr); /* the destination table might be visible now */ db->bdb_unlock(); return ret; } void Bvfs::insert_missing_delta(char *output_table, int64_t *res) { char ed1[50]; db_list_ctx lst; POOL_MEM query; JOB_DBR jr, jr2; memset(&jr, 0, sizeof(jr)); memset(&jr2, 0, sizeof(jr2)); /* Need to limit the query to StartTime, Client/Fileset */ jr2.JobId = res[1]; db->bdb_get_job_record(jcr, &jr2); jr.JobId = res[1]; jr.ClientId = jr2.ClientId; jr.FileSetId = jr2.FileSetId; jr.JobLevel = L_INCREMENTAL; jr.StartTime = jr2.StartTime; /* Get accurate jobid list */ db->bdb_get_accurate_jobids(jcr, &jr, jr.JobId, &lst); Dmsg2(dbglevel_sql, "JobId list for %lld is %s\n", res[0], lst.list); /* The list contains already the last DeltaSeq element, so * we 
don't need to select it in the next query */ for (int l = strlen(lst.list); l > 0; l--) { if (lst.list[l] == ',') { lst.list[l] = '\0'; break; } } Dmsg1(dbglevel_sql, "JobId list after strip is %s\n", lst.list); /* Escape filename */ db->fnl = strlen((char *)res[2]); db->esc_name = check_pool_memory_size(db->esc_name, 2*db->fnl+2); db->bdb_escape_string(jcr, db->esc_name, (char *)res[2], db->fnl); edit_int64(res[3], ed1); /* pathid */ int id=db->bdb_get_type_index(); Mmsg(query, bvfs_select_delta_version_with_basejob_and_delta[id], lst.list, db->esc_name, ed1, lst.list, db->esc_name, ed1, lst.list, lst.list); Mmsg(db->cmd, "INSERT INTO %s " "SELECT JobId, FileIndex, FileId FROM (%s) AS F1", output_table, query.c_str()); if (!db->bdb_sql_query(db->cmd, NULL, NULL)) { Dmsg1(dbglevel_sql, "Can't exec q=%s\n", db->cmd); } } /* This function is used with caution by tools such as SAP plugin * We need to be able to delete a record from the File table, quite ugly... */ bool Bvfs::delete_fileid(char *fileid) { bool ret=true; /* The jobid was filtered before */ if (!jobids || *jobids == '\0' || !fileid || *fileid == '\0') { return false; } db->bdb_lock(); Mmsg(db->cmd, "DELETE FROM File WHERE FileId IN (%s) AND JobId IN (%s)", fileid, jobids); if (!db->bdb_sql_query(db->cmd, NULL, NULL)) { ret = false; } db->bdb_unlock(); return ret; } #if COMMUNITY bool Bvfs::check_path_access(DBId_t pid) { return true;} bool Bvfs::check_full_path_access(int count, sellist *sel, db_list_ctx *toexcl){return true;} bool Bvfs::can_access_dir(const char *path) { return true;} bool Bvfs::can_access(struct stat *p) { return true;} bool Bvfs::need_to_check_permissions() { return false;} bool Bvfs::check_permissions(char *output_table) {return true;} int Bvfs::checkuid_cb(int fields, char **row) { return 0;} #endif #endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL */ bacula-15.0.3/src/cats/drop_postgresql_database.in0000644000175000017500000000051714771010173022011 0ustar bsbuildbsbuild#!/bin/sh # # Copyright (C) 2000-2023 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # # shell script to drop Bacula database(s) # bindir=@POSTGRESQL_BINDIR@ db_name=@db_name@ if $bindir/dropdb ${db_name} then echo "Drop of ${db_name} database succeeded." else echo "Drop of ${db_name} database failed." fi exit 0 bacula-15.0.3/src/cats/update_bacula_tables.in0000755000175000017500000000170314771010173021062 0ustar bsbuildbsbuild#!/bin/sh # # Copyright (C) 2000-2023 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # # This routine alters the appropriately configured # Bacula tables for PostgreSQL, Ingres, MySQL, or SQLite. # # can be used to change the current user with su pre_command="sh -c" default_db_type=@DEFAULT_DB_TYPE@ # # See if the first argument is a valid backend name. # If so the user overrides the default database backend. # if [ $# -gt 0 ]; then case $1 in sqlite3) db_type=$1 shift ;; mysql) db_type=$1 shift ;; postgresql) db_type=$1 shift ;; *) ;; esac fi # # If no new db_type is gives use the default db_type. 
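# Example invocations (illustration only, paths depend on the install):
#   ./update_bacula_tables              # use the configured default backend
#   ./update_bacula_tables postgresql   # force the PostgreSQL update script
# Any extra arguments are passed through to the backend update script.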
# if [ -z "${db_type}" ]; then db_type="${default_db_type}" fi if [ $db_type = postgresql -a "$UID" = 0 ]; then pre_command="su - postgres -c" fi echo "Altering ${db_type} tables" $pre_command "@scriptdir@/update_${db_type}_tables $*" bacula-15.0.3/src/cats/sqlite.in0000644000175000017500000000035414771010173016236 0ustar bsbuildbsbuild#!/bin/sh # # Copyright (C) 2000-2023 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # # shell script to invoke SQLite on Bacula database bindir=@SQLITE_BINDIR@ db_name=@db_name@ $bindir/sqlite @working_dir@/${db_name}.db bacula-15.0.3/src/cats/drop_mysql_database.in0000644000175000017500000000061014771010173020745 0ustar bsbuildbsbuild#!/bin/sh # # Copyright (C) 2000-2023 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # # shell script to drop Bacula database(s) # bindir=@MYSQL_BINDIR@ db_name=@db_name@ $bindir/mysql $* -f < #include #define __BDB_MYSQL_H_ 1 #include "bdb_mysql.h" /* ----------------------------------------------------------------------- * * MySQL dependent defines and subroutines * * ----------------------------------------------------------------------- */ /* List of open databases */ static dlist *db_list = NULL; static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; BDB_MYSQL::BDB_MYSQL(): BDB() { BDB_MYSQL *mdb = this; if (db_list == NULL) { db_list = New(dlist(this, &this->m_link)); } mdb->m_db_driver_type = SQL_DRIVER_TYPE_MYSQL; mdb->m_db_type = SQL_TYPE_MYSQL; mdb->m_db_driver = bstrdup("MySQL"); mdb->errmsg = get_pool_memory(PM_EMSG); /* get error message buffer */ mdb->errmsg[0] = 0; mdb->cmd = get_pool_memory(PM_EMSG); /* get command buffer */ mdb->cached_path = get_pool_memory(PM_FNAME); mdb->cached_path_id = 0; mdb->m_ref_count = 1; mdb->fname = get_pool_memory(PM_FNAME); mdb->path = get_pool_memory(PM_FNAME); mdb->esc_name = get_pool_memory(PM_FNAME); mdb->esc_path = get_pool_memory(PM_FNAME); mdb->esc_obj = get_pool_memory(PM_FNAME); mdb->m_use_fatal_jmsg = true; /* Initialize the private members. */ mdb->m_db_handle = NULL; mdb->m_result = NULL; db_list->append(this); } BDB_MYSQL::~BDB_MYSQL() { } /* * Initialize database data structure. In principal this should * never have errors, or it is really fatal. */ BDB *db_init_database(JCR *jcr, const char *db_driver, const char *db_name, const char *db_user, const char *db_password, const char *db_address, int db_port, const char *db_socket, const char *db_ssl_mode, const char *db_ssl_key, const char *db_ssl_cert, const char *db_ssl_ca, const char *db_ssl_capath, const char *db_ssl_cipher, bool mult_db_connections, bool disable_batch_insert) { BDB_MYSQL *mdb = NULL; if (!db_user) { Jmsg(jcr, M_FATAL, 0, _("A user name for MySQL must be supplied.\n")); return NULL; } P(mutex); /* lock DB queue */ /* * Look to see if DB already open */ if (db_list && !mult_db_connections) { foreach_dlist(mdb, db_list) { if (mdb->bdb_match_database(db_driver, db_name, db_address, db_port)) { Dmsg1(100, "DB REopen %s\n", db_name); mdb->increment_refcount(); goto get_out; } } } Dmsg0(100, "db_init_database first time\n"); mdb = New(BDB_MYSQL()); if (!mdb) goto get_out; /* * Initialize the parent class members. 
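* Every configuration string is duplicated with bstrdup() so this BDB object
* owns its own copies; they are freed again in bdb_close_database() once the
* reference count drops to zero.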
*/ mdb->m_db_name = bstrdup(db_name); mdb->m_db_user = bstrdup(db_user); if (db_password) { mdb->m_db_password = bstrdup(db_password); } if (db_address) { mdb->m_db_address = bstrdup(db_address); } if (db_socket) { mdb->m_db_socket = bstrdup(db_socket); } if (db_ssl_mode) { mdb->m_db_ssl_mode = bstrdup(db_ssl_mode); } else { mdb->m_db_ssl_mode = bstrdup("preferred"); } if (db_ssl_key) { mdb->m_db_ssl_key = bstrdup(db_ssl_key); } if (db_ssl_cert) { mdb->m_db_ssl_cert = bstrdup(db_ssl_cert); } if (db_ssl_ca) { mdb->m_db_ssl_ca = bstrdup(db_ssl_ca); } if (db_ssl_capath) { mdb->m_db_ssl_capath = bstrdup(db_ssl_capath); } if (db_ssl_cipher) { mdb->m_db_ssl_cipher = bstrdup(db_ssl_cipher); } mdb->m_db_port = db_port; if (disable_batch_insert) { mdb->m_disabled_batch_insert = true; mdb->m_have_batch_insert = false; } else { mdb->m_disabled_batch_insert = false; #ifdef USE_BATCH_FILE_INSERT #ifdef HAVE_MYSQL_THREAD_SAFE mdb->m_have_batch_insert = mysql_thread_safe(); #else mdb->m_have_batch_insert = false; #endif /* HAVE_MYSQL_THREAD_SAFE */ #else mdb->m_have_batch_insert = false; #endif /* USE_BATCH_FILE_INSERT */ } mdb->m_allow_transactions = mult_db_connections; /* At this time, when mult_db_connections == true, this is for * specific console command such as bvfs or batch mode, and we don't * want to share a batch mode or bvfs. In the future, we can change * the creation function to add this parameter. */ mdb->m_dedicated = mult_db_connections; get_out: V(mutex); return mdb; } bool BDB_MYSQL::is_pkey_required() { int ret = mysql_query(m_db_handle, "show variables like 'sql_require_primary_key'"); bool req_pkey = false; if (!ret) { if ((m_result = mysql_use_result(m_db_handle)) != NULL) { SQL_ROW row; while ((row = mysql_fetch_row(m_result))) { req_pkey = !strncmp(row[1], "ON", 2); } sql_free_result(); } } return req_pkey; } /* * Now actually open the database. This can generate errors, * which are returned in the errmsg * * DO NOT close the database or delete mdb here !!!! */ bool BDB_MYSQL::bdb_open_database(JCR *jcr) { BDB_MYSQL *mdb = this; bool retval = false; int errstat; bool reconnect = 1; P(mutex); if (mdb->m_connected) { retval = true; goto get_out; } if ((errstat=rwl_init(&mdb->m_lock)) != 0) { berrno be; Mmsg1(&mdb->errmsg, _("Unable to initialize DB lock. ERR=%s\n"), be.bstrerror(errstat)); goto get_out; } /* * Connect to the database */ #ifdef xHAVE_EMBEDDED_MYSQL // mysql_server_init(0, NULL, NULL); #endif mysql_init(&mdb->m_instance); Dmsg0(50, "mysql_init done\n"); /* * Sets the appropriate certificate options for * establishing secure connection using SSL to the database. */ if (mdb->m_db_ssl_key) { mysql_ssl_set(&(mdb->m_instance), mdb->m_db_ssl_key, mdb->m_db_ssl_cert, mdb->m_db_ssl_ca, mdb->m_db_ssl_capath, mdb->m_db_ssl_cipher); } /* * If connection fails, try at 5 sec intervals for 30 seconds. 
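* The loop below makes up to 6 attempts, sleeping with bmicrosleep(5, 0)
* between them, and breaks out as soon as mysql_real_connect() returns a
* non-NULL handle.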
*/ for (int retry=0; retry < 6; retry++) { mdb->m_db_handle = mysql_real_connect( &(mdb->m_instance), /* db */ mdb->m_db_address, /* default = localhost */ mdb->m_db_user, /* login name */ mdb->m_db_password, /* password */ mdb->m_db_name, /* database name */ mdb->m_db_port, /* default port */ mdb->m_db_socket, /* default = socket */ CLIENT_FOUND_ROWS); /* flags */ /* * If no connect, try once more in case it is a timing problem */ if (mdb->m_db_handle != NULL) { break; } bmicrosleep(5,0); } #if MYSQL_VERSION_ID < 80034 // Deprecated in 8.0.34 # if MYSQL_VERSION_ID <= 50117 mysql_options(&mdb->m_instance, MYSQL_OPT_RECONNECT, (char*)&reconnect); /* so connection does not timeout */ # else mysql_options(&mdb->m_instance, MYSQL_OPT_RECONNECT, &reconnect); /* so connection does not timeout */ # endif Dmsg0(50, "mysql_real_connect done\n"); #endif Dmsg3(50, "db_user=%s db_name=%s db_password=%s\n", mdb->m_db_user, mdb->m_db_name, (mdb->m_db_password == NULL) ? "(NULL)" : mdb->m_db_password); if (mdb->m_db_handle == NULL) { Mmsg2(&mdb->errmsg, _("Unable to connect to MySQL server.\n" "Database=%s User=%s\n" "MySQL connect failed either server not running or your authorization is incorrect.\n"), mdb->m_db_name, mdb->m_db_user); #if MYSQL_VERSION_ID >= 40101 Dmsg3(50, "Error %u (%s): %s\n", mysql_errno(&(mdb->m_instance)), mysql_sqlstate(&(mdb->m_instance)), mysql_error(&(mdb->m_instance))); #else Dmsg2(50, "Error %u: %s\n", mysql_errno(&(mdb->m_instance)), mysql_error(&(mdb->m_instance))); #endif goto get_out; } /* get the current cipher used for SSL connection */ if (mdb->m_db_ssl_key) { const char *cipher; if (mdb->m_db_ssl_cipher) { free(mdb->m_db_ssl_cipher); } cipher = (const char *)mysql_get_ssl_cipher(&(mdb->m_instance)); if (cipher) { mdb->m_db_ssl_cipher = bstrdup(cipher); } Dmsg1(50, "db_ssl_ciper=%s\n", (mdb->m_db_ssl_cipher == NULL) ? 
"(NULL)" : mdb->m_db_ssl_cipher); } mdb->m_connected = true; if (!bdb_check_version(jcr)) { goto get_out; } /* Check if all tables created must have a Primary Key */ if (is_pkey_required()) { m_pkey_query_buffer = get_pool_memory(PM_FNAME); } else { m_pkey_query_buffer = NULL; } Dmsg3(100, "opendb ref=%d connected=%d db=%p\n", mdb->m_ref_count, mdb->m_connected, mdb->m_db_handle); /* * Set connection timeout to 8 days specialy for batch mode */ sql_query("SET wait_timeout=691200"); sql_query("SET interactive_timeout=691200"); retval = true; get_out: V(mutex); return retval; } void BDB_MYSQL::bdb_close_database(JCR *jcr) { BDB_MYSQL *mdb = this; if (mdb->m_connected) { bdb_end_transaction(jcr); } P(mutex); mdb->m_ref_count--; Dmsg3(100, "closedb ref=%d connected=%d db=%p\n", mdb->m_ref_count, mdb->m_connected, mdb->m_db_handle); if (mdb->m_ref_count == 0) { if (mdb->m_connected) { sql_free_result(); } db_list->remove(mdb); if (mdb->m_connected) { Dmsg1(100, "close db=%p\n", mdb->m_db_handle); mysql_close(&mdb->m_instance); } if (is_rwl_valid(&mdb->m_lock)) { rwl_destroy(&mdb->m_lock); } free_pool_memory(mdb->errmsg); free_pool_memory(mdb->cmd); free_pool_memory(mdb->cached_path); free_pool_memory(mdb->fname); free_pool_memory(mdb->path); free_pool_memory(mdb->esc_name); free_pool_memory(mdb->esc_path); free_pool_memory(mdb->esc_obj); if (mdb->m_pkey_query_buffer) { free_pool_memory(mdb->m_pkey_query_buffer); } if (mdb->m_db_driver) { free(mdb->m_db_driver); } if (mdb->m_db_name) { free(mdb->m_db_name); } if (mdb->m_db_user) { free(mdb->m_db_user); } if (mdb->m_db_password) { free(mdb->m_db_password); } if (mdb->m_db_address) { free(mdb->m_db_address); } if (mdb->m_db_socket) { free(mdb->m_db_socket); } if (mdb->m_db_ssl_mode) { free(mdb->m_db_ssl_mode); } if (mdb->m_db_ssl_key) { free(mdb->m_db_ssl_key); } if (mdb->m_db_ssl_cert) { free(mdb->m_db_ssl_cert); } if (mdb->m_db_ssl_ca) { free(mdb->m_db_ssl_ca); } if (mdb->m_db_ssl_capath) { free(mdb->m_db_ssl_capath); } if (mdb->m_db_ssl_cipher) { free(mdb->m_db_ssl_cipher); } delete mdb; if (db_list->size() == 0) { delete db_list; db_list = NULL; } } V(mutex); } /* * This call is needed because the message channel thread * opens a database on behalf of a jcr that was created in * a different thread. MySQL then allocates thread specific * data, which is NOT freed when the original jcr thread * closes the database. Thus the msgchan must call here * to cleanup any thread specific data that it created. */ void BDB_MYSQL::bdb_thread_cleanup(void) { #ifndef HAVE_WIN32 mysql_thread_end(); /* Cleanup thread specific data */ #endif } /* * Escape strings so MySQL is happy * * len is the length of the old string. Your new * string must be long enough (max 2*old+1) to hold * the escaped output. 
*/ void BDB_MYSQL::bdb_escape_string(JCR *jcr, char *snew, const char *old, int len) { BDB_MYSQL *mdb = this; mysql_real_escape_string(mdb->m_db_handle, snew, old, len); } /* * Escape binary object so that MySQL is happy * Memory is stored in BDB struct, no need to free it */ char *BDB_MYSQL::bdb_escape_object(JCR *jcr, char *old, int len) { BDB_MYSQL *mdb = this; mdb->esc_obj = check_pool_memory_size(mdb->esc_obj, len*2+1); mysql_real_escape_string(mdb->m_db_handle, mdb->esc_obj, old, len); return mdb->esc_obj; } /* * Unescape binary object so that MySQL is happy */ void BDB_MYSQL::bdb_unescape_object(JCR *jcr, char *from, int32_t expected_len, POOLMEM **dest, int32_t *dest_len) { if (!from) { *dest[0] = 0; *dest_len = 0; return; } *dest = check_pool_memory_size(*dest, expected_len+1); *dest_len = expected_len; memcpy(*dest, from, expected_len); (*dest)[expected_len]=0; } void BDB_MYSQL::bdb_start_transaction(JCR *jcr) { if (jcr) { if (!jcr->attr) { jcr->attr = get_pool_memory(PM_FNAME); } if (!jcr->ar) { jcr->ar = (ATTR_DBR *)malloc(sizeof(ATTR_DBR)); memset(jcr->ar, 0, sizeof(ATTR_DBR)); } } } void BDB_MYSQL::bdb_end_transaction(JCR *jcr) { } /* * Helper function to dynamically add Primary Keys to the tables being created. */ const char* BDB_MYSQL::enable_pkey(const char *query) { if (!m_pkey_query_buffer) return query; const char *tag = strstr(query, "/*PKEY"); if (!tag) return query; pm_strcpy(m_pkey_query_buffer, query); char *pos = strstr(m_pkey_query_buffer, "/*PKEY"); for (int i=0; i<6; i++) *pos++=' '; pos = strstr(pos, "*/"); for (int i=0; i<2; i++) *pos++=' '; return m_pkey_query_buffer; } /* * Submit a general SQL command (cmd), and for each row returned, * the result_handler is called with the ctx. */ bool BDB_MYSQL::bdb_sql_query(const char *query, DB_RESULT_HANDLER *result_handler, void *ctx) { int ret; SQL_ROW row; bool send = true; bool retval = false; BDB_MYSQL *mdb = this; int retry=5; Dmsg1(500, "db_sql_query starts with %s\n", query); bdb_lock(); errmsg[0] = 0; query = enable_pkey(query); do { ret = mysql_query(m_db_handle, query); if (ret != 0) { uint32_t merrno = mysql_errno(m_db_handle); switch (merrno) { case ER_LOCK_DEADLOCK: if (retry > 0) { Dmsg0(500, "db_sql_query failed because of a deadlock, retrying in few seconds...\n"); bmicrosleep(2, 0); /* TODO: When we will manage transactions, it's important to test if we are in or not */ } break; default: Dmsg1(50, "db_sql_query failed errno=%d\n", merrno); retry=0; /* Stop directly here, no retry */ break; } } } while (ret != 0 && retry-- > 0); if (ret != 0) { Mmsg(mdb->errmsg, _("Query failed: %s: ERR=%s\n"), query, sql_strerror()); Dmsg0(500, "db_sql_query failed\n"); goto get_out; } Dmsg0(500, "db_sql_query succeeded. checking handler\n"); if (result_handler) { if ((mdb->m_result = mysql_use_result(mdb->m_db_handle)) != NULL) { mdb->m_num_fields = mysql_num_fields(mdb->m_result); /* * We *must* fetch all rows */ while ((row = mysql_fetch_row(m_result))) { if (send) { /* the result handler returns 1 when it has * seen all the data it wants. However, we * loop to the end of the data. */ if (result_handler(ctx, mdb->m_num_fields, row)) { send = false; } } } sql_free_result(); } } Dmsg0(500, "db_sql_query finished\n"); retval = true; get_out: bdb_unlock(); return retval; } bool BDB_MYSQL::sql_query(const char *query, int flags) { int ret; bool retval = true; BDB_MYSQL *mdb = this; Dmsg1(500, "sql_query starts with '%s'\n", query); /* * We are starting a new query. reset everything. 
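* "Reset" means: the cached row count, row number and field number go back to
* -1, and any result stored by a previous query is released with
* mysql_free_result() before the new statement is sent.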
*/ mdb->m_num_rows = -1; mdb->m_row_number = -1; mdb->m_field_number = -1; if (mdb->m_result) { mysql_free_result(mdb->m_result); mdb->m_result = NULL; } query = enable_pkey(query); ret = mysql_query(mdb->m_db_handle, query); if (ret == 0) { Dmsg0(500, "we have a result\n"); if (flags & QF_STORE_RESULT) { mdb->m_result = mysql_store_result(mdb->m_db_handle); if (mdb->m_result != NULL) { mdb->m_num_fields = mysql_num_fields(mdb->m_result); Dmsg1(500, "we have %d fields\n", mdb->m_num_fields); mdb->m_num_rows = mysql_num_rows(mdb->m_result); Dmsg1(500, "we have %d rows\n", mdb->m_num_rows); } else { mdb->m_num_fields = 0; mdb->m_num_rows = mysql_affected_rows(mdb->m_db_handle); Dmsg1(500, "we have %d rows\n", mdb->m_num_rows); } } else { mdb->m_num_fields = 0; mdb->m_num_rows = mysql_affected_rows(mdb->m_db_handle); Dmsg1(500, "we have %d rows\n", mdb->m_num_rows); } } else { Dmsg0(500, "we failed\n"); mdb->m_status = 1; /* failed */ retval = false; } return retval; } void BDB_MYSQL::sql_free_result(void) { BDB_MYSQL *mdb = this; bdb_lock(); if (mdb->m_result) { mysql_free_result(mdb->m_result); mdb->m_result = NULL; } if (mdb->m_fields) { free(mdb->m_fields); mdb->m_fields = NULL; } mdb->m_num_rows = mdb->m_num_fields = 0; bdb_unlock(); } SQL_ROW BDB_MYSQL::sql_fetch_row(void) { BDB_MYSQL *mdb = this; if (!mdb->m_result) { return NULL; } else { return mysql_fetch_row(mdb->m_result); } } const char *BDB_MYSQL::sql_strerror(void) { BDB_MYSQL *mdb = this; return mysql_error(mdb->m_db_handle); } void BDB_MYSQL::sql_data_seek(int row) { BDB_MYSQL *mdb = this; return mysql_data_seek(mdb->m_result, row); } int BDB_MYSQL::sql_affected_rows(void) { BDB_MYSQL *mdb = this; return mysql_affected_rows(mdb->m_db_handle); } uint64_t BDB_MYSQL::sql_insert_autokey_record(const char *query, const char *table_name) { BDB_MYSQL *mdb = this; /* * First execute the insert query and then retrieve the currval. 
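* For MySQL the generated key is read back with mysql_insert_id() after
* mysql_affected_rows() confirms that exactly one row was inserted.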
*/ if (mysql_query(mdb->m_db_handle, query) != 0) { return 0; } mdb->m_num_rows = mysql_affected_rows(mdb->m_db_handle); if (mdb->m_num_rows != 1) { return 0; } mdb->changes++; return mysql_insert_id(mdb->m_db_handle); } SQL_FIELD *BDB_MYSQL::sql_fetch_field(void) { int i; MYSQL_FIELD *field; BDB_MYSQL *mdb = this; if (!mdb->m_fields || mdb->m_fields_size < mdb->m_num_fields) { if (mdb->m_fields) { free(mdb->m_fields); mdb->m_fields = NULL; } Dmsg1(500, "allocating space for %d fields\n", mdb->m_num_fields); mdb->m_fields = (SQL_FIELD *)malloc(sizeof(SQL_FIELD) * mdb->m_num_fields); mdb->m_fields_size = mdb->m_num_fields; for (i = 0; i < mdb->m_num_fields; i++) { Dmsg1(500, "filling field %d\n", i); if ((field = mysql_fetch_field(mdb->m_result)) != NULL) { mdb->m_fields[i].name = field->name; mdb->m_fields[i].max_length = field->max_length; mdb->m_fields[i].type = field->type; mdb->m_fields[i].flags = field->flags; Dmsg4(500, "sql_fetch_field finds field '%s' has length='%d' type='%d' and IsNull=%d\n", mdb->m_fields[i].name, mdb->m_fields[i].max_length, mdb->m_fields[i].type, mdb->m_fields[i].flags); } } } /* * Increment field number for the next time around */ return &mdb->m_fields[mdb->m_field_number++]; } bool BDB_MYSQL::sql_field_is_not_null(int field_type) { return IS_NOT_NULL(field_type); } bool BDB_MYSQL::sql_field_is_numeric(int field_type) { return IS_NUM(field_type); } /* * Returns true if OK * false if failed */ bool BDB_MYSQL::sql_batch_start(JCR *jcr) { BDB_MYSQL *mdb = this; bool retval; bdb_lock(); retval = sql_query("CREATE TEMPORARY TABLE batch (" "FileIndex integer not null," "JobId integer not null," "Path blob," "Name blob," "LStat tinyblob," "MD5 tinyblob," "DeltaSeq integer" "/*PKEY, DummyPkey INTEGER AUTO_INCREMENT PRIMARY KEY*/)"); bdb_unlock(); /* * Keep track of the number of changes in batch mode. */ mdb->changes = 0; return retval; } /* set error to something to abort operation */ /* * Returns true if OK * false if failed */ bool BDB_MYSQL::sql_batch_end(JCR *jcr, const char *error) { BDB_MYSQL *mdb = this; mdb->m_status = 0; /* * Flush any pending inserts. */ if (mdb->changes) { return sql_query(mdb->cmd); } return true; } /* * Returns true if OK * false if failed */ bool BDB_MYSQL::sql_batch_insert(JCR *jcr, ATTR_DBR *ar) { BDB_MYSQL *mdb = this; const char *digest; char ed1[50]; mdb->esc_name = check_pool_memory_size(mdb->esc_name, mdb->fnl*2+1); bdb_escape_string(jcr, mdb->esc_name, mdb->fname, mdb->fnl); mdb->esc_path = check_pool_memory_size(mdb->esc_path, mdb->pnl*2+1); bdb_escape_string(jcr, mdb->esc_path, mdb->path, mdb->pnl); if (ar->Digest == NULL || ar->Digest[0] == 0) { digest = "0"; } else { digest = ar->Digest; } /* * Try to batch up multiple inserts using multi-row inserts. */ if (mdb->changes == 0) { Mmsg(cmd, "INSERT INTO batch(FileIndex, JobId, Path, Name, LStat, MD5, DeltaSeq) VALUES " "(%d,%s,'%s','%s','%s','%s',%u)", ar->FileIndex, edit_int64(ar->JobId,ed1), mdb->esc_path, mdb->esc_name, ar->attr, digest, ar->DeltaSeq); mdb->changes++; } else { /* * We use the esc_obj for temporary storage otherwise * we keep on copying data. */ Mmsg(mdb->esc_obj, ",(%d,%s,'%s','%s','%s','%s',%u)", ar->FileIndex, edit_int64(ar->JobId,ed1), mdb->esc_path, mdb->esc_name, ar->attr, digest, ar->DeltaSeq); pm_strcat(mdb->cmd, mdb->esc_obj); mdb->changes++; } /* * See if we need to flush the query buffer filled * with multi-row inserts. 
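* Rows are appended to mdb->cmd as additional ",(...)" value tuples; once
* MYSQL_CHANGES_PER_BATCH_INSERT rows have been queued the statement is sent
* and the counter restarts. sql_batch_end() flushes whatever is still pending.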
*/ if ((mdb->changes % MYSQL_CHANGES_PER_BATCH_INSERT) == 0) { if (!sql_query(mdb->cmd)) { mdb->changes = 0; return false; } else { mdb->changes = 0; } } return true; } const char *BDB_MYSQL::search_op(JCR *jcr, const char *table_col, char *value, POOLMEM **esc, POOLMEM **dest) { int len = strlen(value); *esc = check_pool_memory_size(*esc, len*2+1); bdb_escape_string(jcr, *esc, value, len); Mmsg(dest, " MATCH(%s) AGAINST ('%s') ", table_col, *esc); return *dest; } #endif /* HAVE_MYSQL */ bacula-15.0.3/src/cats/make_mysql_tables.in0000644000175000017500000005351714771010173020442 0ustar bsbuildbsbuild#!/bin/sh # # shell script to create Bacula MySQL tables # # Copyright (C) 2000-2023 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # # Important note: # We cannot provide support for performance issues # if you changed the default schema. In partcular # if you change any of the indexes. # # Useful commands: # mysql -u root # show databases; # show tables from ; # show columns from from ; # # use mysql; # select user from user; # bindir=@MYSQL_BINDIR@ PATH="$bindir:$PATH" db_name=${db_name:-@db_name@} if mysql $* -f < '$wd/$args{db_name}.sql'"); print "Error while executing sqlite dump $!\n"; return 1; } # TODO: use just ENV and drop the pg_service.conf file sub setup_env_pgsql { my %args = @_; my $username = getpwuid $ENV{'UID'}; umask(0077); if ($args{db_address}) { $ENV{PGHOST}=$args{db_address}; } if ($args{db_socket}) { $ENV{PGHOST}=$args{db_socket}; } if ($args{db_port}) { $ENV{PGPORT}=$args{db_port}; } if ($args{db_user}) { $ENV{PGUSER}=$args{db_user}; } if ($args{db_password}) { $ENV{PGPASSWORD}=$args{db_password}; } $ENV{PGDATABASE}=$args{db_name}; system("echo '\\q' | HOME='$wd' psql") == 0 or die "$username doesn't have access to the catalog database\n"; } sub dump_pgsql { my %args = @_; setup_env_pgsql(%args); exec("HOME='$wd' pg_dump -c > '$wd/$args{db_name}.sql'"); print "Error while executing postgres dump $!\n"; return 1; # in case of error } sub analyse_pgsql { my %args = @_; setup_env_pgsql(%args); my @output =`LANG=C HOME='$wd' vacuumdb -z 2>&1`; my $exitcode = $? >> 8; print grep { !/^WARNING:\s+skipping\s\"(pg_|sql_)/ } @output; if ($exitcode != 0) { print "Error while executing postgres analyse. 
Exitcode=$exitcode\n"; } return $exitcode; } sub setup_env_mysql { my %args = @_; umask(0077); unlink("$wd/.my.cnf"); open(MY, ">$wd/.my.cnf") or die "Can't open $wd/.my.cnf for writing $@"; $args{db_address} = $args{db_address} || "localhost"; my $addr = "host=$args{db_address}"; if ($args{db_socket}) { # unix socket is fastest than net socket $addr = "socket=\"$args{db_socket}\""; } my $mode = $args{mode} || 'client'; print MY "[$mode] $addr user=\"$args{db_user}\" password=\"$args{db_password}\" "; if ($args{db_port}) { print MY "port=$args{db_port}\n"; } close(MY); } sub dump_mysql { my %args = @_; setup_env_mysql(%args); exec("HOME='$wd' mysqldump -f --opt $args{db_name} > '$wd/$args{db_name}.sql'"); print "Error while executing mysql dump $!\n"; return 1; } sub analyse_mysql { my %args = @_; $args{mode} = 'mysqlcheck'; setup_env_mysql(%args); exec("HOME='$wd' mysqlcheck -a $args{db_name}"); print "Error while executing mysql analyse $!\n"; return 1; } sub handle_catalog { my ($mode, %args) = @_; if (exists $args{working_dir} and $wd ne $args{working_dir}) { $wd = $args{working_dir}; } if ($args{db_type} eq 'SQLite3') { $ENV{PATH}="@SQLITE_BINDIR@:$ENV{PATH}"; if ($mode eq 'dump') { dump_sqlite3(%args); } } elsif ($args{db_type} eq 'PostgreSQL') { $ENV{PATH}="@POSTGRESQL_BINDIR@:$ENV{PATH}"; if ($mode eq 'dump') { dump_pgsql(%args); } else { analyse_pgsql(%args); } } elsif ($args{db_type} eq 'MySQL') { $ENV{PATH}="@MYSQL_BINDIR@:$ENV{PATH}"; if ($mode eq 'dump') { dump_mysql(%args); } else { analyse_mysql(%args); } } else { die "This database type isn't supported"; } } open(FP, "$dir_conf -C '$cat'|") or die "Can't get catalog information $@"; # catalog=MyCatalog # db_type=SQLite # db_name=regress # db_driver= # db_user=regress # db_password= # db_address= # db_port=0 # db_socket= my %cfg; while(my $l = ) { if ($l =~ /catalog=(.+)/) { if (exists $cfg{catalog} and $cfg{catalog} eq $cat) { exit handle_catalog($mode, %cfg); } %cfg = (); # reset } if ($l =~ /(\w+)=(.+)/) { $cfg{$1}=$2; } } if (exists $cfg{catalog} and $cfg{catalog} eq $cat) { exit handle_catalog($mode, %cfg); } print "Can't find your catalog ($cat) in director configuration\n"; exit 1; bacula-15.0.3/src/cats/grant_postgresql_privileges.in0000644000175000017500000000663014771010173022567 0ustar bsbuildbsbuild#!/bin/sh # # shell script to grant privileges to the bacula database # # Copyright (C) 2000-2023 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # db_user=${db_user:-@db_user@} bindir=@POSTGRESQL_BINDIR@ PATH="$PATH:$bindir" db_name=${db_name:-@db_name@} db_password=@db_password@ if [ "$db_password" != "" ]; then pass="password '$db_password'" fi psql -f - -d ${db_name} $* <m_link)); } mdb->m_db_driver_type = SQL_DRIVER_TYPE_POSTGRESQL; mdb->m_db_type = SQL_TYPE_POSTGRESQL; mdb->m_db_driver = bstrdup("PostgreSQL"); mdb->errmsg = get_pool_memory(PM_EMSG); /* get error message buffer */ mdb->errmsg[0] = 0; mdb->cmd = get_pool_memory(PM_EMSG); /* get command buffer */ mdb->cached_path = get_pool_memory(PM_FNAME); mdb->cached_path_id = 0; mdb->m_ref_count = 1; mdb->fname = get_pool_memory(PM_FNAME); mdb->path = get_pool_memory(PM_FNAME); mdb->esc_name = get_pool_memory(PM_FNAME); mdb->esc_path = get_pool_memory(PM_FNAME); mdb->esc_obj = get_pool_memory(PM_FNAME); mdb->m_use_fatal_jmsg = true; /* Initialize the private members. 
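* The connection handle and result pointer stay NULL until
* bdb_open_database() succeeds; m_buf is an additional scratch buffer that
* the MySQL driver above does not allocate.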
*/ mdb->m_db_handle = NULL; mdb->m_result = NULL; mdb->m_buf = get_pool_memory(PM_FNAME); db_list->append(this); } BDB_POSTGRESQL::~BDB_POSTGRESQL() { } /* * Initialize database data structure. In principal this should * never have errors, or it is really fatal. */ BDB *db_init_database(JCR *jcr, const char *db_driver, const char *db_name, const char *db_user, const char *db_password, const char *db_address, int db_port, const char *db_socket, const char *db_ssl_mode, const char *db_ssl_key, const char *db_ssl_cert, const char *db_ssl_ca, const char *db_ssl_capath, const char *db_ssl_cipher, bool mult_db_connections, bool disable_batch_insert) { BDB_POSTGRESQL *mdb = NULL; if (!db_user) { Jmsg(jcr, M_FATAL, 0, _("A user name for PostgreSQL must be supplied.\n")); return NULL; } P(mutex); /* lock DB queue */ if (db_list && !mult_db_connections) { /* * Look to see if DB already open */ foreach_dlist(mdb, db_list) { if (mdb->bdb_match_database(db_driver, db_name, db_address, db_port)) { Dmsg1(dbglvl_info, "DB REopen %s\n", db_name); mdb->increment_refcount(); goto get_out; } } } Dmsg0(dbglvl_info, "db_init_database first time\n"); /* Create the global Bacula db context */ mdb = New(BDB_POSTGRESQL()); if (!mdb) goto get_out; /* * Initialize the parent class members. */ mdb->m_db_name = bstrdup(db_name); mdb->m_db_user = bstrdup(db_user); if (db_password) { mdb->m_db_password = bstrdup(db_password); } if (db_address) { mdb->m_db_address = bstrdup(db_address); } if (db_socket) { mdb->m_db_socket = bstrdup(db_socket); } if (db_ssl_mode) { mdb->m_db_ssl_mode = bstrdup(db_ssl_mode); } else { mdb->m_db_ssl_mode = bstrdup("prefer"); } if (db_ssl_key) { mdb->m_db_ssl_key = bstrdup(db_ssl_key); } if (db_ssl_cert) { mdb->m_db_ssl_cert = bstrdup(db_ssl_cert); } if (db_ssl_ca) { mdb->m_db_ssl_ca = bstrdup(db_ssl_ca); } mdb->m_db_port = db_port; if (disable_batch_insert) { mdb->m_disabled_batch_insert = true; mdb->m_have_batch_insert = false; } else { mdb->m_disabled_batch_insert = false; #ifdef USE_BATCH_FILE_INSERT #if defined(HAVE_POSTGRESQL_BATCH_FILE_INSERT) || defined(HAVE_PQISTHREADSAFE) #ifdef HAVE_PQISTHREADSAFE mdb->m_have_batch_insert = PQisthreadsafe(); #else mdb->m_have_batch_insert = true; #endif /* HAVE_PQISTHREADSAFE */ #else mdb->m_have_batch_insert = true; #endif /* HAVE_POSTGRESQL_BATCH_FILE_INSERT || HAVE_PQISTHREADSAFE */ #else mdb->m_have_batch_insert = false; #endif /* USE_BATCH_FILE_INSERT */ } mdb->m_allow_transactions = mult_db_connections; /* At this time, when mult_db_connections == true, this is for * specific console command such as bvfs or batch mode, and we don't * want to share a batch mode or bvfs. In the future, we can change * the creation function to add this parameter. */ mdb->m_dedicated = mult_db_connections; get_out: V(mutex); return mdb; } /* Check that the database corresponds to the encoding we want * return: 0 OK, M_ERROR, M_FATAL, M_WARNING to print errmsg */ static int pgsql_check_database_encoding(JCR *jcr, BDB_POSTGRESQL *mdb) { SQL_ROW row; int ret = false; if (!mdb->sql_query("SELECT getdatabaseencoding()", QF_STORE_RESULT)) { return M_ERROR; } if ((row = mdb->sql_fetch_row()) == NULL) { Mmsg1(mdb->errmsg, _("Can't check database encoding. 
Error fetching row: %s\n"), mdb->sql_strerror()); return M_ERROR; } else { ret = bstrcmp(row[0], "SQL_ASCII"); if (ret) { /* If we are in SQL_ASCII, we can force the client_encoding to SQL_ASCII too */ mdb->sql_query("SET client_encoding TO 'SQL_ASCII'"); } else { /* Something is wrong with database encoding */ Mmsg(mdb->errmsg, _("Encoding error for database \"%s\". Wanted SQL_ASCII, got %s\n"), mdb->get_db_name(), row[0]); return M_WARNING; } } return 0; } /* * Get the time offset of the Postgress server in second, can be negative * * SELECT CURRENT_TIMESTAMP; * 2022-01-20 07:49:35.19033-05 <== US/Eastern ==> return -5*3600 * 2022-01-20 16:27:30.096223+03:30 <== Asia/Tehran ==> return 3*3600+30*60 * 2022-01-20 12:59:33.42909+00 <= UTC ==> return 0 */ int get_utc_off(const char *st, int *offset) { int err = 0; /* no error */ const char *p = st; int i = strlen(p) - 1; int off = 0; int off_multiplier = 60; bool got_colon = false; /* searching for [0-9:+-] starting from the end */ while (i>=0) { if (isdigit(p[i])) { off = off + (p[i]-'0') * off_multiplier; switch (off_multiplier) { case 60: off_multiplier = 600; break; case 600: off_multiplier = 3600; break; case 3600: off_multiplier = 36000; break; case 36000: off_multiplier = 0; break; case 0: err = 1; // too much digit default: break; } } else if (p[i] == ':') { got_colon = true; if (off_multiplier != 3600) { err = 1; } } else if (p[i] == '+' || p[i] == '-' || (p[i] == ' ' && off_multiplier == 3600)) { if (got_colon && off_multiplier != 0) { err = 1; } if (off_multiplier == 3600) { off *= 60; // there was no minutes (no colon) } if (p[i] == '-') { off = - off; } *offset = off; return err; } i--; } return 1; } /* return current system time zone offset in second using strfstime() */ int get_system_utc_offset() { char buf[128]; time_t now; struct tm tm; time(&now); localtime_r(&now, &tm); int l=strftime(buf, sizeof(buf), "%z", &tm); /* expect "+hhmm" or "-hhmm" */ if (l != 5) { return 0; /* Unknown timezone on the system (Windows can do that) */ } int off = ((buf[1]-'0')*10+(buf[2]-'0'))*3600 + ((buf[3]-'0')*10+(buf[4]-'0'))*60; if (buf[0] == '-') { off = -off; } return off; } static int pgsql_get_utc_offset(BDB_POSTGRESQL *mdb, int *offset) { SQL_ROW row; if (!mdb->sql_query("SELECT CURRENT_TIMESTAMP;", QF_STORE_RESULT)) { return M_ERROR; } if ((row = mdb->sql_fetch_row()) == NULL) { Mmsg1(mdb->errmsg, _("Can't retrieve time offset. Error fetching row: %s\n"), mdb->sql_strerror()); return M_ERROR; } int err = get_utc_off(row[0], offset); if (err) { Mmsg1(mdb->errmsg, _("Can't retrieve time offset. Invalid time format: %s\n"), row[0]); return M_WARNING; } return 0; } /* * Now actually open the database. This can generate errors, * which are returned in the errmsg * * DO NOT close the database or delete mdb here !!!! */ bool BDB_POSTGRESQL::bdb_open_database(JCR *jcr) { bool retval = false; int errstat; char buf[10], *port; BDB_POSTGRESQL *mdb = this; int print_msg=0; /* 1: warning, 2: error, 3 fatal */ P(mutex); if (mdb->m_connected) { retval = true; goto get_out; } if ((errstat=rwl_init(&mdb->m_lock)) != 0) { berrno be; Mmsg1(&mdb->errmsg, _("Unable to initialize DB lock. ERR=%s\n"), be.bstrerror(errstat)); goto get_out; } if (mdb->m_db_port) { bsnprintf(buf, sizeof(buf), "%d", mdb->m_db_port); port = buf; } else { port = NULL; } /* Tells libpq that the SSL library has already been initialized */ PQinitSSL(0); /* If connection fails, try at 5 sec intervals for 30 seconds. 
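 * (Note: the loop below makes up to 6 connection attempts and calls
 * bmicrosleep(5, 0) after each failed one, which is where the roughly
 * 30-second window mentioned above comes from.)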
*/ for (int retry=0; retry < 6; retry++) { #if PG_VERSION_NUM < 90000 /* connect to the database */ /* Old "depreciated" connection call */ mdb->m_db_handle = PQsetdbLogin( mdb->m_db_address, /* default = localhost */ port, /* default port */ NULL, /* pg options */ NULL, /* tty, ignored */ mdb->m_db_name, /* database name */ mdb->m_db_user, /* login name */ mdb->m_db_password); /* password */ #else /* Code for Postgresql 9.0 and greater */ const char *keywords[10] = {"host", "port", "dbname", "user", "password", "sslmode", "sslkey", "sslcert", "sslrootcert", NULL }; const char *values[10] = {mdb->m_db_address, /* default localhost */ port, /* default port */ mdb->m_db_name, mdb->m_db_user, mdb->m_db_password, mdb->m_db_ssl_mode, mdb->m_db_ssl_key, mdb->m_db_ssl_cert, mdb->m_db_ssl_ca, NULL }; mdb->m_db_handle = PQconnectdbParams(keywords, values, 0); #endif /* If no connect, try once more in case it is a timing problem */ if (PQstatus(mdb->m_db_handle) == CONNECTION_OK) { break; } bmicrosleep(5, 0); } Dmsg0(dbglvl_info, "pg_real_connect done\n"); Dmsg3(dbglvl_info, "db_user=%s db_name=%s db_password=%s\n", mdb->m_db_user, mdb->m_db_name, mdb->m_db_password==NULL?"(NULL)":mdb->m_db_password); #ifdef HAVE_OPENSSL #define USE_OPENSSL 1 SSL *ssl; if (PQgetssl(mdb->m_db_handle) != NULL) { Dmsg0(dbglvl_info, "SSL in use\n"); ssl = (SSL *)PQgetssl(mdb->m_db_handle); Dmsg2(dbglvl_info, "Version:%s Cipher:%s\n", SSL_get_version(ssl), SSL_get_cipher(ssl)); } else { Dmsg0(dbglvl_info, "SSL not in use\n"); } #endif if (PQstatus(mdb->m_db_handle) != CONNECTION_OK) { Mmsg2(&mdb->errmsg, _("Unable to connect to PostgreSQL server. Database=%s User=%s\n" "Possible causes: SQL server not running; password incorrect; max_connections exceeded.\n"), mdb->m_db_name, mdb->m_db_user); goto get_out; } mdb->m_connected = true; if (!bdb_check_version(jcr)) { print_msg = M_FATAL; goto get_out; } sql_query("SET datestyle TO 'ISO, YMD'"); sql_query("SET cursor_tuple_fraction=1"); sql_query("SET client_min_messages TO WARNING"); /* Check that PGSQL timezone match system timezone */ { int pgsql_offset = 0; int sys_offset = get_system_utc_offset(); int ret = pgsql_get_utc_offset(this, &pgsql_offset); if (ret != 0) { Qmsg(jcr, ret, 0, "%s", errmsg); } else { if (sys_offset != pgsql_offset) { /* try again in case we would be just on Daylight Saving Time switch */ sys_offset = get_system_utc_offset(); } if (sys_offset != pgsql_offset) { Qmsg(jcr, M_WARNING, 0, _("Postgresql and system timezone mismatch detected\n")); } } } /* * Tell PostgreSQL we are using standard conforming strings and avoid warnings such as: * WARNING: nonstandard use of \\ in a string literal */ sql_query("SET standard_conforming_strings=on"); /* Check that encoding is SQL_ASCII */ print_msg = pgsql_check_database_encoding(jcr, mdb); retval = true; get_out: V(mutex); /* We can use Jmsg() only outside of the P/V because it can try to write to * the DB, we should even not issue a Jmsg here */ if (print_msg) { Jmsg(jcr, print_msg, 0, "%s", errmsg); } dequeue_daemon_messages(jcr); return retval; } void BDB_POSTGRESQL::bdb_close_database(JCR *jcr) { BDB_POSTGRESQL *mdb = this; if (mdb->m_connected) { bdb_end_transaction(jcr); } P(mutex); mdb->m_ref_count--; if (mdb->m_ref_count == 0) { if (mdb->m_connected) { sql_free_result(); } db_list->remove(mdb); if (mdb->m_connected && mdb->m_db_handle) { PQfinish(mdb->m_db_handle); } if (is_rwl_valid(&mdb->m_lock)) { rwl_destroy(&mdb->m_lock); } free_pool_memory(mdb->errmsg); free_pool_memory(mdb->cmd); 
free_pool_memory(mdb->cached_path); free_pool_memory(mdb->fname); free_pool_memory(mdb->path); free_pool_memory(mdb->esc_name); free_pool_memory(mdb->esc_path); free_pool_memory(mdb->esc_obj); free_pool_memory(mdb->m_buf); if (mdb->m_db_driver) { free(mdb->m_db_driver); } if (mdb->m_db_name) { free(mdb->m_db_name); } if (mdb->m_db_user) { free(mdb->m_db_user); } if (mdb->m_db_password) { free(mdb->m_db_password); } if (mdb->m_db_address) { free(mdb->m_db_address); } if (mdb->m_db_socket) { free(mdb->m_db_socket); } if (mdb->m_db_ssl_mode) { free(mdb->m_db_ssl_mode); } if (mdb->m_db_ssl_key) { free(mdb->m_db_ssl_key); } if (mdb->m_db_ssl_cert) { free(mdb->m_db_ssl_cert); } if (mdb->m_db_ssl_ca) { free(mdb->m_db_ssl_ca); } delete mdb; if (db_list->size() == 0) { delete db_list; db_list = NULL; } } V(mutex); } void BDB_POSTGRESQL::bdb_thread_cleanup(void) { } /* * Escape strings so PostgreSQL is happy * * len is the length of the old string. Your new * string must be long enough (max 2*old+1) to hold * the escaped output. */ void BDB_POSTGRESQL::bdb_escape_string(JCR *jcr, char *snew, const char *old, int len) { BDB_POSTGRESQL *mdb = this; int failed; PQescapeStringConn(mdb->m_db_handle, snew, old, len, &failed); if (failed) { Jmsg(jcr, M_FATAL, 0, _("PQescapeStringConn returned non-zero.\n")); /* failed on encoding, probably invalid multibyte encoding in the source string see PQescapeStringConn documentation for details. */ Dmsg0(dbglvl_err, "PQescapeStringConn failed\n"); } } /* * Escape binary so that PostgreSQL is happy * */ char *BDB_POSTGRESQL::bdb_escape_object(JCR *jcr, char *old, int len) { size_t new_len; unsigned char *obj; BDB_POSTGRESQL *mdb = this; mdb->esc_obj[0] = 0; obj = PQescapeByteaConn(mdb->m_db_handle, (unsigned const char *)old, len, &new_len); if (!obj) { Jmsg(jcr, M_FATAL, 0, _("PQescapeByteaConn returned NULL.\n")); } else { mdb->esc_obj = check_pool_memory_size(mdb->esc_obj, new_len+1); memcpy(mdb->esc_obj, obj, new_len); mdb->esc_obj[new_len] = 0; PQfreemem(obj); } return (char *)mdb->esc_obj; } /* * Unescape binary object so that PostgreSQL is happy * */ void BDB_POSTGRESQL::bdb_unescape_object(JCR *jcr, char *from, int32_t expected_len, POOLMEM **dest, int32_t *dest_len) { size_t new_len; unsigned char *obj; if (!from) { *dest[0] = 0; *dest_len = 0; return; } obj = PQunescapeBytea((unsigned const char *)from, &new_len); if (!obj) { Jmsg(jcr, M_FATAL, 0, _("PQunescapeByteaConn returned NULL.\n")); } *dest_len = new_len; *dest = check_pool_memory_size(*dest, new_len+1); memcpy(*dest, obj, new_len); (*dest)[new_len]=0; PQfreemem(obj); Dmsg1(dbglvl_info, "obj size: %d\n", *dest_len); } /* * Start a transaction. This groups inserts and makes things more efficient. * Usually started when inserting file attributes. */ void BDB_POSTGRESQL::bdb_start_transaction(JCR *jcr) { BDB_POSTGRESQL *mdb = this; if (jcr) { if (!jcr->attr) { jcr->attr = get_pool_memory(PM_FNAME); } if (!jcr->ar) { jcr->ar = (ATTR_DBR *)bmalloc(sizeof(ATTR_DBR)); } } /* * This is turned off because transactions break if * multiple simultaneous jobs are run. 
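 * When transactions are allowed, the effect on the server is roughly the
 * following (illustrative sketch only; the real statements are issued by
 * sql_query() below):
 *    BEGIN; INSERT ...; INSERT ...;   -- up to 25,000 changes
 *    COMMIT; BEGIN; INSERT ...; ...
 * bdb_end_transaction() sends the final COMMIT and resets the change counter.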
*/ if (!mdb->m_allow_transactions) { return; } bdb_lock(); /* Allow only 25,000 changes per transaction */ if (mdb->m_transaction && changes > 25000) { bdb_end_transaction(jcr); } if (!mdb->m_transaction) { sql_query("BEGIN"); /* begin transaction */ Dmsg0(dbglvl_info, "Start PosgreSQL transaction\n"); mdb->m_transaction = true; } bdb_unlock(); } void BDB_POSTGRESQL::bdb_end_transaction(JCR *jcr) { BDB_POSTGRESQL *mdb = this; if (!mdb->m_allow_transactions) { return; } bdb_lock(); if (mdb->m_transaction) { sql_query("COMMIT"); /* end transaction */ mdb->m_transaction = false; Dmsg1(dbglvl_info, "End PostgreSQL transaction changes=%d\n", changes); } changes = 0; bdb_unlock(); } /* * Submit a general SQL command, and for each row returned, * the result_handler is called with the ctx. */ bool BDB_POSTGRESQL::bdb_big_sql_query(const char *query, DB_RESULT_HANDLER *result_handler, void *ctx) { BDB_POSTGRESQL *mdb = this; SQL_ROW row; bool retval = false; bool in_transaction = mdb->m_transaction; Dmsg1(dbglvl_info, "db_sql_query starts with '%s'\n", query); mdb->errmsg[0] = 0; /* This code handles only SELECT queries */ if (strncasecmp(query, "SELECT", 6) != 0) { return bdb_sql_query(query, result_handler, ctx); } if (!result_handler) { /* no need of big_query without handler */ return false; } bdb_lock(); if (!in_transaction) { /* CURSOR needs transaction */ sql_query("BEGIN"); } Mmsg(m_buf, "DECLARE _bac_cursor CURSOR FOR %s", query); if (!sql_query(mdb->m_buf)) { Mmsg(mdb->errmsg, _("Query failed: %s: ERR=%s\n"), mdb->m_buf, sql_strerror()); Dmsg1(dbglvl_err, "%s\n", mdb->errmsg); goto get_out; } do { if (!sql_query("FETCH 100 FROM _bac_cursor")) { Mmsg(mdb->errmsg, _("Fetch failed: ERR=%s\n"), sql_strerror()); Dmsg1(dbglvl_err, "%s\n", mdb->errmsg); goto get_out; } while ((row = sql_fetch_row()) != NULL) { Dmsg1(dbglvl_info, "Fetching %d rows\n", mdb->m_num_rows); if (result_handler(ctx, mdb->m_num_fields, row)) break; } PQclear(mdb->m_result); m_result = NULL; } while (m_num_rows > 0); /* TODO: Can probably test against 100 */ sql_query("CLOSE _bac_cursor"); Dmsg0(dbglvl_info, "db_big_sql_query finished\n"); sql_free_result(); retval = true; get_out: if (!in_transaction) { sql_query("COMMIT"); /* end transaction */ } bdb_unlock(); return retval; } /* * Submit a general SQL command, and for each row returned, * the result_handler is called with the ctx. */ bool BDB_POSTGRESQL::bdb_sql_query(const char *query, DB_RESULT_HANDLER *result_handler, void *ctx) { SQL_ROW row; bool retval = true; BDB_POSTGRESQL *mdb = this; Dmsg1(dbglvl_info, "db_sql_query starts with '%s'\n", query); bdb_lock(); mdb->errmsg[0] = 0; if (!sql_query(query, QF_STORE_RESULT)) { Mmsg(mdb->errmsg, _("Query failed: %s: ERR=%s\n"), query, sql_strerror()); Dmsg0(dbglvl_err, "db_sql_query failed\n"); retval = false; goto get_out; } Dmsg0(dbglvl_info, "db_sql_query succeeded. checking handler\n"); if (result_handler) { Dmsg0(dbglvl_dbg, "db_sql_query invoking handler\n"); while ((row = sql_fetch_row())) { Dmsg0(dbglvl_dbg, "db_sql_query sql_fetch_row worked\n"); if (result_handler(ctx, mdb->m_num_fields, row)) break; } sql_free_result(); } Dmsg0(dbglvl_info, "db_sql_query finished\n"); get_out: bdb_unlock(); return retval; } /* * If this routine returns false (failure), Bacula expects * that no result has been stored. * This is where QueryDB calls to with Postgresql. 
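 * (In other words, this is the low-level routine that bdb_sql_query() and the
 * QueryDB path end up in for PostgreSQL: it runs the statement with PQexec()
 * and keeps the PGresult in m_result for sql_fetch_row()/sql_fetch_field().)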
* * Returns: true on success * false on failure * */ bool BDB_POSTGRESQL::sql_query(const char *query, int flags) { int i; bool retval = false; BDB_POSTGRESQL *mdb = this; Dmsg1(dbglvl_info, "sql_query starts with '%s'\n", query); /* We are starting a new query. reset everything. */ mdb->m_num_rows = -1; mdb->m_row_number = -1; mdb->m_field_number = -1; if (mdb->m_result) { PQclear(mdb->m_result); /* hmm, someone forgot to free?? */ mdb->m_result = NULL; } for (i = 0; i < 10; i++) { mdb->m_result = PQexec(mdb->m_db_handle, query); if (mdb->m_result) { break; } bmicrosleep(5, 0); } if (!mdb->m_result) { Dmsg1(dbglvl_err, "Query failed: %s\n", query); goto get_out; } mdb->m_status = PQresultStatus(mdb->m_result); if (mdb->m_status == PGRES_TUPLES_OK || mdb->m_status == PGRES_COMMAND_OK) { Dmsg0(dbglvl_dbg, "we have a result\n"); /* How many fields in the set? */ mdb->m_num_fields = (int)PQnfields(mdb->m_result); Dmsg1(dbglvl_dbg, "we have %d fields\n", mdb->m_num_fields); mdb->m_num_rows = PQntuples(mdb->m_result); Dmsg1(dbglvl_dbg, "we have %d rows\n", mdb->m_num_rows); mdb->m_row_number = 0; /* we can start to fetch something */ mdb->m_status = 0; /* succeed */ retval = true; } else { Dmsg1(dbglvl_err, "Result status failed: %s\n", query); goto get_out; } Dmsg0(dbglvl_info, "sql_query finishing\n"); goto ok_out; get_out: Dmsg0(dbglvl_err, "we failed\n"); PQclear(mdb->m_result); mdb->m_result = NULL; mdb->m_status = 1; /* failed */ ok_out: return retval; } void BDB_POSTGRESQL::sql_free_result(void) { BDB_POSTGRESQL *mdb = this; bdb_lock(); if (mdb->m_result) { PQclear(mdb->m_result); mdb->m_result = NULL; } if (mdb->m_rows) { free(mdb->m_rows); mdb->m_rows = NULL; } if (mdb->m_fields) { free(mdb->m_fields); mdb->m_fields = NULL; } mdb->m_num_rows = mdb->m_num_fields = 0; bdb_unlock(); } SQL_ROW BDB_POSTGRESQL::sql_fetch_row(void) { SQL_ROW row = NULL; /* by default, return NULL */ BDB_POSTGRESQL *mdb = this; Dmsg0(dbglvl_info, "sql_fetch_row start\n"); if (mdb->m_num_fields == 0) { /* No field, no row */ Dmsg0(dbglvl_err, "sql_fetch_row finishes returning NULL, no fields\n"); return NULL; } if (!mdb->m_rows || mdb->m_rows_size < mdb->m_num_fields) { if (mdb->m_rows) { Dmsg0(dbglvl_dbg, "sql_fetch_row freeing space\n"); free(mdb->m_rows); } Dmsg1(dbglvl_dbg, "we need space for %d bytes\n", sizeof(char *) * mdb->m_num_fields); mdb->m_rows = (SQL_ROW)malloc(sizeof(char *) * mdb->m_num_fields); mdb->m_rows_size = mdb->m_num_fields; /* Now reset the row_number now that we have the space allocated */ mdb->m_row_number = 0; } /* If still within the result set */ if (mdb->m_row_number >= 0 && mdb->m_row_number < mdb->m_num_rows) { Dmsg2(dbglvl_dbg, "sql_fetch_row row number '%d' is acceptable (0..%d)\n", mdb->m_row_number, m_num_rows); /* Get each value from this row */ for (int j = 0; j < mdb->m_num_fields; j++) { mdb->m_rows[j] = PQgetvalue(mdb->m_result, mdb->m_row_number, j); Dmsg2(dbglvl_dbg, "sql_fetch_row field '%d' has value '%s'\n", j, mdb->m_rows[j]); } mdb->m_row_number++; /* Increment the row number for the next call */ row = mdb->m_rows; } else { Dmsg2(dbglvl_dbg, "sql_fetch_row row number '%d' is NOT acceptable (0..%d)\n", mdb->m_row_number, m_num_rows); } Dmsg1(dbglvl_info, "sql_fetch_row finishes returning %p\n", row); return row; } const char *BDB_POSTGRESQL::sql_strerror(void) { BDB_POSTGRESQL *mdb = this; return PQerrorMessage(mdb->m_db_handle); } void BDB_POSTGRESQL::sql_data_seek(int row) { BDB_POSTGRESQL *mdb = this; /* Set the row number to be returned on the next call to 
sql_fetch_row */ mdb->m_row_number = row; } int BDB_POSTGRESQL::sql_affected_rows(void) { BDB_POSTGRESQL *mdb = this; return (unsigned)str_to_int32(PQcmdTuples(mdb->m_result)); } uint64_t BDB_POSTGRESQL::sql_insert_autokey_record(const char *query, const char *table_name) { uint64_t id = 0; char sequence[NAMEDATALEN-1]; char getkeyval_query[NAMEDATALEN+50]; PGresult *p_result; BDB_POSTGRESQL *mdb = this; /* First execute the insert query and then retrieve the currval. */ if (!sql_query(query)) { return 0; } mdb->m_num_rows = sql_affected_rows(); if (mdb->m_num_rows != 1) { return 0; } mdb->changes++; /* * Obtain the current value of the sequence that * provides the serial value for primary key of the table. * * currval is local to our session. It is not affected by * other transactions. * * Determine the name of the sequence. * PostgreSQL automatically creates a sequence using *
<table>_<column>_seq. * At the time of writing, all tables used this format * for their primary key: <table>
id * Except for basefiles which has a primary key on baseid. * Therefore, we need to special case that one table. * * everything else can use the PostgreSQL formula. */ if (strcasecmp(table_name, "basefiles") == 0) { bstrncpy(sequence, "basefiles_baseid", sizeof(sequence)); } else { bstrncpy(sequence, table_name, sizeof(sequence)); bstrncat(sequence, "_", sizeof(sequence)); bstrncat(sequence, table_name, sizeof(sequence)); bstrncat(sequence, "id", sizeof(sequence)); } bstrncat(sequence, "_seq", sizeof(sequence)); bsnprintf(getkeyval_query, sizeof(getkeyval_query), "SELECT currval('%s')", sequence); Dmsg1(dbglvl_info, "sql_insert_autokey_record executing query '%s'\n", getkeyval_query); for (int i = 0; i < 10; i++) { p_result = PQexec(mdb->m_db_handle, getkeyval_query); if (p_result) { break; } bmicrosleep(5, 0); } if (!p_result) { Dmsg1(dbglvl_err, "Query failed: %s\n", getkeyval_query); goto get_out; } Dmsg0(dbglvl_dbg, "exec done"); if (PQresultStatus(p_result) == PGRES_TUPLES_OK) { Dmsg0(dbglvl_dbg, "getting value"); id = str_to_uint64(PQgetvalue(p_result, 0, 0)); Dmsg2(dbglvl_dbg, "got value '%s' which became %d\n", PQgetvalue(p_result, 0, 0), id); } else { Dmsg1(dbglvl_err, "Result status failed: %s\n", getkeyval_query); Mmsg1(&mdb->errmsg, _("error fetching currval: %s\n"), PQerrorMessage(mdb->m_db_handle)); } get_out: PQclear(p_result); return id; } SQL_FIELD *BDB_POSTGRESQL::sql_fetch_field(void) { int max_len; int this_len; BDB_POSTGRESQL *mdb = this; Dmsg0(dbglvl_dbg, "sql_fetch_field starts\n"); if (!mdb->m_fields || mdb->m_fields_size < mdb->m_num_fields) { if (mdb->m_fields) { free(mdb->m_fields); mdb->m_fields = NULL; } Dmsg1(dbglvl_dbg, "allocating space for %d fields\n", mdb->m_num_fields); mdb->m_fields = (SQL_FIELD *)malloc(sizeof(SQL_FIELD) * mdb->m_num_fields); mdb->m_fields_size = mdb->m_num_fields; for (int i = 0; i < mdb->m_num_fields; i++) { Dmsg1(dbglvl_dbg, "filling field %d\n", i); mdb->m_fields[i].name = PQfname(mdb->m_result, i); mdb->m_fields[i].type = PQftype(mdb->m_result, i); mdb->m_fields[i].flags = 0; /* For a given column, find the max length. */ max_len = 0; for (int j = 0; j < mdb->m_num_rows; j++) { if (PQgetisnull(mdb->m_result, j, i)) { this_len = 4; /* "NULL" */ } else { this_len = cstrlen(PQgetvalue(mdb->m_result, j, i)); } if (max_len < this_len) { max_len = this_len; } } mdb->m_fields[i].max_length = max_len; Dmsg4(dbglvl_dbg, "sql_fetch_field finds field '%s' has length='%d' type='%d' and IsNull=%d\n", mdb->m_fields[i].name, mdb->m_fields[i].max_length, mdb->m_fields[i].type, mdb->m_fields[i].flags); } } /* Increment field number for the next time around */ return &mdb->m_fields[mdb->m_field_number++]; } bool BDB_POSTGRESQL::sql_field_is_not_null(int field_type) { if (field_type == 1) { return true; } return false; } bool BDB_POSTGRESQL::sql_field_is_numeric(int field_type) { /* * TEMP: the following is taken from select OID, typname from pg_type; */ switch (field_type) { case 20: case 21: case 23: case 700: case 701: return true; default: return false; } } /* * Escape strings so PostgreSQL is happy on COPY * * len is the length of the old string. Your new * string must be long enough (max 2*old+1) to hold * the escaped output. 
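 * For example (illustrative): an input value containing a literal tab, such
 * as  a<TAB>b , is written out as the four characters  a \ t b  so that
 * COPY ... FROM STDIN does not treat the tab as a column separator; a
 * literal backslash is doubled in the same way.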
*/ static char *pgsql_copy_escape(char *dest, char *src, size_t len) { /* we have to escape \t, \n, \r, \ */ char c = '\0' ; while (len > 0 && *src) { switch (*src) { case '\n': c = 'n'; break; case '\\': c = '\\'; break; case '\t': c = 't'; break; case '\r': c = 'r'; break; default: c = '\0' ; } if (c) { *dest = '\\'; dest++; *dest = c; } else { *dest = *src; } len--; src++; dest++; } *dest = '\0'; return dest; } bool BDB_POSTGRESQL::sql_batch_start(JCR *jcr) { BDB_POSTGRESQL *mdb = this; const char *query = "COPY batch FROM STDIN"; Dmsg0(dbglvl_info, "sql_batch_start started\n"); if (!sql_query("CREATE TEMPORARY TABLE batch (" "FileIndex int," "JobId int," "Path varchar," "Name varchar," "LStat varchar," "Md5 varchar," "DeltaSeq smallint)")) { Dmsg0(dbglvl_err, "sql_batch_start failed\n"); return false; } /* We are starting a new query. reset everything. */ mdb->m_num_rows = -1; mdb->m_row_number = -1; mdb->m_field_number = -1; sql_free_result(); for (int i=0; i < 10; i++) { mdb->m_result = PQexec(mdb->m_db_handle, query); if (mdb->m_result) { break; } bmicrosleep(5, 0); } if (!mdb->m_result) { Dmsg1(dbglvl_err, "Query failed: %s\n", query); goto get_out; } mdb->m_status = PQresultStatus(mdb->m_result); if (mdb->m_status == PGRES_COPY_IN) { /* How many fields in the set? */ mdb->m_num_fields = (int) PQnfields(mdb->m_result); mdb->m_num_rows = 0; mdb->m_status = 1; } else { Dmsg1(dbglvl_err, "Result status failed: %s\n", query); goto get_out; } Dmsg0(dbglvl_info, "sql_batch_start finishing\n"); return true; get_out: Mmsg1(&mdb->errmsg, _("error starting batch mode: %s"), PQerrorMessage(mdb->m_db_handle)); mdb->m_status = 0; PQclear(mdb->m_result); mdb->m_result = NULL; return false; } /* * Set error to something to abort the operation */ bool BDB_POSTGRESQL::sql_batch_end(JCR *jcr, const char *error) { int res; int count=30; PGresult *p_result; BDB_POSTGRESQL *mdb = this; Dmsg0(dbglvl_info, "sql_batch_end started\n"); do { res = PQputCopyEnd(mdb->m_db_handle, error); } while (res == 0 && --count > 0); if (res == 1) { Dmsg0(dbglvl_dbg, "ok\n"); mdb->m_status = 0; } if (res <= 0) { mdb->m_status = 1; Mmsg1(&mdb->errmsg, _("error ending batch mode: %s"), PQerrorMessage(mdb->m_db_handle)); Dmsg1(dbglvl_err, "failure %s\n", errmsg); } /* Check command status and return to normal libpq state */ p_result = PQgetResult(mdb->m_db_handle); if (PQresultStatus(p_result) != PGRES_COMMAND_OK) { Mmsg1(&mdb->errmsg, _("error ending batch mode: %s"), PQerrorMessage(mdb->m_db_handle)); mdb->m_status = 1; } /* Get some statistics to compute the best plan */ sql_query("ANALYZE batch"); PQclear(p_result); Dmsg0(dbglvl_info, "sql_batch_end finishing\n"); return true; } bool BDB_POSTGRESQL::sql_batch_insert(JCR *jcr, ATTR_DBR *ar) { int res; int count=30; size_t len; const char *digest; char ed1[50]; BDB_POSTGRESQL *mdb = this; mdb->esc_name = check_pool_memory_size(mdb->esc_name, fnl*2+1); pgsql_copy_escape(mdb->esc_name, fname, fnl); mdb->esc_path = check_pool_memory_size(mdb->esc_path, pnl*2+1); pgsql_copy_escape(mdb->esc_path, path, pnl); if (ar->Digest == NULL || ar->Digest[0] == 0) { digest = "0"; } else { digest = ar->Digest; } len = Mmsg(mdb->cmd, "%d\t%s\t%s\t%s\t%s\t%s\t%u\n", ar->FileIndex, edit_int64(ar->JobId, ed1), mdb->esc_path, mdb->esc_name, ar->attr, digest, ar->DeltaSeq); do { res = PQputCopyData(mdb->m_db_handle, mdb->cmd, len); } while (res == 0 && --count > 0); if (res == 1) { Dmsg0(dbglvl_dbg, "ok\n"); mdb->changes++; mdb->m_status = 1; } if (res <= 0) { mdb->m_status = 0; 
Mmsg1(&mdb->errmsg, _("error copying in batch mode: %s"), PQerrorMessage(mdb->m_db_handle)); Dmsg1(dbglvl_err, "failure %s\n", mdb->errmsg); } Dmsg0(dbglvl_info, "sql_batch_insert finishing\n"); return true; } const char *BDB_POSTGRESQL::search_op(JCR *jcr, const char *table_col, char *value, POOLMEM **esc, POOLMEM **dest) { int len = strlen(value); *esc = check_pool_memory_size(*esc, len*2+1); bdb_escape_string(jcr, *esc, value, len); Mmsg(dest, " %s %%> '%s'", table_col, *esc); return *dest; } #endif /* HAVE_POSTGRESQL */ bacula-15.0.3/src/cats/create_bacula_database.in0000644000175000017500000000166314771010173021337 0ustar bsbuildbsbuild#!/bin/sh # # Copyright (C) 2000-2023 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # # This routine creates the Bacula database # using PostgreSQL, Ingres, MySQL, or SQLite. # # can be used to change the current user with su pre_command="sh -c" default_db_type=@DEFAULT_DB_TYPE@ # # See if the first argument is a valid backend name. # If so the user overrides the default database backend. # if [ $# -gt 0 ]; then case $1 in sqlite3) db_type=$1 shift ;; mysql) db_type=$1 shift ;; postgresql) db_type=$1 shift ;; *) ;; esac fi # # If no new db_type is gives use the default db_type. # if [ -z "${db_type}" ]; then db_type="${default_db_type}" fi if [ $db_type = postgresql -a "$UID" = 0 ]; then pre_command="su - postgres -c" fi echo "Creating ${db_type} database" $pre_command "@scriptdir@/create_${db_type}_database $*" bacula-15.0.3/src/cats/sqlite.c0000644000175000017500000005153714771010173016063 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Bacula Catalog Database routines specific to SQLite * * Written by Kern Sibbald, January 2002 * * Note: at one point, this file was changed to class based by a certain * programmer, and other than "wrapping" in a class, which is a trivial * change for a C++ programmer, nothing substantial was done, yet all the * code was recommitted under this programmer's name. Consequently, we * undo those changes here. */ #include "bacula.h" #if HAVE_SQLITE3 #include "cats.h" #include #define __BDB_SQLITE_H_ 1 #include "bdb_sqlite.h" #include "lib/bregex.h" static int b_sqlite3_extension_init(sqlite3 *db); /* ----------------------------------------------------------------------- * * SQLite dependent defines and subroutines * * ----------------------------------------------------------------------- */ /* List of open databases */ static dlist *db_list = NULL; static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; /* * When using mult_db_connections * sqlite can be BUSY. We just need sleep a little in this case. 
*/ static int my_sqlite_busy_handler(void *arg, int calls) { bmicrosleep(0, 500); return 1; } BDB_SQLITE::BDB_SQLITE() : BDB() { BDB_SQLITE *mdb = this; if (db_list == NULL) { db_list = New(dlist(mdb, &mdb->m_link)); } mdb->m_db_driver_type = SQL_DRIVER_TYPE_SQLITE3; mdb->m_db_type = SQL_TYPE_SQLITE3; mdb->m_db_driver = bstrdup("SQLite3"); mdb->errmsg = get_pool_memory(PM_EMSG); /* get error message buffer */ mdb->errmsg[0] = 0; mdb->cmd = get_pool_memory(PM_EMSG); /* get command buffer */ mdb->cached_path = get_pool_memory(PM_FNAME); mdb->cached_path_id = 0; mdb->m_ref_count = 1; mdb->fname = get_pool_memory(PM_FNAME); mdb->path = get_pool_memory(PM_FNAME); mdb->esc_name = get_pool_memory(PM_FNAME); mdb->esc_path = get_pool_memory(PM_FNAME); mdb->esc_obj = get_pool_memory(PM_FNAME); mdb->m_use_fatal_jmsg = true; /* Initialize the private members. */ mdb->m_db_handle = NULL; mdb->m_result = NULL; mdb->m_sqlite_errmsg = NULL; db_list->append(this); } BDB_SQLITE::~BDB_SQLITE() { } /* * Initialize database data structure. In principal this should * never have errors, or it is really fatal. */ BDB *db_init_database(JCR *jcr, const char *db_driver, const char *db_name, const char *db_user, const char *db_password, const char *db_address, int db_port, const char *db_socket, const char *db_ssl_mode, const char *db_ssl_key, const char *db_ssl_cert, const char *db_ssl_ca, const char *db_ssl_capath, const char *db_ssl_cipher, bool mult_db_connections, bool disable_batch_insert) { BDB_SQLITE *mdb = NULL; P(mutex); /* lock DB queue */ /* * Look to see if DB already open */ if (db_list && !mult_db_connections) { foreach_dlist(mdb, db_list) { if (mdb->bdb_match_database(db_driver, db_name, db_address, db_port)) { Dmsg1(300, "DB REopen %s\n", db_name); mdb->increment_refcount(); goto bail_out; } } } Dmsg0(300, "db_init_database first time\n"); mdb = New(BDB_SQLITE()); mdb->m_db_name = bstrdup(db_name); if (disable_batch_insert) { mdb->m_disabled_batch_insert = true; mdb->m_have_batch_insert = false; } else { mdb->m_disabled_batch_insert = false; #ifdef USE_BATCH_FILE_INSERT #ifdef HAVE_SQLITE3_THREADSAFE mdb->m_have_batch_insert = sqlite3_threadsafe(); #else mdb->m_have_batch_insert = false; #endif /* HAVE_SQLITE3_THREADSAFE */ #else mdb->m_have_batch_insert = false; #endif /* USE_BATCH_FILE_INSERT */ } mdb->m_allow_transactions = mult_db_connections; /* At this time, when mult_db_connections == true, this is for * specific console command such as bvfs or batch mode, and we don't * want to share a batch mode or bvfs. In the future, we can change * the creation function to add this parameter. */ mdb->m_dedicated = mult_db_connections; bail_out: V(mutex); return mdb; } /* * Now actually open the database. This can generate errors, * which are returned in the errmsg * * DO NOT close the database or delete mdb here !!!! */ bool BDB_SQLITE::bdb_open_database(JCR *jcr) { bool retval = false; char *db_file; int len; struct stat statbuf; int ret; int errstat; int retry = 0; int64_t starttime; BDB_SQLITE *mdb = this; P(mutex); if (mdb->m_connected) { retval = true; goto bail_out; } if ((errstat=rwl_init(&mdb->m_lock)) != 0) { berrno be; Mmsg1(&mdb->errmsg, _("Unable to initialize DB lock. 
ERR=%s\n"), be.bstrerror(errstat)); goto bail_out; } /* * Open the database */ len = strlen(working_directory) + strlen(mdb->m_db_name) + 5; db_file = (char *)malloc(len); strcpy(db_file, working_directory); strcat(db_file, "/"); strcat(db_file, m_db_name); strcat(db_file, ".db"); if (stat(db_file, &statbuf) != 0) { Mmsg1(&mdb->errmsg, _("Database %s does not exist, please create it.\n"), db_file); free(db_file); goto bail_out; } for (mdb->m_db_handle = NULL; !mdb->m_db_handle && retry++ < 10; ) { ret = sqlite3_open(db_file, &mdb->m_db_handle); if (ret != SQLITE_OK) { mdb->m_sqlite_errmsg = (char *)sqlite3_errmsg(mdb->m_db_handle); sqlite3_close(mdb->m_db_handle); mdb->m_db_handle = NULL; } else { mdb->m_sqlite_errmsg = NULL; } Dmsg0(300, "sqlite_open\n"); if (!mdb->m_db_handle) { bmicrosleep(1, 0); } } if (mdb->m_db_handle == NULL) { Mmsg2(&mdb->errmsg, _("Unable to open Database=%s. ERR=%s\n"), db_file, mdb->m_sqlite_errmsg ? mdb->m_sqlite_errmsg : _("unknown")); free(db_file); goto bail_out; } mdb->m_connected = true; free(db_file); /* * Set busy handler to wait when we use mult_db_connections = true */ sqlite3_busy_handler(mdb->m_db_handle, my_sqlite_busy_handler, NULL); #if defined(SQLITE3_INIT_QUERY) sql_query(SQLITE3_INIT_QUERY); #endif #if 0 /* Allow large queries */ sqlite3_limit(mdb->m_db_handle, SQLITE_LIMIT_SQL_LENGTH, 1073741824); #endif if (!bdb_check_version(jcr)) { goto bail_out; } if (!bdb_check_settings(jcr, &starttime, 30*60*60*24, 0)) { goto bail_out; } b_sqlite3_extension_init(mdb->m_db_handle); retval = true; bail_out: V(mutex); return retval; } void BDB_SQLITE::bdb_close_database(JCR *jcr) { BDB_SQLITE *mdb = this; if (mdb->m_connected) { bdb_end_transaction(jcr); } P(mutex); mdb->m_ref_count--; if (mdb->m_ref_count == 0) { if (mdb->m_connected) { sql_free_result(); } db_list->remove(mdb); if (mdb->m_connected && mdb->m_db_handle) { sqlite3_close(mdb->m_db_handle); } if (is_rwl_valid(&mdb->m_lock)) { rwl_destroy(&mdb->m_lock); } free_pool_memory(mdb->errmsg); free_pool_memory(mdb->cmd); free_pool_memory(mdb->cached_path); free_pool_memory(mdb->fname); free_pool_memory(mdb->path); free_pool_memory(mdb->esc_name); free_pool_memory(mdb->esc_path); free_pool_memory(mdb->esc_obj); if (mdb->m_db_driver) { free(mdb->m_db_driver); } if (mdb->m_db_name) { free(mdb->m_db_name); } delete this; if (db_list->size() == 0) { delete db_list; db_list = NULL; } } V(mutex); } void BDB_SQLITE::bdb_thread_cleanup(void) { sqlite3_thread_cleanup(); } /* * Escape strings so SQLite is happy * * len is the length of the old string. Your new * string must be long enough (max 2*old+1) to hold * the escaped output. 
*/ void BDB_SQLITE::bdb_escape_string(JCR *jcr, char *snew, const char *sold, int len) { char *n; const char *o; n = snew; o = sold; while (len--) { switch (*o) { case '\'': *n++ = '\''; *n++ = '\''; o++; break; case 0: *n++ = '\\'; *n++ = 0; o++; break; default: *n++ = *o++; break; } } *n = 0; } /* * Escape binary object so that SQLite is happy * Memory is stored in BDB struct, no need to free it * * TODO: this should be implemented (escape \0) */ char *BDB_SQLITE::bdb_escape_object(JCR *jcr, char *old, int len) { int l; int max = len*2; /* TODO: too big, should be *4/3 */ esc_obj = check_pool_memory_size(esc_obj, max); l = bin_to_base64(esc_obj, max, old, len, true); esc_obj[l] = 0; ASSERT(l < max); /* TODO: add check for l */ return esc_obj; } /* * Unescape binary object so that SQLIte is happy * * TODO: need to be implemented (escape \0) */ void BDB_SQLITE::bdb_unescape_object(JCR *jcr, char *from, int32_t expected_len, POOLMEM **dest, int32_t *dest_len) { if (!from) { *dest[0] = 0; *dest_len = 0; return; } *dest = check_pool_memory_size(*dest, strlen(from)+1); // don't use expected_len here base64_to_bin(*dest, sizeof_pool_memory(*dest), from, strlen(from)); *dest_len = expected_len; // we could also use the value returned above that should be the same (*dest)[expected_len] = 0; } /* * Start a transaction. This groups inserts and makes things * more efficient. Usually started when inserting file attributes. */ void BDB_SQLITE::bdb_start_transaction(JCR *jcr) { BDB_SQLITE *mdb = this; if (jcr) { if (!jcr->attr) { jcr->attr = get_pool_memory(PM_FNAME); } if (!jcr->ar) { jcr->ar = (ATTR_DBR *)malloc(sizeof(ATTR_DBR)); memset(jcr->ar, 0, sizeof(ATTR_DBR)); } } if (!mdb->m_allow_transactions) { return; } bdb_lock(); /* * Allow only 10,000 changes per transaction */ if (mdb->m_transaction && mdb->changes > 10000) { bdb_end_transaction(jcr); } if (!mdb->m_transaction) { sql_query("BEGIN"); /* begin transaction */ Dmsg0(400, "Start SQLite transaction\n"); mdb->m_transaction = true; } bdb_unlock(); } void BDB_SQLITE::bdb_end_transaction(JCR *jcr) { BDB_SQLITE *mdb = this; if (!mdb->m_allow_transactions) { return; } bdb_lock(); if (mdb->m_transaction) { sql_query("COMMIT"); /* end transaction */ mdb->m_transaction = false; Dmsg1(400, "End SQLite transaction changes=%d\n", changes); } mdb->changes = 0; bdb_unlock(); } struct rh_data { BDB_SQLITE *mdb; DB_RESULT_HANDLER *result_handler; void *ctx; bool initialized; }; /* * Convert SQLite's callback into Bacula DB callback */ static int sqlite_result_handler(void *arh_data, int num_fields, char **rows, char **col_names) { struct rh_data *rh_data = (struct rh_data *)arh_data; /* The db_sql_query doesn't have access to m_results, so if we wan't to get * fields information, we need to use col_names */ if (!rh_data->initialized) { rh_data->mdb->set_column_names(col_names, num_fields); rh_data->initialized = true; } if (rh_data->result_handler) { (*(rh_data->result_handler))(rh_data->ctx, num_fields, rows); } return 0; } /* * Submit a general SQL command (cmd), and for each row returned, * the result_handler is called with the ctx. 
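 * (Unlike the PostgreSQL driver, rows are not fetched after the fact here:
 * they are delivered through sqlite3_exec(), whose callback,
 * sqlite_result_handler() above, forwards each row to the caller's
 * result_handler.)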
*/ bool BDB_SQLITE::bdb_sql_query(const char *query, DB_RESULT_HANDLER *result_handler, void *ctx) { BDB_SQLITE *mdb = this; bool retval = false; int stat; struct rh_data rh_data; Dmsg1(500, "db_sql_query starts with '%s'\n", query); bdb_lock(); mdb->errmsg[0] = 0; if (mdb->m_sqlite_errmsg) { sqlite3_free(mdb->m_sqlite_errmsg); mdb->m_sqlite_errmsg = NULL; } sql_free_result(); rh_data.ctx = ctx; rh_data.mdb = this; rh_data.initialized = false; rh_data.result_handler = result_handler; stat = sqlite3_exec(m_db_handle, query, sqlite_result_handler, (void *)&rh_data, &m_sqlite_errmsg); if (stat != SQLITE_OK) { Mmsg(mdb->errmsg, _("Query failed: %s: ERR=%s\n"), query, sql_strerror()); Dmsg0(500, "db_sql_query finished\n"); goto bail_out; } Dmsg0(500, "db_sql_query finished\n"); sql_free_result(); retval = true; bail_out: bdb_unlock(); return retval; } /* * Submit a sqlite query and retrieve all the data */ bool BDB_SQLITE::sql_query(const char *query, int flags) { int stat; bool retval = false; BDB_SQLITE *mdb = this; Dmsg1(500, "sql_query starts with '%s'\n", query); sql_free_result(); if (mdb->m_sqlite_errmsg) { sqlite3_free(mdb->m_sqlite_errmsg); mdb->m_sqlite_errmsg = NULL; } stat = sqlite3_get_table(m_db_handle, (char *)query, &m_result, &m_num_rows, &m_num_fields, &m_sqlite_errmsg); mdb->m_row_number = 0; /* no row fetched */ if (stat != 0) { /* something went wrong */ mdb->m_num_rows = mdb->m_num_fields = 0; Dmsg0(500, "sql_query finished\n"); } else { Dmsg0(500, "sql_query finished\n"); retval = true; } return retval; } void BDB_SQLITE::sql_free_result(void) { BDB_SQLITE *mdb = this; bdb_lock(); if (mdb->m_fields) { free(mdb->m_fields); mdb->m_fields = NULL; } if (mdb->m_result) { sqlite3_free_table(mdb->m_result); mdb->m_result = NULL; } mdb->m_col_names = NULL; mdb->m_num_rows = mdb->m_num_fields = 0; bdb_unlock(); } /* * Fetch one row at a time */ SQL_ROW BDB_SQLITE::sql_fetch_row(void) { BDB_SQLITE *mdb = this; if (!mdb->m_result || (mdb->m_row_number >= mdb->m_num_rows)) { return NULL; } mdb->m_row_number++; return &mdb->m_result[mdb->m_num_fields * mdb->m_row_number]; } const char *BDB_SQLITE::sql_strerror(void) { BDB_SQLITE *mdb = this; return mdb->m_sqlite_errmsg ? mdb->m_sqlite_errmsg : "unknown"; } void BDB_SQLITE::sql_data_seek(int row) { BDB_SQLITE *mdb = this; /* Set the row number to be returned on the next call to sql_fetch_row */ mdb->m_row_number = row; } int BDB_SQLITE::sql_affected_rows(void) { BDB_SQLITE *mdb = this; return sqlite3_changes(mdb->m_db_handle); } uint64_t BDB_SQLITE::sql_insert_autokey_record(const char *query, const char *table_name) { BDB_SQLITE *mdb = this; /* First execute the insert query and then retrieve the currval. */ if (!sql_query(query)) { return 0; } mdb->m_num_rows = sql_affected_rows(); if (mdb->m_num_rows != 1) { return 0; } mdb->changes++; return sqlite3_last_insert_rowid(mdb->m_db_handle); } SQL_FIELD *BDB_SQLITE::sql_fetch_field(void) { BDB_SQLITE *mdb = this; int i, j, len; /* We are in the middle of a db_sql_query and we want to get fields info */ if (mdb->m_col_names != NULL) { if (mdb->m_num_fields > mdb->m_field_number) { mdb->m_sql_field.name = mdb->m_col_names[mdb->m_field_number]; /* We don't have the maximum field length, so we can use 80 as * estimation. 
*/ len = MAX(cstrlen(mdb->m_sql_field.name), 80/mdb->m_num_fields); mdb->m_sql_field.max_length = len; mdb->m_field_number++; mdb->m_sql_field.type = 0; /* not numeric */ mdb->m_sql_field.flags = 1; /* not null */ return &mdb->m_sql_field; } else { /* too much fetch_field() */ return NULL; } } /* We are after a sql_query() that stores the result in m_results */ if (!mdb->m_fields || mdb->m_fields_size < mdb->m_num_fields) { if (mdb->m_fields) { free(mdb->m_fields); mdb->m_fields = NULL; } Dmsg1(500, "allocating space for %d fields\n", m_num_fields); mdb->m_fields = (SQL_FIELD *)malloc(sizeof(SQL_FIELD) * mdb->m_num_fields); mdb->m_fields_size = mdb->m_num_fields; for (i = 0; i < mdb->m_num_fields; i++) { Dmsg1(500, "filling field %d\n", i); mdb->m_fields[i].name = mdb->m_result[i]; mdb->m_fields[i].max_length = cstrlen(mdb->m_fields[i].name); for (j = 1; j <= mdb->m_num_rows; j++) { if (mdb->m_result[i + mdb->m_num_fields * j]) { len = (uint32_t)cstrlen(mdb->m_result[i + mdb->m_num_fields * j]); } else { len = 0; } if (len > mdb->m_fields[i].max_length) { mdb->m_fields[i].max_length = len; } } mdb->m_fields[i].type = 0; mdb->m_fields[i].flags = 1; /* not null */ Dmsg4(500, "sql_fetch_field finds field '%s' has length='%d' type='%d' and IsNull=%d\n", mdb->m_fields[i].name, mdb->m_fields[i].max_length, mdb->m_fields[i].type, mdb->m_fields[i].flags); } } /* Increment field number for the next time around */ return &mdb->m_fields[mdb->m_field_number++]; } bool BDB_SQLITE::sql_field_is_not_null(int field_type) { if (field_type == 1) { return true; } return false; } bool BDB_SQLITE::sql_field_is_numeric(int field_type) { if (field_type == 1) { return true; } return false; } /* * Returns true if OK * false if failed */ bool BDB_SQLITE::sql_batch_start(JCR *jcr) { bool ret; bdb_lock(); ret = sql_query("CREATE TEMPORARY TABLE batch (" "FileIndex integer," "JobId integer," "Path blob," "Name blob," "LStat tinyblob," "MD5 tinyblob," "DeltaSeq integer)"); bdb_unlock(); return ret; } /* Set error to something to abort operation */ /* * Returns true if OK * false if failed */ bool BDB_SQLITE::sql_batch_end(JCR *jcr, const char *error) { m_status = 0; return true; } /* * Returns true if OK * false if failed */ bool BDB_SQLITE::sql_batch_insert(JCR *jcr, ATTR_DBR *ar) { BDB_SQLITE *mdb = this; const char *digest; char ed1[50]; mdb->esc_name = check_pool_memory_size(mdb->esc_name, mdb->fnl*2+1); bdb_escape_string(jcr, mdb->esc_name, mdb->fname, mdb->fnl); mdb->esc_path = check_pool_memory_size(mdb->esc_path, mdb->pnl*2+1); bdb_escape_string(jcr, mdb->esc_path, mdb->path, mdb->pnl); if (ar->Digest == NULL || ar->Digest[0] == 0) { digest = "0"; } else { digest = ar->Digest; } Mmsg(mdb->cmd, "INSERT INTO batch VALUES " "(%d,%s,'%s','%s','%s','%s',%u)", ar->FileIndex, edit_int64(ar->JobId,ed1), mdb->esc_path, mdb->esc_name, ar->attr, digest, ar->DeltaSeq); return sql_query(mdb->cmd); } /* Implementation of the REGEXP function for SQLite3 */ static void b_sqlite_regexp(sqlite3_context *ctx, int argc, sqlite3_value **argv) { const char *re, *str; int rc; regex_t reg; re = (const char *) sqlite3_value_text(argv[0]); if (!re) { sqlite3_result_error(ctx, "no regexp", -1); return; } str = (const char *) sqlite3_value_text(argv[1]); if (!str) { sqlite3_result_error(ctx, "no string", -1); return; } if (regcomp(®, re, 0) < 0) { sqlite3_result_error(ctx, "regexp compilation error", -1); return; } rc = regexec(®, str, 0, NULL, 0); sqlite3_result_int(ctx, rc == 0); regfree(®); return; } static int 
b_sqlite3_extension_init(sqlite3 *db) { /* TODO: May implement a regex cache */ return sqlite3_create_function(db, "REGEXP", 2, SQLITE_UTF8, NULL, b_sqlite_regexp, NULL, NULL); } #endif /* HAVE_SQLITE3 */ bacula-15.0.3/src/cats/sql_create.c0000644000175000017500000013460014771010173016675 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Bacula Catalog Database Create record interface routines * * Written by Kern Sibbald, March 2000 * */ #include "bacula.h" static const int dbglevel = 100; #if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL #include "cats.h" /* ----------------------------------------------------------------------- * * Generic Routines (or almost generic) * * ----------------------------------------------------------------------- */ /** Create a new record for the Job * Returns: false on failure * true on success */ bool BDB::bdb_create_job_record(JCR *jcr, JOB_DBR *jr) { POOL_MEM buf; char dt[MAX_TIME_LENGTH], dt2[MAX_TIME_LENGTH]; time_t stime, starttime; struct tm tm; bool ok; int len; utime_t JobTDate; char ed1[30],ed2[30]; char esc_job[MAX_ESCAPE_NAME_LENGTH]; char esc_name[MAX_ESCAPE_NAME_LENGTH]; bdb_lock(); stime = jr->SchedTime; starttime = jr->StartTime; ASSERT(stime != 0); (void)localtime_r(&stime, &tm); strftime(dt, sizeof(dt), "%Y-%m-%d %H:%M:%S", &tm); (void)localtime_r(&starttime, &tm); strftime(dt2, sizeof(dt2), "%Y-%m-%d %H:%M:%S", &tm); JobTDate = (utime_t)stime; len = strlen(jcr->comment); /* TODO: use jr instead of jcr to get comment */ buf.check_size(len*2+1); bdb_escape_string(jcr, buf.c_str(), jcr->comment, len); bdb_escape_string(jcr, esc_job, jr->Job, strlen(jr->Job)); bdb_escape_string(jcr, esc_name, jr->Name, strlen(jr->Name)); /* Must create it */ Mmsg(cmd, "INSERT INTO Job (Job,Name,Type,Level,JobStatus,StartTime,SchedTime,JobTDate," "ClientId,Comment,isVirtualFull) " "VALUES ('%s','%s','%c','%c','%c','%s','%s',%s,%s,'%s',%d)", esc_job, esc_name, (char)(jr->JobType), (char)(jr->JobLevel), (char)(jr->JobStatus), dt2, dt, edit_uint64(JobTDate, ed1), edit_int64(jr->ClientId, ed2), buf.c_str(), jr->isVirtualFull); if ((jr->JobId = sql_insert_autokey_record(cmd, NT_("Job"))) == 0) { Mmsg2(&errmsg, _("Create DB Job record %s failed. 
ERR=%s\n"), cmd, sql_strerror()); ok = false; } else { ok = true; } bdb_unlock(); return ok; } /** Create a JobMedia record for medium used this job * Returns: false on failure * true on success */ bool BDB::bdb_create_jobmedia_record(JCR *jcr, JOBMEDIA_DBR *jm) { bool ok = true; int count; char ed1[50], ed2[50]; bdb_lock(); /* Now get count for VolIndex */ Mmsg(cmd, "SELECT MAX(VolIndex) from JobMedia WHERE JobId=%s", edit_int64(jm->JobId, ed1)); count = get_sql_record_max(jcr, this); if (count < 0) { count = 0; } count++; Mmsg(cmd, "INSERT INTO JobMedia (JobId,MediaId,FirstIndex,LastIndex," "StartFile,EndFile,StartBlock,EndBlock,VolIndex) " "VALUES (%s,%s,%u,%u,%u,%u,%u,%u,%u)", edit_int64(jm->JobId, ed1), edit_int64(jm->MediaId, ed2), jm->FirstIndex, jm->LastIndex, jm->StartFile, jm->EndFile, jm->StartBlock, jm->EndBlock,count); Dmsg0(300, cmd); if (!InsertDB(jcr, cmd)) { Mmsg2(&errmsg, _("Create JobMedia record %s failed: ERR=%s\n"), cmd, sql_strerror()); ok = false; } else { /* Worked, now update the Media record with the EndFile and EndBlock */ Mmsg(cmd, "UPDATE Media SET EndFile=%lu, EndBlock=%lu WHERE MediaId=%lu", jm->EndFile, jm->EndBlock, jm->MediaId); if (!UpdateDB(jcr, cmd, false)) { Mmsg2(&errmsg, _("Update Media record %s failed: ERR=%s\n"), cmd, sql_strerror()); ok = false; } } bdb_unlock(); Dmsg0(300, "Return from JobMedia\n"); return ok; } /** Create a fileMedia record for File index * Returns: false on failure * true on success */ bool BDB::bdb_create_filemedia_record(JCR *jcr, FILEMEDIA_DBR *fm) { bool ok = true; char ed1[50], ed2[50]; bdb_lock(); Mmsg(cmd, "INSERT INTO FileMedia (JobId,MediaId,FileIndex,BlockAddress,RecordNo," "FileOffset) " "VALUES (%s,%s,%u,%lld,%u,%lld)", edit_int64(fm->JobId, ed1), edit_int64(fm->MediaId, ed2), fm->FileIndex, fm->BlockAddress, fm->RecordNo, fm->FileOffset); Dmsg0(300, cmd); if (!InsertDB(jcr, cmd)) { Mmsg2(&errmsg, _("Create FileMedia record %s failed: ERR=%s\n"), cmd, sql_strerror()); ok = false; } bdb_unlock(); return ok; } /** Create Unique Pool record * Returns: false on failure * true on success */ bool BDB::bdb_create_pool_record(JCR *jcr, POOL_DBR *pr) { bool stat; char ed1[30], ed2[30], ed3[50], ed4[50], ed5[50], ed6[50], ed7[50]; char esc_name[MAX_ESCAPE_NAME_LENGTH]; char esc_lf[MAX_ESCAPE_NAME_LENGTH]; char esc_type[MAX_ESCAPE_NAME_LENGTH]; /* The pooltype should match the case when we insert it to the catalog * backup -> Backup, BACKUP -> Backup */ ucfirst(esc_type, pr->PoolType, sizeof(esc_type)); Dmsg0(200, "In create pool\n"); bdb_lock(); bdb_escape_string(jcr, esc_name, pr->Name, strlen(pr->Name)); bdb_escape_string(jcr, esc_lf, pr->LabelFormat, strlen(pr->LabelFormat)); Mmsg(cmd, "SELECT PoolId,Name FROM Pool WHERE Name='%s'", esc_name); Dmsg1(200, "selectpool: %s\n", cmd); if (QueryDB(jcr, cmd)) { if (sql_num_rows() > 0) { Mmsg1(&errmsg, _("pool record %s already exists\n"), pr->Name); sql_free_result(); bdb_unlock(); Dmsg1(200, "%s", errmsg); /* pool already exists */ return false; } sql_free_result(); } /* Must create it */ Mmsg(cmd, "INSERT INTO Pool (Name,NumVols,MaxVols,UseOnce,UseCatalog," "AcceptAnyVolume,AutoPrune,Recycle,VolRetention,VolUseDuration," "MaxVolJobs,MaxVolFiles,MaxVolBytes,PoolType,LabelType,LabelFormat," "RecyclePoolId,ScratchPoolId,ActionOnPurge,CacheRetention,MaxPoolBytes) " "VALUES ('%s',%u,%u,%d,%d,%d,%d,%d,%s,%s,%u,%u,%s,'%s',%d,'%s',%s,%s,%d,%s,%s)", esc_name, pr->NumVols, pr->MaxVols, pr->UseOnce, pr->UseCatalog, pr->AcceptAnyVolume, pr->AutoPrune, pr->Recycle, 
edit_uint64(pr->VolRetention, ed1), edit_uint64(pr->VolUseDuration, ed2), pr->MaxVolJobs, pr->MaxVolFiles, edit_uint64(pr->MaxVolBytes, ed3), esc_type, pr->LabelType, esc_lf, edit_int64(pr->RecyclePoolId,ed4), edit_int64(pr->ScratchPoolId,ed5), pr->ActionOnPurge, edit_uint64(pr->CacheRetention, ed6), edit_int64(pr->MaxPoolBytes, ed7) ); Dmsg1(200, "Create Pool: %s\n", cmd); if ((pr->PoolId = sql_insert_autokey_record(cmd, NT_("Pool"))) == 0) { Mmsg2(&errmsg, _("Create db Pool record %s failed: ERR=%s\n"), cmd, sql_strerror()); stat = false; } else { stat = true; } bdb_unlock(); return stat; } /** * Create Unique Device record * Returns: false on failure * true on success */ bool BDB::bdb_create_device_record(JCR *jcr, DEVICE_DBR *dr) { bool ok; char ed1[30], ed2[30]; char esc[MAX_ESCAPE_NAME_LENGTH]; Dmsg0(200, "In create Device\n"); bdb_lock(); bdb_escape_string(jcr, esc, dr->Name, strlen(dr->Name)); Mmsg(cmd, "SELECT DeviceId,Name FROM Device WHERE Name='%s'", esc); Dmsg1(200, "selectdevice: %s\n", cmd); if (QueryDB(jcr, cmd)) { if (sql_num_rows() > 0) { Mmsg1(&errmsg, _("Device record %s already exists\n"), dr->Name); sql_free_result(); bdb_unlock(); return false; } sql_free_result(); } /* Must create it */ Mmsg(cmd, "INSERT INTO Device (Name,MediaTypeId,StorageId) VALUES ('%s',%s,%s)", esc, edit_uint64(dr->MediaTypeId, ed1), edit_int64(dr->StorageId, ed2)); Dmsg1(200, "Create Device: %s\n", cmd); if ((dr->DeviceId = sql_insert_autokey_record(cmd, NT_("Device"))) == 0) { Mmsg2(&errmsg, _("Create db Device record %s failed: ERR=%s\n"), cmd, sql_strerror()); ok = false; } else { ok = true; } bdb_unlock(); return ok; } /** * Create a Unique record for Storage -- no duplicates * Returns: false on failure * true on success with id in sr->StorageId */ bool BDB::bdb_create_storage_record(JCR *jcr, STORAGE_DBR *sr) { SQL_ROW row; bool ok; char esc[MAX_ESCAPE_NAME_LENGTH]; bdb_lock(); bdb_escape_string(jcr, esc, sr->Name, strlen(sr->Name)); Mmsg(cmd, "SELECT StorageId,AutoChanger FROM Storage WHERE Name='%s'",esc); sr->StorageId = 0; sr->created = false; /* Check if it already exists */ if (QueryDB(jcr, cmd)) { /* If more than one, report error, but return first row */ if (sql_num_rows() > 1) { Mmsg1(&errmsg, _("More than one Storage record!: %d\n"), sql_num_rows()); Jmsg(jcr, M_ERROR, 0, "%s", errmsg); } if (sql_num_rows() >= 1) { if ((row = sql_fetch_row()) == NULL) { Mmsg1(&errmsg, _("error fetching Storage row: %s\n"), sql_strerror()); Jmsg(jcr, M_ERROR, 0, "%s", errmsg); sql_free_result(); bdb_unlock(); return false; } sr->StorageId = str_to_int64(row[0]); sr->AutoChanger = atoi(row[1]); /* bool */ sql_free_result(); bdb_unlock(); return true; } sql_free_result(); } /* Must create it */ Mmsg(cmd, "INSERT INTO Storage (Name,AutoChanger)" " VALUES ('%s',%d)", esc, sr->AutoChanger); if ((sr->StorageId = sql_insert_autokey_record(cmd, NT_("Storage"))) == 0) { Mmsg2(&errmsg, _("Create DB Storage record %s failed. 
ERR=%s\n"), cmd, sql_strerror()); Jmsg(jcr, M_ERROR, 0, "%s", errmsg); ok = false; } else { sr->created = true; ok = true; } bdb_unlock(); return ok; } /** * Create Unique MediaType record * Returns: false on failure * true on success */ bool BDB::bdb_create_mediatype_record(JCR *jcr, MEDIATYPE_DBR *mr) { bool stat; char esc[MAX_ESCAPE_NAME_LENGTH]; Dmsg0(200, "In create mediatype\n"); bdb_lock(); bdb_escape_string(jcr, esc, mr->MediaType, strlen(mr->MediaType)); Mmsg(cmd, "SELECT MediaTypeId,MediaType FROM MediaType WHERE MediaType='%s'", esc); Dmsg1(200, "selectmediatype: %s\n", cmd); if (QueryDB(jcr, cmd)) { if (sql_num_rows() > 0) { Mmsg1(&errmsg, _("mediatype record %s already exists\n"), mr->MediaType); sql_free_result(); bdb_unlock(); return false; } sql_free_result(); } /* Must create it */ Mmsg(cmd, "INSERT INTO MediaType (MediaType,ReadOnly) " "VALUES ('%s',%d)", mr->MediaType, mr->ReadOnly); Dmsg1(200, "Create mediatype: %s\n", cmd); if ((mr->MediaTypeId = sql_insert_autokey_record(cmd, NT_("MediaType"))) == 0) { Mmsg2(&errmsg, _("Create db mediatype record %s failed: ERR=%s\n"), cmd, sql_strerror()); stat = false; } else { stat = true; } bdb_unlock(); return stat; } /** * Create Media record. VolumeName and non-zero Slot must be unique * * Returns: 0 on failure * 1 on success */ int BDB::bdb_create_media_record(JCR *jcr, MEDIA_DBR *mr) { int stat; char ed1[50], ed2[50], ed3[50], ed4[50], ed5[50], ed6[50], ed7[50], ed8[50]; char ed9[50], ed10[50], ed11[50], ed12[50], ed13[50], ed14[50]; struct tm tm; char esc_name[MAX_ESCAPE_NAME_LENGTH]; char esc_mtype[MAX_ESCAPE_NAME_LENGTH]; char esc_status[MAX_ESCAPE_NAME_LENGTH]; bdb_lock(); bdb_escape_string(jcr, esc_name, mr->VolumeName, strlen(mr->VolumeName)); bdb_escape_string(jcr, esc_mtype, mr->MediaType, strlen(mr->MediaType)); bdb_escape_string(jcr, esc_status, mr->VolStatus, strlen(mr->VolStatus)); Mmsg(cmd, "SELECT MediaId FROM Media WHERE VolumeName='%s'", esc_name); Dmsg1(500, "selectpool: %s\n", cmd); if (QueryDB(jcr, cmd)) { if (sql_num_rows() > 0) { Mmsg1(&errmsg, _("Volume \"%s\" already exists.\n"), mr->VolumeName); sql_free_result(); bdb_unlock(); return 0; } sql_free_result(); } /* Must create it */ Mmsg(cmd, "INSERT INTO Media (VolumeName,MediaType,MediaTypeId,PoolId,MaxVolBytes," "VolCapacityBytes,Recycle,VolRetention,VolUseDuration,MaxVolJobs,MaxVolFiles," "VolStatus,Slot,VolBytes,InChanger,VolReadTime,VolWriteTime,VolType," "VolParts,VolCloudParts,LastPartBytes," "EndFile,EndBlock,LabelType,StorageId,DeviceId,LocationId," "ScratchPoolId,RecyclePoolId,Enabled,ActionOnPurge,CacheRetention,UseProtect," "Protected,VolEncrypted)" "VALUES ('%s','%s',0,%lu,%s,%s,%ld,%s,%s,%lu,%lu,'%s',%ld,%s,%ld,%s,%s,%ld," "%ld,%ld,'%s',%ld,%ld,%ld,%s,%s,%s,%s,%s,%ld,%ld,%s,%ld,%ld,%ld)", esc_name, esc_mtype, mr->PoolId, edit_uint64(mr->MaxVolBytes,ed1), edit_uint64(mr->VolCapacityBytes, ed2), mr->Recycle, edit_uint64(mr->VolRetention, ed3), edit_uint64(mr->VolUseDuration, ed4), mr->MaxVolJobs, mr->MaxVolFiles, esc_status, mr->Slot, edit_uint64(mr->VolBytes, ed5), mr->InChanger, edit_int64(mr->VolReadTime, ed6), edit_int64(mr->VolWriteTime, ed7), mr->VolType, mr->VolParts, mr->VolCloudParts, edit_uint64(mr->LastPartBytes, ed8), mr->EndFile, mr->EndBlock, mr->LabelType, edit_int64(mr->StorageId, ed9), edit_int64(mr->DeviceId, ed10), edit_int64(mr->LocationId, ed11), edit_int64(mr->ScratchPoolId, ed12), edit_int64(mr->RecyclePoolId, ed13), mr->Enabled, mr->ActionOnPurge, edit_uint64(mr->CacheRetention, ed14), mr->UseProtect, mr->Protected, 
mr->VolEncrypted ); Dmsg1(500, "Create Volume: %s\n", cmd); if ((mr->MediaId = sql_insert_autokey_record(cmd, NT_("Media"))) == 0) { Mmsg2(&errmsg, _("Create DB Media record %s failed. ERR=%s\n"), cmd, sql_strerror()); stat = 0; } else { stat = 1; if (mr->set_label_date) { char dt[MAX_TIME_LENGTH]; if (mr->LabelDate == 0) { mr->LabelDate = time(NULL); } (void)localtime_r(&mr->LabelDate, &tm); strftime(dt, sizeof(dt), "%Y-%m-%d %H:%M:%S", &tm); Mmsg(cmd, "UPDATE Media SET LabelDate='%s' " "WHERE MediaId=%lu", dt, mr->MediaId); stat = UpdateDB(jcr, cmd, false); } /* * Make sure that if InChanger is non-zero any other identical slot * has InChanger zero. */ db_make_inchanger_unique(jcr, this, mr); } bdb_unlock(); return stat; } /** * Create a Unique record for the client -- no duplicates * Returns: 0 on failure * 1 on success with id in cr->ClientId */ int BDB::bdb_create_client_record(JCR *jcr, CLIENT_DBR *cr) { SQL_ROW row; int stat; char ed1[50], ed2[50]; char esc_name[MAX_ESCAPE_NAME_LENGTH]; char esc_uname[MAX_ESCAPE_NAME_LENGTH]; bdb_lock(); bdb_escape_string(jcr, esc_name, cr->Name, strlen(cr->Name)); bdb_escape_string(jcr, esc_uname, cr->Uname, strlen(cr->Uname)); Mmsg(cmd, "SELECT ClientId,Uname,AutoPrune," "FileRetention,JobRetention FROM Client WHERE Name='%s'",esc_name); cr->ClientId = 0; if (QueryDB(jcr, cmd)) { /* If more than one, report error, but return first row */ if (sql_num_rows() > 1) { Mmsg1(&errmsg, _("More than one Client!: %d\n"), sql_num_rows()); Jmsg(jcr, M_ERROR, 0, "%s", errmsg); } if (sql_num_rows() >= 1) { if ((row = sql_fetch_row()) == NULL) { Mmsg1(&errmsg, _("error fetching Client row: %s\n"), sql_strerror()); Jmsg(jcr, M_ERROR, 0, "%s", errmsg); sql_free_result(); bdb_unlock(); return 0; } cr->ClientId = str_to_int64(row[0]); if (row[1]) { bstrncpy(cr->Uname, row[1], sizeof(cr->Uname)); } else { cr->Uname[0] = 0; /* no name */ } cr->AutoPrune = str_to_int64(row[2]); cr->FileRetention = str_to_int64(row[3]); cr->JobRetention = str_to_int64(row[4]); sql_free_result(); bdb_unlock(); return 1; } sql_free_result(); } /* Must create it */ Mmsg(cmd, "INSERT INTO Client (Name,Uname,AutoPrune," "FileRetention,JobRetention) VALUES " "('%s','%s',%d,%s,%s)", esc_name, esc_uname, cr->AutoPrune, edit_uint64(cr->FileRetention, ed1), edit_uint64(cr->JobRetention, ed2)); if ((cr->ClientId = sql_insert_autokey_record(cmd, NT_("Client"))) == 0) { Mmsg2(&errmsg, _("Create DB Client record %s failed. ERR=%s\n"), cmd, sql_strerror()); Jmsg(jcr, M_ERROR, 0, "%s", errmsg); stat = 0; } else { stat = 1; } bdb_unlock(); return stat; } /** Create a Unique record for the Path -- no duplicates */ int BDB::bdb_create_path_record(JCR *jcr, ATTR_DBR *ar) { SQL_ROW row; int stat; errmsg[0] = 0; esc_name = check_pool_memory_size(esc_name, 2*pnl+2); bdb_escape_string(jcr, esc_name, path, pnl); if (cached_path_id != 0 && cached_path_len == pnl && strcmp(cached_path, path) == 0) { ar->PathId = cached_path_id; return 1; } Mmsg(cmd, "SELECT PathId FROM Path WHERE Path='%s'", esc_name); if (QueryDB(jcr, cmd)) { if (sql_num_rows() > 1) { char ed1[30]; Mmsg2(&errmsg, _("More than one Path!: %s for path: %s\n"), edit_uint64(sql_num_rows(), ed1), path); Jmsg(jcr, M_WARNING, 0, "%s", errmsg); } /* Even if there are multiple paths, take the first one */ if (sql_num_rows() >= 1) { if ((row = sql_fetch_row()) == NULL) { Mmsg1(&errmsg, _("error fetching row: %s\n"), sql_strerror()); Jmsg(jcr, M_ERROR, 0, "%s", errmsg); sql_free_result(); ar->PathId = 0; ASSERT2(ar->PathId, "Your Path table is broken. 
" "Please, use dbcheck to correct it."); return 0; } ar->PathId = str_to_int64(row[0]); sql_free_result(); /* Cache path */ if (ar->PathId != cached_path_id) { cached_path_id = ar->PathId; cached_path_len = pnl; pm_strcpy(cached_path, path); } ASSERT(ar->PathId); return 1; } sql_free_result(); } Mmsg(cmd, "INSERT INTO Path (Path) VALUES ('%s')", esc_name); if ((ar->PathId = sql_insert_autokey_record(cmd, NT_("Path"))) == 0) { Mmsg2(&errmsg, _("Create db Path record %s failed. ERR=%s\n"), cmd, sql_strerror()); Jmsg(jcr, M_FATAL, 0, "%s", errmsg); ar->PathId = 0; stat = 0; } else { stat = 1; } /* Cache path */ if (stat && ar->PathId != cached_path_id) { cached_path_id = ar->PathId; cached_path_len = pnl; pm_strcpy(cached_path, path); } return stat; } /** * Create a Unique record for the counter -- no duplicates * Returns: 0 on failure * 1 on success with counter filled in */ int BDB::bdb_create_counter_record(JCR *jcr, COUNTER_DBR *cr) { char esc[MAX_ESCAPE_NAME_LENGTH]; COUNTER_DBR mcr; int stat; bdb_lock(); memset(&mcr, 0, sizeof(mcr)); bstrncpy(mcr.Counter, cr->Counter, sizeof(mcr.Counter)); if (bdb_get_counter_record(jcr, &mcr)) { /* If the counter definition changed, we must update the record */ if (mcr.MinValue != cr->MinValue || mcr.MaxValue != cr->MaxValue || strcmp(mcr.WrapCounter, cr->WrapCounter) != 0) { /* With the update, the current value can be wrong, we need * to adjust it */ if (mcr.CurrentValue > 0) { if (cr->MinValue > mcr.CurrentValue) { cr->CurrentValue = cr->MinValue; } else if (cr->MaxValue < mcr.CurrentValue) { cr->CurrentValue = cr->MaxValue; } else { cr->CurrentValue = mcr.CurrentValue; } } Dmsg3(dbglevel, "org: MinValue=%ld MaxValue=%ld CurrentValue=%ld\n", mcr.MinValue, mcr.MaxValue, mcr.CurrentValue); Dmsg3(dbglevel, "new: MinValue=%ld MaxValue=%ld CurrentValue=%ld\n", cr->MinValue, cr->MaxValue, cr->CurrentValue); stat = bdb_update_counter_record(jcr, cr); } else { memcpy(cr, &mcr, sizeof(COUNTER_DBR)); stat = 1; } bdb_unlock(); return stat; } bdb_escape_string(jcr, esc, cr->Counter, strlen(cr->Counter)); /* Must create it */ Mmsg(cmd, insert_counter_values[bdb_get_type_index()], esc, cr->MinValue, cr->MaxValue, cr->CurrentValue, cr->WrapCounter); if (!InsertDB(jcr, cmd)) { Mmsg2(&errmsg, _("Create DB Counters record %s failed. ERR=%s\n"), cmd, sql_strerror()); Jmsg(jcr, M_ERROR, 0, "%s", errmsg); stat = 0; } else { stat = 1; } bdb_unlock(); return stat; } /** * Create a FileSet record. This record is unique in the * name and the MD5 signature of the include/exclude sets. 
* Returns: 0 on failure * 1 on success with FileSetId in record */ bool BDB::bdb_create_fileset_record(JCR *jcr, FILESET_DBR *fsr) { SQL_ROW row; bool stat; struct tm tm; char esc_fs[MAX_ESCAPE_NAME_LENGTH]; char esc_md5[MAX_ESCAPE_NAME_LENGTH]; char esc_content[MAX_ESCAPE_PLUGIN_LENGTH]; /* TODO: Escape FileSet and MD5 */ bdb_lock(); fsr->created = false; bdb_escape_string(jcr, esc_fs, fsr->FileSet, strlen(fsr->FileSet)); bdb_escape_string(jcr, esc_md5, fsr->MD5, strlen(fsr->MD5)); bdb_escape_string(jcr, esc_content, fsr->Content, strlen(fsr->Content)); Mmsg(cmd, "SELECT FileSetId,CreateTime,Content FROM FileSet WHERE " "FileSet='%s' AND MD5='%s'", esc_fs, esc_md5); fsr->FileSetId = 0; if (QueryDB(jcr, cmd)) { if (sql_num_rows() > 1) { Mmsg1(&errmsg, _("More than one FileSet!: %d\n"), sql_num_rows()); Jmsg(jcr, M_ERROR, 0, "%s", errmsg); } if (sql_num_rows() >= 1) { if ((row = sql_fetch_row()) == NULL) { Mmsg1(&errmsg, _("error fetching FileSet row: ERR=%s\n"), sql_strerror()); Jmsg(jcr, M_ERROR, 0, "%s", errmsg); sql_free_result(); bdb_unlock(); return false; } fsr->FileSetId = str_to_int64(row[0]); if (row[1] == NULL) { fsr->cCreateTime[0] = 0; } else { bstrncpy(fsr->cCreateTime, row[1], sizeof(fsr->cCreateTime)); } if ((row[2] == NULL || strcmp(row[2], "") == 0) && esc_content[0]) { Mmsg(cmd, "UPDATE FileSet SET Content='%s' WHERE FileSetId=%ld", esc_content, fsr->FileSetId); if (!UpdateDB(jcr, cmd, false /* should match*/)) { /* If we cannot update, this is not the end of the world, but we * can display a message */ Dmsg2(50, "Unable to update FileSet content field for %ld ERR=%s\n", fsr->FileSetId, sql_strerror()); } } sql_free_result(); bdb_unlock(); return true; } sql_free_result(); } if (fsr->CreateTime == 0 && fsr->cCreateTime[0] == 0) { fsr->CreateTime = time(NULL); } (void)localtime_r(&fsr->CreateTime, &tm); strftime(fsr->cCreateTime, sizeof(fsr->cCreateTime), "%Y-%m-%d %H:%M:%S", &tm); /* Must create it */ Mmsg(cmd, "INSERT INTO FileSet (FileSet,MD5,CreateTime,Content) " "VALUES ('%s','%s','%s','%s')", esc_fs, esc_md5, fsr->cCreateTime, esc_content); if ((fsr->FileSetId = sql_insert_autokey_record(cmd, NT_("FileSet"))) == 0) { Mmsg2(&errmsg, _("Create DB FileSet record %s failed. ERR=%s\n"), cmd, sql_strerror()); Jmsg(jcr, M_ERROR, 0, "%s", errmsg); stat = false; } else { fsr->created = true; stat = true; } bdb_unlock(); return stat; } /** * struct stat * { * dev_t st_dev; * device * * ino_t st_ino; * inode * * mode_t st_mode; * protection * * nlink_t st_nlink; * number of hard links * * uid_t st_uid; * user ID of owner * * gid_t st_gid; * group ID of owner * * dev_t st_rdev; * device type (if inode device) * * off_t st_size; * total size, in bytes * * unsigned long st_blksize; * blocksize for filesystem I/O * * unsigned long st_blocks; * number of blocks allocated * * time_t st_atime; * time of last access * * time_t st_mtime; * time of last modification * * time_t st_ctime; * time of last inode change * * }; */ /* For maintenance, we can put batch mode in hold */ static bool batch_mode_enabled = true; void bdb_disable_batch_insert(bool enabled) { batch_mode_enabled = enabled; } /* * All sql_batch_xx functions are used to do bulk batch * insert in File/Filename/Path tables. * * To sum up : * - bulk load a temp table * - table before that to avoid possible duplicate inserts with concurrent update) * - insert missing paths into path with another single query * - then insert the join between the temp and path tables into file. 
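 *
 * Illustrative sketch only (not part of the original sources): assuming the
 * temporary table is named "batch" and holds (FileIndex, JobId, Path, Name,
 * LStat, MD5, DeltaSeq), the driver-specific lock/fill/unlock queries used in
 * bdb_write_batch_file_records() correspond roughly to:
 *
 *   -- batch_lock_path_query / batch_fill_path_query (PostgreSQL-style guess)
 *   BEGIN; LOCK TABLE Path IN SHARE ROW EXCLUSIVE MODE;
 *   INSERT INTO Path (Path)
 *     SELECT DISTINCT Path FROM batch
 *      WHERE NOT EXISTS (SELECT Path FROM Path WHERE Path.Path = batch.Path);
 *   COMMIT;                            -- batch_unlock_tables_query
 *
 *   -- final File insert; this one is taken literally from the code below
 *   INSERT INTO File (FileIndex, JobId, PathId, Filename, LStat, MD5, DeltaSeq)
 *     SELECT batch.FileIndex, batch.JobId, Path.PathId,
 *            batch.Name, batch.LStat, batch.MD5, batch.DeltaSeq
 *       FROM batch JOIN Path ON (batch.Path = Path.Path);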
*/ /* * Returns true if OK * false if failed */ bool bdb_write_batch_file_records(JCR *jcr) { bool retval = false; int JobStatus = jcr->JobStatus; if (!jcr->batch_started) { /* no files to backup ? */ Dmsg0(50,"db_write_batch_file_records: no files\n"); return true; } if (job_canceled(jcr)) { goto bail_out; } jcr->JobStatus = JS_AttrInserting; /* Check if batch mode is on hold */ while (!batch_mode_enabled) { Dmsg0(50, "batch mode is on hold\n"); bmicrosleep(10, 0); if (job_canceled(jcr)) { goto bail_out; } } Dmsg1(50,"db_write_batch_file_records changes=%u\n",jcr->db_batch->changes); if (!jcr->db_batch->sql_batch_end(jcr, NULL)) { Jmsg1(jcr, M_FATAL, 0, "Batch end %s\n", jcr->db_batch->errmsg); goto bail_out; } if (job_canceled(jcr)) { goto bail_out; } /* We have to lock tables */ if (!jcr->db_batch->bdb_sql_query(batch_lock_path_query[jcr->db_batch->bdb_get_type_index()], NULL, NULL)) { Jmsg1(jcr, M_FATAL, 0, "Lock Path table %s\n", jcr->db_batch->errmsg); goto bail_out; } if (!jcr->db_batch->bdb_sql_query(batch_fill_path_query[jcr->db_batch->bdb_get_type_index()], NULL, NULL)) { Jmsg1(jcr, M_FATAL, 0, "Fill Path table %s\n",jcr->db_batch->errmsg); jcr->db_batch->bdb_sql_query(batch_unlock_tables_query[jcr->db_batch->bdb_get_type_index()], NULL, NULL); goto bail_out; } if (!jcr->db_batch->bdb_sql_query(batch_unlock_tables_query[jcr->db_batch->bdb_get_type_index()], NULL, NULL)) { Jmsg1(jcr, M_FATAL, 0, "Unlock Path table %s\n", jcr->db_batch->errmsg); goto bail_out; } if (!jcr->db_batch->bdb_sql_query( "INSERT INTO File (FileIndex, JobId, PathId, Filename, LStat, MD5, DeltaSeq) " "SELECT batch.FileIndex, batch.JobId, Path.PathId, " "batch.Name, batch.LStat, batch.MD5, batch.DeltaSeq " "FROM batch JOIN Path ON (batch.Path = Path.Path) ", NULL, NULL)) { Jmsg1(jcr, M_FATAL, 0, "Fill File table %s\n", jcr->db_batch->errmsg); goto bail_out; } jcr->JobStatus = JobStatus; /* reset entry status */ retval = true; bail_out: jcr->db_batch->bdb_sql_query("DROP TABLE IF EXISTS batch", NULL,NULL); jcr->batch_started = false; return retval; } /** * Create File record in BDB * * In order to reduce database size, we store the File attributes, the Path * separately. In principle, there is a single Path record, no matter how * many times it occurs. This is this subroutine, we separate the file and * the path and fill temporary tables with this three records. * * Note: all routines that call this expect to be able to call * db_strerror(mdb) to get the error message, so the error message * MUST be edited into errmsg before returning an error status. */ bool BDB::bdb_create_batch_file_attributes_record(JCR *jcr, ATTR_DBR *ar) { ASSERT(ar->FileType != FT_BASE); Dmsg2(dbglevel, "FileIndex=%d Fname=%s\n", ar->FileIndex, ar->fname); Dmsg0(dbglevel, "put_file_into_catalog\n"); if (jcr->batch_started && jcr->db_batch->changes > 500000) { bdb_write_batch_file_records(jcr); jcr->db_batch->changes = 0; } /* Open the dedicated connexion */ if (!jcr->batch_started) { if (!bdb_open_batch_connection(jcr)) { return false; /* error already printed */ } if (!jcr->db_batch->sql_batch_start(jcr)) { Mmsg1(&errmsg, "Can't start batch mode: ERR=%s", jcr->db_batch->bdb_strerror()); Jmsg(jcr, M_FATAL, 0, "%s", errmsg); return false; } jcr->batch_started = true; } split_path_and_file(jcr, jcr->db_batch, ar->fname); return jcr->db_batch->sql_batch_insert(jcr, ar); } /** * Create File record in BDB * * In order to reduce database size, we store the File attributes and the Path * separately. 
In principle, there is a single Path record, no matter how * many times it occurs. This is this subroutine, we separate the file and * the path and create two database records. */ bool BDB::bdb_create_file_attributes_record(JCR *jcr, ATTR_DBR *ar) { bdb_lock(); Dmsg2(dbglevel, "FileIndex=%d Fname=%s\n", ar->FileIndex, ar->fname); Dmsg0(dbglevel, "put_file_into_catalog\n"); split_path_and_file(jcr, this, ar->fname); if (!bdb_create_path_record(jcr, ar)) { goto bail_out; } Dmsg1(dbglevel, "db_create_path_record: %s\n", esc_name); esc_name = check_pool_memory_size(esc_name, 2*fnl+2); bdb_escape_string(jcr, esc_name, fname, fnl); ar->Filename = esc_name; /* Now create master File record */ if (!bdb_create_file_record(jcr, ar)) { goto bail_out; } Dmsg0(dbglevel, "db_create_file_record OK\n"); Dmsg3(dbglevel, "CreateAttributes Path=%s File=%s Filename=%s\n", path, fname, ar->Filename); bdb_unlock(); return true; bail_out: bdb_unlock(); return false; } /** * This is the master File entry containing the attributes. * The filename and path records have already been created. */ int BDB::bdb_create_file_record(JCR *jcr, ATTR_DBR *ar) { int stat; static const char *no_digest = "0"; const char *digest; ASSERT(ar->JobId); ASSERT(ar->PathId); ASSERT(ar->Filename != NULL); if (ar->Digest == NULL || ar->Digest[0] == 0) { digest = no_digest; } else { digest = ar->Digest; } /* Must create it */ Mmsg(cmd, "INSERT INTO File (FileIndex,JobId,PathId,Filename," "LStat,MD5,DeltaSeq) VALUES (%d,%u,%u,'%s','%s','%s',%u)", ar->FileIndex, ar->JobId, ar->PathId, ar->Filename, ar->attr, digest, ar->DeltaSeq); if ((ar->FileId = sql_insert_autokey_record(cmd, NT_("File"))) == 0) { Mmsg2(&errmsg, _("Create db File record %s failed. ERR=%s"), cmd, sql_strerror()); Jmsg(jcr, M_FATAL, 0, "%s", errmsg); stat = 0; } else { stat = 1; } return stat; } /** * Create file attributes record, or base file attributes record */ bool BDB::bdb_create_attributes_record(JCR *jcr, ATTR_DBR *ar) { bool ret; Dmsg2(dbglevel, "FileIndex=%d Fname=%s\n", ar->FileIndex, ar->fname); errmsg[0] = 0; /* * Make sure we have an acceptable attributes record. */ if (!(ar->Stream == STREAM_UNIX_ATTRIBUTES || ar->Stream == STREAM_UNIX_ATTRIBUTE_UPDATE || ar->Stream == STREAM_UNIX_ATTRIBUTES_EX)) { Mmsg1(&errmsg, _("Attempt to put non-attributes into catalog. Stream=%d\n"), ar->Stream); Jmsg(jcr, M_FATAL, 0, "%s", errmsg); return false; } if (ar->FileType != FT_BASE) { if (batch_insert_available()) { ret = bdb_create_batch_file_attributes_record(jcr, ar); /* Error message already printed */ } else { ret = bdb_create_file_attributes_record(jcr, ar); } } else if (jcr->HasBase) { ret = bdb_create_base_file_attributes_record(jcr, ar); } else { Mmsg0(&errmsg, _("Cannot Copy/Migrate job using BaseJob.\n")); Jmsg(jcr, M_FATAL, 0, "%s", errmsg); ret = true; /* in copy/migration what do we do ? 
*/ } return ret; } /** * Create Base File record in BDB * */ bool BDB::bdb_create_base_file_attributes_record(JCR *jcr, ATTR_DBR *ar) { bool ret; Dmsg1(dbglevel, "create_base_file Fname=%s\n", ar->fname); Dmsg0(dbglevel, "put_base_file_into_catalog\n"); bdb_lock(); split_path_and_file(jcr, this, ar->fname); esc_name = check_pool_memory_size(esc_name, fnl*2+1); bdb_escape_string(jcr, esc_name, fname, fnl); esc_path = check_pool_memory_size(esc_path, pnl*2+1); bdb_escape_string(jcr, esc_path, path, pnl); Mmsg(cmd, "INSERT INTO basefile%lld (Path, Name) VALUES ('%s','%s')", (uint64_t)jcr->JobId, esc_path, esc_name); ret = InsertDB(jcr, cmd); bdb_unlock(); return ret; } /** * Cleanup the base file temporary tables */ static void db_cleanup_base_file(JCR *jcr, BDB *mdb) { POOL_MEM buf(PM_MESSAGE); Mmsg(buf, "DROP TABLE IF EXISTS new_basefile%lld", (uint64_t) jcr->JobId); mdb->bdb_sql_query(buf.c_str(), NULL, NULL); Mmsg(buf, "DROP TABLE IF EXISTS basefile%lld", (uint64_t) jcr->JobId); mdb->bdb_sql_query(buf.c_str(), NULL, NULL); } /** * Put all base file seen in the backup to the BaseFile table * and cleanup temporary tables */ bool BDB::bdb_commit_base_file_attributes_record(JCR *jcr) { bool ret; char ed1[50]; bdb_lock(); Mmsg(cmd, "INSERT INTO BaseFiles (BaseJobId, JobId, FileId, FileIndex) " "SELECT B.JobId AS BaseJobId, %s AS JobId, " "B.FileId, B.FileIndex " "FROM basefile%s AS A, new_basefile%s AS B " "WHERE A.Path = B.Path " "AND A.Name = B.Name " "ORDER BY B.FileId", edit_uint64(jcr->JobId, ed1), ed1, ed1); ret = bdb_sql_query(cmd, NULL, NULL); /* * Display error now, because the subsequent cleanup destroys the * error message from the above query. */ if (!ret) { Jmsg1(jcr, M_FATAL, 0, "%s", jcr->db->bdb_strerror()); } jcr->nb_base_files_used = sql_affected_rows(); db_cleanup_base_file(jcr, this); bdb_unlock(); return ret; } /** * Find the last "accurate" backup state with Base jobs * 1) Get all files with jobid in list (F subquery) * 2) Take only the last version of each file (Temp subquery) => accurate list is ok * 3) Put the result in a temporary table for the end of job * */ bool BDB::bdb_create_base_file_list(JCR *jcr, char *jobids) { POOL_MEM buf; bool ret = false; bdb_lock(); if (!*jobids) { Mmsg(errmsg, _("ERR=JobIds are empty\n")); goto bail_out; } Mmsg(cmd, create_temp_basefile[bdb_get_type_index()], (uint64_t) jcr->JobId); if (!bdb_sql_query(cmd, NULL, NULL)) { goto bail_out; } Mmsg(buf, select_recent_version[bdb_get_type_index()], jobids, jobids); Mmsg(cmd, create_temp_new_basefile[bdb_get_type_index()], (uint64_t)jcr->JobId, buf.c_str()); ret = bdb_sql_query(cmd, NULL, NULL); bail_out: bdb_unlock(); return ret; } /** * Create Plugin Object record in BDB * */ bool BDB::bdb_create_object_record(JCR *jcr, OBJECT_DBR *obj) { bool ret = false; int str_len; POOLMEM *esc_path = get_pool_memory(PM_MESSAGE); POOLMEM *esc_filename = get_pool_memory(PM_MESSAGE); POOLMEM *esc_plugin_name = get_pool_memory(PM_MESSAGE); char esc_obj_category[MAX_ESCAPE_NAME_LENGTH]; char esc_obj_type[MAX_ESCAPE_NAME_LENGTH]; char esc_obj_name[MAX_ESCAPE_NAME_LENGTH]; char esc_obj_source[MAX_ESCAPE_NAME_LENGTH]; char esc_obj_uuid[MAX_ESCAPE_NAME_LENGTH]; str_len = strlen(obj->Path); esc_path = check_pool_memory_size(esc_path, str_len*2+1); bdb_escape_string(jcr, esc_path, obj->Path, str_len); str_len = strlen(obj->Filename); esc_filename = check_pool_memory_size(esc_filename, str_len*2+1); bdb_escape_string(jcr, esc_filename, obj->Filename, str_len); str_len = strlen(obj->PluginName); esc_plugin_name = 
check_pool_memory_size(esc_plugin_name, str_len*2+1); bdb_escape_string(jcr, esc_plugin_name, obj->PluginName, str_len); bdb_escape_string(jcr, esc_obj_category, obj->ObjectCategory, strlen(obj->ObjectCategory)); bdb_escape_string(jcr, esc_obj_type, obj->ObjectType, strlen(obj->ObjectType)); bdb_escape_string(jcr, esc_obj_name, obj->ObjectName, strlen(obj->ObjectName)); bdb_escape_string(jcr, esc_obj_source, obj->ObjectSource, strlen(obj->ObjectSource)); bdb_escape_string(jcr, esc_obj_uuid, obj->ObjectUUID, strlen(obj->ObjectUUID)); bdb_lock(); Mmsg(cmd, "INSERT INTO Object (JobId, Path, Filename, PluginName, ObjectCategory, " "ObjectType, ObjectName, ObjectSource, ObjectUUID, ObjectSize, ObjectStatus, ObjectCount) " "VALUES (%lu, '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', %llu, '%c', %lu)", obj->JobId, esc_path, esc_filename, esc_plugin_name, esc_obj_category, esc_obj_type, esc_obj_name, esc_obj_source, esc_obj_uuid, obj->ObjectSize, obj->ObjectStatus ? (char)obj->ObjectStatus : 'U', obj->ObjectCount); obj->ObjectId = sql_insert_autokey_record(cmd, NT_("Object")); if (obj->ObjectId == 0) { Mmsg2(&errmsg, _("Create database Plugin Object record %s failed. ERR=%s"), cmd, sql_strerror()); Jmsg(jcr, M_FATAL, 0, "%s", errmsg); ret = false; } else { ret = true; } bdb_unlock(); free_pool_memory(esc_path); free_pool_memory(esc_filename); free_pool_memory(esc_plugin_name); return ret; } /** * Create Restore Object record in BDB * */ bool BDB::bdb_create_restore_object_record(JCR *jcr, ROBJECT_DBR *ro) { bool stat; int plug_name_len; POOLMEM *esc_plug_name = get_pool_memory(PM_MESSAGE); bdb_lock(); Dmsg1(dbglevel, "Oname=%s\n", ro->object_name); Dmsg0(dbglevel, "put_object_into_catalog\n"); fnl = strlen(ro->object_name); esc_name = check_pool_memory_size(esc_name, fnl*2+1); bdb_escape_string(jcr, esc_name, ro->object_name, fnl); bdb_escape_object(jcr, ro->object, ro->object_len); plug_name_len = strlen(ro->plugin_name); esc_plug_name = check_pool_memory_size(esc_plug_name, plug_name_len*2+1); bdb_escape_string(jcr, esc_plug_name, ro->plugin_name, plug_name_len); Mmsg(cmd, "INSERT INTO RestoreObject (ObjectName,PluginName,RestoreObject," "ObjectLength,ObjectFullLength,ObjectIndex,ObjectType," "ObjectCompression,FileIndex,JobId) " "VALUES ('%s','%s','%s',%d,%d,%d,%d,%d,%d,%u)", esc_name, esc_plug_name, esc_obj, ro->object_len, ro->object_full_len, ro->object_index, ro->FileType, ro->object_compression, ro->FileIndex, ro->JobId); ro->RestoreObjectId = sql_insert_autokey_record(cmd, NT_("RestoreObject")); if (ro->RestoreObjectId == 0) { Mmsg2(&errmsg, _("Create db Object record %s failed. 
ERR=%s"), cmd, sql_strerror()); Jmsg(jcr, M_FATAL, 0, "%s", errmsg); stat = false; } else { stat = true; } bdb_unlock(); free_pool_memory(esc_plug_name); return stat; } bool BDB::bdb_create_snapshot_record(JCR *jcr, SNAPSHOT_DBR *snap) { bool status = false; char esc_name[MAX_ESCAPE_NAME_LENGTH]; POOLMEM *esc_vol = get_pool_memory(PM_MESSAGE); POOLMEM *esc_dev = get_pool_memory(PM_MESSAGE); POOLMEM *esc_type = get_pool_memory(PM_MESSAGE); POOLMEM *esc_client = get_pool_memory(PM_MESSAGE); POOLMEM *esc_fs = get_pool_memory(PM_MESSAGE); char esc_comment[MAX_ESCAPE_NAME_LENGTH]; char dt[MAX_TIME_LENGTH], ed1[50], ed2[50]; time_t stime; struct tm tm; bdb_lock(); esc_vol = check_pool_memory_size(esc_vol, strlen(snap->Volume) * 2 + 1); bdb_escape_string(jcr, esc_vol, snap->Volume, strlen(snap->Volume)); esc_dev = check_pool_memory_size(esc_dev, strlen(snap->Device) * 2 + 1); bdb_escape_string(jcr, esc_dev, snap->Device, strlen(snap->Device)); esc_type = check_pool_memory_size(esc_type, strlen(snap->Type) * 2 + 1); bdb_escape_string(jcr, esc_type, snap->Type, strlen(snap->Type)); bdb_escape_string(jcr, esc_comment, snap->Comment, strlen(snap->Comment)); if (*snap->Client) { bdb_escape_string(jcr, esc_name, snap->Client, strlen(snap->Client)); Mmsg(esc_client, "(SELECT ClientId FROM Client WHERE Name='%s')", esc_name); } else { Mmsg(esc_client, "%d", snap->ClientId); } if (*snap->FileSet) { bdb_escape_string(jcr, esc_name, snap->FileSet, strlen(snap->FileSet)); Mmsg(esc_fs, "(SELECT FileSetId FROM FileSet WHERE FileSet='%s' ORDER BY CreateTime DESC LIMIT 1)", esc_name); } else { Mmsg(esc_fs, "%d", snap->FileSetId); } bdb_escape_string(jcr, esc_name, snap->Name, strlen(snap->Name)); stime = snap->CreateTDate; (void)localtime_r(&stime, &tm); strftime(dt, sizeof(dt), "%Y-%m-%d %H:%M:%S", &tm); Mmsg(cmd, "INSERT INTO Snapshot " "(Name, JobId, CreateTDate, CreateDate, ClientId, FileSetId, Volume, Device, Type, Retention, Comment) " "VALUES ('%s', %s, %d, '%s', %s, %s, '%s', '%s', '%s', %s, '%s')", esc_name, edit_uint64(snap->JobId, ed2), stime, dt, esc_client, esc_fs, esc_vol, esc_dev, esc_type, edit_int64(snap->Retention, ed1), esc_comment); if (bdb_sql_query(cmd, NULL, NULL)) { snap->SnapshotId = sql_insert_autokey_record(cmd, NT_("Snapshot")); status = true; } bdb_unlock(); free_pool_memory(esc_vol); free_pool_memory(esc_dev); free_pool_memory(esc_type); free_pool_memory(esc_client); free_pool_memory(esc_fs); return status; } int BDB::bdb_create_events_record(JCR *jcr, EVENTS_DBR *event) { bool status = false; int len; POOL_MEM tmp, type, from, source, time, text; char dt[MAX_TIME_LENGTH]; bdb_lock(); if (!is_name_valid(event->EventsCode, tmp.handle(), "")) { Mmsg(errmsg, "Invalid EventsCode %s", tmp.c_str()); goto bail_out; } if (!is_name_valid(event->EventsType, tmp.handle(), "")) { Mmsg(errmsg, "Invalid EventsType %s", tmp.c_str()); goto bail_out; } len = strlen(event->EventsType); type.check_size(len*2+1); db_escape_string(jcr, this, type.c_str(), event->EventsType, len); if (!is_name_valid(event->EventsSource, tmp.handle(), "*-.,:")) { /* Add *None* */ Mmsg(errmsg, "Invalid EventsSource %s", tmp.c_str()); goto bail_out; } len = strlen(event->EventsSource); source.check_size(len*2+1); db_escape_string(jcr, this, source.c_str(), event->EventsSource, len); if (!is_name_valid(event->EventsDaemon, tmp.handle())) { Mmsg(errmsg, "Invalid EventsDaemon %s", tmp.c_str()); goto bail_out; } len = strlen(event->EventsDaemon); from.check_size(len*2+1); db_escape_string(jcr, this, from.c_str(), 
event->EventsDaemon, len); len = strlen(event->EventsText); text.check_size(len*2+1); db_escape_string(jcr, this, text.c_str(), event->EventsText, len); bstrutime(dt, sizeof(dt), event->EventsTime); Mmsg(cmd, "INSERT INTO Events " "(EventsDaemon, EventsCode, EventsType, EventsSource, EventsRef, EventsTime, EventsText) " "VALUES ('%s', '%s', '%s', '%s', '0x%p', '%s', '%s')", from.c_str(), event->EventsCode, type.c_str(), source.c_str(), event->EventsRef, dt, text.c_str()); if (bdb_sql_query(cmd, NULL, NULL)) { status = true; } bail_out: bdb_unlock(); return status; } bool BDB::bdb_create_log_record(JCR *jcr, JobId_t jobid, utime_t mtime, char *msg) { bool ret; POOLMEM *tmp = get_pool_memory(PM_MESSAGE); POOLMEM *esc_msg = get_pool_memory(PM_MESSAGE); char dt[MAX_TIME_LENGTH], ed1[50]; int len = strlen(msg) + 1; esc_msg = check_pool_memory_size(esc_msg, len*2+1); bdb_escape_string(jcr, esc_msg, msg, len); bstrutime(dt, sizeof(dt), mtime); Mmsg(tmp, "INSERT INTO Log (JobId, Time, LogText) VALUES (%s,'%s','%s')", edit_int64(jcr->JobId, ed1), dt, esc_msg); ret = bdb_sql_query(tmp, NULL, NULL); free_pool_memory(tmp); free_pool_memory(esc_msg); return ret; } bool BDB::bdb_create_tag_record(JCR *jcr, TAG_DBR *tag) { uint64_t aclbits, aclbits_extra; char esc[MAX_ESCAPE_NAME_LENGTH]; char esc_name[MAX_ESCAPE_NAME_LENGTH]; const char *name; const char *table; const char *id; bool ret=false; tag->gen_sql(jcr, this, &table, &name, &id, esc, esc_name, &aclbits, &aclbits_extra); bdb_lock(); /* TODO: Need a special kind of ACL */ const char *whereand = get_acls(aclbits, false); const char *join = get_acl_join_filter(aclbits_extra); if (*esc_name && *esc) { /* We have a tag name */ Mmsg(cmd, "INSERT INTO Tag%s (Tag, %s) VALUES ('%s', (SELECT %s FROM %s %s WHERE %s = '%s' %s))", table, id, esc_name, id, table, join, (tag->JobId>0)?id:name, esc, whereand); ret = bdb_sql_query(cmd, NULL, (void *)NULL); } else { Dmsg2(DT_SQL|50, "Tag invalid esc_name='%s' esc='%s'\n", esc_name, esc); } bdb_unlock(); return ret; } bool BDB::bdb_create_fileevent_record(JCR *jcr, FILEEVENT_DBR *event) { char esc1[MAX_ESCAPE_NAME_LENGTH]; char esc2[MAX_ESCAPE_NAME_LENGTH]; bool ret=false; bdb_lock(); bdb_escape_string(jcr, esc1, event->Description, strlen(event->Description)); bdb_escape_string(jcr, esc2, event->Source, strlen(event->Source)); Mmsg(cmd, "INSERT INTO FileEvents (SourceJobId, JobId, FileIndex, Type, Description, Severity, Source) " " VALUES (%lu, %s, %ld, '%c', '%s', %d, '%s')", event->SourceJobId, event->JobId, event->FileIndex, event->Type, esc1, event->Severity, esc2); ret = bdb_sql_query(cmd, NULL, (void *)NULL); bdb_unlock(); return ret; } #endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL */ bacula-15.0.3/src/cats/delete_catalog_backup.in0000644000175000017500000000134314771010173021215 0ustar bsbuildbsbuild#!/bin/sh # # Copyright (C) 2000-2023 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # # This script deletes a catalog dump # db_name=@db_name@ rm -f @working_dir@/${db_name}.sql # # We recommend that you email a copy of the bsr file that was # made for the Job that runs this script to yourself. # You just need to put the bsr file in /opt/bacula/working/BackupCatalog.bsr # or adjust the script below. Please replace all %xxx% with what # is appropriate at your site. 
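#
# As an illustration only (hypothetical host, path and addresses; the
# commented template just below shows the real placeholders to substitute):
#
#   /opt/bacula/bin/bsmtp -h mail.example.com -s "catalog.bsr" \
#       admin@example.com </opt/bacula/working/BackupCatalog.bsr
#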
# #@bindir@/bsmtp -h %smtp-server% -s "catalog.bsr" \ # %your-name@company.org% <@bsrdir@/catalog.bsr # # The following script will email a summary of the backup jobs that # were completed in the last 24 hours #@scriptdir@/baculabackupreport 24 # bacula-15.0.3/src/cats/cats.c0000644000175000017500000006605014771010173015510 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Generic catalog class methods. * * Note: at one point, this file was assembled from parts of other files * by a programmer, and other than "wrapping" in a class, which is a trivial * change for a C++ programmer, nothing substantial was done, yet all the * code was recommitted under this programmer's name. Consequently, we * undo those changes here. */ #include "bacula.h" #if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL #include "cats.h" static int dbglvl=100; void append_filter(POOLMEM **buf, char *cond) { if (*buf[0] != '\0') { pm_strcat(buf, " AND "); } else { pm_strcpy(buf, " WHERE "); } pm_strcat(buf, cond); } static void append_AND_OR_filter(bool and_or, POOLMEM **buf, char *cond) { if (*buf[0] != '\0') { if (and_or) { pm_strcat(buf, " OR "); } else { pm_strcat(buf, " AND "); } } else { if (and_or) { pm_strcpy(buf, " WHERE ( "); } else { pm_strcat(buf, " WHERE "); } } pm_strcat(buf, cond); } bool BDB::bdb_match_database(const char *db_driver, const char *db_name, const char *db_address, int db_port) { BDB *mdb = this; bool match; if (db_driver) { match = strcasecmp(mdb->m_db_driver, db_driver) == 0 && bstrcmp(mdb->m_db_name, db_name) && bstrcmp(mdb->m_db_address, db_address) && mdb->m_db_port == db_port && mdb->m_dedicated == false; } else { match = bstrcmp(mdb->m_db_name, db_name) && bstrcmp(mdb->m_db_address, db_address) && mdb->m_db_port == db_port && mdb->m_dedicated == false; } return match; } BDB *BDB::bdb_clone_database_connection(JCR *jcr, bool mult_db_connections) { BDB *mdb = this; /* * See if its a simple clone e.g. with mult_db_connections set to false * then we just return the calling class pointer. */ if (!mult_db_connections) { mdb->m_ref_count++; return mdb; } /* * A bit more to do here just open a new session to the database. */ return db_init_database(jcr, mdb->m_db_driver, mdb->m_db_name, mdb->m_db_user, mdb->m_db_password, mdb->m_db_address, mdb->m_db_port, mdb->m_db_socket, mdb->m_db_ssl_mode, mdb->m_db_ssl_key, mdb->m_db_ssl_cert, mdb->m_db_ssl_ca, mdb->m_db_ssl_capath, mdb->m_db_ssl_cipher, true, mdb->m_disabled_batch_insert); } const char *BDB::bdb_get_engine_name(void) { BDB *mdb = this; switch (mdb->m_db_driver_type) { case SQL_DRIVER_TYPE_MYSQL: return "MySQL"; case SQL_DRIVER_TYPE_POSTGRESQL: return "PostgreSQL"; case SQL_DRIVER_TYPE_SQLITE3: return "SQLite3"; default: return "Unknown"; } } /* * Lock database, this can be called multiple times by the same * thread without blocking, but must be unlocked the number of * times it was locked using db_unlock(). 
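 *
 * Typical pattern used throughout the cats code (illustrative sketch only):
 *
 *   bdb_lock();                       // recursive within the same thread
 *   Mmsg(cmd, "SELECT ...");          // build the query while holding the lock
 *   if (QueryDB(jcr, cmd)) { ... }    // QueryDB/UpdateDB run under the lock here
 *   bdb_unlock();                     // one unlock for every lock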
*/ void BDB::bdb_lock(const char *file, int line) { int errstat; BDB *mdb = this; if ((errstat = rwl_writelock_p(&mdb->m_lock, file, line)) != 0) { berrno be; e_msg(file, line, M_FATAL, 0, "rwl_writelock failure. stat=%d: ERR=%s\n", errstat, be.bstrerror(errstat)); } } /* * Unlock the database. This can be called multiple times by the * same thread up to the number of times that thread called * db_lock()/ */ void BDB::bdb_unlock(const char *file, int line) { int errstat; BDB *mdb = this; if ((errstat = rwl_writeunlock(&mdb->m_lock)) != 0) { berrno be; e_msg(file, line, M_FATAL, 0, "rwl_writeunlock failure. stat=%d: ERR=%s\n", errstat, be.bstrerror(errstat)); } } bool BDB::bdb_sql_query(const char *query, int flags) { bool retval; BDB *mdb = this; bdb_lock(); retval = sql_query(query, flags); if (!retval) { Mmsg(mdb->errmsg, _("Query failed: %s: ERR=%s\n"), query, sql_strerror()); } bdb_unlock(); return retval; } void BDB::print_lock_info(FILE *fp) { BDB *mdb = this; if (mdb->m_lock.valid == RWLOCK_VALID) { fprintf(fp, "\tRWLOCK=%p w_active=%i w_wait=%i\n", &mdb->m_lock, mdb->m_lock.w_active, mdb->m_lock.w_wait); } } bool OBJECT_DBR::parse_plugin_object_string(char **obj_str) { bool ret = false; int fnl, pnl; uint64_t val = 0; char *tmp = get_next_tag(obj_str); if (!tmp) { goto bail_out; } if (tmp[strlen(tmp) - 1] == '/') { pm_strcpy(Path, tmp); unbash_spaces(Path); } else { split_path_and_filename(tmp, &Path, &pnl, &Filename, &fnl); unbash_spaces(Path); unbash_spaces(Filename); } tmp = get_next_tag(obj_str); if (!tmp) { goto bail_out; } pm_strcpy(PluginName, tmp); unbash_spaces(PluginName); tmp = get_next_tag(obj_str); if (!tmp) { goto bail_out; } bstrncpy(ObjectCategory, tmp, sizeof(ObjectCategory)); unbash_spaces(ObjectCategory); tmp = get_next_tag(obj_str); if (!tmp) { goto bail_out; } bstrncpy(ObjectType, tmp, sizeof(ObjectType)); unbash_spaces(ObjectType); tmp = get_next_tag(obj_str); if (!tmp) { goto bail_out; } bstrncpy(ObjectName, tmp, sizeof(ObjectName)); unbash_spaces(ObjectName); tmp = get_next_tag(obj_str); if (!tmp) { goto bail_out; } bstrncpy(ObjectSource, tmp, sizeof(ObjectSource)); unbash_spaces(ObjectSource); tmp = get_next_tag(obj_str); if (!tmp) { goto bail_out; } bstrncpy(ObjectUUID, tmp, sizeof(ObjectUUID)); unbash_spaces(ObjectUUID); tmp = get_next_tag(obj_str); if (!tmp) { goto bail_out; } val = str_to_uint64(tmp); ObjectSize = (val > 9223372036854775808ULL /*2^63 */) ? 
0 : val; /* We should have status and count in the end of the stream */ tmp = get_next_tag(obj_str); if (!tmp) { /* We want to work with plugins that does not send the status and count since it's not required, * so we're good to proceed here - simply return success */ ret = true; goto bail_out; } ObjectStatus = (int)*tmp; tmp = get_next_tag(obj_str); if (!tmp) { goto bail_out; } ObjectCount = str_to_uint64(*obj_str); ret = true; bail_out: /* Print whatever was parsed */ Dmsg11(dbglvl, "Parsed PluginObject: Path: %s Fname: %s PluginName: %s Category: %s " "Type: %s Name: %s Source: %s UUID: %s Size: %lld Status: %d Count: %lld\n", Path, Filename, PluginName, ObjectCategory, ObjectType, ObjectName, ObjectSource, ObjectUUID, ObjectSize, (char)ObjectStatus, ObjectCount); if (!ret) { /* Reset parsed fields */ reset(); } return ret; } void OBJECT_DBR::create_db_filter(JCR *jcr, POOLMEM **where) { POOL_MEM esc(PM_MESSAGE), tmp(PM_MESSAGE); if (ObjectId > 0) { Mmsg(tmp, " Object.ObjectId=%lu", ObjectId); append_filter(where, tmp.c_str()); } else { if (JobId != 0) { Mmsg(tmp, " Object.JobId=%lu", JobId); append_filter(where, tmp.c_str()); } if (is_a_number_list(JobIds)) { Mmsg(tmp, " Object.JobId IN (%s) ", JobIds); append_filter(where, tmp.c_str()); } if (Path[0] != 0) { db_escape_string(jcr, jcr->db, esc.c_str(), Path, strlen(Path)); Mmsg(tmp, " Object.Path='%s'", esc.c_str()); append_filter(where, tmp.c_str()); } if (Filename[0] != 0) { db_escape_string(jcr, jcr->db, esc.c_str(), Filename, strlen(Filename)); Mmsg(tmp, " Object.Filename='%s'", esc.c_str()); append_filter(where, tmp.c_str()); } if (PluginName[0] != 0) { db_escape_string(jcr, jcr->db, esc.c_str(), PluginName, strlen(PluginName)); Mmsg(tmp, " Object.PluginName='%s'", esc.c_str()); append_filter(where, tmp.c_str()); } if (ObjectCategory[0] != 0) { db_escape_string(jcr, jcr->db, esc.c_str(), ObjectCategory, strlen(ObjectCategory)); Mmsg(tmp, " Object.ObjectCategory='%s'", esc.c_str()); append_filter(where, tmp.c_str()); } if (ObjectType[0] != 0) { db_escape_string(jcr, jcr->db, esc.c_str(), ObjectType, strlen(ObjectType)); Mmsg(tmp, " Object.ObjectType='%s'", esc.c_str()); append_filter(where, tmp.c_str()); } if (ObjectName[0] != 0) { db_escape_string(jcr, jcr->db, esc.c_str(), ObjectName, strlen(ObjectName)); Mmsg(tmp, " Object.Objectname='%s'", esc.c_str()); append_filter(where, tmp.c_str()); } if (ObjectSource[0] != 0) { db_escape_string(jcr, jcr->db, esc.c_str(), ObjectSource, strlen(ObjectSource)); Mmsg(tmp, " Object.ObjectSource='%s'", esc.c_str()); append_filter(where, tmp.c_str()); } if (ObjectUUID[0] != 0) { db_escape_string(jcr, jcr->db, esc.c_str(), ObjectUUID, strlen(ObjectUUID)); Mmsg(tmp, " Object.ObjectUUID='%s'", esc.c_str()); append_filter(where, tmp.c_str()); } if (ObjectSize > 0) { Mmsg(tmp, " Object.ObjectSize=%llu", ObjectSize); append_filter(where, tmp.c_str()); } if (ObjectStatus != 0) { Mmsg(tmp, " Object.ObjectStatus='%c'", ObjectStatus); append_filter(where, tmp.c_str()); } } } void parse_restore_object_string(char **r_obj_str, ROBJECT_DBR *robj_r) { char *p = *r_obj_str; int len; robj_r->FileIndex = str_to_int32(p); /* FileIndex */ skip_nonspaces(&p); skip_spaces(&p); robj_r->FileType = str_to_int32(p); /* FileType */ skip_nonspaces(&p); skip_spaces(&p); robj_r->object_index = str_to_int32(p); /* Object Index */ skip_nonspaces(&p); skip_spaces(&p); robj_r->object_len = str_to_int32(p); /* object length possibly compressed */ skip_nonspaces(&p); skip_spaces(&p); robj_r->object_full_len = str_to_int32(p); /* 
uncompressed object length */ skip_nonspaces(&p); skip_spaces(&p); robj_r->object_compression = str_to_int32(p); /* compression */ skip_nonspaces(&p); skip_spaces(&p); robj_r->plugin_name = p; /* point to plugin name */ len = strlen(robj_r->plugin_name); robj_r->object_name = &robj_r->plugin_name[len+1]; /* point to object name */ len = strlen(robj_r->object_name); robj_r->object = &robj_r->object_name[len+1]; /* point to object */ robj_r->object[robj_r->object_len] = 0; /* add zero for those who attempt printing */ Dmsg7(dbglvl, "oname=%s stream=%d FT=%d FI=%d JobId=%ld, obj_len=%d\nobj=\"%s\"\n", robj_r->object_name, robj_r->Stream, robj_r->FileType, robj_r->FileIndex, robj_r->JobId, robj_r->object_len, robj_r->object); } /**************************************************************** * Interface to the MetaXXX tables (MetaEmail, MetaAttachments * * The main idea is to get a JSON object fron the FD/SD that represents data to * index in our catalog. We need to verify the input and convert/insert the * data into the right table. * ****************************************************************/ /* The cJSON lib is a small JSON parser/writer */ #include "lib/cJSON.h" /* We need one scanner per type and per version of data */ struct json_sql { const char *json_name; const char *sql_name; OutputType type; }; /* This class is the common interface for all JSON parser. It can be used in * simple mapping. For more complex handling, we can subclass it * * We will have a specific implementation for email, attachments, ... * Each JSON input must be very carefully checked!! */ class META_JSON_SCANNER: public SMARTALLOC { public: const char *m_table; struct json_sql *m_j2s; META_JSON_SCANNER(const char *table, struct json_sql *j2s): m_table(table), m_j2s(j2s) {}; virtual ~META_JSON_SCANNER(){}; /* Parse a JSON node and validate the input */ virtual bool parse(JCR *jcr, BDB *db, DBId_t jid, int64_t fidx, cJSON *root, POOLMEM **dest); }; /* Email JSON data { "Version": 1, "Type" : "EMAIL", "EmailBodyPreview" : "-------- Forwarded Message --------\r\nSubject: Re: [Bacula-devel] When 5.1?\r\nDate: Sun, 5 Jun 2011 17:36:33 +0200\r\nFrom: Bastian Friedrich \r\nTo: bacula-devel@lists.sourceforge.net\r\nCC: Kern Sibbald ", "EmailCc" : "", "EmailConversationId" : "AAQkADkzMjFhOGZjLTM4NWQtNDU1OC1iODA0LWRmNzRiOTRkZjEzMgAQAFu5M6Q-lKhMhvlmDMcwbug=", "EmailFolderName" : "Inbox", "EmailFrom" : "eric.bollengier@baculasystems.com", "EmailId" : "AAMkADkzMjFhOGZjLTM4NWQtNDU1OC1iODA0LWRmNzRiOTRkZjEzMgBGAAAAAACLSt6kXFgwSaU_laiYbZmvBwBUvb47c4T-R5BYeUNUiTxqAADsn4zsAABUvb47c4T-R5BYeUNUiTxqAADsn8WxAAA=", "EmailImportance" : "NORMAL", "EmailInternetMessageId" : "<2fe3fc53-e83d-70e6-ebcf-38859c84ec26@baculasystems.com>", "EmailIsDraft" : 0, "EmailIsRead" : 0, "EmailTime" : "Sep 10, 2021, 2:44:36 PM", "EmailSubject" : "Fwd: [Bacula-devel] When 5.1?", "EmailTags" : "category1, category2", "EmailTo" : "jorgegea@jorgegea.onmicrosoft.com", "EmailHasAttachment" : 0, "Plugin" : "m365" } */ #define SAME_KW(keyword, type) {keyword, keyword, type} static struct json_sql email_json_v1[] = { SAME_KW("EmailTenant", OT_STRING), SAME_KW("EmailOwner", OT_STRING), SAME_KW("EmailBodyPreview", OT_STRING), SAME_KW("EmailCc", OT_STRING), SAME_KW("EmailConversationId", OT_STRING), SAME_KW("EmailFolderName", OT_STRING), SAME_KW("EmailFrom", OT_STRING), SAME_KW("EmailId", OT_STRING), SAME_KW("EmailImportance", OT_STRING), SAME_KW("EmailInternetMessageId", OT_STRING), SAME_KW("EmailIsRead", OT_BOOL), SAME_KW("EmailIsDraft", OT_BOOL), 
SAME_KW("EmailTime", OT_DATE), SAME_KW("EmailSubject", OT_STRING), SAME_KW("EmailTags", OT_STRING), SAME_KW("EmailTo", OT_STRING), SAME_KW("EmailSize", OT_INT), SAME_KW("EmailHasAttachment", OT_INT), SAME_KW("Plugin", OT_STRING), {NULL, NULL, OT_END} }; /* { "AttachmentContentType" : "application/octet-stream", "AttachmentEmailId" : "AAMkAGZmZjBlMjI0LTMxMmEtNDFkMi1hM2YxLWEzNjI5MjY4M2JkMQBGAAAAAAChUr1sDFmcSYm7PK3nvLVxBwB-S4yOymgVRpR5CA4-eilAAABABKybAAB-S4yOymgVRpR5CA4-eilAAABABUnfAAA=", "AttachmentId" : "AAMkAGZmZjBlMjI0LTMxMmEtNDFkMi1hM2YxLWEzNjI5MjY4M2JkMQBGAAAAAAChUr1sDFmcSYm7PK3nvLVxBwB-S4yOymgVRpR5CA4-eilAAABABKybAAB-S4yOymgVRpR5CA4-eilAAABABUnfAAABEgAQAKT86cEi1S9PgA8I5xS0vKA=", "AttachmentIsInline" : 0, "AttachmentName" : "Ancillae.gen", "Plugin" : "m365", "Type" : "ATTACHMENT", "Version" : 1 } */ static struct json_sql email_attachment_json_v1[] = { SAME_KW("AttachmentTenant", OT_STRING), SAME_KW("AttachmentOwner", OT_STRING), SAME_KW("AttachmentContentType", OT_STRING), SAME_KW("AttachmentEmailId", OT_STRING), //SAME_KW("AttachmentId", OT_STRING), SAME_KW("AttachmentIsInline", OT_BOOL), SAME_KW("AttachmentName", OT_STRING), SAME_KW("AttachmentSize", OT_INT), SAME_KW("Plugin", OT_STRING), {NULL, NULL, OT_END} }; bool META_JSON_SCANNER::parse(JCR *jcr, BDB *db, DBId_t jid, int64_t fidx, cJSON *root, POOLMEM **dest) { POOL_MEM values, tmp, esc; bool status = false; bool first = true; cJSON *val; int len; Mmsg(dest, "INSERT INTO %s (", m_table); for (int i=0; m_j2s[i].json_name ; i++) { if (!first) { pm_strcat(dest, ","); } pm_strcat(dest, m_j2s[i].sql_name); val = cJSON_GetObjectItemCaseSensitive(root, m_j2s[i].json_name); switch(m_j2s[i].type) { case OT_BOOL: if (!cJSON_IsNumber(val)) { Mmsg(dest, "JSON Error: Unable to find %s", m_j2s[i].json_name); goto bail_out; } Mmsg(tmp, "%c%d", first?' ':',', val->valuedouble == 0 ? 0 : 1); break; case OT_DATE: { const char *def = sql_now[db_get_type_index(db)]; if (!cJSON_IsString(val) || (val->valuestring == NULL)) { Mmsg(dest, "JSON Error: Unable to find %s", m_j2s[i].json_name); goto bail_out; } // TODO: Need to check the date format if (val->valuestring[0] != 0) { // not empty string def = val->valuestring; } len = strlen(def); esc.check_size(len*2+1); db_escape_string(jcr, db, esc.c_str(), def, len); Mmsg(tmp, "%c'%s'", first?' ':',', esc.c_str()); } break; case OT_STRING: if (!cJSON_IsString(val) || (val->valuestring == NULL)) { Mmsg(dest, "JSON Error: Unable to find %s", m_j2s[i].json_name); goto bail_out; } len = strlen(val->valuestring); esc.check_size(len*2+1); db_escape_string(jcr, db, esc.c_str(), val->valuestring, len); Mmsg(tmp, "%c'%s'", first?' ':',', esc.c_str()); break; case OT_INT: if (!cJSON_IsNumber(val)) { Mmsg(dest, "JSON Error: Unable to find %s", m_j2s[i].json_name); goto bail_out; } Mmsg(tmp, "%c%lld", first?' ':',', (int64_t)val->valuedouble); break; default: Mmsg(dest, "Implenentation issue with type %d", m_j2s[i].type); goto bail_out; } first = false; pm_strcat(values, tmp.c_str()); } /* Finish the query with job information */ pm_strcat(dest, ",JobId,FileIndex) VALUES ("); pm_strcat(dest, values.c_str()); Mmsg(tmp, ", %lld, %lld)", jid, fidx); pm_strcat(dest, tmp.c_str()); status = true; bail_out: return status; } static void *cats_malloc(size_t size) { return malloc(size); } /**************************************************************** * The META_JSON class is here to initilize and find the META_XXX_JSON * implementation to use to decode the JSON stream. 
****************************************************************/ bool META_JSON::parse(JCR *jcr, BDB *db, DBId_t jid, int64_t fidx, const char *string, int len, POOLMEM **dest) { bool status = false; cJSON *type = NULL; cJSON *version = NULL; META_JSON_SCANNER *impl = NULL; /* We use our own memory allocator to track orphan buffers */ cJSON_Hooks hook; hook.malloc_fn = cats_malloc; hook.free_fn = bfree; cJSON_InitHooks(&hook); cJSON *json = cJSON_ParseWithLength(string, len); if (json == NULL) { const char *error_ptr = cJSON_GetErrorPtr(); if (error_ptr != NULL) { Mmsg(dest, "JSON Error before: %s\n", error_ptr); } goto bail_out; } type = cJSON_GetObjectItemCaseSensitive(json, "Type"); if (!cJSON_IsString(type) || (type->valuestring == NULL)) { Mmsg(dest, "JSON Error: Unable to find Type"); goto bail_out; } version = cJSON_GetObjectItemCaseSensitive(json, "Version"); if (!cJSON_IsNumber(version) || (version->valueint == 0)) { Mmsg(dest, "JSON Error: Unable to find Version"); goto bail_out; } if (strcmp(type->valuestring, "EMAIL") == 0) { if (version->valueint >= 1) { /* If the parser cannot handle a new format, adjust it */ impl = New(META_JSON_SCANNER("MetaEmail", email_json_v1)); } } else if (strcmp(type->valuestring, "ATTACHMENT") == 0) { if (version->valueint >= 1) { impl = New(META_JSON_SCANNER("MetaAttachment", email_attachment_json_v1)); } } if (!impl) { Mmsg(dest, "JSON Error: Incorrect Type"); goto bail_out; } if (!impl->parse(jcr, db, jid, fidx, json, dest)) { goto bail_out; } status = true; bail_out: if (impl) { delete impl; } /* TODO: Need to see if we need to free all members */ if (json) { cJSON_Delete(json); } return status; } void META_DBR::get_important_keys(POOLMEM **where) { if (bstrcasecmp(Type, "email")) { Mmsg(where, "EmailTenant, EmailOwner, EmailFrom, EmailTo, EmailTime, EmailSubject, MetaEmail.FileIndex, MetaEmail.JobId"); } else { Mmsg(where, "AttachmentEmailId, AttachmentSize, AttachmentName, MetaAttachment.FileIndex, MetaAttachment.JobId"); } } void META_DBR::get_all_keys(POOLMEM **where) { struct json_sql *p; const char *sep=NULL; if (bstrcasecmp(Type, "email")) { p = email_json_v1; sep = ",MetaEmail."; } else { p = email_attachment_json_v1; sep = ",MetaAttachment."; } Mmsg(where, "Meta%s.JobId,Meta%s.FileIndex", Type, Type); for (int i = 0; p[i].sql_name ; i++) { pm_strcat(where, sep); pm_strcat(where, p[i].sql_name); } } bool META_DBR::check() { if (!Type[0]) { bsnprintf(errmsg, sizeof(errmsg), _("Type is not set")); return false; } if (!Tenant[0]) { bsnprintf(errmsg, sizeof(errmsg), _("Tenant not set")); return false; } return true; } void META_DBR::create_db_filter(JCR *jcr, BDB *db, POOLMEM **where) { bool and_or = false; POOL_MEM esc(PM_MESSAGE), tmp(PM_MESSAGE); if (bstrcasecmp(Type, "email")) { bstrncpy(Type, "Email", sizeof(Type)); } else { bstrncpy(Type, "Attachment", sizeof(Type)); } if (strcmp(Type, "Email") == 0) { if (all && (*From || *To || *Cc || *Subject || *Tags || *BodyPreview || *Category)) { and_or = true; } if (Id[0] != 0) { db->search_op(jcr, "MetaEmail.EmailId", Id, esc.handle(), tmp.handle()); append_AND_OR_filter(and_or, where, tmp.c_str()); } if (From[0] != 0) { db->search_op(jcr, "MetaEmail.EmailFrom", From, esc.handle(), tmp.handle()); append_AND_OR_filter(and_or, where, tmp.c_str()); } if (To[0] != 0) { db->search_op(jcr, "MetaEmail.EmailTo", To, esc.handle(), tmp.handle()); append_AND_OR_filter(and_or, where, tmp.c_str()); } if (Cc[0] != 0) { db->search_op(jcr, "MetaEmail.EmailCc", Cc, esc.handle(), tmp.handle()); 
append_AND_OR_filter(and_or, where, tmp.c_str()); } if (Subject[0] != 0) { db->search_op(jcr, "MetaEmail.EmailSubject", Subject, esc.handle(), tmp.handle()); append_AND_OR_filter(and_or, where, tmp.c_str()); } if (FolderName[0] != 0) { db->search_op(jcr, "MetaEmail.EmailFolderName", FolderName, esc.handle(), tmp.handle()); append_AND_OR_filter(and_or, where, tmp.c_str()); } if (Tags[0] != 0) { db->search_op(jcr, "MetaEmail.EmailTags", Tags, esc.handle(), tmp.handle()); append_AND_OR_filter(and_or, where, tmp.c_str()); } if (BodyPreview[0] != 0) { db->search_op(jcr, "MetaEmail.EmailBodyPreview", BodyPreview, esc.handle(), tmp.handle()); append_AND_OR_filter(and_or, where, tmp.c_str()); } #if 0 if (Category[0] != 0) { db->search_op(jcr, "MetaEmail.EmailCategory", Category, esc.handle(), tmp.handle()); append_AND_OR_filter(and_or, where, tmp.c_str()); } #endif if (and_or) { pm_strcat(where, ") "); } if (ClientName[0] != 0) { db_escape_string(jcr, jcr->db, esc.c_str(), ClientName, strlen(ClientName)); Mmsg(tmp, " Client.Name='%s'", esc.c_str()); append_filter(where, tmp.c_str()); } if (ConversationId[0] != 0) { db_escape_string(jcr, jcr->db, esc.c_str(), ConversationId, strlen(ConversationId)); Mmsg(tmp, " MetaEmail.EmailConversationId = '%s'", esc.c_str()); append_filter(where, tmp.c_str()); } if (HasAttachment > 0) { Mmsg(tmp, " MetaEmail.EmailHasAttachment = %d", HasAttachment); append_filter(where, tmp.c_str()); } if (isDraft > 0) { Mmsg(tmp, " MetaEmail.EmailIsDraft = %d", isDraft); append_filter(where, tmp.c_str()); } if (isRead > 0) { Mmsg(tmp, " MetaEmail.EmailIsRead = %d", isRead); append_filter(where, tmp.c_str()); } if (MinTime[0]) { db_escape_string(jcr, jcr->db, esc.c_str(), MinTime, strlen(MinTime)); Mmsg(tmp, " MetaEmail.EmailTime >= '%s'", esc.c_str()); append_filter(where, tmp.c_str()); } if (MaxTime[0]) { db_escape_string(jcr, jcr->db, esc.c_str(), MaxTime, strlen(MaxTime)); Mmsg(tmp, " MetaEmail.EmailTime <= '%s'", esc.c_str()); append_filter(where, tmp.c_str()); } } else { if (Id[0] != 0) { db_escape_string(jcr, jcr->db, esc.c_str(), Id, strlen(Id)); Mmsg(tmp, " MetaAttachment.AttachmentEmailId = '%s'", esc.c_str()); append_AND_OR_filter(and_or, where, tmp.c_str()); } if (Name[0] != 0) { db->search_op(jcr, "MetaAttachment.AttachmentName", Name, esc.handle(), tmp.handle()); append_AND_OR_filter(and_or, where, tmp.c_str()); } if (isInline >= 0) { Mmsg(tmp, " MetaAttachment.AttachmentIsInline = %d", isInline); append_filter(where, tmp.c_str()); } if (ContentType[0]) { db_escape_string(jcr, jcr->db, esc.c_str(), ContentType, strlen(ContentType)); Mmsg(tmp, " MetaAttachment.AttachmentContentType = '%s'", esc.c_str()); append_filter(where, tmp.c_str()); } } if (Owner[0]) { db_escape_string(jcr, jcr->db, esc.c_str(), Owner, strlen(Owner)); if (strchr(Owner, '%')) { Mmsg(tmp, " Meta%s.%sOwner ILIKE '%s'", Type, Type, esc.c_str()); } else { Mmsg(tmp, " Meta%s.%sOwner = '%s'", Type, Type, esc.c_str()); } append_filter(where, tmp.c_str()); } if (Tenant[0]) { db_escape_string(jcr, jcr->db, esc.c_str(), Tenant, strlen(Tenant)); Mmsg(tmp, " Meta%s.%sTenant = '%s'", Type, Type, esc.c_str()); append_filter(where, tmp.c_str()); } if (MinSize > 0) { Mmsg(tmp, " Meta%s.%sSize >= %llu", Type, Type, MinSize); append_filter(where, tmp.c_str()); } if (MaxSize > 0) { Mmsg(tmp, " Meta%s.%sSize <= %llu", Type, Type, MaxSize); append_filter(where, tmp.c_str()); } if (Plugin[0] != 0) { db_escape_string(jcr, jcr->db, esc.c_str(), Plugin, strlen(Plugin)); Mmsg(tmp, " Meta%s.Plugin='%s'", Type, 
esc.c_str()); append_filter(where, tmp.c_str()); } if (is_a_number_list(JobIds)) { Mmsg(tmp, " Meta%s.JobId IN (%s)", Type, JobIds); append_filter(where, tmp.c_str()); } } #endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL */ bacula-15.0.3/src/cats/fix_postgresql_tables0000644000175000017500000000144214771010173020732 0ustar bsbuildbsbuild#!/bin/sh # # Shell script to fix PostgreSQL tables in version 8 # echo " " echo "This script will fix a Bacula PostgreSQL database version 8" echo "Depending on the size of your database," echo "this script may take several minutes to run." echo " " # # Set the following to the path to psql. bindir=****EDIT-ME to be the path to psql**** if $bindir/psql $* -f - < (NOW - Retention) */ bool sorted_client; /* Results sorted by Client, SnapshotId */ int status; /* Status of the snapshot */ DBId_t SnapshotId; /* Unique Snapshot ID */ DBId_t JobId; /* Related JobId */ DBId_t FileSetId; /* FileSetId if any */ DBId_t ClientId; /* From which client this snapshot comes */ char Name[MAX_NAME_LENGTH]; /* Snapshot Name */ char FileSet[MAX_NAME_LENGTH];/* FileSet name if any */ char Client[MAX_NAME_LENGTH]; /* Client name */ char Type[MAX_NAME_LENGTH]; /* zfs, btrfs, lvm, netapp, */ char Comment[MAX_NAME_LENGTH];/* Comment */ char CreateDate[MAX_TIME_LENGTH]; /* Create date as string */ time_t CreateTDate; /* Create TDate (in sec, since epoch) */ char *Volume; /* Volume taken in snapshot */ char *Device; /* Device, Pool, Directory, ... */ char *errmsg; /* Error associated with a snapshot */ utime_t Retention; /* Number of second before pruning the snapshot */ uint64_t Size; /* Snapshot Size */ }; class TAG_DBR { public: char Client[MAX_NAME_LENGTH]; /* Client name */ char Job[MAX_NAME_LENGTH]; /* Job name */ char Pool[MAX_NAME_LENGTH]; /* Pool name */ char Volume[MAX_NAME_LENGTH]; /* Volume name */ char Comment[MAX_NAME_LENGTH]; /* Comment */ char Name[MAX_NAME_LENGTH]; /* Name */ char Object[MAX_NAME_LENGTH]; /* Object name */ JobId_t JobId; /* JobId */ bool all; int limit; /* Used in search */ TAG_DBR() { zero(); }; void zero() { limit = JobId = 0; all = false; *Object = *Client = *Job = *Pool = *Volume = *Comment = *Name = '\0'; }; virtual ~TAG_DBR(){}; /* Scan a TAG_DBR object to help with SQL queries */ void gen_sql(JCR *jcr, BDB *db, const char **table, /* Table name (Client, Job, Media...) 
*/ const char **name, /* Name of the record (Name, VolumeName, JobId) */ const char **id, /* Id of the record (ClientId, JobId, MediaId) */ char *esc, /* Escaped name of the resource */ char *esc_name, /* Escaped name of the tag */ uint64_t *aclbits, /* ACL used */ uint64_t *aclbits_extra); /* Extra ACL used */ }; /* Used to search in MetaEmail and MetaAttachment table */ #define MAX_SEARCH_LENGTH 512 #define MAX_SEARCH_LENGTH_ESCAPED (MAX_SEARCH_LENGTH * 2 + 1) class META_DBR: public SMARTALLOC { public: int64_t MinSize; int64_t MaxSize; int HasAttachment; int isDraft; int isRead; int isInline; uint64_t offset; uint32_t limit; int order; int orderby; // 0: JobId, FileIndex 1: EmailTime bool all; bool alljobs; // query all jobs, else return only the last version char *JobIds; char Id[MAX_SEARCH_LENGTH]; char Tenant[MAX_SEARCH_LENGTH]; char Owner[MAX_SEARCH_LENGTH]; char ClientName[MAX_NAME_LENGTH]; char From[MAX_SEARCH_LENGTH]; char To[MAX_SEARCH_LENGTH]; char Cc[MAX_SEARCH_LENGTH]; char Tags[MAX_SEARCH_LENGTH]; char Subject[MAX_SEARCH_LENGTH]; char BodyPreview[MAX_SEARCH_LENGTH]; char Type[16]; // Email or Attachment char ConversationId[MAX_NAME_LENGTH]; char Category[MAX_SEARCH_LENGTH]; char MinTime[MAX_NAME_LENGTH]; char MaxTime[MAX_NAME_LENGTH]; char Plugin[MAX_NAME_LENGTH]; char Name[MAX_SEARCH_LENGTH]; char FolderName[MAX_SEARCH_LENGTH]; char ContentType[MAX_SEARCH_LENGTH]; char errmsg[MAX_NAME_LENGTH]; META_DBR(): MinSize(-1), MaxSize(-1), HasAttachment(-1), isDraft(-1), isRead(-1), isInline(-1), offset(0), limit(512), order(0), orderby(0), all(false),alljobs(false) { JobIds = NULL; *Id = *Tenant = *Owner = 0; *ClientName = *From = *To = *Cc = *Subject = *Tags = 0; *BodyPreview = *Type = *ConversationId = *Category = 0; *FolderName = *Name = *MinTime = *MaxTime = *Plugin = 0; *ContentType = 0; *errmsg = 0; }; ~META_DBR() {}; void get_important_keys(POOLMEM **dest); void get_all_keys(POOLMEM **dest); void create_db_filter(JCR *jcr, BDB *db, POOLMEM **dest); bool check(); /* check if valid */ }; class FILEEVENT_DBR: public SMARTALLOC { public: DBId_t FileIndex; // FileIndex of the File char *JobId; // JobId of the file. 
For list only: can specify list or range of jobs DBId_t SourceJobId; // Verify/Restore JobId char Type; // antivirus, malware, lost file int Severity; // level of severity (0 OK, 100 Important) char Description[MAX_NAME_LENGTH]; char Source[MAX_NAME_LENGTH]; FILEEVENT_DBR(): FileIndex(0), JobId(NULL), Type(0), Severity(0) { *Description = *Source = 0; }; ~FILEEVENT_DBR() {}; bool unpack(uint32_t stream, const char *data, int data_len) { SourceJobId = FileIndex = 0; if (scan_string(data, "%c %d %127s %127s 0", &Type, &Severity, Source, Description) == 4) { unbash_spaces(Source); unbash_spaces(Description); return true; } Dmsg1(50, "Got incorrect stream for FileEvent %s\n", data); return false; }; }; /* Call back context for getting a 32/64 bit value from the database */ class db_int64_ctx { public: int64_t value; /* value returned */ int count; /* number of values seen */ db_int64_ctx() : value(0), count(0) {}; ~db_int64_ctx() {}; private: db_int64_ctx(const db_int64_ctx&); /* prohibit pass by value */ db_int64_ctx &operator=(const db_int64_ctx&); /* prohibit class assignment */ }; /* Call back context for getting a list of comma separated strings from the * database */ class db_list_ctx { public: POOLMEM *list; /* list */ int count; /* number of values seen */ db_list_ctx() { list = get_pool_memory(PM_FNAME); reset(); } ~db_list_ctx() { free_pool_memory(list); list = NULL; } void reset() { *list = 0; count = 0;} void add(const db_list_ctx &str) { if (str.count > 0) { if (*list) { pm_strcat(list, ","); } pm_strcat(list, str.list); count += str.count; } } void add(const char *str) { if (count > 0) { pm_strcat(list, ","); } pm_strcat(list, str); count++; } private: db_list_ctx(const db_list_ctx&); /* prohibit pass by value */ db_list_ctx &operator=(const db_list_ctx&); /* prohibit class assignment */ }; /* sql_query flags */ #define QF_STORE_RESULT 0x01 /* sql_list.c */ enum e_list_type { HORZ_LIST, /* list */ VERT_LIST, /* llist */ ARG_LIST, /* key1=v1 key2=v2 key3=v3 */ JSON_LIST, FAILED_JOBS, INCOMPLETE_JOBS, LAST_JOBS }; #include "bdb.h" #include "protos.h" #include "jcr.h" #include "sql_cmds.h" /* Object used in db_list_xxx function */ class LIST_CTX { public: char line[256]; /* Used to print last dash line */ int32_t num_rows; e_list_type type; /* Vertical/Horizontal */ DB_LIST_HANDLER *send; /* send data back */ bool once; /* Used to print header one time */ void *ctx; /* send() user argument */ BDB *mdb; JCR *jcr; void empty() { once = false; line[0] = '\0'; } void send_dashes() { if (*line) { send(ctx, line); } } LIST_CTX(JCR *j, BDB *m, DB_LIST_HANDLER *h, void *c, e_list_type t) { line[0] = '\0'; once = false; num_rows = 0; type = t; send = h; ctx = c; jcr = j; mdb = m; } }; class META_JSON: public SMARTALLOC { public: META_JSON(){}; virtual ~META_JSON(){}; bool parse(JCR *jcr, BDB *db, DBId_t jid, int64_t fidx, const char *value, int len, POOLMEM **dest); }; /* Functions exported by sql.c for use within the cats directory. 
*/ void json_list_begin(void *vctx, const char *title); void json_list_end(void *vctx, int errcode, const char *errmsg); int list_result(void *vctx, int cols, char **row); int list_result(JCR *jcr, BDB *mdb, const char *title, DB_LIST_HANDLER *send, void *ctx, e_list_type type); int get_sql_record_max(JCR *jcr, BDB *mdb); void list_dashes(BDB *mdb, DB_LIST_HANDLER *send, void *ctx); void print_dashes(BDB *mdb); void print_result(BDB *mdb); int QueryDB(const char *file, int line, JCR *jcr, BDB *db, char *select_cmd); int InsertDB(const char *file, int line, JCR *jcr, BDB *db, char *select_cmd); int DeleteDB(const char *file, int line, JCR *jcr, BDB *db, char *delete_cmd); void split_path_and_file(JCR *jcr, BDB *mdb, const char *fname); /* Helper functions */ void parse_restore_object_string(char **obj_str, ROBJECT_DBR *r_r); #endif /* __CATS_H_ */ bacula-15.0.3/src/cats/create_mysql_database.in0000644000175000017500000000062214771010173021247 0ustar bsbuildbsbuild#!/bin/sh # # Copyright (C) 2000-2023 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # # shell script to create Bacula database(s) # bindir=@MYSQL_BINDIR@ db_name=@db_name@ if $bindir/mysql $* -f </dev/null 1>/dev/null <StartTime; (void)localtime_r(&stime, &tm); strftime(dt, sizeof(dt), "%Y-%m-%d %H:%M:%S", &tm); JobTDate = (btime_t)stime; if (!is_name_valid(jr->WriteDevice, NULL)) { *jr->WriteDevice = 0; // Not valid, not used } if (!is_name_valid(jr->LastReadDevice, NULL)) { *jr->LastReadDevice = 0; // Not valid, not used } if (jr->PriorJobId) { bstrncpy(PriorJobId, edit_int64(jr->PriorJobId, ed1), sizeof(PriorJobId)); } else { bstrncpy(PriorJobId, "0", sizeof(PriorJobId)); } bdb_lock(); Mmsg(cmd, "UPDATE Job SET JobStatus='%c',Level='%c',StartTime='%s'," "ClientId=%s,JobTDate=%s,PoolId=%s,FileSetId=%s,RealStartTime='%s'," "isVirtualFull=%d,LastReadStorageId=%d,LastReadDevice='%s'," "WriteStorageId=%d,WriteDevice='%s',StatusInfo='%s',Encrypted=%d,PriorJobId=%s,PriorJob='%s' " "WHERE JobId=%s", (char)(jcr->JobStatus), (char)(jr->JobLevel), dt, edit_int64(jr->ClientId, ed1), edit_uint64(JobTDate, ed2), edit_int64(jr->PoolId, ed3), edit_int64(jr->FileSetId, ed4), dt, jr->isVirtualFull, jr->LastReadStorageId, jr->LastReadDevice, jr->WriteStorageId, jr->WriteDevice, jr->StatusInfo, jr->Encrypted, PriorJobId, jr->PriorJob, edit_int64(jr->JobId, ed5)); stat = UpdateDB(jcr, cmd, false); changes = 0; bdb_unlock(); return stat; } /* * Update Long term statistics with all jobs that were run before * age seconds */ int BDB::bdb_update_stats(JCR *jcr, utime_t age) { char ed1[30]; int rows; utime_t now = (utime_t)time(NULL); edit_uint64(now - age, ed1); bdb_lock(); Mmsg(cmd, fill_jobhisto, ed1); QueryDB(jcr, cmd); /* TODO: get a message ? 
*/ rows = sql_affected_rows(); bdb_unlock(); return rows; } /* * Update the Job record at end of Job * * Returns: 0 on failure * 1 on success */ int BDB::bdb_update_job_end_record(JCR *jcr, JOB_DBR *jr) { char dt[MAX_TIME_LENGTH]; char rdt[MAX_TIME_LENGTH]; char esc1[MAX_ESCAPE_NAME_LENGTH]; char esc2[MAX_ESCAPE_UNAME_LENGTH]; char esc3[MAX_ESCAPE_UNAME_LENGTH]; time_t ttime; struct tm tm; int stat; char ed1[30], ed2[30], ed3[50], ed4[50], ed5[50], ed6[50]; btime_t JobTDate; ttime = jr->EndTime; (void)localtime_r(&ttime, &tm); strftime(dt, sizeof(dt), "%Y-%m-%d %H:%M:%S", &tm); if (jr->RealEndTime == 0 || jr->RealEndTime < jr->EndTime) { jr->RealEndTime = jr->EndTime; } ttime = jr->RealEndTime; (void)localtime_r(&ttime, &tm); strftime(rdt, sizeof(rdt), "%Y-%m-%d %H:%M:%S", &tm); JobTDate = ttime; bdb_lock(); bdb_escape_string(jcr, esc1, jr->StatusInfo, strlen(jr->StatusInfo)); bdb_escape_string(jcr, esc2, jr->LastReadDevice, strlen(jr->LastReadDevice)); bdb_escape_string(jcr, esc3, jr->WriteDevice, strlen(jr->WriteDevice)); Mmsg(cmd, "UPDATE Job SET JobStatus='%c',EndTime='%s'," "ClientId=%u,JobBytes=%s,ReadBytes=%s,JobFiles=%u,JobErrors=%u,VolSessionId=%u," "VolSessionTime=%u,PoolId=%u,FileSetId=%u,JobTDate=%s," "RealEndTime='%s',HasBase=%u,PurgedFiles=%u," "Rate=%.1f,CompressRatio=%.1f,WriteStorageId=%s,LastReadStorageId=%s,StatusInfo='%s'," "LastReadDevice='%s',WriteDevice='%s',Encrypted=%d WHERE JobId=%s", (char)(jr->JobStatus), dt, jr->ClientId, edit_uint64(jr->JobBytes, ed1), edit_uint64(jr->ReadBytes, ed4), jr->JobFiles, jr->JobErrors, jr->VolSessionId, jr->VolSessionTime, jr->PoolId, jr->FileSetId, edit_uint64(JobTDate, ed2), rdt, jr->HasBase, jr->PurgedFiles, jr->Rate, jr->CompressRatio, edit_uint64(jr->WriteStorageId, ed5), edit_uint64(jr->LastReadStorageId, ed6), esc1, esc2, esc3, jr->Encrypted, edit_int64(jr->JobId, ed3)); stat = UpdateDB(jcr, cmd, false); bdb_unlock(); return stat; } /* * Update Client record * Returns: 0 on failure * 1 on success */ int BDB::bdb_update_client_record(JCR *jcr, CLIENT_DBR *cr) { int stat; char ed1[50], ed2[50]; char esc_name[MAX_ESCAPE_NAME_LENGTH]; char esc_uname[MAX_ESCAPE_UNAME_LENGTH]; char esc_plugin[MAX_ESCAPE_PLUGIN_LENGTH]; CLIENT_DBR tcr; bdb_lock(); memcpy(&tcr, cr, sizeof(tcr)); if (!bdb_create_client_record(jcr, &tcr)) { bdb_unlock(); return 0; } bdb_escape_string(jcr, esc_name, cr->Name, strlen(cr->Name)); bdb_escape_string(jcr, esc_uname, cr->Uname, strlen(cr->Uname)); bdb_escape_string(jcr, esc_plugin, cr->Plugins, strlen(cr->Plugins)); Mmsg(cmd, "UPDATE Client SET AutoPrune=%d,FileRetention=%s,JobRetention=%s," "Uname='%s',Plugins='%s' WHERE Name='%s'", cr->AutoPrune, edit_uint64(cr->FileRetention, ed1), edit_uint64(cr->JobRetention, ed2), esc_uname, esc_plugin, esc_name); stat = UpdateDB(jcr, cmd, false); bdb_unlock(); return stat; } /* * Update Counters record * Returns: 0 on failure * 1 on success */ int BDB::bdb_update_counter_record(JCR *jcr, COUNTER_DBR *cr) { char esc[MAX_ESCAPE_NAME_LENGTH]; bdb_lock(); bdb_escape_string(jcr, esc, cr->Counter, strlen(cr->Counter)); Mmsg(cmd, update_counter_values[bdb_get_type_index()], cr->MinValue, cr->MaxValue, cr->CurrentValue, cr->WrapCounter, esc); int stat = UpdateDB(jcr, cmd, false); bdb_unlock(); return stat; } int BDB::bdb_update_pool_record(JCR *jcr, POOL_DBR *pr) { int stat; char ed1[50], ed2[50], ed3[50], ed4[50], ed5[50], ed6[50], ed7[50], ed8[50]; char esc[MAX_ESCAPE_NAME_LENGTH]; bdb_lock(); bdb_escape_string(jcr, esc, pr->LabelFormat, strlen(pr->LabelFormat)); Mmsg(cmd, 
"SELECT count(*) from Media WHERE PoolId=%s", edit_int64(pr->PoolId, ed4)); pr->NumVols = get_sql_record_max(jcr, this); Dmsg1(dbglevel2, "NumVols=%d\n", pr->NumVols); Mmsg(cmd, "UPDATE Pool SET NumVols=%u,MaxVols=%u,UseOnce=%d,UseCatalog=%d," "AcceptAnyVolume=%d,VolRetention='%s',VolUseDuration='%s'," "MaxVolJobs=%u,MaxVolFiles=%u,MaxVolBytes=%s,Recycle=%d," "AutoPrune=%d,LabelType=%d,LabelFormat='%s',RecyclePoolId=%s," "ScratchPoolId=%s,ActionOnPurge=%d,CacheRetention='%s',MaxPoolBytes='%s' WHERE PoolId=%s", pr->NumVols, pr->MaxVols, pr->UseOnce, pr->UseCatalog, pr->AcceptAnyVolume, edit_uint64(pr->VolRetention, ed1), edit_uint64(pr->VolUseDuration, ed2), pr->MaxVolJobs, pr->MaxVolFiles, edit_uint64(pr->MaxVolBytes, ed3), pr->Recycle, pr->AutoPrune, pr->LabelType, esc, edit_int64(pr->RecyclePoolId,ed5), edit_int64(pr->ScratchPoolId,ed6), pr->ActionOnPurge, edit_uint64(pr->CacheRetention, ed7), edit_uint64(pr->MaxPoolBytes, ed8), ed4); stat = UpdateDB(jcr, cmd, false); bdb_unlock(); return stat; } bool BDB::bdb_update_storage_record(JCR *jcr, STORAGE_DBR *sr) { int stat; char ed1[50]; bdb_lock(); Mmsg(cmd, "UPDATE Storage SET AutoChanger=%d WHERE StorageId=%s", sr->AutoChanger, edit_int64(sr->StorageId, ed1)); stat = UpdateDB(jcr, cmd, false); bdb_unlock(); return stat; } /* * Update the Media Record at end of Session * * Returns: 0 on failure * numrows on success */ int BDB::bdb_update_media_record(JCR *jcr, MEDIA_DBR *mr) { char dt[MAX_TIME_LENGTH]; time_t ttime; struct tm tm; int stat; char ed1[50], ed2[50], ed3[50], ed4[50]; char ed5[50], ed6[50], ed7[50], ed8[50]; char ed9[50], ed10[50], ed11[50], ed12[50]; char ed13[50], ed14[50], ed15[50], ed16[50]; char esc_name[MAX_ESCAPE_NAME_LENGTH]; char esc_status[MAX_ESCAPE_NAME_LENGTH]; Dmsg1(dbglevel1, "update_media: FirstWritten=%d\n", mr->FirstWritten); bdb_lock(); bdb_escape_string(jcr, esc_name, mr->VolumeName, strlen(mr->VolumeName)); bdb_escape_string(jcr, esc_status, mr->VolStatus, strlen(mr->VolStatus)); if (mr->set_first_written) { Dmsg1(dbglevel2, "Set FirstWritten Vol=%s\n", mr->VolumeName); ttime = mr->FirstWritten; (void)localtime_r(&ttime, &tm); strftime(dt, sizeof(dt), "%Y-%m-%d %H:%M:%S", &tm); Mmsg(cmd, "UPDATE Media SET FirstWritten='%s'" " WHERE VolumeName='%s'", dt, esc_name); stat = UpdateDB(jcr, cmd, false); Dmsg1(dbglevel2, "Firstwritten=%d\n", mr->FirstWritten); } /* Label just done? 
*/ if (mr->set_label_date) { ttime = mr->LabelDate; if (ttime == 0) { ttime = time(NULL); } (void)localtime_r(&ttime, &tm); strftime(dt, sizeof(dt), "%Y-%m-%d %H:%M:%S", &tm); Mmsg(cmd, "UPDATE Media SET LabelDate='%s' " "WHERE VolumeName='%s'", dt, esc_name); UpdateDB(jcr, cmd, false); } if (mr->LastWritten != 0) { ttime = mr->LastWritten; (void)localtime_r(&ttime, &tm); strftime(dt, sizeof(dt), "%Y-%m-%d %H:%M:%S", &tm); Mmsg(cmd, "UPDATE Media Set LastWritten='%s' " "WHERE VolumeName='%s'", dt, esc_name); UpdateDB(jcr, cmd, false); } /* sanity checks for #1066 */ if (mr->VolReadTime < 0) { mr->VolReadTime = 0; } if (mr->VolWriteTime < 0) { mr->VolWriteTime = 0; } Mmsg(cmd, "UPDATE Media SET VolJobs=%u," "VolFiles=%u,VolBlocks=%u,VolBytes=%s,VolABytes=%s," "VolHoleBytes=%s,VolHoles=%u,VolMounts=%u,VolErrors=%u," "VolWrites=%s,MaxVolBytes=%s,VolStatus='%s'," "Slot=%d,InChanger=%d,VolReadTime=%s,VolWriteTime=%s,VolType=%d," "VolParts=%d,VolCloudParts=%d,LastPartBytes=%s," "LabelType=%d,StorageId=%s,PoolId=%s,VolRetention=%s,VolUseDuration=%s," "MaxVolJobs=%d,MaxVolFiles=%d,Enabled=%d,LocationId=%s," "ScratchPoolId=%s,RecyclePoolId=%s,RecycleCount=%d,Recycle=%d," "ActionOnPurge=%d,CacheRetention=%s,EndBlock=%u,Protected=%d," "UseProtect=%d,VolEncrypted=%d" " WHERE VolumeName='%s'", mr->VolJobs, mr->VolFiles, mr->VolBlocks, edit_uint64(mr->VolBytes, ed1), edit_uint64(mr->VolABytes, ed2), edit_uint64(mr->VolHoleBytes, ed3), mr->VolHoles, mr->VolMounts, mr->VolErrors, edit_uint64(mr->VolWrites, ed4), edit_uint64(mr->MaxVolBytes, ed5), esc_status, mr->Slot, mr->InChanger, edit_int64(mr->VolReadTime, ed6), edit_int64(mr->VolWriteTime, ed7), mr->VolType, mr->VolParts, mr->VolCloudParts, edit_uint64(mr->LastPartBytes, ed8), mr->LabelType, edit_int64(mr->StorageId, ed9), edit_int64(mr->PoolId, ed10), edit_uint64(mr->VolRetention, ed11), edit_uint64(mr->VolUseDuration, ed12), mr->MaxVolJobs, mr->MaxVolFiles, mr->Enabled, edit_uint64(mr->LocationId, ed13), edit_uint64(mr->ScratchPoolId, ed14), edit_uint64(mr->RecyclePoolId, ed15), mr->RecycleCount,mr->Recycle, mr->ActionOnPurge, edit_uint64(mr->CacheRetention, ed16), mr->EndBlock, mr->Protected, mr->UseProtect, mr->VolEncrypted, esc_name); Dmsg1(dbglevel1, "%s\n", cmd); stat = UpdateDB(jcr, cmd, false); /* Make sure InChanger is 0 for any record having the same Slot */ db_make_inchanger_unique(jcr, this, mr); bdb_unlock(); return stat; } /* * Update the Media Record Default values from Pool * * Returns: 0 on failure * numrows on success */ int BDB::bdb_update_media_defaults(JCR *jcr, MEDIA_DBR *mr) { int stat; char ed1[50], ed2[50], ed3[50], ed4[50], ed5[50], ed6[50]; char esc[MAX_ESCAPE_NAME_LENGTH]; bool can_be_empty; bdb_lock(); if (mr->VolumeName[0]) { bdb_escape_string(jcr, esc, mr->VolumeName, strlen(mr->VolumeName)); Mmsg(cmd, "UPDATE Media SET " "ActionOnPurge=%d, Recycle=%d,VolRetention=%s,VolUseDuration=%s," "MaxVolJobs=%u,MaxVolFiles=%u,MaxVolBytes=%s,RecyclePoolId=%s,CacheRetention=%s" " WHERE VolumeName='%s'", mr->ActionOnPurge, mr->Recycle,edit_uint64(mr->VolRetention, ed1), edit_uint64(mr->VolUseDuration, ed2), mr->MaxVolJobs, mr->MaxVolFiles, edit_uint64(mr->MaxVolBytes, ed3), edit_uint64(mr->RecyclePoolId, ed4), edit_uint64(mr->CacheRetention, ed5), esc); can_be_empty = false; } else { Mmsg(cmd, "UPDATE Media SET " "ActionOnPurge=%d, Recycle=%d,VolRetention=%s,VolUseDuration=%s," "MaxVolJobs=%u,MaxVolFiles=%u,MaxVolBytes=%s,RecyclePoolId=%s,CacheRetention=%s" " WHERE PoolId=%s", mr->ActionOnPurge, 
mr->Recycle,edit_uint64(mr->VolRetention, ed1), edit_uint64(mr->VolUseDuration, ed2), mr->MaxVolJobs, mr->MaxVolFiles, edit_uint64(mr->MaxVolBytes, ed3), edit_int64(mr->RecyclePoolId, ed4), edit_uint64(mr->CacheRetention, ed5), edit_int64(mr->PoolId, ed6)); can_be_empty = true; } Dmsg1(dbglevel1, "%s\n", cmd); stat = UpdateDB(jcr, cmd, can_be_empty); bdb_unlock(); return stat; } /* * If we have a non-zero InChanger, ensure that no other Media * record has InChanger set on the same Slot. * * This routine assumes the database is already locked. */ void BDB::bdb_make_inchanger_unique(JCR *jcr, MEDIA_DBR *mr) { char ed1[50]; char esc[MAX_ESCAPE_NAME_LENGTH]; if (mr->InChanger != 0 && mr->Slot != 0 && mr->StorageId != 0) { if (!mr->sid_group) { mr->sid_group = edit_int64(mr->StorageId, mr->sid); } if (mr->MediaId != 0) { Mmsg(cmd, "UPDATE Media SET InChanger=0, Slot=0 WHERE " "Slot=%d AND StorageId IN (%s) AND MediaId!=%s", mr->Slot, mr->sid_group, edit_int64(mr->MediaId, ed1)); } else if (*mr->VolumeName) { bdb_escape_string(jcr, esc,mr->VolumeName,strlen(mr->VolumeName)); Mmsg(cmd, "UPDATE Media SET InChanger=0, Slot=0 WHERE " "Slot=%d AND StorageId IN (%s) AND VolumeName!='%s'", mr->Slot, mr->sid_group, esc); } else { /* used by ua_label to reset all volume with this slot */ Mmsg(cmd, "UPDATE Media SET InChanger=0, Slot=0 WHERE " "Slot=%d AND StorageId IN (%s)", mr->Slot, mr->sid_group, mr->VolumeName); } Dmsg1(dbglevel1, "%s\n", cmd); UpdateDB(jcr, cmd, true); } } /* Update only Retention */ bool BDB::bdb_update_snapshot_record(JCR *jcr, SNAPSHOT_DBR *sr) { int stat, len; char ed1[50], ed2[50]; len = strlen(sr->Comment); bdb_lock(); esc_name = check_pool_memory_size(esc_name, len*2+1); bdb_escape_string(jcr, esc_name, sr->Comment, len); Mmsg(cmd, "UPDATE Snapshot SET Retention=%s, Comment='%s' WHERE SnapshotId=%s", edit_int64(sr->Retention, ed2), sr->Comment, edit_int64(sr->SnapshotId, ed1)); stat = UpdateDB(jcr, cmd, false); bdb_unlock(); return stat; } #endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL */ bacula-15.0.3/src/cats/sql_list.c0000644000175000017500000015735214771010173016416 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * Bacula Catalog Database List records interface routines * * Written by Kern Sibbald, March 2000 * */ #include "bacula.h" #if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL #include "cats.h" /* ----------------------------------------------------------------------- * * Generic Routines (or almost generic) * * ----------------------------------------------------------------------- */ /* We search resources for a specific tag, we just return the resource name * itself and the result can be stored in a list for example */ bool BDB::bdb_search_tag_records(JCR *jcr, TAG_DBR *tag, DB_RESULT_HANDLER *result_handler, void *ctx) { POOL_MEM tmp, where; char esc[MAX_ESCAPE_NAME_LENGTH]; char esc_name[MAX_ESCAPE_NAME_LENGTH]; uint64_t aclbits, aclbits_extra; const char *name; const char *id; const char *table; tag->gen_sql(jcr, this, &table, &name, &id, esc, esc_name, &aclbits, &aclbits_extra); bdb_lock(); pm_strcpy(where, get_acls(aclbits, true)); /* get_acls() uses a specific object buffer */ const char *whereand = get_acls(aclbits, false); const char *join = get_acl_join_filter(aclbits_extra); if (table) { if (tag->all) { if (*esc_name) { /* Display all resource for a tag */ Mmsg(tmp, "SELECT %s AS %s FROM Tag%s AS T JOIN %s USING (%s) %s WHERE T.Tag = '%s' %s", name, table, table, table, id, join, esc_name, whereand); } } if (tag->limit > 0) { char ed1[50]; pm_strcat(cmd, " LIMIT "); pm_strcat(cmd, edit_uint64(tag->limit, ed1)); } if (!bdb_sql_query(tmp.c_str(), result_handler, ctx)) { bdb_unlock(); return false; } } bdb_unlock(); return true; } /* * Search Job record(s) that match JOB_DBR, the result can be stored in a alist for example * */ bool BDB::bdb_search_job_records(JCR *jcr, JOB_DBR *jr, DB_RESULT_HANDLER *handler, void *ctx) { char esc[MAX_ESCAPE_NAME_LENGTH]; const char *join = ""; const char *where_tmp = ""; if (jr->Job[0] == 0) { return false; } bdb_lock(); bdb_escape_string(jcr, esc, jr->Job, strlen(jr->Job)); /* The ACL can limit on two extra tables, Client and FileSet */ where_tmp = get_acls(DB_ACL_BIT(DB_ACL_RBCLIENT) | DB_ACL_BIT(DB_ACL_FILESET), 0); if (*where_tmp) { join = get_acl_join_filter(DB_ACL_BIT(DB_ACL_RBCLIENT) | DB_ACL_BIT(DB_ACL_FILESET)); } Mmsg(cmd, "SELECT Job " "FROM Job " " %s WHERE Job.Job %s '%%%s%%' %s", join, sql_like[bdb_get_type_index()], esc, where_tmp); if (jr->limit > 0) { char ed1[50]; pm_strcat(cmd, " LIMIT "); pm_strcat(cmd, edit_uint64(jr->limit, ed1)); } if (!bdb_sql_query(cmd, handler, ctx)) { bdb_unlock(); return false; } bdb_unlock(); return true; } /* Search for a client, return only the name */ bool BDB::bdb_search_client_records(JCR *jcr, CLIENT_DBR *rec, DB_RESULT_HANDLER *callback, void *ctx) { char esc[MAX_ESCAPE_NAME_LENGTH]; const char *where_tmp = ""; bdb_lock(); bdb_escape_string(jcr, esc, rec->Name, strlen(rec->Name)); /* We can apply some ACLs for the Client table */ where_tmp = get_acls(DB_ACL_BIT(DB_ACL_BCLIENT) | DB_ACL_BIT(DB_ACL_RCLIENT), 0); Mmsg(cmd, "SELECT Name " "FROM Client WHERE Name %s '%%%s%%' %s", sql_like[bdb_get_type_index()], esc, where_tmp); if (rec->limit > 0) { char ed1[50]; pm_strcat(cmd, " LIMIT "); pm_strcat(cmd, edit_uint64(rec->limit, ed1)); } if (!bdb_sql_query(cmd, callback, ctx)) { bdb_unlock(); return false; } bdb_unlock(); return true; } /* * Submit general SQL query */ int BDB::bdb_list_sql_query(JCR *jcr, const char *title, const char *query, DB_LIST_HANDLER *sendit, void *ctx, int verbose, e_list_type type) { bdb_lock(); if (!sql_query(query, QF_STORE_RESULT)) { Mmsg(errmsg, _("Query failed: 
%s\n"), sql_strerror()); if (verbose) { sendit(ctx, errmsg); } bdb_unlock(); return 0; } list_result(jcr,this, title, sendit, ctx, type); sql_free_result(); bdb_unlock(); return 1; } void BDB::bdb_list_pool_records(JCR *jcr, POOL_DBR *pdbr, DB_LIST_HANDLER *sendit, void *ctx, e_list_type type) { char esc[MAX_ESCAPE_NAME_LENGTH]; bdb_lock(); bdb_escape_string(jcr, esc, pdbr->Name, strlen(pdbr->Name)); if (type == VERT_LIST || type == JSON_LIST) { if (pdbr->Name[0] != 0) { Mmsg(cmd, "SELECT PoolId,Name,NumVols,MaxVols,UseOnce,UseCatalog," "AcceptAnyVolume,VolRetention,VolUseDuration,MaxVolJobs,MaxVolBytes," "AutoPrune,Recycle,PoolType,LabelFormat,Enabled,ScratchPoolId," "RecyclePoolId,LabelType,ActionOnPurge,CacheRetention,MaxPoolBytes, " "%s as PoolBytes " " FROM Pool WHERE Name='%s' %s", poolbytes[bdb_get_type_index()], esc, get_acl(DB_ACL_POOL, false)); } else { Mmsg(cmd, "SELECT PoolId,Name,NumVols,MaxVols,UseOnce,UseCatalog," "AcceptAnyVolume,VolRetention,VolUseDuration,MaxVolJobs,MaxVolBytes," "AutoPrune,Recycle,PoolType,LabelFormat,Enabled,ScratchPoolId," "RecyclePoolId,LabelType,ActionOnPurge,CacheRetention,MaxPoolBytes, " "%s AS PoolBytes " " FROM Pool %s ORDER BY PoolId", poolbytes[bdb_get_type_index()], get_acl(DB_ACL_POOL, true)); } } else { if (pdbr->Name[0] != 0) { Mmsg(cmd, "SELECT PoolId,Name,NumVols,MaxVols,PoolType,LabelFormat " "FROM Pool WHERE Name='%s' %s", esc, get_acl(DB_ACL_POOL, false)); } else { Mmsg(cmd, "SELECT PoolId,Name,NumVols,MaxVols,PoolType,LabelFormat " "FROM Pool %s ORDER BY PoolId", get_acl(DB_ACL_POOL, true)); } } if (!QueryDB(jcr, cmd)) { bdb_unlock(); return; } list_result(jcr, this, "pool", sendit, ctx, type); sql_free_result(); bdb_unlock(); } void BDB::bdb_list_client_records(JCR *jcr, DB_LIST_HANDLER *sendit, void *ctx, e_list_type type) { bdb_lock(); if (type == VERT_LIST || type == JSON_LIST) { Mmsg(cmd, "SELECT ClientId,Name,Uname,Plugins,AutoPrune,FileRetention," "JobRetention " "FROM Client %s ORDER BY ClientId", get_acls(DB_ACL_BIT(DB_ACL_RBCLIENT), true)); } else { Mmsg(cmd, "SELECT ClientId,Name,FileRetention,JobRetention " "FROM Client %s ORDER BY ClientId", get_acls(DB_ACL_BIT(DB_ACL_RBCLIENT), true)); } if (!QueryDB(jcr, cmd)) { bdb_unlock(); return; } list_result(jcr, this, "client", sendit, ctx, type); sql_free_result(); bdb_unlock(); } /* * List plugin objects types */ void BDB::bdb_list_plugin_object_types(JCR *jcr, DB_LIST_HANDLER *sendit, void *ctx, e_list_type type) { Mmsg(cmd, "SELECT DISTINCT ObjectType FROM Object ORDER BY ObjectType ASC"); bdb_lock(); if (!QueryDB(jcr, cmd)) { Jmsg(jcr, M_ERROR, 0, _("Query %s failed!\n"), cmd); bdb_unlock(); return; } list_result(jcr, this, "objecttype", sendit, ctx, type); sql_free_result(); bdb_unlock(); } /* * List plugin objects (search is based on object provided) */ void BDB::bdb_list_plugin_objects(JCR *jcr, OBJECT_DBR *obj_r, DB_LIST_HANDLER *sendit, void *ctx, e_list_type type) { POOL_MEM esc(PM_MESSAGE), tmp(PM_MESSAGE), where(PM_MESSAGE), join(PM_MESSAGE); bdb_lock(); //TODO add ACL part obj_r->create_db_filter(jcr, where.handle()); if (obj_r->ClientName[0] != 0) { bdb_escape_string(jcr, esc.c_str(), obj_r->ClientName, strlen(obj_r->ClientName)); Mmsg(tmp, " Client.Name='%s'", esc.c_str()); append_filter(where.handle(), tmp.c_str()); Mmsg(join, " INNER JOIN Job On Object.JobId=Job.JobId " " INNER JOIN Client ON Job.ClientId=Client.ClientId "); } Mmsg(tmp, " ORDER BY ObjectId %s ", obj_r->order ? 
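/* a non-zero order lists the most recent ObjectIds first */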
"DESC" : "ASC"); pm_strcat(where, tmp.c_str()); if (obj_r->limit) { Mmsg(tmp, " LIMIT %d ", obj_r->limit); pm_strcat(where, tmp.c_str()); } switch (type) { case JSON_LIST: case VERT_LIST: Mmsg(cmd, "SELECT Object.ObjectId, Object.JobId, Object.Path, Object.Filename, Object.PluginName, Object.ObjectCategory, " "Object.ObjectType, Object.ObjectName, Object.ObjectSource, " "Object.ObjectUUID, Object.ObjectSize, Object.ObjectStatus, Object.ObjectCount " "FROM Object %s %s", join.c_str(), where.c_str()); break; case HORZ_LIST: Mmsg(cmd, "SELECT Object.ObjectId, Object.JobId, Object.ObjectCategory, " "Object.ObjectType, Object.ObjectName, Object.ObjectStatus " "FROM Object %s %s", join.c_str(), where.c_str()); break; default: break; } if (!QueryDB(jcr, cmd)) { Jmsg(jcr, M_ERROR, 0, _("Query %s failed!\n"), cmd); bdb_unlock(); return; } list_result(jcr, this, "object", sendit, ctx, type); sql_free_result(); bdb_unlock(); } /* * List plugin objects from list of ids provided */ void BDB::bdb_list_plugin_objects_ids(JCR *jcr, char* id_list, DB_LIST_HANDLER *sendit, void *ctx, e_list_type type) { POOL_MEM msg; switch (type) { case JSON_LIST: case VERT_LIST: Mmsg(cmd, "SELECT Object.ObjectId, Object.JobId, Object.Path, Object.Filename, Object.PluginName, Object.ObjectCategory, " "Object.ObjectType, Object.ObjectName, Object.ObjectSource, Object.ObjectUUID, " "Object.ObjectSize, Object.ObjectStatus, Object.ObjectCount " "FROM Object WHERE ObjectId IN (%s) ORDER BY ObjectId ASC", id_list); break; case HORZ_LIST: Mmsg(cmd, "SELECT Object.ObjectId, Object.JobId, Object.ObjectCategory, " "Object.ObjectType, Object.ObjectName, Object.ObjectUUID, Object.ObjectStatus " "FROM Object WHERE ObjectId IN (%s) ORDER BY ObjectId ASC", id_list); break; default: break; } bdb_lock(); if (!QueryDB(jcr, cmd)) { Jmsg(jcr, M_ERROR, 0, _("Query %s failed!\n"), cmd); bdb_unlock(); return; } list_result(jcr, this, "object", sendit, ctx, type); sql_free_result(); bdb_unlock(); } /* * List restore objects * * JobId | JobIds: List RestoreObjects for specific Job(s) * It is possible to specify the ObjectType using FileType field. 
*/ void BDB::bdb_list_restore_objects(JCR *jcr, ROBJECT_DBR *rr, DB_LIST_HANDLER *sendit, void *ctx, e_list_type type) { POOL_MEM filter, jfilter; /* The ACL checking is done on the bconsole command */ if (rr->JobIds && is_a_number_list(rr->JobIds)) { Mmsg(jfilter, " %s ", rr->JobIds); } else if (rr->JobId) { Mmsg(jfilter, " %ld ", rr->JobId); } else if (rr->clientid > 0) { Mmsg(jfilter, "SELECT A.JobId FROM Job AS A JOIN RestoreObject AS B USING (JobId) WHERE A.ClientId = %ld ORDER By A.JobTDate DESC LIMIT 1", rr->clientid); } else { return; } if (rr->clientid == 0 && rr->FileType > 0) { Mmsg(filter, "AND ObjectType = %d ", rr->FileType); } bdb_lock(); if (type == VERT_LIST || type == JSON_LIST) { Mmsg(cmd, "SELECT JobId, RestoreObjectId, ObjectName, " "PluginName, ObjectType " "FROM RestoreObject JOIN Job USING (JobId) WHERE JobId IN (%s) %s " "ORDER BY JobTDate ASC, RestoreObjectId ASC", jfilter.c_str(), filter.c_str()); } else { Mmsg(cmd, "SELECT JobId, RestoreObjectId, ObjectName, " "PluginName, ObjectType, ObjectLength " "FROM RestoreObject JOIN Job USING (JobId) WHERE JobId IN (%s) %s " "ORDER BY JobTDate ASC, RestoreObjectId ASC", jfilter.c_str(), filter.c_str()); } if (!QueryDB(jcr, cmd)) { bdb_unlock(); return; } list_result(jcr, this, "restoreobject", sendit, ctx, type); sql_free_result(); bdb_unlock(); } /* * If VolumeName is non-zero, list the record for that Volume */ bool BDB::bdb_search_media_records(JCR *jcr, MEDIA_DBR *mdbr, DB_RESULT_HANDLER *handler, void *ctx) { char esc[MAX_ESCAPE_NAME_LENGTH]; if (mdbr->VolumeName[0] == 0) { return false; } bdb_lock(); bdb_escape_string(jcr, esc, mdbr->VolumeName, strlen(mdbr->VolumeName)); const char *where = get_acl(DB_ACL_POOL, false); const char *join = *where ? get_acl_join_filter(DB_ACL_BIT(DB_ACL_POOL)) : ""; if (mdbr->limit == 0) { mdbr->limit = 50; } Mmsg(cmd, "SELECT VolumeName FROM Media %s WHERE Media.VolumeName %s '%%%s%%' %s LIMIT %u", join, sql_like[bdb_get_type_index()], esc, where, mdbr->limit); if (!bdb_sql_query(cmd, handler, ctx)) { bdb_unlock(); return false; } sql_free_result(); bdb_unlock(); return true; } /* * If VolumeName is non-zero, list the record for that Volume * otherwise, list the Volumes in the Pool specified by PoolId */ void BDB::bdb_list_media_records(JCR *jcr, MEDIA_DBR *mdbr, DB_LIST_HANDLER *sendit, void *ctx, e_list_type type) { char ed1[50]; char esc[MAX_ESCAPE_NAME_LENGTH]; const char *expiresin = expires_in[bdb_get_type_index()]; bdb_lock(); bdb_escape_string(jcr, esc, mdbr->VolumeName, strlen(mdbr->VolumeName)); const char *where = get_acl(DB_ACL_POOL, false); const char *join = *where ? 
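/* pull in the Pool ACL join clause only when an ACL WHERE filter is actually in effect */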
get_acl_join_filter(DB_ACL_BIT(DB_ACL_POOL)) : ""; if (type == VERT_LIST || type == JSON_LIST) { if (mdbr->VolumeName[0] != 0) { Mmsg(cmd, "SELECT MediaId,VolumeName,Slot,PoolId," "MediaType,MediaTypeId,FirstWritten,LastWritten,LabelDate,VolJobs," "VolFiles,VolBlocks,VolParts,VolCloudParts,Media.CacheRetention,VolMounts,VolBytes," "VolABytes,VolAPadding," "VolHoleBytes,VolHoles,LastPartBytes,VolErrors,VolWrites," "VolCapacityBytes,VolStatus,Media.Enabled,Media.Recycle,Media.VolRetention," "Media.VolUseDuration,Media.MaxVolJobs,Media.MaxVolFiles,Media.MaxVolBytes,InChanger," "EndFile,EndBlock,VolType,Media.LabelType,StorageId,DeviceId," "MediaAddressing,VolReadTime,VolWriteTime," "LocationId,RecycleCount,InitialWrite,Media.ScratchPoolId,Media.RecyclePoolId, " "Media.ActionOnPurge,%s AS ExpiresIn, Comment, Protected, UseProtect, VolEncrypted" " FROM Media %s WHERE Media.VolumeName='%s' %s", expiresin, join, esc, where ); } else { Mmsg(cmd, "SELECT MediaId,VolumeName,Slot,PoolId," "MediaType,MediaTypeId,FirstWritten,LastWritten,LabelDate,VolJobs," "VolFiles,VolBlocks,VolParts,VolCloudParts,Media.CacheRetention,VolMounts,VolBytes," "VolABytes,VolAPadding," "VolHoleBytes,VolHoles,LastPartBytes,VolErrors,VolWrites," "VolCapacityBytes,VolStatus,Media.Enabled,Media.Recycle,Media.VolRetention," "Media.VolUseDuration,Media.MaxVolJobs,Media.MaxVolFiles,Media.MaxVolBytes,InChanger," "EndFile,EndBlock,VolType,Media.LabelType,StorageId,DeviceId," "MediaAddressing,VolReadTime,VolWriteTime," "LocationId,RecycleCount,InitialWrite,Media.ScratchPoolId,Media.RecyclePoolId, " "Media.ActionOnPurge,%s AS ExpiresIn, Comment, Protected, UseProtect, VolEncrypted" " FROM Media %s WHERE Media.PoolId=%s %s ORDER BY MediaId", expiresin, join, edit_int64(mdbr->PoolId, ed1), where ); } } else { if (mdbr->VolumeName[0] != 0) { Mmsg(cmd, "SELECT MediaId,VolumeName,VolStatus,Media.Enabled," "VolBytes,VolFiles,Media.VolRetention,Media.Recycle,Slot,InChanger,MediaType,VolType," "VolParts,%s AS ExpiresIn " "FROM Media %s WHERE Media.VolumeName='%s' %s", expiresin, join, esc, where ); } else { Mmsg(cmd, "SELECT MediaId,VolumeName,VolStatus,Media.Enabled," "VolBytes,VolFiles,Media.VolRetention,Media.Recycle,Slot,InChanger,MediaType,VolType," "VolParts,LastWritten,%s AS ExpiresIn " "FROM Media %s WHERE Media.PoolId=%s %s ORDER BY MediaId", expiresin, join, edit_int64(mdbr->PoolId, ed1), where ); } } Dmsg1(DT_SQL|50, "q=%s\n", cmd); if (!QueryDB(jcr, cmd)) { bdb_unlock(); return; } list_result(jcr, this, "media", sendit, ctx, type); sql_free_result(); bdb_unlock(); } void BDB::bdb_list_jobmedia_records(JCR *jcr, uint32_t JobId, char *volume, DB_LIST_HANDLER *sendit, void *ctx, e_list_type type) { POOL_MEM where2; bdb_lock(); const char *where_and = "WHERE"; /* Get some extra SQL parameters if needed */ const char *where = get_acls(DB_ACL_BIT(DB_ACL_JOB) | DB_ACL_BIT(DB_ACL_FILESET) | DB_ACL_BIT(DB_ACL_BCLIENT), true); const char *join = *where ? 
get_acl_join_filter(DB_ACL_BIT(DB_ACL_JOB) | DB_ACL_BIT(DB_ACL_FILESET) | DB_ACL_BIT(DB_ACL_BCLIENT)) : ""; if (*where) { where_and = "AND"; } if (JobId) { Mmsg(where2, " %s JobMedia.JobId=%lu ", where_and, JobId); where_and = "AND"; } if (volume) { POOL_MEM tmp, tmp2; int len = strlen(volume); tmp.check_size(len*2+1); db_escape_string(jcr, this, tmp.c_str(), volume, len); Mmsg(tmp2, " %s Media.VolumeName = '%s' ", where_and, tmp.c_str()); pm_strcat(where2, tmp2.c_str()); } if (type == VERT_LIST || type == JSON_LIST) { Mmsg(cmd, "SELECT JobMediaId,JobId,Media.MediaId,Media.VolumeName," "FirstIndex,LastIndex,StartFile,JobMedia.EndFile,StartBlock," "JobMedia.EndBlock " "FROM JobMedia JOIN Media USING (MediaId) %s " "%s %s ORDER BY JobMediaId ASC", join, where, where2.c_str()); } else { Mmsg(cmd, "SELECT JobId,Media.VolumeName,FirstIndex,LastIndex " "FROM JobMedia JOIN Media USING (MediaId) %s %s %s ORDER BY JobMediaId ASC", join, where, where2.c_str()); } Dmsg1(DT_SQL|50, "q=%s\n", cmd); if (!QueryDB(jcr, cmd)) { bdb_unlock(); return; } list_result(jcr, this, "jobmedia", sendit, ctx, type); sql_free_result(); bdb_unlock(); } /* List FileMedia records for a given job/file */ void BDB::bdb_list_filemedia_records(JCR *jcr, uint32_t JobId, uint32_t FileIndex, DB_LIST_HANDLER *sendit, void *ctx, e_list_type type) { POOL_MEM tmp, filter; char ed1[50]; if (JobId > 0) { Mmsg(filter, "AND FileMedia.JobId=%s ", edit_int64(JobId, ed1)); } if (FileIndex > 0) { Mmsg(tmp, "AND FileMedia.FileIndex=%s ", edit_int64(FileIndex, ed1)); pm_strcat(filter, tmp.c_str()); } bdb_lock(); if (type == VERT_LIST || type == JSON_LIST) { Mmsg(cmd, "SELECT JobId,FileIndex,Media.MediaId,Media.VolumeName," "BlockAddress,RecordNo,FileOffset " "FROM FileMedia,Media WHERE Media.MediaId=FileMedia.MediaId " "%s ORDER BY FileIndex ASC, FileOffset ASC", filter.c_str()); } else { Mmsg(cmd, "SELECT JobId,FileIndex,Media.VolumeName,BlockAddress,RecordNo,FileOffset " "FROM FileMedia,Media WHERE Media.MediaId=FileMedia.MediaId %s ORDER By FileIndex ASC, FileOffset ASC", filter.c_str()); } if (!QueryDB(jcr, cmd)) { bdb_unlock(); return; } list_result(jcr, this, "filemedia", sendit, ctx, type); sql_free_result(); bdb_unlock(); } /* List FileEvents records for a given job/file */ void BDB::bdb_list_fileevents_records(JCR *jcr, FILEEVENT_DBR *rec, DB_LIST_HANDLER *sendit, void *ctx, e_list_type type) { POOL_MEM tmp, filter; char ed1[50]; Mmsg(filter, "FileEvents.JobId in (%s) ", rec->JobId); if (rec->FileIndex > 0) { Mmsg(tmp, "AND FileEvents.FileIndex=%s ", edit_int64(rec->FileIndex, ed1)); pm_strcat(filter, tmp.c_str()); } if (B_ISALPHA(rec->Type)) { Mmsg(tmp, "AND FileEvents.Type='%c' ", rec->Type); pm_strcat(filter, tmp.c_str()); } if (rec->Severity > 0) { Mmsg(tmp, "AND FileEvents.Severity >= %d ", rec->Severity); pm_strcat(filter, tmp.c_str()); } bdb_lock(); const char *where = get_acls(DB_ACL_BIT(DB_ACL_JOB) | DB_ACL_BIT(DB_ACL_RBCLIENT), false); const char *join = *where ? 
get_acl_join_filter(DB_ACL_BIT(DB_ACL_RBCLIENT)) : ""; if (type == VERT_LIST || type == JSON_LIST) { Mmsg(cmd, "SELECT JobId,FileIndex,Path,Filename,Source,Severity,Type,Description " "FROM FileEvents JOIN File USING (Jobid, FileIndex) JOIN Path USING (PathId) %s WHERE " "%s %s ORDER BY JobId, FileIndex ASC", join, filter.c_str(), where); } else { Mmsg(cmd, "SELECT JobId,Path,Filename,Severity,Type,Description " "FROM FileEvents JOIN File USING (Jobid, FileIndex) JOIN Path USING (PathId) %s WHERE " "%s %s ORDER BY JobId, FileIndex ASC", join, filter.c_str(), where); } if (!QueryDB(jcr, cmd)) { bdb_unlock(); return; } list_result(jcr, this, "fileevents", sendit, ctx, type); sql_free_result(); bdb_unlock(); } void BDB::bdb_list_copies_records(JCR *jcr, uint32_t limit, char *JobIds, DB_LIST_HANDLER *sendit, void *ctx, e_list_type type) { POOL_MEM str_limit(PM_MESSAGE); POOL_MEM str_jobids(PM_MESSAGE); if (limit > 0) { Mmsg(str_limit, " LIMIT %d", limit); } if (JobIds && JobIds[0]) { Mmsg(str_jobids, " AND (Job.PriorJobId IN (%s) OR Job.JobId IN (%s)) ", JobIds, JobIds); } bdb_lock(); const char *where = get_acls(DB_ACL_BIT(DB_ACL_JOB) | DB_ACL_BIT(DB_ACL_BCLIENT), false); const char *join = *where ? get_acl_join_filter(DB_ACL_BIT(DB_ACL_BCLIENT)) : ""; Mmsg(cmd, "SELECT DISTINCT Job.PriorJobId AS JobId, Job.Job, " "Job.JobId AS CopyJobId, Media.MediaType " "FROM Job %s " "JOIN JobMedia USING (JobId) " "JOIN Media USING (MediaId) " "WHERE Job.Type = '%c' %s %s ORDER BY Job.PriorJobId DESC %s", join, (char) JT_JOB_COPY, where, str_jobids.c_str(), str_limit.c_str()); if (!QueryDB(jcr, cmd)) { goto bail_out; } if (sql_num_rows()) { if (JobIds && JobIds[0]) { sendit(ctx, _("These JobIds have copies as follows:\n")); } else { sendit(ctx, _("The catalog contains copies as follows:\n")); } list_result(jcr, this, "copy", sendit, ctx, type); } sql_free_result(); bail_out: bdb_unlock(); } void BDB::bdb_list_events_records(JCR *jcr, EVENTS_DBR *rec, DB_LIST_HANDLER *sendit, void *ctx, e_list_type type) { bool p_and=false; POOL_MEM str_limit(PM_MESSAGE); POOL_MEM where(PM_MESSAGE); POOL_MEM tmp2(PM_MESSAGE); POOL_MEM tmp(PM_MESSAGE); bdb_lock(); if (rec->limit > 0) { Mmsg(str_limit, " LIMIT %d OFFSET %d", rec->limit, rec->offset); } if (rec->EventsType[0]) { int len = strlen(rec->EventsType); tmp.check_size(len*2+1); db_escape_string(jcr, this, tmp.c_str(), rec->EventsType, len); Mmsg(tmp2, "%s Events.EventsType = '%s' ", p_and?"AND": "WHERE", tmp.c_str()); pm_strcat(where, tmp2.c_str()); p_and=true; } if (rec->EventsDaemon[0]) { int len = strlen(rec->EventsDaemon); tmp.check_size(len*2+1); db_escape_string(jcr, this, tmp.c_str(), rec->EventsDaemon, len); Mmsg(tmp2, "%s Events.EventsDaemon = '%s' ", p_and?"AND": "WHERE", tmp.c_str()); pm_strcat(where, tmp2.c_str()); p_and=true; } if (rec->EventsSource[0]) { int len = strlen(rec->EventsSource); tmp.check_size(len*2+1); db_escape_string(jcr, this, tmp.c_str(), rec->EventsSource, len); Mmsg(tmp2, "%s Events.EventsSource = '%s' ", p_and?"AND": "WHERE", tmp.c_str()); pm_strcat(where, tmp2.c_str()); p_and=true; } if (rec->EventsCode[0]) { int len = strlen(rec->EventsCode); tmp.check_size(len*2+1); db_escape_string(jcr, this, tmp.c_str(), rec->EventsCode, len); Mmsg(tmp2, "%s Events.EventsCode = '%s' ", p_and?"AND": "WHERE", tmp.c_str()); pm_strcat(where, tmp2.c_str()); p_and=true; } if (rec->start[0]) { int len = strlen(rec->start); tmp.check_size(len*2+1); db_escape_string(jcr, this, tmp.c_str(), rec->start, len); Mmsg(tmp2, "%s Events.EventsTime >= '%s' ", 
p_and?"AND": "WHERE", tmp.c_str()); pm_strcat(where, tmp2.c_str()); p_and=true; } if (rec->end[0]) { int len = strlen(rec->end); tmp.check_size(len*2+1); db_escape_string(jcr, this, tmp.c_str(), rec->end, len); Mmsg(tmp2, "%s Events.EventsTime <= '%s' ", p_and?"AND": "WHERE", tmp.c_str()); pm_strcat(where, tmp2.c_str()); p_and=true; } if (type == HORZ_LIST) { Mmsg(cmd, "SELECT EventsTime AS Time, EventsDaemon AS Daemon, EventsSource AS Source, EventsType AS Type, EventsText AS Events " "FROM Events " "%s ORDER BY Events.EventsTime %s %s", where.c_str(), rec->order ? "DESC" : "ASC", str_limit.c_str()); } else if (type == JSON_LIST) { Mmsg(tmp2, to_unix_timestamp[bdb_get_type_index()], "EventsTime"); Mmsg(cmd, "SELECT EventsTime AS Time, %s AS UnixTime, EventsCode AS Code, EventsDaemon AS Daemon, EventsRef AS Ref, EventsType AS Type, EventsSource AS Source, EventsText AS Events " "FROM Events " "%s ORDER BY Events.EventsTime %s %s", tmp2.c_str(), where.c_str(), rec->order ? "DESC" : "ASC", str_limit.c_str()); } else { Mmsg(cmd, "SELECT EventsTime AS Time, EventsCode AS Code, EventsDaemon AS Daemon, EventsRef AS Ref, EventsType AS Type, EventsSource AS Source, EventsText AS Events " "FROM Events " "%s ORDER BY Events.EventsTime %s %s", where.c_str(), rec->order ? "DESC" : "ASC", str_limit.c_str()); } if (!QueryDB(jcr, cmd)) { goto bail_out; } list_result(jcr, this, "event", sendit, ctx, type); bail_out: bdb_unlock(); } void BDB::bdb_list_joblog_records(JCR *jcr, uint32_t JobId, const char *pattern, DB_LIST_HANDLER *sendit, void *ctx, e_list_type type) { if (JobId <= 0 && !pattern) { return; } POOL_MEM tmp, where2; char ed1[50]; bdb_lock(); if (JobId > 0) { Mmsg(tmp, "Log.JobId=%s", edit_int64(JobId, ed1)); append_filter(where2.handle(), tmp.c_str()); } if (pattern) { POOL_MEM esc; esc.check_size(strlen(pattern) * 2 + 1); bdb_escape_string(jcr, esc.c_str(), pattern, strlen(pattern)); Mmsg(tmp, "Log.LogText %s '%%%s%%' ", sql_like[bdb_get_type_index()], esc.c_str()); append_filter(where2.handle(), tmp.c_str()); } const char *where = get_acls(DB_ACL_BIT(DB_ACL_JOB) | DB_ACL_BIT(DB_ACL_FILESET) | DB_ACL_BIT(DB_ACL_RBCLIENT) , where2.c_str()[0] == '\0'); const char *join = *where ? get_acl_join_filter(DB_ACL_BIT(DB_ACL_JOB) | DB_ACL_BIT(DB_ACL_FILESET) | DB_ACL_BIT(DB_ACL_RBCLIENT)) : ""; if (type == VERT_LIST) { Mmsg(cmd, "SELECT Time,LogText FROM Log %s " "%s %s ORDER BY LogId ASC", join, where2.c_str(), where); } else if (type == JSON_LIST) { Mmsg(cmd, "SELECT JobId, Time,LogText FROM Log %s " "%s %s ORDER BY LogId ASC", join, where2.c_str(), where); } else { Mmsg(cmd, "SELECT LogText FROM Log %s " "%s %s ORDER BY LogId ASC", join, where2.c_str(), where); } Dmsg1(DT_SQL|50, "q=%s\n", cmd); if (!QueryDB(jcr, cmd)) { goto bail_out; } list_result(jcr, this, "joblog", sendit, ctx, type); sql_free_result(); bail_out: bdb_unlock(); } /* * List Job record(s) that match JOB_DBR * * Currently, we return all jobs or if jr->JobId is set, * only the job with the specified id. 
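 * For the INCOMPLETE_JOBS list type an alist of the matching JobIds is also built and returned (NULL otherwise).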
*/ alist *BDB::bdb_list_job_records(JCR *jcr, JOB_DBR *jr, DB_LIST_HANDLER *sendit, void *ctx, e_list_type type) { char ed1[50]; char limit[50]; char esc[MAX_ESCAPE_NAME_LENGTH]; alist *list = NULL; POOLMEM *where = get_pool_memory(PM_MESSAGE); POOLMEM *tmp = get_pool_memory(PM_MESSAGE); const char *order = "ASC"; const char *join = ""; const char *where_tmp = ""; *where = 0; bdb_lock(); if (jr->order == 1) { order = "DESC"; } if (jr->limit > 0) { snprintf(limit, sizeof(limit), " LIMIT %d", jr->limit); } else { limit[0] = 0; } if (jr->Name[0]) { bdb_escape_string(jcr, esc, jr->Name, strlen(jr->Name)); Mmsg(tmp, " Job.Name='%s' ", esc); append_filter(&where, tmp); } else if (jr->JobId != 0) { Mmsg(tmp, " Job.JobId=%s ", edit_int64(jr->JobId, ed1)); append_filter(&where, tmp); } else if (jr->Job[0] != 0) { bdb_escape_string(jcr, esc, jr->Job, strlen(jr->Job)); Mmsg(tmp, " Job.Job='%s' ", esc); append_filter(&where, tmp); /* 1 => 0, 2 => 1, 0 => not used */ } else if (jr->Reviewed > 0) { Mmsg(tmp, " Job.Reviewed = %d ", jr->Reviewed - 1); append_filter(&where, tmp); } else if (jr->isVirtualFull > 0) { Mmsg(tmp, " Job.isVirtualFull = %d ", jr->isVirtualFull); append_filter(&where, tmp); } if (type == INCOMPLETE_JOBS && jr->JobStatus == JS_FatalError) { Mmsg(tmp, " Job.JobStatus IN ('E', 'f') "); append_filter(&where, tmp); } else if (jr->JobStatus) { Mmsg(tmp, " Job.JobStatus='%c' ", jr->JobStatus); append_filter(&where, tmp); } if (jr->JobType) { Mmsg(tmp, " Job.Type='%c' ", jr->JobType); append_filter(&where, tmp); } if (jr->JobLevel) { Mmsg(tmp, " Job.Level='%c' ", jr->JobLevel); append_filter(&where, tmp); } if (jr->JobErrors > 0) { Mmsg(tmp, " Job.JobErrors > 0 "); append_filter(&where, tmp); } if (jr->ClientId > 0) { Mmsg(tmp, " Job.ClientId=%s ", edit_int64(jr->ClientId, ed1)); append_filter(&where, tmp); } if (jr->FromDate[0]) { bdb_escape_string(jcr, esc, jr->FromDate, strlen(jr->FromDate)); Mmsg(tmp, " Job.StartTime >= '%s' ", esc); append_filter(&where, tmp); } if (jr->isVirtualFull > 0) { Mmsg(tmp, " Job.isVirtualFull=%s ", edit_int64(jr->isVirtualFull, ed1)); append_filter(&where, tmp); } where_tmp = get_acls(DB_ACL_BIT(DB_ACL_RBCLIENT) | DB_ACL_BIT(DB_ACL_JOB) | DB_ACL_BIT(DB_ACL_FILESET), where[0] == 0); pm_strcat(where, where_tmp); if (*where_tmp) { join = get_acl_join_filter(DB_ACL_BIT(DB_ACL_RBCLIENT) | DB_ACL_BIT(DB_ACL_FILESET)); } switch (type) { case JSON_LIST: case VERT_LIST: Mmsg(cmd, "SELECT JobId,Job,Job.Name,PurgedFiles,Type,Level," "Job.ClientId,Client.Name as ClientName,JobStatus,Status.JobStatusLong,SchedTime," "StartTime,EndTime,RealEndTime,RealStartTime,JobTDate," "VolSessionId,VolSessionTime,JobFiles,JobBytes,ReadBytes,JobErrors," "JobMissingFiles,Job.PoolId,Pool.Name as PoolName,PriorJobId,PriorJob," "Job.FileSetId,FileSet.FileSet,Job.HasCache,Comment,Reviewed,isVirtualFull,Rate,CompressRatio,StatusInfo, " "SW.Name AS WriteStorage, WriteDevice, SR.Name AS LastReadStorage, LastReadDevice " "FROM Job JOIN Client USING (ClientId) LEFT JOIN Pool USING (PoolId) " "LEFT JOIN FileSet USING (FileSetId) LEFT JOIN Status USING (JobStatus) " "LEFT JOIN Storage AS SW ON (SW.StorageId = Job.WriteStorageId) LEFT JOIN Storage AS SR ON (SR.StorageId = Job.LastReadStorageId) %s " "ORDER BY StartTime %s %s", where, order, limit); break; case HORZ_LIST: Mmsg(cmd, "SELECT JobId,Job.Name,StartTime,Type,Level,JobFiles,JobBytes,JobStatus " "FROM Job %s %s ORDER BY StartTime %s,JobId %s %s", join, where, order, order, limit); break; case INCOMPLETE_JOBS: Mmsg(cmd, "SELECT 
JobId,Job.Name,StartTime,Type,Level,JobFiles,JobBytes,JobStatus " "FROM Job %s %s ORDER BY StartTime %s,JobId %s %s", join, where, order, order, limit); break; case LAST_JOBS: Mmsg(cmd, "SELECT JobId,Client1.Name as Client,Job.Name as Name,StartTime,Level as " "JobLevel,JobFiles,JobBytes " "FROM Client AS Client1 JOIN Job USING (ClientId) %s %s AND JobStatus IN ('T','W') " "ORDER BY StartTime %s %s", join, where, order, limit); default: break; } Dmsg1(DT_SQL|50, "SQL: %s\n", cmd); free_pool_memory(tmp); free_pool_memory(where); if (!QueryDB(jcr, cmd)) { bdb_unlock(); return NULL; } if (type == INCOMPLETE_JOBS) { SQL_ROW row; list = New(alist(10)); sql_data_seek(0); for (int i=0; (row=sql_fetch_row()) != NULL; i++) { list->append(bstrdup(row[0])); } } sql_data_seek(0); list_result(jcr, this, "job", sendit, ctx, type); sql_free_result(); bdb_unlock(); return list; } /* * List Job totals * * TODO: Adapt for JSON */ void BDB::bdb_list_job_totals(JCR *jcr, JOB_DBR *jr, DB_LIST_HANDLER *sendit, void *ctx) { bdb_lock(); const char *where = get_acls(DB_ACL_BIT(DB_ACL_BCLIENT) | DB_ACL_BIT(DB_ACL_JOB), true); const char *join = *where ? get_acl_join_filter(DB_ACL_BIT(DB_ACL_BCLIENT)) : ""; /* List by Job */ Mmsg(cmd, "SELECT count(*) AS Jobs,sum(JobFiles) " "AS Files,sum(JobBytes) AS Bytes,Job.Name AS Job FROM Job %s %s GROUP BY Job.Name", join, where); if (!QueryDB(jcr, cmd)) { bdb_unlock(); return; } list_result(jcr, this, "jobtotal", sendit, ctx, HORZ_LIST); sql_free_result(); /* Do Grand Total */ Mmsg(cmd, "SELECT count(*) AS Jobs,sum(JobFiles) " "AS Files,sum(JobBytes) As Bytes FROM Job %s %s", join, where); if (!QueryDB(jcr, cmd)) { bdb_unlock(); return; } list_result(jcr, this, "jobtotal", sendit, ctx, HORZ_LIST); sql_free_result(); bdb_unlock(); } /* List all file records from a job * "deleted" values are described just below */ void BDB::bdb_list_files_for_job(JCR *jcr, JobId_t jobid, int deleted, DB_LIST_HANDLER *sendit, void *ctx) { char ed1[50]; const char *opt; LIST_CTX lctx(jcr, this, sendit, ctx, HORZ_LIST); switch (deleted) { case 0: /* Show only actual files */ opt = " AND FileIndex > 0 "; break; case 1: /* Show only deleted files */ opt = " AND FileIndex <= 0 "; break; default: /* Show everything */ opt = ""; break; } bdb_lock(); /* Get optional filters for the SQL query */ const char *where = get_acls(DB_ACL_BIT(DB_ACL_JOB) | DB_ACL_BIT(DB_ACL_BCLIENT) | DB_ACL_BIT(DB_ACL_FILESET), true); const char *join = *where ? get_acl_join_filter(DB_ACL_BIT(DB_ACL_JOB) | DB_ACL_BIT(DB_ACL_BCLIENT) | DB_ACL_BIT(DB_ACL_FILESET)) : ""; /* * MySQL is different with no || operator */ if (bdb_get_type_index() == SQL_TYPE_MYSQL) { Mmsg(cmd, "SELECT CONCAT(Path.Path,F.Filename) AS Filename " "FROM (SELECT PathId, Filename, JobId FROM File WHERE JobId=%s %s" "UNION ALL " "SELECT PathId, Filename, BaseFiles.JobId " "FROM BaseFiles JOIN File " "ON (BaseFiles.FileId = File.FileId) " "WHERE BaseFiles.JobId = %s" ") AS F JOIN Path ON (Path.PathId=F.PathId) %s %s", edit_int64(jobid, ed1), opt, ed1, join, where); } else { /* Note: For some files with attributes update may be listed twice here. * We may consider adding 'DISTICT' to the query, but since it could * make query much longer it would be nice to handle it in some different way. 
*/ Mmsg(cmd, "SELECT Path.Path||F.Filename AS Filename " "FROM (SELECT PathId, Filename, JobId FROM File WHERE JobId=%s %s" "UNION ALL " "SELECT PathId, Filename, BaseFiles.JobId " "FROM BaseFiles JOIN File " "ON (BaseFiles.FileId = File.FileId) " "WHERE BaseFiles.JobId = %s" ") AS F JOIN Path ON (Path.PathId=F.PathId) %s %s", edit_int64(jobid, ed1), opt, ed1, join, where); } Dmsg1(DT_SQL|50, "q=%s\n", cmd); if (!bdb_big_sql_query(cmd, list_result, &lctx)) { bdb_unlock(); return; } lctx.send_dashes(); sql_free_result(); bdb_unlock(); } /* List all file records from a job * "deleted" values are described just below */ void BDB::bdb_list_fileevents_for_job(JCR *jcr, JobId_t jobid, char etype, DB_LIST_HANDLER *sendit, void *ctx, e_list_type type) { char ed1[50]; POOL_MEM f, fields; const char *concat="Path.Path||F.Filename"; bdb_lock(); /* Get optional filters for the SQL query */ const char *where = get_acls(DB_ACL_BIT(DB_ACL_JOB) | DB_ACL_BIT(DB_ACL_RBCLIENT) | DB_ACL_BIT(DB_ACL_FILESET), true); const char *join = *where ? get_acl_join_filter(DB_ACL_BIT(DB_ACL_JOB) | DB_ACL_BIT(DB_ACL_RBCLIENT) | DB_ACL_BIT(DB_ACL_FILESET)) : ""; if (etype) { Mmsg(f, " AND FileEvents.Type = '%c' ", etype); } /* * MySQL is different with no || operator */ if (bdb_get_type_index() == SQL_TYPE_MYSQL) { concat = " CONCAT(Path.Path,F.Filename) "; } switch (type) { case JSON_LIST: Mmsg(fields, "JobId, %s AS Filename, Type, Severity, Description, Source", concat); break; case VERT_LIST: Mmsg(fields, "JobId, SourceJobId, %s AS Filename, Type, Severity, Description, Source", concat); break; case HORZ_LIST: Mmsg(fields, "JobId, %s AS Filename, Description, Source", concat); break; default: goto bail_out; } Mmsg(cmd, "SELECT DISTINCT %s " "FROM (SELECT PathId, Filename, File.JobId, FileEvents.SourceJobId, FileEvents.Type, FileEvents.Description, FileEvents.Source, FileEvents.Severity FROM File " "JOIN FileEvents ON (File.JobId = FileEvents.JobId AND File.FileIndex = FileEvents.FileIndex) " "WHERE File.JobId=%s %s " "UNION ALL " "SELECT PathId, Filename, BaseFiles.JobId, FileEvents.SourceJobId, FileEvents.Type, FileEvents.Description, FileEvents.Source, FileEvents.Severity " "FROM BaseFiles JOIN File ON (BaseFiles.FileId = File.FileId) " "JOIN FileEvents ON (File.JobId = FileEvents.JobId AND File.FileIndex = FileEvents.FileIndex) " "WHERE BaseFiles.JobId = %s %s " ") AS F JOIN Path ON (Path.PathId=F.PathId) %s %s", fields.c_str(), edit_int64(jobid, ed1), f.c_str(), ed1, f.c_str(), join, where); Dmsg1(DT_SQL|50, "q=%s\n", cmd); if (!QueryDB(jcr, cmd)) { goto bail_out; } // TODO: Display list_result(jcr, this, "fileevents", sendit, ctx, type); bail_out: sql_free_result(); bdb_unlock(); } void BDB::bdb_list_base_files_for_job(JCR *jcr, JobId_t jobid, DB_LIST_HANDLER *sendit, void *ctx) { char ed1[50]; LIST_CTX lctx(jcr, this, sendit, ctx, HORZ_LIST); bdb_lock(); /* * Stupid MySQL is NON-STANDARD ! 
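 * It lacks the standard || string concatenation operator, so CONCAT() is used instead.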
*/ if (bdb_get_type_index() == SQL_TYPE_MYSQL) { Mmsg(cmd, "SELECT CONCAT(Path.Path,File.Filename) AS Filename " "FROM BaseFiles, File, Path " "WHERE BaseFiles.JobId=%s AND BaseFiles.BaseJobId = File.JobId " "AND BaseFiles.FileId = File.FileId " "AND Path.PathId=File.PathId", edit_int64(jobid, ed1)); } else { Mmsg(cmd, "SELECT Path.Path||File.Filename AS Filename " "FROM BaseFiles, File, Path " "WHERE BaseFiles.JobId=%s AND BaseFiles.BaseJobId = File.JobId " "AND BaseFiles.FileId = File.FileId " "AND Path.PathId=File.PathId", edit_int64(jobid, ed1)); } if (!bdb_big_sql_query(cmd, list_result, &lctx)) { bdb_unlock(); return; } lctx.send_dashes(); sql_free_result(); bdb_unlock(); } void BDB::bdb_list_snapshot_records(JCR *jcr, SNAPSHOT_DBR *sdbr, DB_LIST_HANDLER *sendit, void *ctx, e_list_type type) { POOLMEM *filter = get_pool_memory(PM_MESSAGE); POOLMEM *tmp = get_pool_memory(PM_MESSAGE); POOLMEM *esc = get_pool_memory(PM_MESSAGE); char ed1[50]; bdb_lock(); const char *where = get_acl(DB_ACL_BCLIENT, false); *filter = 0; if (sdbr->Name[0]) { bdb_escape_string(jcr, esc, sdbr->Name, strlen(sdbr->Name)); Mmsg(tmp, "Name='%s'", esc); append_filter(&filter, tmp); } if (sdbr->SnapshotId > 0) { Mmsg(tmp, "Snapshot.SnapshotId=%d", sdbr->SnapshotId); append_filter(&filter, tmp); } if (sdbr->ClientId > 0) { Mmsg(tmp, "Snapshot.ClientId=%d", sdbr->ClientId); append_filter(&filter, tmp); } if (sdbr->JobId > 0) { Mmsg(tmp, "Snapshot.JobId=%d", sdbr->JobId); append_filter(&filter, tmp); } if (*sdbr->Client) { bdb_escape_string(jcr, esc, sdbr->Client, strlen(sdbr->Client)); Mmsg(tmp, "Client.Name='%s'", esc); append_filter(&filter, tmp); } if (sdbr->Device && *(sdbr->Device)) { esc = check_pool_memory_size(esc, strlen(sdbr->Device) * 2 + 1); bdb_escape_string(jcr, esc, sdbr->Device, strlen(sdbr->Device)); Mmsg(tmp, "Device='%s'", esc); append_filter(&filter, tmp); } if (*sdbr->Type) { bdb_escape_string(jcr, esc, sdbr->Type, strlen(sdbr->Type)); Mmsg(tmp, "Type='%s'", esc); append_filter(&filter, tmp); } if (*sdbr->created_before) { bdb_escape_string(jcr, esc, sdbr->created_before, strlen(sdbr->created_before)); Mmsg(tmp, "CreateDate <= '%s'", esc); append_filter(&filter, tmp); } if (*sdbr->created_after) { bdb_escape_string(jcr, esc, sdbr->created_after, strlen(sdbr->created_after)); Mmsg(tmp, "CreateDate >= '%s'", esc); append_filter(&filter, tmp); } if (sdbr->expired) { Mmsg(tmp, "CreateTDate < (%s - Retention)", edit_int64(time(NULL), ed1)); append_filter(&filter, tmp); } if (*sdbr->CreateDate) { bdb_escape_string(jcr, esc, sdbr->CreateDate, strlen(sdbr->CreateDate)); Mmsg(tmp, "CreateDate = '%s'", esc); append_filter(&filter, tmp); } if (sdbr->sorted_client) { pm_strcat(filter, " ORDER BY Client.Name, SnapshotId DESC"); } else { pm_strcat(filter, " ORDER BY SnapshotId DESC"); } if (type == VERT_LIST || type == ARG_LIST || type == JSON_LIST) { Mmsg(cmd, "SELECT SnapshotId, Snapshot.Name, CreateDate, Client.Name AS Client, " "FileSet.FileSet AS FileSet, JobId, Volume, Device, Type, Retention, Comment " "FROM Snapshot JOIN Client USING (ClientId) LEFT JOIN FileSet USING (FileSetId) %s %s", filter, where); } else if (type == HORZ_LIST) { Mmsg(cmd, "SELECT SnapshotId, Snapshot.Name, CreateDate, Client.Name AS Client, " "Device, Type " "FROM Snapshot JOIN Client USING (ClientId) %s", filter, where); } if (!QueryDB(jcr, cmd)) { goto bail_out; } list_result(jcr, this, "snapshot", sendit, ctx, type); bail_out: sql_free_result(); bdb_unlock(); free_pool_memory(filter); free_pool_memory(esc); 
free_pool_memory(tmp); } void BDB::bdb_list_files(JCR *jcr, FILE_DBR *fr, DB_RESULT_HANDLER *result_handler, void *ctx) { uint32_t firstindex = fr->FileIndex; uint32_t lastindex = fr->FileIndex2 ? fr->FileIndex2 : fr->FileIndex; bdb_lock(); Mmsg(cmd, "SELECT Path.Path, File.Filename, File.FileIndex, File.JobId, " "File.LStat, File.DeltaSeq, File.Md5 " "FROM File JOIN Path USING (PathId) " "WHERE FileIndex >= %ld AND FileIndex <= %ld AND JobId = %ld", firstindex, lastindex, fr->JobId); if (!bdb_sql_query(cmd, result_handler, ctx)) { goto bail_out; } bail_out: bdb_unlock(); } void BDB::bdb_list_tag_records(JCR *jcr, TAG_DBR *tag, DB_LIST_HANDLER *result_handler, void *ctx, e_list_type type) { POOL_MEM tmp, where; char esc[MAX_ESCAPE_NAME_LENGTH]; char esc_name[MAX_ESCAPE_NAME_LENGTH]; uint64_t aclbits, aclbits_extra; const char *name; const char *id; const char *table; tag->gen_sql(jcr, this, &table, &name, &id, esc, esc_name, &aclbits, &aclbits_extra); bdb_lock(); pm_strcpy(where, get_acls(aclbits, true)); /* get_acls() uses a specific object buffer */ const char *whereand = get_acls(aclbits, false); const char *join = get_acl_join_filter(aclbits_extra); if (table) { if (tag->all) { if (*esc_name) { /* Display all resource for a tag */ Mmsg(tmp, "SELECT %s, %s AS %s FROM Tag%s AS T JOIN %s USING (%s) %s WHERE T.Tag = '%s' %s", id, name, table, table, table, id, join, esc_name, whereand); } else { /* Display all tags for a resource type */ Mmsg(tmp, "SELECT DISTINCT T.Tag, %s AS %s, %s AS %s FROM Tag%s AS T JOIN %s USING (%s) %s %s", id, id, name, table, table, table, id, join, where.c_str()); } } else { if (*esc_name) { Mmsg(tmp, "SELECT T.Tag, %s as %s, %s AS %s FROM Tag%s AS T JOIN %s USING (%s) %s WHERE %s = '%s' AND T.Tag = '%s' %s", id, id, name, table, table, table, id, join, name, esc, esc_name, whereand); } else { /* Display all tags for a client */ Mmsg(tmp, "SELECT Tag, %s as %s, %s as %s FROM Tag%s AS T JOIN %s USING (%s) %s WHERE %s = '%s' %s", id, id, (tag->JobId>0)?"Name":name, table, table, table, id, join, name, esc, whereand); } } Dmsg1(DT_SQL|50, "q=%s\n", tmp.c_str()); bdb_list_sql_query(jcr, "tag", tmp.c_str(), result_handler, ctx, 0, type); } bdb_unlock(); } /* List Owner in the metadata table */ void BDB::bdb_list_metadata_owner_records(JCR *jcr, META_DBR *meta_r, DB_LIST_HANDLER *sendit, void *ctx, e_list_type type) { POOL_MEM esc(PM_MESSAGE), tmp(PM_MESSAGE), where(PM_MESSAGE), join(PM_MESSAGE); bdb_lock(); /* We use the command line to generate the SQL query with what the user * want to filter */ meta_r->create_db_filter(jcr, this, where.handle()); /* We also apply ACL via SQL (here on the Client name and the Job name) if we * are in a restricted console */ const char *where_filter = get_acls(DB_ACL_BIT(DB_ACL_JOB) | DB_ACL_BIT(DB_ACL_BCLIENT), strcmp(where.c_str(), "") == 0); const char *join_filter = (*where_filter && meta_r->ClientName[0] == 0) ? 
get_acl_join_filter(DB_ACL_BIT(DB_ACL_BCLIENT)) : ""; if (meta_r->ClientName[0] != 0) { Mmsg(join, " JOIN Job ON (Job.JobId = Meta%s.JobId) JOIN Client USING (ClientId) ", meta_r->Type); } else if (*where_filter) { // We add manually the Job join filter part Mmsg(join, " JOIN Job ON (Job.JobId = Meta%s.JobId) ", meta_r->Type); } if (where_filter && *where_filter) { pm_strcat(where, where_filter); } if (join_filter && *join_filter) { pm_strcat(join, join_filter); } if (meta_r->limit > 0) { Mmsg(tmp, " LIMIT %d ", meta_r->limit); pm_strcat(where, tmp.c_str()); } if (meta_r->offset > 0) { Mmsg(tmp, " OFFSET %ld ", meta_r->offset); pm_strcat(where, tmp.c_str()); } switch (type) { case JSON_LIST: case VERT_LIST: case HORZ_LIST: Mmsg(cmd, "SELECT DISTINCT %sOwner " "FROM Meta%s %s %s", meta_r->Type, meta_r->Type, join.c_str(), where.c_str()); break; default: break; } Dmsg1(DT_SQL|50, "%s\n", cmd); if (!QueryDB(jcr, cmd)) { Jmsg(jcr, M_ERROR, 0, _("Query %s failed!\n"), cmd); bdb_unlock(); return; } if (strcmp(meta_r->Type, "Email") == 0) { Mmsg(esc, "metadataemail"); } else { Mmsg(esc, "metaattachment"); } list_result(jcr, this, esc.c_str(), sendit, ctx, type); sql_free_result(); bdb_unlock(); } /* * List plugin objects (search is based on object provided) */ void BDB::bdb_list_metadata_records(JCR *jcr, META_DBR *meta_r, DB_LIST_HANDLER *sendit, void *ctx, e_list_type type) { if (!meta_r->Owner[0] || strchr(meta_r->Owner, '%')) { /* List Owners */ bdb_list_metadata_owner_records(jcr, meta_r, sendit, ctx, type); return; } const char *k1=""; // set to Attachment if it is appropriate POOL_MEM esc(PM_MESSAGE), tmp(PM_MESSAGE), where(PM_MESSAGE), join(PM_MESSAGE); bdb_lock(); //TODO add ACL part meta_r->create_db_filter(jcr, this, where.handle()); Dmsg1(DT_SQL|50, "where=[%s]\n", where.c_str()); const char *where_filter = get_acls(DB_ACL_BIT(DB_ACL_JOB) | DB_ACL_BIT(DB_ACL_BCLIENT), strcmp(where.c_str(), "") == 0); const char *join_filter = (*where_filter && meta_r->ClientName[0] == 0) ? get_acl_join_filter(DB_ACL_BIT(DB_ACL_BCLIENT)) : ""; if (meta_r->ClientName[0] != 0) { Mmsg(join, " JOIN Job ON (Job.JobId = Meta%s.JobId) JOIN Client USING (ClientId) ", meta_r->Type); } else if (*where_filter) { // We add manually the Job join filter part Mmsg(join, " JOIN Job ON (Job.JobId = Meta%s.JobId) ", meta_r->Type); } if (strcmp(meta_r->Type, "Attachment") == 0) { k1 = "Attachment"; pm_strcat(join, " JOIN MetaEmail ON (EmailId = AttachmentEmailId AND MetaEmail.JobId = MetaAttachment.JobId) "); } if (where_filter && *where_filter) { pm_strcat(where, where_filter); } if (join_filter && *join_filter) { pm_strcat(join, join_filter); } /* If we have a list of JobId, a single value (nothing is considered as a list, we want everything) */ bool list_jobid = false; if (!meta_r->JobIds) { list_jobid = true; } else if (strchr(meta_r->JobIds, ',') != NULL) { list_jobid = true; } if (list_jobid) { if (!meta_r->alljobs) { /* The idea is to get only the last occurence of each email. 
The key differentiator is * the EmailId, and we use the Job table via the StartTime to select the latest version * * should give * AND MetaEmail.JobId = (SELECT JobId FROM Job JOIN MetaEmail AS B USING (JobId) * WHERE MetaEmail.EmailId=B.EmailId ORDER BY StartTime DESC LIMIT 1 * or * AND MetaAttachment.JobId = (SELECT JobId FROM Job JOIN MetaAttachment AS B USING (JobId) * WHERE MetaAttachment.AttachmentEmailId=B.AttachmentEmailId ORDER BY StartTime DESC LIMIT 1 */ Mmsg(tmp, " AND Meta%s.JobId = (SELECT JobId FROM Job JOIN Meta%s AS B USING (JobId) %s WHERE Meta%s.%sEmailId=B.%sEmailId %s ORDER BY StartTime DESC LIMIT 1) ", meta_r->Type, meta_r->Type, join_filter, meta_r->Type, k1, k1, where_filter); pm_strcat(where, tmp.c_str()); } } if (meta_r->orderby == 1) { Mmsg(tmp, " ORDER BY EmailTime %s ", meta_r->order ? "DESC" : "ASC"); } else { Mmsg(tmp, " ORDER BY Meta%s.JobId, Meta%s.FileIndex %s ", meta_r->Type, meta_r->Type, meta_r->order ? "DESC" : "ASC"); } pm_strcat(where, tmp.c_str()); if (meta_r->limit > 0) { Mmsg(tmp, " LIMIT %d ", meta_r->limit); pm_strcat(where, tmp.c_str()); } if (meta_r->offset > 0) { Mmsg(tmp, " OFFSET %ld ", meta_r->offset); pm_strcat(where, tmp.c_str()); } switch (type) { case JSON_LIST: case VERT_LIST: meta_r->get_all_keys(tmp.handle()); Mmsg(cmd, "SELECT %s %s " "FROM Meta%s %s %s", (strcmp(meta_r->Type, "Email") == 0) ? "" : "DISTINCT", tmp.c_str(), meta_r->Type, join.c_str(), where.c_str()); break; case HORZ_LIST: meta_r->get_important_keys(tmp.handle()); Mmsg(cmd, "SELECT %s %s " "FROM Meta%s %s %s", (strcmp(meta_r->Type, "Email") == 0) ? "" : "DISTINCT", tmp.c_str(), meta_r->Type, join.c_str(), where.c_str()); break; default: break; } Dmsg1(DT_SQL|50, "%s\n", cmd); if (!QueryDB(jcr, cmd)) { Jmsg(jcr, M_ERROR, 0, _("Query %s failed!\n"), cmd); bdb_unlock(); return; } if (strcmp(meta_r->Type, "Email") == 0) { Mmsg(esc, "metadataemail"); } else { Mmsg(esc, "metaattachment"); } list_result(jcr, this, esc.c_str(), sendit, ctx, type); sql_free_result(); bdb_unlock(); } /* List all file records from a job * "deleted" values are described just below */ void BDB::bdb_list_jobs_for_file(JCR *jcr, const char *client, const char *fname, DB_LIST_HANDLER *sendit, void *ctx, e_list_type type) { if (!client || !*client || !fname || !*fname) { return; } const char *concat="Path.Path||File.Filename"; if (bdb_get_type_index() == SQL_TYPE_MYSQL) { concat = " CONCAT(Path.Path,File.Filename) "; } bdb_lock(); /* Get optional filters for the SQL query */ const char *where = get_acls(DB_ACL_BIT(DB_ACL_JOB) | DB_ACL_BIT(DB_ACL_BCLIENT) | DB_ACL_BIT(DB_ACL_FILESET), false); const char *join = *where ? 
get_acl_join_filter(DB_ACL_BIT(DB_ACL_FILESET)) : ""; int len = strlen(fname); char *esc = (char *)malloc(len * 2 + 1); bdb_escape_string(jcr, esc, fname, len); len = strlen(client); char *esc2 = (char *)malloc(len * 2 + 1); bdb_escape_string(jcr, esc2, client, len); Mmsg(cmd, "SELECT Job.JobId as JobId," "%s as Name, " // Concat of the filename "StartTime, Type as JobType, JobStatus,JobFiles,JobBytes " "FROM Client JOIN Job USING (ClientId) JOIN File USING (JobId) JOIN Path USING (PathId) %s " "WHERE Client.Name = '%s' " "AND File.FileIndex > 0 " "AND File.Filename='%s' %s ORDER BY StartTime DESC LIMIT 20", concat, join, esc2, esc, where); free(esc); free(esc2); Dmsg1(DT_SQL|50, "q=%s\n", cmd); if (!QueryDB(jcr, cmd)) { goto bail_out; } list_result(jcr, this, "job", sendit, ctx, HORZ_LIST); bail_out: sql_free_result(); bdb_unlock(); } #endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL */ bacula-15.0.3/src/cats/sql_find.c0000644000175000017500000005011514771010173016350 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Bacula Catalog Database Find record interface routines * * Note, generally, these routines are more complicated * that a simple search by name or id. Such simple * request are in get.c * * Written by Kern Sibbald, December 2000 */ #include "bacula.h" #if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL #include "cats.h" /* ----------------------------------------------------------------------- * * Generic Routines (or almost generic) * * ----------------------------------------------------------------------- */ /* * Find the most recent successful real end time for a job given. 
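 * Illustrative sketch of the kind of query generated (for a Backup job;
 * names and ids below are hypothetical):
 *   SELECT RealEndTime, Job FROM Job
 *    WHERE JobStatus IN ('T','W') AND Type='B' AND Level IN ('F','D','I')
 *      AND Name='BackupClient1' AND ClientId=1 AND FileSetId=2
 *    ORDER BY RealEndTime DESC LIMIT 1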
* * RealEndTime is returned in etime * Job name is returned in job (MAX_NAME_LENGTH) * * Returns: false on failure * true on success, jr is unchanged, but etime and job are set */ bool BDB::bdb_find_last_job_end_time(JCR *jcr, JOB_DBR *jr, POOLMEM **etime, char *job) { SQL_ROW row; char ed1[50], ed2[50]; char esc_name[MAX_ESCAPE_NAME_LENGTH]; bdb_lock(); bdb_escape_string(jcr, esc_name, jr->Name, strlen(jr->Name)); pm_strcpy(etime, "0000-00-00 00:00:00"); /* default */ job[0] = 0; Mmsg(cmd, "SELECT RealEndTime, Job FROM Job WHERE JobStatus IN ('T','W') AND Type='%c' AND " "Level IN ('%c','%c','%c') AND Name='%s' AND ClientId=%s AND FileSetId=%s " "ORDER BY RealEndTime DESC LIMIT 1", jr->JobType, L_FULL, L_DIFFERENTIAL, L_INCREMENTAL, esc_name, edit_int64(jr->ClientId, ed1), edit_int64(jr->FileSetId, ed2)); if (!QueryDB(jcr, cmd)) { Mmsg2(&errmsg, _("Query error for end time request: ERR=%s\nCMD=%s\n"), sql_strerror(), cmd); goto bail_out; } if ((row = sql_fetch_row()) == NULL) { sql_free_result(); Mmsg(errmsg, _("No prior backup Job record found.\n")); goto bail_out; } Dmsg1(100, "Got end time: %s\n", row[0]); pm_strcpy(etime, row[0]); bstrncpy(job, row[1], MAX_NAME_LENGTH); sql_free_result(); bdb_unlock(); return true; bail_out: bdb_unlock(); return false; } /* * Find job start time if JobId specified, otherwise * find last Job start time Incremental and Differential saves. * If FileSetId is 0, the FileSet is not used to retrieve the job * information. * * StartTime is returned in stime * Job name is returned in job (MAX_NAME_LENGTH) * * Returns: false on failure * true on success, jr is unchanged, but stime and job are set */ bool BDB::bdb_find_job_start_time(JCR *jcr, JOB_DBR *jr, POOLMEM **stime, char *job) { SQL_ROW row; char ed1[50], ed2[50]; char esc_name[MAX_ESCAPE_NAME_LENGTH]; char fileset[MAX_ESCAPE_NAME_LENGTH]; bdb_lock(); bdb_escape_string(jcr, esc_name, jr->Name, strlen(jr->Name)); pm_strcpy(stime, "0000-00-00 00:00:00"); /* default */ job[0] = 0; fileset[0] = 0; if (jr->FileSetId) { bsnprintf(fileset, sizeof(fileset), " AND FileSetId=%s ", edit_int64(jr->FileSetId, ed2)); } /* If no Id given, we must find corresponding job */ if (jr->JobId == 0) { /* Differential is since last Full backup */ Mmsg(cmd, "SELECT StartTime, Job, PriorJob FROM Job WHERE JobStatus IN ('T','W') AND Type='%c' AND " "Level='%c' AND Name='%s' AND ClientId=%s %s " "ORDER BY StartTime DESC LIMIT 1", jr->JobType, L_FULL, esc_name, edit_int64(jr->ClientId, ed1), fileset); if (jr->JobLevel == L_DIFFERENTIAL) { /* SQL cmd for Differential backup already edited above */ /* Incremental is since last Full, Incremental, or Differential */ } else if (jr->JobLevel == L_INCREMENTAL) { /* * For an Incremental job, we must first ensure * that a Full backup was done (cmd edited above) * then we do a second look to find the most recent * backup */ if (!QueryDB(jcr, cmd)) { Mmsg2(&errmsg, _("Query error for start time request: ERR=%s\nCMD=%s\n"), sql_strerror(), cmd); goto bail_out; } if ((row = sql_fetch_row()) == NULL) { sql_free_result(); Mmsg(errmsg, _("No prior Full backup Job record found.\n")); goto bail_out; } sql_free_result(); /* Now edit SQL command for Incremental Job */ Mmsg(cmd, "SELECT StartTime, Job, PriorJob FROM Job WHERE JobStatus IN ('T','W') AND Type='%c' AND " "Level IN ('%c','%c','%c') AND Name='%s' AND ClientId=%s " "%s ORDER BY StartTime DESC LIMIT 1", jr->JobType, L_INCREMENTAL, L_DIFFERENTIAL, L_FULL, esc_name, edit_int64(jr->ClientId, ed1), fileset); } else { Mmsg1(errmsg, _("Unknown 
level=%d\n"), jr->JobLevel); goto bail_out; } } else { Dmsg1(100, "Submitting: %s\n", cmd); Mmsg(cmd, "SELECT StartTime, Job, PriorJob FROM Job WHERE Job.JobId=%s", edit_int64(jr->JobId, ed1)); } if (!QueryDB(jcr, cmd)) { pm_strcpy(stime, ""); /* set EOS */ Mmsg2(&errmsg, _("Query error for start time request: ERR=%s\nCMD=%s\n"), sql_strerror(), cmd); goto bail_out; } if ((row = sql_fetch_row()) == NULL) { Mmsg2(&errmsg, _("No Job record found: ERR=%s\nCMD=%s\n"), sql_strerror(), cmd); sql_free_result(); goto bail_out; } Dmsg2(100, "Got start time: %s, job: %s\n", row[0], row[1]); pm_strcpy(stime, row[0]); /* If we have a PriorJob name in the Job record, we can use it It should * make plugins using the job name compatible with Copy/Migration. * * The name of the previous job in the incremental/differential backup chain * can be used by Plugins for some operations. For example, we can create a * snapshot that uses the current Job name, and the next incremental may * use the previous job name to make a diff between the current snapshot and * the previous one for example. This is also useful to "validate" that the * previous job ran OK. */ if (row[2] && row[2][0]) { bstrncpy(job, row[2], MAX_NAME_LENGTH); } else { bstrncpy(job, row[1], MAX_NAME_LENGTH); } sql_free_result(); bdb_unlock(); return true; bail_out: bdb_unlock(); return false; } /* * Find the last job start time for the specified JobLevel * * StartTime is returned in stime * Job name is returned in job (MAX_NAME_LENGTH) * * If FileSetId is 0, then the FileSet is not used to get the job record * * Returns: false on failure * true on success, jr is unchanged, but stime and job are set */ bool BDB::bdb_find_last_job_start_time(JCR *jcr, JOB_DBR *jr, POOLMEM **stime, char *job, int JobLevel) { SQL_ROW row; char ed1[50], ed2[50]; char esc_name[MAX_ESCAPE_NAME_LENGTH]; char fileset[MAX_ESCAPE_NAME_LENGTH]; fileset[0] = 0; if (jr->FileSetId) { bsnprintf(fileset, sizeof(fileset), " AND FileSetId=%s ", edit_int64(jr->FileSetId, ed2)); } bdb_lock(); bdb_escape_string(jcr, esc_name, jr->Name, strlen(jr->Name)); pm_strcpy(stime, "0000-00-00 00:00:00"); /* default */ job[0] = 0; Mmsg(cmd, "SELECT StartTime, Job, PriorJob FROM Job WHERE JobStatus IN ('T','W') AND Type='%c' AND " "Level='%c' AND Name='%s' AND ClientId=%s %s " "ORDER BY StartTime DESC LIMIT 1", jr->JobType, JobLevel, esc_name, edit_int64(jr->ClientId, ed1), fileset); if (!QueryDB(jcr, cmd)) { Mmsg2(&errmsg, _("Query error for start time request: ERR=%s\nCMD=%s\n"), sql_strerror(), cmd); goto bail_out; } if ((row = sql_fetch_row()) == NULL) { sql_free_result(); Mmsg(errmsg, _("No prior Full backup Job record found.\n")); goto bail_out; } Dmsg1(100, "Got start time: %s\n", row[0]); pm_strcpy(stime, row[0]); /* If we have a PriorJob name, we can return it * see bdb_find_job_start_time() comments for more information */ if (row[2] && row[2][0]) { bstrncpy(job, row[2], MAX_NAME_LENGTH); } else { bstrncpy(job, row[1], MAX_NAME_LENGTH); } sql_free_result(); bdb_unlock(); return true; bail_out: bdb_unlock(); return false; } /* * Find last failed job since given start-time * it must be either Full or Diff. 
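 * For example (hypothetical value): with stime='2025-01-01 00:00:00' this
 * returns the Level of the most recent Canceled/Error/Fatal-terminated
 * Full or Differential job started after that time, if any.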
* * Returns: false on failure * true on success, jr is unchanged and stime unchanged * level returned in JobLevel */ bool BDB::bdb_find_failed_job_since(JCR *jcr, JOB_DBR *jr, POOLMEM *stime, int &JobLevel) { SQL_ROW row; char ed1[50], ed2[50]; char esc_name[MAX_ESCAPE_NAME_LENGTH]; bdb_lock(); bdb_escape_string(jcr, esc_name, jr->Name, strlen(jr->Name)); /* Differential is since last Full backup */ Mmsg(cmd, "SELECT Level FROM Job WHERE JobStatus IN ('%c','%c', '%c', '%c') AND " "Type='%c' AND Level IN ('%c','%c') AND Name='%s' AND ClientId=%s " "AND FileSetId=%s AND StartTime>'%s' " "ORDER BY StartTime DESC LIMIT 1", JS_Canceled, JS_ErrorTerminated, JS_Error, JS_FatalError, jr->JobType, L_FULL, L_DIFFERENTIAL, esc_name, edit_int64(jr->ClientId, ed1), edit_int64(jr->FileSetId, ed2), stime); if (!QueryDB(jcr, cmd)) { bdb_unlock(); return false; } if ((row = sql_fetch_row()) == NULL) { sql_free_result(); bdb_unlock(); return false; } JobLevel = (int)*row[0]; sql_free_result(); bdb_unlock(); return true; } /* * Find JobId of last job that ran. E.g. for * VERIFY_CATALOG we want the JobId of the last INIT. * For VERIFY_VOLUME_TO_CATALOG, we want the JobId of the last Job. * * Returns: true on success * false on failure */ bool BDB::bdb_find_last_jobid(JCR *jcr, const char *Name, JOB_DBR *jr) { SQL_ROW row; char ed1[50]; char esc_name[MAX_ESCAPE_NAME_LENGTH]; bdb_lock(); /* Find last full */ Dmsg2(100, "JobLevel=%d JobType=%d\n", jr->JobLevel, jr->JobType); if (jr->JobLevel == L_VERIFY_CATALOG) { bdb_escape_string(jcr, esc_name, jr->Name, strlen(jr->Name)); Mmsg(cmd, "SELECT JobId FROM Job WHERE Type='V' AND Level='%c' AND " " JobStatus IN ('T','W') AND Name='%s' AND " "ClientId=%s ORDER BY StartTime DESC LIMIT 1", L_VERIFY_INIT, esc_name, edit_int64(jr->ClientId, ed1)); } else if (jr->JobLevel == L_VERIFY_VOLUME_TO_CATALOG || jr->JobLevel == L_VERIFY_DISK_TO_CATALOG || jr->JobLevel == L_VERIFY_DATA || jr->JobType == JT_BACKUP) { if (Name) { bdb_escape_string(jcr, esc_name, (char*)Name, MIN(strlen(Name), sizeof(esc_name))); Mmsg(cmd, "SELECT JobId FROM Job WHERE Type='B' AND JobStatus IN ('T','W') AND " "Name='%s' ORDER BY StartTime DESC LIMIT 1", esc_name); } else { Mmsg(cmd, "SELECT JobId FROM Job WHERE Type='B' AND JobStatus IN ('T','W') AND " "ClientId=%s ORDER BY StartTime DESC LIMIT 1", edit_int64(jr->ClientId, ed1)); } } else { Mmsg1(&errmsg, _("Unknown Job level=%d\n"), jr->JobLevel); bdb_unlock(); return false; } Dmsg1(100, "Query: %s\n", cmd); if (!QueryDB(jcr, cmd)) { bdb_unlock(); return false; } if ((row = sql_fetch_row()) == NULL) { Mmsg1(&errmsg, _("No Job found for: %s.\n"), cmd); sql_free_result(); bdb_unlock(); return false; } jr->JobId = str_to_int64(row[0]); sql_free_result(); Dmsg1(100, "db_get_last_jobid: got JobId=%d\n", jr->JobId); if (jr->JobId <= 0) { Mmsg1(&errmsg, _("No Job found for: %s\n"), cmd); bdb_unlock(); return false; } bdb_unlock(); return true; } /* * Find Available Media (Volume) for Pool * * Find a Volume for a given PoolId, MediaType, and Status. 
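 * For example (hypothetical call): item=1 with InChanger=true asks for the
 * best candidate Volume currently loaded in the autochanger, while item=-1
 * asks for the single oldest usable Volume ordered by LastWritten.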
* * Returns: 0 on failure * numrows on success */ int BDB::bdb_find_next_volume(JCR *jcr, int item, bool InChanger, MEDIA_DBR *mr) { POOL_MEM volencrypted(PM_FNAME); SQL_ROW row = NULL; int numrows; const char *order; char esc_type[MAX_ESCAPE_NAME_LENGTH]; char esc_status[MAX_ESCAPE_NAME_LENGTH]; char ed1[50]; bdb_lock(); bdb_escape_string(jcr, esc_type, mr->MediaType, strlen(mr->MediaType)); bdb_escape_string(jcr, esc_status, mr->VolStatus, strlen(mr->VolStatus)); if (item == -1) { /* find oldest volume */ /* Find oldest volume */ if (mr->VolEncrypted != fnv_encrypted_any) { /* we have a requirement on VolEncrypted */ Mmsg(volencrypted, "AND (VolStatus!='Append' OR VolEncrypted=%d) ", mr->VolEncrypted); } Mmsg(cmd, "SELECT MediaId,VolumeName,VolJobs,VolFiles,VolBlocks," "VolBytes,VolMounts,VolErrors,VolWrites,MaxVolBytes,VolCapacityBytes," "MediaType,VolStatus,PoolId,VolRetention,VolUseDuration,MaxVolJobs," "MaxVolFiles,Recycle,Slot,FirstWritten,LastWritten,InChanger," "EndFile,EndBlock,VolType,VolParts,VolCloudParts,LastPartBytes," "LabelType,LabelDate,StorageId," "Enabled,LocationId,RecycleCount,InitialWrite," "ScratchPoolId,RecyclePoolId,VolReadTime,VolWriteTime,ActionOnPurge,CacheRetention, " "VolEncrypted " "FROM Media WHERE PoolId=%s AND MediaType='%s' " " AND (VolStatus IN ('Full', 'Append', 'Used') OR (VolStatus IN ('Recycle', 'Purged', 'Used') AND Recycle=1)) " " AND Enabled=1 " "%s" /* volencrypted */ "ORDER BY LastWritten LIMIT 1", edit_int64(mr->PoolId, ed1), esc_type, volencrypted.c_str()); item = 1; } else { POOL_MEM changer(PM_FNAME); POOL_MEM voltype(PM_FNAME); POOL_MEM exclude(PM_FNAME); /* Find next available volume */ /* ***FIXME*** * replace switch with * if (StorageId == 0) * break; * else use mr->sid_group; but it must be set!!! 
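 * (sid_group is expected to be a comma separated list of StorageIds,
 *  e.g. "3,4,7" with hypothetical ids, since it is injected into the
 *  "StorageId IN (...)" clause below.)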
*/ if (InChanger) { ASSERT(mr->sid_group); if (mr->sid_group) { Mmsg(changer, " AND InChanger=1 AND StorageId IN (%s) ", mr->sid_group); } else { Mmsg(changer, " AND InChanger=1 AND StorageId=%s ", edit_int64(mr->StorageId, ed1)); } } /* Volumes will be automatically excluded from the query, we just take the * first one of the list */ if (mr->exclude_list && *mr->exclude_list) { item = 1; Mmsg(exclude, " AND MediaId NOT IN (%s) ", mr->exclude_list); } if (strcmp(mr->VolStatus, "Recycle") == 0 || strcmp(mr->VolStatus, "Purged") == 0) { order = "AND Recycle=1 ORDER BY LastWritten ASC,MediaId"; /* take oldest that can be recycled */ } else { order = sql_media_order_most_recently_written[bdb_get_type_index()]; /* take most recently written */ } if (strcmp(mr->VolStatus, "Append") == 0 && mr->VolEncrypted != fnv_encrypted_any) { Mmsg(volencrypted, "AND VolEncrypted=%d", mr->VolEncrypted); } if (mr->VolType == 0) { Mmsg(voltype, ""); } else if (mr->VolType == B_DEDUP_DEV) { /* Special tweak for Dedup, it has two types */ Mmsg(voltype, "AND VolType IN (0,%d,%d)", B_DEDUP_DEV, B_DEDUP_OLD_DEV); } else { Mmsg(voltype, "AND VolType IN (0,%d)", mr->VolType); } Mmsg(cmd, "SELECT MediaId,VolumeName,VolJobs,VolFiles,VolBlocks," "VolBytes,VolMounts,VolErrors,VolWrites,MaxVolBytes,VolCapacityBytes," "MediaType,VolStatus,PoolId,VolRetention,VolUseDuration,MaxVolJobs," "MaxVolFiles,Recycle,Slot,FirstWritten,LastWritten,InChanger," "EndFile,EndBlock,VolType,VolParts,VolCloudParts,LastPartBytes," "LabelType,LabelDate,StorageId," "Enabled,LocationId,RecycleCount,InitialWrite," "ScratchPoolId,RecyclePoolId,VolReadTime,VolWriteTime,ActionOnPurge,CacheRetention," "VolEncrypted " "FROM Media WHERE PoolId=%s AND MediaType='%s' AND Enabled=1 " "AND VolStatus='%s' " "%s " /* volencrypted */ "%s " /* voltype */ "%s " /* changer */ "%s " /* exclude */ "%s LIMIT %d", edit_int64(mr->PoolId, ed1), esc_type, esc_status, volencrypted.c_str(), voltype.c_str(), changer.c_str(), exclude.c_str(), order, item); } Dmsg1(100, "fnextvol=%s\n", cmd); if (!QueryDB(jcr, cmd)) { bdb_unlock(); return 0; } numrows = sql_num_rows(); if (item > numrows || item < 1) { Dmsg2(050, "item=%d got=%d\n", item, numrows); Mmsg2(&errmsg, _("Request for Volume item %d greater than max %d or less than 1\n"), item, numrows); bdb_unlock(); return 0; } /* Note, we previously seeked to the row using: * sql_data_seek(item-1); * but this failed on PostgreSQL, so now we loop * over all the records. This should not be too horrible since * the maximum Volumes we look at in any case is 20. 
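 * (The loop below simply calls sql_fetch_row() item times and keeps the
 *  last row fetched, which is the row the caller asked for.)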
*/ while (item-- > 0) { if ((row = sql_fetch_row()) == NULL) { Dmsg1(050, "Fail fetch item=%d\n", item+1); Mmsg1(&errmsg, _("No Volume record found for item %d.\n"), item); sql_free_result(); bdb_unlock(); return 0; } } /* Return fields in Media Record */ mr->MediaId = str_to_int64(row[0]); bstrncpy(mr->VolumeName, row[1]!=NULL?row[1]:"", sizeof(mr->VolumeName)); mr->VolJobs = str_to_int64(row[2]); mr->VolFiles = str_to_int64(row[3]); mr->VolBlocks = str_to_int64(row[4]); mr->VolBytes = str_to_uint64(row[5]); mr->VolMounts = str_to_int64(row[6]); mr->VolErrors = str_to_int64(row[7]); mr->VolWrites = str_to_int64(row[8]); mr->MaxVolBytes = str_to_uint64(row[9]); mr->VolCapacityBytes = str_to_uint64(row[10]); bstrncpy(mr->MediaType, row[11]!=NULL?row[11]:"", sizeof(mr->MediaType)); bstrncpy(mr->VolStatus, row[12]!=NULL?row[12]:"", sizeof(mr->VolStatus)); mr->PoolId = str_to_int64(row[13]); mr->VolRetention = str_to_uint64(row[14]); mr->VolUseDuration = str_to_uint64(row[15]); mr->MaxVolJobs = str_to_int64(row[16]); mr->MaxVolFiles = str_to_int64(row[17]); mr->Recycle = str_to_int64(row[18]); mr->Slot = str_to_int64(row[19]); bstrncpy(mr->cFirstWritten, row[20]!=NULL?row[20]:"", sizeof(mr->cFirstWritten)); mr->FirstWritten = (time_t)str_to_utime(mr->cFirstWritten); bstrncpy(mr->cLastWritten, row[21]!=NULL?row[21]:"", sizeof(mr->cLastWritten)); mr->LastWritten = (time_t)str_to_utime(mr->cLastWritten); mr->InChanger = str_to_uint64(row[22]); mr->EndFile = str_to_uint64(row[23]); mr->EndBlock = str_to_uint64(row[24]); mr->VolType = str_to_int64(row[25]); mr->VolParts = str_to_int64(row[26]); mr->VolCloudParts = str_to_int64(row[27]); mr->LastPartBytes = str_to_int64(row[28]); mr->LabelType = str_to_int64(row[29]); bstrncpy(mr->cLabelDate, row[30]!=NULL?row[30]:"", sizeof(mr->cLabelDate)); mr->LabelDate = (time_t)str_to_utime(mr->cLabelDate); mr->StorageId = str_to_int64(row[31]); mr->Enabled = str_to_int64(row[32]); mr->LocationId = str_to_int64(row[33]); mr->RecycleCount = str_to_int64(row[34]); bstrncpy(mr->cInitialWrite, row[35]!=NULL?row[35]:"", sizeof(mr->cInitialWrite)); mr->InitialWrite = (time_t)str_to_utime(mr->cInitialWrite); mr->ScratchPoolId = str_to_int64(row[36]); mr->RecyclePoolId = str_to_int64(row[37]); mr->VolReadTime = str_to_int64(row[38]); mr->VolWriteTime = str_to_int64(row[39]); mr->ActionOnPurge = str_to_int64(row[40]); mr->CacheRetention = str_to_int64(row[41]); mr->VolEncrypted = str_to_int64(row[42]); sql_free_result(); bdb_unlock(); Dmsg1(050, "Rtn numrows=%d\n", numrows); return numrows; } #endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL */ bacula-15.0.3/src/cats/make_postgresql_tables.in0000644000175000017500000005516314771010173021477 0ustar bsbuildbsbuild#!/bin/sh # # shell script to create Bacula PostgreSQL tables # # Copyright (C) 2000-2023 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # # Important note: # You won't get any support for performance issue if you changed the default # schema. # bindir=@POSTGRESQL_BINDIR@ PATH="$bindir:$PATH" db_name=${db_name:-@db_name@} psql -f - -d ${db_name} $* < file1017.data if [ $? -ne 0 ]; then echo "Error while dumping file table to $PWD/file1017.data" exit 1 fi if ! psql --set ON_ERROR_STOP=1 -f - -d ${db_name} $* < file1016.data if [ $? -ne 0 ]; then echo "Error while dumping file table to $PWD/file1016.data" exit 1 fi if ! 
psql --set ON_ERROR_STOP=1 -f - -d ${db_name} $* </dev/null 2>/dev/null set client_min_messages = fatal; CREATE INDEX media_poolid_idx on Media (PoolId); CREATE INDEX media_storageid_idx ON Media (StorageId); END-OF-DATA exit 0 bacula-15.0.3/src/cats/postgresql.in0000644000175000017500000000034114771010173017134 0ustar bsbuildbsbuild#!/bin/sh # # Copyright (C) 2000-2023 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # # shell script to create Bacula PostgreSQL tables # bindir=@POSTGRESQL_BINDIR@ db_name=@db_name@ $bindir/psql $* ${db_name} bacula-15.0.3/src/cats/make_bacula_tables.in0000755000175000017500000000167614771010173020526 0ustar bsbuildbsbuild#!/bin/sh # # Copyright (C) 2000-2023 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # # This routine makes the appropriately configured # Bacula tables for PostgreSQL, Ingres, MySQL, or SQLite. # # can be used to change the current user with su pre_command="sh -c" default_db_type=@DEFAULT_DB_TYPE@ # # See if the first argument is a valid backend name. # If so the user overrides the default database backend. # if [ $# -gt 0 ]; then case $1 in sqlite3) db_type=$1 shift ;; mysql) db_type=$1 shift ;; postgresql) db_type=$1 shift ;; *) ;; esac fi # # If no new db_type is gives use the default db_type. # if [ -z "${db_type}" ]; then db_type="${default_db_type}" fi if [ $db_type = postgresql -a "$UID" = 0 ]; then pre_command="su - postgres -c" fi echo "Making ${db_type} tables" $pre_command "@scriptdir@/make_${db_type}_tables $*" bacula-15.0.3/src/cats/update_sqlite3_tables.in0000644000175000017500000005214014771010173021215 0ustar bsbuildbsbuild#!/bin/sh # # Copyright (C) 2000-2023 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # # Shell script to update SQLite3 tables from Bacula # OLDVERSION=1022 NEWVERSION=1026 echo " " echo "This script will update a Bacula SQLite3 database from version 12-14,$OLDVERSION to $NEWVERSION" echo " which is needed to convert from any Bacula Communty to version 15.0.x" echo " " bindir=@SQLITE_BINDIR@ PATH="$bindir:$PATH" cd @working_dir@ db_name=@db_name@ DBVERSION=`sqlite3 ${db_name}.db <bdb_escape_string(jcr, pattern, p, len); } void set_filename(char *p) { uint32_t len = strlen(p); filename = check_pool_memory_size(filename, len*2+1); db->bdb_escape_string(jcr, filename, p, len); } /* Get the root point */ DBId_t get_root(); /* It's much better to access Path though their PathId, it * avoids mistakes with string encoding */ bool ch_dir(DBId_t pathid); /* * Returns true if the directory exists */ bool ch_dir(const char *path); bool ls_all_files(); /* Returns true if we have more files to read */ bool ls_files(); /* Returns true if we have more files to read */ bool ls_dirs(); /* Returns true if we have more dir to read */ void ls_special_dirs(); /* get . and .. 
*/ void get_all_file_versions(DBId_t pathid, FileId_t fnid, char *client) { alist clients(1, not_owned_by_alist); clients.append(client); get_all_file_versions(pathid, fnid, &clients); }; void get_all_file_versions(DBId_t pathid, FileId_t fnid, alist *clients); void update_cache(); /* bfileview */ void fv_update_cache(); void set_see_all_versions(bool val) { see_all_versions = val; } void set_see_copies(bool val) { see_copies = val; } int filter_jobid(); /* Call after set_username, returns the number of jobids */ void set_username(char *user) { if (user) { username = bstrdup(user); } }; char *escape_list(alist *list); bool copy_acl(alist *list) { if (!list || (list->size() > 0 && (strcasecmp((char *)list->get(0), "*all*") == 0))) { return false; } return true; }; /* Keep a pointer to various ACLs */ void set_job_acl(alist *lst) { job_acl = copy_acl(lst)?lst:NULL; use_acl = true; }; void set_fileset_acl(alist *lst) { fileset_acl = copy_acl(lst)?lst:NULL; use_acl = true; }; /* For client, we copy ACL from ClientACL and RestoreClientACL * (bvfs is handling only Restore) */ void set_client_acl(alist *client, alist *restore) { client_acl = New(alist(10, not_owned_by_alist)); /* Everything is authorized */ if (client && client->size() == 1 && strcasecmp((char*)client->get(0), "*all*") == 0) { /* nothing to do */ /* Everything is authorized */ } else if (restore && restore->size() == 1 && strcasecmp((char*)restore->get(0), "*all*") == 0) { /* nothing to do */ } else { /* We copy one by one */ char *elt; if (client) { foreach_alist(elt, client) { client_acl->append(elt); } } if (restore) { foreach_alist(elt, restore) { client_acl->append(elt); } } } /* Nothing in the list, we can keep an empty one */ if (client_acl->size() == 0) { delete client_acl; client_acl = NULL; } use_acl = true; }; void set_pool_acl(alist *lst) { pool_acl = copy_acl(lst)?lst:NULL; use_acl = true; }; void set_handler(DB_RESULT_HANDLER *h, void *ctx) { list_entries = h; user_data = ctx; }; DBId_t get_pwd() { return pwd_id; }; ATTR *get_attr() { return attr; } JCR *get_jcr() { return jcr; } void reset_offset() { offset=0; } void next_offset() { offset+=limit; } /* Clear all cache */ void clear_cache(); /* Compute restore list */ bool compute_restore_list(char *fileid, char *dirid, char *fileindex, char *output_table); /* Drop previous restore list */ bool drop_restore_list(char *output_table); /* for internal use */ int _handle_path(void *, int, char **); /* Handle Delta parts if any */ void insert_missing_delta(char *output_table, int64_t *res); /* Handle hardlinks if any */ bool insert_hardlinks(char *output_table); /* Handle hardlinks if any */ bool insert_hardlinks_fast(char *output_table); /* Check if this function can be used */ bool can_use_insert_hardlinks_fast(); /* Get a list of volumes */ void get_volumes(FileId_t fileid); /* Get Delta parts of a file */ bool get_delta(FileId_t fileid); /* Query handler to check UID of the file with the current uid/gid */ int checkuid_cb(int fields, char **row); /* Query handler to check hardlinks */ int checkhardlinks_cb(int fields, char **row); /* Check if the parent directories are accessible */ bool check_path_access(DBId_t pathid); /* Check if the full path is authorized by the current set of ACLs */ bool check_full_path_access(int nb, sellist *sel, db_list_ctx *toexcl); void set_uid(uid_t u, gid_t g) { if (u == 0) { return; } if (!uid_acl) { uid_acl = New(alist(5, not_owned_by_alist)); gid_acl = New(alist(5, not_owned_by_alist)); } uid_acl->append((void*)(intptr_t)u); 
gid_acl->append((void*)(intptr_t)g); use_acl = true; }; alist *uid_acl; alist *gid_acl; alist *dir_acl; int check_dirs; /* When it's 1, we check the against directory_acl */ bool can_access(struct stat *st); bool can_access_dir(const char *path); bool check_permissions(char *output_table); bool delete_fileid(char *fileids); private: Bvfs(const Bvfs &); /* prohibit pass by value */ Bvfs & operator = (const Bvfs &); /* prohibit class assignment */ JCR *jcr; BDB *db; POOLMEM *jobids; char *username; /* Used with Bweb */ POOLMEM *prev_dir; /* ls_dirs query returns all versions, take the 1st one */ POOLMEM *pattern; POOLMEM *filename; POOLMEM *tmp; POOLMEM *escaped_list; /* Pointer to Console ACL */ alist *job_acl; alist *client_acl; alist *restoreclient_acl; alist *fileset_acl; alist *pool_acl; char *last_dir_acl; htable *hardlinks; /* Check if we already saw a given hardlink */ alist *missing_hardlinks; /* list with all the missing jobid/fileindex1 */ ATTR *attr; /* Can be use by handler to call decode_stat() */ uint32_t limit; uint32_t offset; uint32_t nb_record; /* number of records of the last query */ DBId_t pwd_id; /* Current pathid */ bool see_all_versions; bool see_copies; bool compute_delta; /* Restrict the ouput with some uid/gid */ db_list_ctx fileid_to_delete; /* used also by check_path_access */ bool need_to_check_permissions(); bool use_acl; /* bfileview */ void fv_get_big_files(int64_t pathid, int64_t min_size, int32_t limit); void fv_update_size_and_count(int64_t pathid, int64_t size, int64_t count); void fv_compute_size_and_count(int64_t pathid, int64_t *size, int64_t *count); void fv_get_current_size_and_count(int64_t pathid, int64_t *size, int64_t *count); void fv_get_size_and_count(int64_t pathid, int64_t *size, int64_t *count); DB_RESULT_HANDLER *list_entries; void *user_data; }; #define bvfs_is_dir(row) ((row)[BVFS_Type][0] == BVFS_DIR_RECORD) #define bvfs_is_file(row) ((row)[BVFS_Type][0] == BVFS_FILE_RECORD) #define bvfs_is_version(row) ((row)[BVFS_Type][0] == BVFS_FILE_VERSION) #define bvfs_is_volume_list(row) ((row)[BVFS_Type][0] == BVFS_VOLUME_LIST) #define bvfs_is_delta_list(row) ((row)[BVFS_Type][0] == BVFS_DELTA_RECORD) void bvfs_update_fv_cache(JCR *jcr, BDB *mdb, char *jobids); int bvfs_update_path_hierarchy_cache(JCR *jcr, BDB *mdb, char *jobids); void bvfs_update_cache(JCR *jcr, BDB *mdb); char *bvfs_parent_dir(char *path); /* Return the basename of the with the trailing / (update the given string) * TODO: see in the rest of bacula if we don't have * this function already */ char *bvfs_basename_dir(char *path); #endif /* __BVFS_H_ */ bacula-15.0.3/src/cats/install-default-backend.in0000755000175000017500000000204014771010173021407 0ustar bsbuildbsbuild#!/bin/sh # # Copyright (C) 2000-2023 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # LIBTOOL="@BUILD_DIR@/libtool" if [ $# -lt 3 ]; then echo "Usage: $0 " exit 1 fi default_backend=$1 library_version=$2 install_dir=$3 # # Find out what the shared lib extension is for this platform. # eval `${LIBTOOL} --config | grep shrext_cmds` eval SHLIB_EXT=$shrext_cmds if [ -z "${SHLIB_EXT}" ]; then echo "Failed to determine default shared library extension" exit 1 fi if [ -f ${install_dir}/libbaccats-${default_backend}-${library_version}${SHLIB_EXT} ]; then # # Create a default catalog library pointing to one of the shared libs. 
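# For example (hypothetical values): with default_backend=postgresql,
# library_version=15.0.3 and SHLIB_EXT=.so, the commands below produce the
# symlink libbaccats-15.0.3.so -> libbaccats-postgresql.so inside ${install_dir}.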
# rm -f ${install_dir}/libbaccats-${library_version}${SHLIB_EXT} # # Create a relative symlink to the default backend # As all backends are in the same directory anyhow this should # always work. # ln -s libbaccats-${default_backend}${SHLIB_EXT} \ ${install_dir}/libbaccats-${library_version}${SHLIB_EXT} fi exit 0 bacula-15.0.3/src/cats/bdb.h0000644000175000017500000004555714771010173015323 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Catalog DB Interface class * * Written by Kern E. Sibbald * */ #ifndef __BDB_H_ #define __BDB_H_ 1 /* * These enums can be used to build queries that respects * Bacula Restricted Consoles. */ typedef enum { DB_ACL_JOB = 1, DB_ACL_CLIENT, DB_ACL_STORAGE, DB_ACL_POOL, DB_ACL_FILESET, DB_ACL_RCLIENT, DB_ACL_BCLIENT, DB_ACL_RBCLIENT, DB_ACL_PATH, DB_ACL_LOG, DB_ACL_LAST /* Keep last */ } DB_ACL_t; /* * Bits for the opts argument of db_get_job_list() * If neither DBL_ALL_FILES nor DBL_DELETED, return non-deleted files */ #define DBL_NONE 0 /* no options */ #define DBL_USE_DELTA (1<<0) /* Use delta indexes */ #define DBL_ALL_FILES (1<<1) /* Return all files including deleted ones */ #define DBL_DELETED (1<<2) /* Return only deleted files */ #define DBL_USE_MD5 (1<<3) /* Include md5 */ #define DBL_USE_OBJ (1<<4) /* Include RestoreObjects */ /* Turn the num to a bit field */ #define DB_ACL_BIT(x) (1<$1.sql ;; mysql) BINDIR=@MYSQL_BINDIR@ if test $# -gt 2; then MYSQLPASSWORD=" --password=$3" else MYSQLPASSWORD="" fi if test $# -gt 3; then MYSQLHOST=" --host=$4" else MYSQLHOST="" fi ${BINDIR}/mysqldump -u ${user}${MYSQLPASSWORD}${MYSQLHOST} -f --opt $1 >$1.sql ;; postgresql) BINDIR=@POSTGRESQL_BINDIR@ if test $# -gt 2; then PGPASSWORD=$3 export PGPASSWORD fi if test $# -gt 3; then PGHOST=" --host=$4" else PGHOST="" fi # you could also add --compress for compression. See man pg_dump exec ${BINDIR}/pg_dump -c $PGHOST -U $user $1 >$1.sql ;; esac # # To read back a MySQL database use: # cd @working_dir@ # rm -f ${BINDIR}/../var/bacula/* # mysql /tmp/$$ DBVERSION=`sed -n -e 's/^VersionId: \(.*\)$/\1/p' /tmp/$$` } getVersion if [ "x$DBVERSION" = x ]; then echo " " echo "Unable to detect database version, you can specify connection information" echo "on the command line." echo "Error. Cannot upgrade this database." echo " " exit 1 fi if [ "$DBVERSION" -eq "$NEWVERSION" ] ; then echo "Current ${db_name} database is up-to-date (version $NEWVERSION)." exit 0 fi if [ "$DBVERSION" -lt 1014 -o "$DBVERSION" -gt $OLDVERSION ] ; then if [ "$DBVERSION" -lt 12 -o "$DBVERSION" -gt 16 ] ; then echo " " echo "The existing database is version $DBVERSION !!" echo "This script can only update an existing version 12-16 or 1014-$OLDVERSION database to version $NEWVERSION." echo "Error. Cannot upgrade this database." 
echo " " exit 1 fi fi # For all versions, we need to create the Index on Media(StorageId) # It may fail, but it's not a big problem # mysql $* </dev/null 2> /dev/null # CREATE INDEX media_storageid_idx ON Media (StorageId); # END-OF-DATA if [ "$DBVERSION" -eq 12 ] ; then if mysql $* < Found existing $$filename, installing new file as $$destname"; \ else \ destname=$$filename; \ fi; \ echo "$(INSTALL_OSCRIPT) $$filename $(DESTDIR)$(scriptdir)/$$destname"; \ $(INSTALL_OSCRIPT) $$filename $(DESTDIR)$(scriptdir)/$$destname @filename=make_catalog_backup; \ if test -f $(DESTDIR)$(scriptdir)/$$filename; then \ destname=$$filename.new; \ echo " ==> Found existing $$filename, installing new file as $$destname"; \ else \ destname=$$filename; \ fi; \ echo "$(INSTALL_OSCRIPT) $$filename $(DESTDIR)$(scriptdir)/$$destname"; \ $(INSTALL_OSCRIPT) $$filename $(DESTDIR)$(scriptdir)/$$destname @filename=delete_catalog_backup; \ if test -f $(DESTDIR)$(scriptdir)/$$filename; then \ destname=$$filename.new; \ echo " ==> Found existing $$filename, installing new file as $$destname"; \ else \ destname=$$filename; \ fi; \ echo "$(INSTALL_OSCRIPT) $$filename $(DESTDIR)$(scriptdir)/$$destname"; \ $(INSTALL_OSCRIPT) $$filename $(DESTDIR)$(scriptdir)/$$destname uninstall: @LIBTOOL_UNINSTALL_TARGET@ @INCLUDE_UNINSTALL_TARGET@ @for db_type in @DB_BACKENDS@; do \ (cd $(DESTDIR)$(scriptdir); $(RMF) create_$${db_type}_database); \ (cd $(DESTDIR)$(scriptdir); $(RMF) update_$${db_type}_tables); \ (cd $(DESTDIR)$(scriptdir); $(RMF) make_$${db_type}_tables); \ (cd $(DESTDIR)$(scriptdir); $(RMF) grant_$${db_type}_privileges); \ (cd $(DESTDIR)$(scriptdir); $(RMF) drop_$${db_type}_tables); \ (cd $(DESTDIR)$(scriptdir); $(RMF) drop_$${db_type}_database); \ done (cd $(DESTDIR)$(scriptdir); $(RMF) create_bacula_database) (cd $(DESTDIR)$(scriptdir); $(RMF) update_bacula_tables) (cd $(DESTDIR)$(scriptdir); $(RMF) make_bacula_tables) (cd $(DESTDIR)$(scriptdir); $(RMF) grant_bacula_privileges) (cd $(DESTDIR)$(scriptdir); $(RMF) drop_bacula_tables) (cd $(DESTDIR)$(scriptdir); $(RMF) drop_bacula_database) (cd $(DESTDIR)$(scriptdir); $(RMF) make_catalog_backup) (cd $(DESTDIR)$(scriptdir); $(RMF) make_catalog_backup.pl) (cd $(DESTDIR)$(scriptdir); $(RMF) delete_catalog_backup) # Semi-automatic generation of dependencies: # Use gcc -M because X11 `makedepend' doesn't work on all systems # and it also includes system headers. # `semi'-automatic since dependencies are generated at distribution time. 
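# For example (illustrative only), for sql_get.c the compiler emits a
# dependency line roughly like:
#   sql_get.lo: sql_get.c bacula.h cats.h bdb.h ...
# and those lines end up below the "DO NOT DELETE" marker at the end of
# this Makefile.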
depend: @$(MV) Makefile Makefile.bak @$(SED) "/^# DO NOT DELETE:/,$$ d" Makefile.bak > Makefile @$(ECHO) "# DO NOT DELETE: nice dependency list follows" >> Makefile @for src in $(LIBBACSQL_SRCS); do \ $(CXX) -S -M -MT `basename $$src .c`$(DEFAULT_OBJECT_TYPE) $(CPPFLAGS) $(XINC) $(DEFS) -I$(srcdir) -I$(basedir) $$src >> Makefile; \ done @for src in $(MYSQL_SRCS); do \ $(CXX) -S -M -MT `basename $$src .c`$(DEFAULT_OBJECT_TYPE) $(CPPFLAGS) $(XINC) $(DEFS) -I$(srcdir) -I$(basedir) $(MYSQL_INCLUDE) $$src >> Makefile; \ done @for src in $(POSTGRESQL_SRCS); do \ $(CXX) -S -M -MT `basename $$src .c`$(DEFAULT_OBJECT_TYPE) $(CPPFLAGS) $(XINC) $(DEFS) -I$(srcdir) -I$(basedir) $(POSTGRESQL_INCLUDE) $$src >> Makefile; \ done @for src in $(SQLITE_SRCS); do \ $(CXX) -S -M -MT `basename $$src .c`$(DEFAULT_OBJECT_TYPE) $(CPPFLAGS) $(XINC) $(DEFS) -I$(srcdir) -I$(basedir) $(SQLITE_INCLUDE) $$src >> Makefile; \ done # @for src in $(DBI_SRCS); do \ # $(CXX) -S -M -MT `basename $$src .c`$(DEFAULT_OBJECT_TYPE) $(CPPFLAGS) $(XINC) $(DEFS) -I$(srcdir) -I$(basedir) $(DBI_INCLUDE) $$src >> Makefile; \ # done @if test -f Makefile ; then \ $(RMF) Makefile.bak; \ else \ $(MV) Makefile.bak Makefile; \ echo " ===== Something went wrong in make depend ====="; \ fi # ----------------------------------------------------------------------- # DO NOT DELETE: nice dependency list follows bacula-15.0.3/src/cats/drop_bacula_tables.in0000755000175000017500000000166614771010173020554 0ustar bsbuildbsbuild#!/bin/sh # # Copyright (C) 2000-2023 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # # Drop Bacula tables -- works for whatever is configured, # MySQL, SQLite, Ingres, or PostgreSQL # # can be used to change the current user with su pre_command="sh -c" default_db_type=@DEFAULT_DB_TYPE@ # # See if the first argument is a valid backend name. # If so the user overrides the default database backend. # if [ $# -gt 0 ]; then case $1 in sqlite3) db_type=$1 shift ;; mysql) db_type=$1 shift ;; postgresql) db_type=$1 shift ;; *) ;; esac fi # # If no new db_type is gives use the default db_type. # if [ -z "${db_type}" ]; then db_type="${default_db_type}" fi if [ $db_type = postgresql -a "$UID" = 0 ]; then pre_command="su - postgres -c" fi $pre_command "@scriptdir@/drop_${db_type}_tables $*" echo "Dropped ${db_type} tables" bacula-15.0.3/src/cats/sql_get.c0000644000175000017500000021374214771010173016216 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /** * Bacula Catalog Database Get record interface routines * Note, these routines generally get a record by id or * by name. 
If more logic is involved, the routine * should be in find.c * * Written by Kern Sibbald, March 2000 * */ #include "bacula.h" #if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL #include "cats.h" /* ----------------------------------------------------------------------- * * Generic Routines (or almost generic) * * ----------------------------------------------------------------------- */ /* * Given a full filename (with path), look up the File record * (with attributes) in the database. * * Returns: false on failure * true on success with the File record in FILE_DBR */ bool BDB::bdb_get_file_attributes_record(JCR *jcr, char *afname, JOB_DBR *jr, FILE_DBR *fdbr) { bool ok; Dmsg1(500, "db_get_file_att_record fname=%s \n", afname); bdb_lock(); split_path_and_file(jcr, this, afname); fdbr->PathId = bdb_get_path_record(jcr); esc_name = check_pool_memory_size(esc_name, 2*fnl+2); bdb_escape_string(jcr, esc_name, fname, fnl); fdbr->Filename = esc_name; ok = bdb_get_file_record(jcr, jr, fdbr); bdb_unlock(); return ok; } /** * Get a File record * * DO NOT use Jmsg in this routine. * * Note in this routine, we do not use Jmsg because it may be * called to get attributes of a non-existent file, which is * "normal" if a new file is found during Verify. * * The following is a bit of a kludge: because we always backup a * directory entry, we can end up with two copies of the directory * in the backup. One is when we encounter the directory and find * we cannot recurse into it, and the other is when we find an * explicit mention of the directory. This can also happen if the * use includes the directory twice. In this case, Verify * VolumeToCatalog fails because we have two copies in the catalog, and * only the first one is marked (twice). So, when calling from Verify, * VolumeToCatalog jr is not NULL, and we know jr->FileIndex is the fileindex * of the version of the directory/file we actually want and do * a more explicit SQL search. 
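 * For example (hypothetical values): if the directory /home/user/ was stored
 * twice in the same job, once as FileIndex 10 and again as FileIndex 250,
 * Verify VolumeToCatalog passes jr->FileIndex=250 and only that exact row
 * is matched by the query below.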
* * Returns: false on failure * true on success * */ bool BDB::bdb_get_file_record(JCR *jcr, JOB_DBR *jr, FILE_DBR *fdbr) { SQL_ROW row; bool ok = false; char ed1[50], ed2[50], ed3[50], ed4[50]; switch (jcr->getJobLevel()) { case L_VERIFY_VOLUME_TO_CATALOG: Mmsg(cmd, "SELECT FileId, LStat, MD5, FileIndex FROM File WHERE File.JobId=%s AND File.PathId=%s AND " "File.Filename='%s' AND File.FileIndex=%d", edit_int64(fdbr->JobId, ed1), edit_int64(fdbr->PathId, ed2), fdbr->Filename, jr->FileIndex); break; case L_VERIFY_DISK_TO_CATALOG: Mmsg(cmd, "SELECT FileId, LStat, MD5, FileIndex FROM File,Job WHERE " "File.JobId=Job.JobId AND File.PathId=%s AND " "File.Filename='%s' AND Job.Type='B' AND Job.JobStatus IN ('T','W') AND " "ClientId=%s AND Job.JobId=%s ORDER BY StartTime DESC LIMIT 1", edit_int64(fdbr->PathId, ed1), fdbr->Filename, edit_int64(jr->ClientId,ed3), edit_uint64(jr->JobId, ed4)); break; default: if (fdbr->PathId && fdbr->Filename) { Mmsg(cmd, "SELECT FileId, LStat, MD5, FileIndex FROM File WHERE File.JobId=%s AND File.PathId=%s AND " "File.Filename='%s' ORDER BY DeltaSeq DESC LIMIT 1", edit_int64(fdbr->JobId, ed1), edit_int64(fdbr->PathId, ed2), fdbr->Filename); } else if (fdbr->FileId) { Mmsg(cmd, "SELECT FileId, LStat, MD5, FileIndex FROM File WHERE File.JobId=%s AND File.FileId=%s", edit_int64(fdbr->JobId, ed1), edit_int64(fdbr->FileId, ed2)); } else { Dmsg0(100, "Wrong arguments\n"); return ok; /* Arguments missing */ } break; } Dmsg3(450, "Get_file_record JobId=%u Filename=%s PathId=%u\n", fdbr->JobId, fdbr->Filename, fdbr->PathId); Dmsg1(100, "Query=%s\n", cmd); if (QueryDB(jcr, cmd)) { Dmsg1(100, "get_file_record sql_num_rows()=%d\n", sql_num_rows()); if (sql_num_rows() >= 1) { if ((row = sql_fetch_row()) == NULL) { Mmsg1(errmsg, _("Error fetching row: %s\n"), sql_strerror()); } else { fdbr->FileId = (FileId_t)str_to_int64(row[0]); bstrncpy(fdbr->LStat, row[1], sizeof(fdbr->LStat)); bstrncpy(fdbr->Digest, row[2], sizeof(fdbr->Digest)); fdbr->FileIndex = (DBId_t)str_to_int64(row[3]); ok = true; if (sql_num_rows() > 1) { Mmsg3(errmsg, _("get_file_record want 1 got rows=%d PathId=%s Filename=%s\n"), sql_num_rows(), edit_int64(fdbr->PathId, ed1), fdbr->Filename); Dmsg1(000, "=== Problem! 
%s", errmsg); } } } else { Mmsg3(errmsg, _("File record for PathId=%s FileId=%s Filename=%s not found.\n"), edit_int64(fdbr->PathId, ed1), edit_int64(fdbr->FileId, ed2), NPRTB(fdbr->Filename)); } sql_free_result(); } else { Mmsg(errmsg, _("File record not found in Catalog.\n")); } return ok; } /** * Get path record * Returns: 0 on failure * PathId on success * * DO NOT use Jmsg in this routine (see notes for get_file_record) */ int BDB::bdb_get_path_record(JCR *jcr) { SQL_ROW row; uint32_t PathId = 0; esc_name = check_pool_memory_size(esc_name, 2*pnl+2); bdb_escape_string(jcr, esc_name, path, pnl); if (cached_path_id != 0 && cached_path_len == pnl && strcmp(cached_path, path) == 0) { return cached_path_id; } Mmsg(cmd, "SELECT PathId FROM Path WHERE Path='%s'", esc_name); if (QueryDB(jcr, cmd)) { char ed1[30]; if (sql_num_rows() > 1) { Mmsg2(errmsg, _("More than one Path!: %s for path: %s\n"), edit_uint64(sql_num_rows(), ed1), path); Jmsg(jcr, M_WARNING, 0, "%s", errmsg); } /* Even if there are multiple paths, take the first one */ if (sql_num_rows() >= 1) { if ((row = sql_fetch_row()) == NULL) { Mmsg1(errmsg, _("error fetching row: %s\n"), sql_strerror()); } else { PathId = str_to_int64(row[0]); if (PathId <= 0) { Mmsg2(errmsg, _("Get DB path record %s found bad record: %s\n"), cmd, edit_int64(PathId, ed1)); PathId = 0; } else { /* Cache path */ if (PathId != cached_path_id) { cached_path_id = PathId; cached_path_len = pnl; pm_strcpy(cached_path, path); } } } } else { Mmsg1(errmsg, _("Path record: %s not found.\n"), path); } sql_free_result(); } else { Mmsg(errmsg, _("Path record: %s not found in Catalog.\n"), path); } return PathId; } /** * Get Job record for given JobId or Job name * Returns: false on failure * true on success */ bool BDB::bdb_get_job_record(JCR *jcr, JOB_DBR *jr) { SQL_ROW row; char ed1[50]; char esc[MAX_ESCAPE_NAME_LENGTH]; bdb_lock(); if (jr->JobId == 0) { if (jr->Job[0]) { bdb_escape_string(jcr, esc, jr->Job, strlen(jr->Job)); Mmsg(cmd, "SELECT VolSessionId,VolSessionTime," "PoolId,StartTime,EndTime,JobFiles,JobBytes,JobTDate,Job,JobStatus," "Type,Level,ClientId,Job.Name,PriorJobId,RealEndTime,JobId,FileSetId," "SchedTime,RealStartTime,ReadBytes,HasBase,PurgedFiles,PriorJob,Comment," "Reviewed,isVirtualFull,WriteStorageId,LastReadstorageId,StatusInfo,LastReadDevice,WriteDevice,Encrypted, Client.Name AS Client " "FROM Job JOIN Client USING (ClientId) WHERE Job='%s'", esc); } else if (jr->PriorJob[0]) { bdb_escape_string(jcr, esc, jr->PriorJob, strlen(jr->PriorJob)); Mmsg(cmd, "SELECT VolSessionId,VolSessionTime," "PoolId,StartTime,EndTime,JobFiles,JobBytes,JobTDate,Job,JobStatus," "Type,Level,ClientId,Job.Name AS Name,PriorJobId,RealEndTime,JobId,FileSetId," "SchedTime,RealStartTime,ReadBytes,HasBase,PurgedFiles,PriorJob,Comment," "Reviewed,isVirtualFull,WriteStorageId,LastReadstorageId,StatusInfo,LastReadDevice,WriteDevice,Encrypted, Client.Name AS Client " "FROM Job JOIN Client USING (ClientId) WHERE PriorJob='%s' ORDER BY Type ASC LIMIT 1", esc); } else { Mmsg0(errmsg, _("No Job found\n")); bdb_unlock(); return false; /* failed */ } } else { Mmsg(cmd, "SELECT VolSessionId,VolSessionTime," "PoolId,StartTime,EndTime,JobFiles,JobBytes,JobTDate,Job,JobStatus," "Type,Level,ClientId,Job.Name AS Name,PriorJobId,RealEndTime,JobId,FileSetId," "SchedTime,RealStartTime,ReadBytes,HasBase,PurgedFiles,PriorJob,Comment," "Reviewed,isVirtualFull,WriteStorageId,LastReadStorageId,StatusInfo,LastReadDevice,WriteDevice,Encrypted, Client.Name AS Client " "FROM Job JOIN Client USING 
(ClientId) WHERE JobId=%s", edit_int64(jr->JobId, ed1)); } if (!QueryDB(jcr, cmd)) { bdb_unlock(); return false; /* failed */ } if ((row = sql_fetch_row()) == NULL) { Mmsg1(errmsg, _("No Job found for JobId %s\n"), edit_int64(jr->JobId, ed1)); sql_free_result(); bdb_unlock(); /* If a prior job is set, we can retry with it */ if (jr->Job[0] && jr->PriorJob[0]) { jr->Job[0] = 0; return bdb_get_job_record(jcr, jr); } return false; } jr->VolSessionId = str_to_uint64(row[0]); jr->VolSessionTime = str_to_uint64(row[1]); jr->PoolId = str_to_int64(row[2]); bstrncpy(jr->cStartTime, row[3]!=NULL?row[3]:"", sizeof(jr->cStartTime)); bstrncpy(jr->cEndTime, row[4]!=NULL?row[4]:"", sizeof(jr->cEndTime)); jr->JobFiles = str_to_int64(row[5]); jr->JobBytes = str_to_int64(row[6]); jr->JobTDate = str_to_int64(row[7]); bstrncpy(jr->Job, row[8]!=NULL?row[8]:"", sizeof(jr->Job)); jr->JobStatus = row[9]!=NULL?(int)*row[9]:JS_FatalError; jr->JobType = row[10]!=NULL?(int)*row[10]:JT_BACKUP; jr->JobLevel = row[11]!=NULL?(int)*row[11]:L_NONE; jr->ClientId = str_to_uint64(row[12]!=NULL?row[12]:(char *)""); bstrncpy(jr->Name, row[13]!=NULL?row[13]:"", sizeof(jr->Name)); jr->PriorJobId = str_to_uint64(row[14]!=NULL?row[14]:(char *)""); bstrncpy(jr->cRealEndTime, row[15]!=NULL?row[15]:"", sizeof(jr->cRealEndTime)); if (jr->JobId == 0) { jr->JobId = str_to_int64(row[16]); } jr->FileSetId = str_to_int64(row[17]); bstrncpy(jr->cSchedTime, row[18]!=NULL?row[18]:"", sizeof(jr->cSchedTime)); bstrncpy(jr->cRealStartTime, row[19]!=NULL?row[19]:"", sizeof(jr->cRealStartTime)); jr->ReadBytes = str_to_int64(row[20]); jr->StartTime = str_to_utime(jr->cStartTime); jr->SchedTime = str_to_utime(jr->cSchedTime); jr->EndTime = str_to_utime(jr->cEndTime); jr->RealEndTime = str_to_utime(jr->cRealEndTime); jr->RealStartTime = str_to_utime(jr->cRealStartTime); jr->HasBase = str_to_int64(row[21]); jr->PurgedFiles = str_to_int64(row[22]); bstrncpy(jr->PriorJob, NPRTB(row[23]), sizeof(jr->PriorJob)); bstrncpy(jr->Comment, NPRTB(row[24]), sizeof(jr->Comment)); jr->Reviewed = str_to_int64(row[25]); jr->isVirtualFull = str_to_int64(row[26]); jr->WriteStorageId = str_to_int64(row[27]); jr->LastReadStorageId = str_to_int64(row[28]); bstrncpy(jr->StatusInfo, NPRTB(row[29]), sizeof(jr->StatusInfo)); bstrncpy(jr->LastReadDevice, NPRTB(row[30]), sizeof(jr->LastReadDevice)); bstrncpy(jr->WriteDevice, NPRTB(row[31]), sizeof(jr->WriteDevice)); jr->Encrypted = str_to_int64(row[32]); bstrncpy(jr->Client, NPRTB(row[33]), sizeof(jr->Client)); sql_free_result(); bdb_unlock(); return true; } /** * Find VolumeNames for a given JobId * Returns: 0 on error or no Volumes found * number of volumes on success * Volumes are concatenated in VolumeNames * separated by a vertical bar (|) in the order * that they were written. 
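 * For example (hypothetical names): a job that spanned three tapes yields
 * *VolumeNames = "Vol0001|Vol0002|Vol0003" and, when a buffer is supplied,
 * LastVolumeName = "Vol0003".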
* * Returns: number of volumes on success */ int BDB::bdb_get_job_volume_names(JCR *jcr, JobId_t JobId, POOLMEM **VolumeNames, char *LastVolumeName, int maxlen) { SQL_ROW row; char ed1[50]; int stat = 0; int i; bdb_lock(); /* Get one entry per VolumeName, but "sort" by VolIndex */ Mmsg(cmd, "SELECT VolumeName,MAX(VolIndex) FROM JobMedia,Media WHERE " "JobMedia.JobId=%s AND JobMedia.MediaId=Media.MediaId " "GROUP BY VolumeName " "ORDER BY 2 ASC", edit_int64(JobId,ed1)); Dmsg1(130, "VolNam=%s\n", cmd); *VolumeNames[0] = '\0'; if (LastVolumeName != NULL && maxlen>0) { LastVolumeName[0] = '\0'; } if (QueryDB(jcr, cmd)) { Dmsg1(130, "Num rows=%d\n", sql_num_rows()); if (sql_num_rows() <= 0) { Mmsg1(errmsg, _("No volumes found for JobId=%d\n"), JobId); stat = 0; } else { stat = sql_num_rows(); for (i=0; i < stat; i++) { if ((row = sql_fetch_row()) == NULL) { Mmsg2(errmsg, _("Error fetching row %d: ERR=%s\n"), i, sql_strerror()); Jmsg(jcr, M_ERROR, 0, "%s", errmsg); stat = 0; break; } else { if (*VolumeNames[0] != 0) { pm_strcat(VolumeNames, "|"); } pm_strcat(VolumeNames, row[0]); } } if (LastVolumeName != NULL && stat > 0) { bstrncat(LastVolumeName, row[0], maxlen); /* remember the last written volume */ } } sql_free_result(); } else { Mmsg(errmsg, _("No Volume for JobId %d found in Catalog.\n"), JobId); } bdb_unlock(); return stat; } /** * Find Volume parameters for a give JobId * Returns: 0 on error or no Volumes found * number of volumes on success * List of Volumes and start/end file/blocks (malloced structure!) * * Returns: number of volumes on success */ int BDB::bdb_get_job_volume_parameters(JCR *jcr, JobId_t JobId, VOL_PARAMS **VolParams) { SQL_ROW row; char ed1[50]; int stat = 0; int i; VOL_PARAMS *Vols = NULL; bdb_lock(); Mmsg(cmd, "SELECT VolumeName,MediaType,FirstIndex,LastIndex,StartFile," "JobMedia.EndFile,StartBlock,JobMedia.EndBlock," "Slot,StorageId,InChanger" " FROM JobMedia,Media WHERE JobMedia.JobId=%s" " AND JobMedia.MediaId=Media.MediaId ORDER BY VolIndex,JobMediaId", edit_int64(JobId, ed1)); Dmsg1(130, "VolNam=%s\n", cmd); if (QueryDB(jcr, cmd)) { Dmsg1(200, "Num rows=%d\n", sql_num_rows()); if (sql_num_rows() <= 0) { Mmsg1(errmsg, _("No volumes found for JobId=%d\n"), JobId); stat = 0; } else { stat = sql_num_rows(); DBId_t *SId = NULL; if (stat > 0) { *VolParams = Vols = (VOL_PARAMS *)malloc(stat * sizeof(VOL_PARAMS)); SId = (DBId_t *)malloc(stat * sizeof(DBId_t)); } for (i=0; i < stat; i++) { if ((row = sql_fetch_row()) == NULL) { Mmsg2(errmsg, _("Error fetching row %d: ERR=%s\n"), i, sql_strerror()); Jmsg(jcr, M_ERROR, 0, "%s", errmsg); stat = 0; break; } else { DBId_t StorageId; uint32_t StartBlock, EndBlock, StartFile, EndFile; bstrncpy(Vols[i].VolumeName, row[0], MAX_NAME_LENGTH); bstrncpy(Vols[i].MediaType, row[1], MAX_NAME_LENGTH); Vols[i].FirstIndex = str_to_uint64(row[2]); Vols[i].LastIndex = str_to_uint64(row[3]); StartFile = str_to_uint64(row[4]); EndFile = str_to_uint64(row[5]); StartBlock = str_to_uint64(row[6]); EndBlock = str_to_uint64(row[7]); Vols[i].StartAddr = (((uint64_t)StartFile)<<32) | StartBlock; Vols[i].EndAddr = (((uint64_t)EndFile)<<32) | EndBlock; Vols[i].Slot = str_to_uint64(row[8]); StorageId = str_to_uint64(row[9]); Vols[i].InChanger = str_to_uint64(row[10]); Vols[i].Storage[0] = 0; SId[i] = StorageId; } } for (i=0; i < stat; i++) { if (SId[i] != 0) { Mmsg(cmd, "SELECT Name from Storage WHERE StorageId=%s", edit_int64(SId[i], ed1)); if (QueryDB(jcr, cmd)) { if ((row = sql_fetch_row()) && row[0]) { bstrncpy(Vols[i].Storage, row[0], 
MAX_NAME_LENGTH); } } } } if (SId) { free(SId); } } sql_free_result(); } bdb_unlock(); return stat; } /** * Get JobMedia record * Returns: false on error or no JobMedia found * JobMedia in argument, use JobMediaId to find it * * Returns: true on success */ bool BDB::bdb_get_jobmedia_record(JCR *jcr, JOBMEDIA_DBR *jmr) { SQL_ROW row; char ed1[50]; bdb_lock(); Mmsg(cmd, "SELECT FirstIndex,LastIndex,StartFile," "EndFile,StartBlock,EndBlock,VolIndex, JobId, MediaId" " FROM JobMedia WHERE JobMedia.JobMediaId=%s", edit_int64(jmr->JobMediaId, ed1)); if (QueryDB(jcr, cmd)) { Dmsg1(200, "Num rows=%d\n", sql_num_rows()); if (sql_num_rows() != 1) { Mmsg1(errmsg, _("No JobMedia found for JobMediaId=%d\n"), jmr->JobMediaId); sql_free_result(); bdb_unlock(); return false; } if ((row = sql_fetch_row()) == NULL) { Mmsg1(errmsg, _("No JobMedia found for JobMediaId %d\n"), edit_int64(jmr->JobMediaId, ed1)); sql_free_result(); bdb_unlock(); return false; /* failed */ } jmr->FirstIndex = str_to_uint64(row[0]); jmr->LastIndex = str_to_uint64(row[1]); jmr->StartFile = str_to_int64(row[2]); jmr->EndFile = str_to_int64(row[3]); jmr->StartBlock = str_to_int64(row[4]); jmr->EndBlock = str_to_int64(row[5]); jmr->VolIndex = str_to_int64(row[6]); jmr->JobId = str_to_int64(row[7]); jmr->MediaId = str_to_int64(row[8]); sql_free_result(); bdb_unlock(); return true; } return false; } /** * Get the number of pool records * * Returns: -1 on failure * number on success */ int BDB::bdb_get_num_pool_records(JCR *jcr) { int stat = 0; bdb_lock(); Mmsg(cmd, "SELECT count(*) from Pool"); stat = get_sql_record_max(jcr, this); bdb_unlock(); return stat; } /** * This function returns a list of all the Pool record ids. * The caller must free ids if non-NULL. * * Returns 0: on failure * 1: on success */ int BDB::bdb_get_pool_ids(JCR *jcr, int *num_ids, uint32_t *ids[]) { SQL_ROW row; int stat = 0; int i = 0; uint32_t *id; bdb_lock(); *ids = NULL; Mmsg(cmd, "SELECT PoolId FROM Pool ORDER By Name"); if (QueryDB(jcr, cmd)) { *num_ids = sql_num_rows(); if (*num_ids > 0) { id = (uint32_t *)malloc(*num_ids * sizeof(uint32_t)); while ((row = sql_fetch_row()) != NULL) { id[i++] = str_to_uint64(row[0]); } *ids = id; } sql_free_result(); stat = 1; } else { Mmsg(errmsg, _("Pool id select failed: ERR=%s\n"), sql_strerror()); Jmsg(jcr, M_ERROR, 0, "%s", errmsg); stat = 0; } bdb_unlock(); return stat; } /** * This function returns a list of all the Client record ids. * The caller must free ids if non-NULL. 
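 * Minimal usage sketch (hypothetical caller code):
 *   int num = 0; uint32_t *ids = NULL;
 *   if (db->bdb_get_client_ids(jcr, &num, &ids) && ids) {
 *      for (int i = 0; i < num; i++) { ... ids[i] ... }
 *      free(ids);
 *   }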
* * Returns 0: on failure * 1: on success */ int BDB::bdb_get_client_ids(JCR *jcr, int *num_ids, uint32_t *ids[]) { SQL_ROW row; int stat = 0; int i = 0; uint32_t *id; bdb_lock(); *ids = NULL; Mmsg(cmd, "SELECT ClientId FROM Client ORDER BY Name ASC"); if (QueryDB(jcr, cmd)) { *num_ids = sql_num_rows(); if (*num_ids > 0) { id = (uint32_t *)malloc(*num_ids * sizeof(uint32_t)); while ((row = sql_fetch_row()) != NULL) { id[i++] = str_to_uint64(row[0]); } *ids = id; } sql_free_result(); stat = 1; } else { Mmsg(errmsg, _("Client id select failed: ERR=%s\n"), sql_strerror()); Jmsg(jcr, M_ERROR, 0, "%s", errmsg); stat = 0; } bdb_unlock(); return stat; } /** * Get Pool Id, Scratch Pool Id, Recycle Pool Id * Returns: false on failure * true on success */ bool BDB::bdb_get_pool_record(JCR *jcr, POOL_DBR *pdbr) { SQL_ROW row; bool ok = false; char ed1[50]; char esc[MAX_ESCAPE_NAME_LENGTH]; bdb_lock(); if (pdbr->PoolId != 0) { /* find by id */ Mmsg(cmd, "SELECT PoolId,Name,NumVols,MaxVols,UseOnce,UseCatalog,AcceptAnyVolume," "AutoPrune,Recycle,VolRetention,VolUseDuration,MaxVolJobs,MaxVolFiles," "MaxVolBytes,PoolType,LabelType,LabelFormat,RecyclePoolId,ScratchPoolId," "ActionOnPurge,CacheRetention,MaxPoolBytes FROM Pool WHERE Pool.PoolId=%s", edit_int64(pdbr->PoolId, ed1)); } else { /* find by name */ bdb_escape_string(jcr, esc, pdbr->Name, strlen(pdbr->Name)); Mmsg(cmd, "SELECT PoolId,Name,NumVols,MaxVols,UseOnce,UseCatalog,AcceptAnyVolume," "AutoPrune,Recycle,VolRetention,VolUseDuration,MaxVolJobs,MaxVolFiles," "MaxVolBytes,PoolType,LabelType,LabelFormat,RecyclePoolId,ScratchPoolId," "ActionOnPurge,CacheRetention,MaxPoolBytes FROM Pool WHERE Pool.Name='%s'", esc); } if (QueryDB(jcr, cmd)) { if (sql_num_rows() > 1) { char ed1[30]; Mmsg1(errmsg, _("More than one Pool! 
Num=%s\n"), edit_uint64(sql_num_rows(), ed1)); Jmsg(jcr, M_ERROR, 0, "%s", errmsg); } else if (sql_num_rows() == 1) { if ((row = sql_fetch_row()) == NULL) { Mmsg1(errmsg, _("error fetching row: %s\n"), sql_strerror()); Jmsg(jcr, M_ERROR, 0, "%s", errmsg); } else { pdbr->PoolId = str_to_int64(row[0]); bstrncpy(pdbr->Name, row[1]!=NULL?row[1]:"", sizeof(pdbr->Name)); pdbr->NumVols = str_to_int64(row[2]); pdbr->MaxVols = str_to_int64(row[3]); pdbr->UseOnce = str_to_int64(row[4]); pdbr->UseCatalog = str_to_int64(row[5]); pdbr->AcceptAnyVolume = str_to_int64(row[6]); pdbr->AutoPrune = str_to_int64(row[7]); pdbr->Recycle = str_to_int64(row[8]); pdbr->VolRetention = str_to_int64(row[9]); pdbr->VolUseDuration = str_to_int64(row[10]); pdbr->MaxVolJobs = str_to_int64(row[11]); pdbr->MaxVolFiles = str_to_int64(row[12]); pdbr->MaxVolBytes = str_to_uint64(row[13]); bstrncpy(pdbr->PoolType, row[14]!=NULL?row[14]:"", sizeof(pdbr->PoolType)); pdbr->LabelType = str_to_int64(row[15]); bstrncpy(pdbr->LabelFormat, row[16]!=NULL?row[16]:"", sizeof(pdbr->LabelFormat)); pdbr->RecyclePoolId = str_to_int64(row[17]); pdbr->ScratchPoolId = str_to_int64(row[18]); pdbr->ActionOnPurge = str_to_int32(row[19]); pdbr->CacheRetention = str_to_int64(row[20]); pdbr->MaxPoolBytes = str_to_int64(row[21]); ok = true; } } sql_free_result(); } bdb_unlock(); return ok; } #ifdef COMMUNITY /** * Get Pool numvols * If the PoolId is non-zero, we get its record, * otherwise, we search on the PoolName and we compute the number of volumes * * Returns: false on failure * true on success */ bool BDB::bdb_get_pool_numvols(JCR *jcr, POOL_DBR *pdbr) { bool ok; char ed1[50]; SQL_ROW row; ok = db_get_pool_record(jcr, this, pdbr); bdb_lock(); if (ok) { uint32_t NumVols=0; /* It is not necessary to compute the PoolBytes that can be expensive */ Mmsg(cmd, "SELECT count(*) from Media WHERE PoolId=%s", edit_int64(pdbr->PoolId, ed1)); if (QueryDB(jcr, cmd)) { if ((row = sql_fetch_row()) == NULL) { Mmsg1(&errmsg, _("error fetching row: %s\n"), sql_strerror()); ok = false; } else { NumVols = str_to_int64(row[0]); } sql_free_result(); } else { Mmsg1(&errmsg, _("error fetching row: %s\n"), sql_strerror()); ok = false; } Dmsg2(400, "Actual NumVols=%d Pool NumVols=%d\n", NumVols, pdbr->NumVols); if (ok && NumVols != pdbr->NumVols) { pdbr->NumVols = NumVols; db_update_pool_record(jcr, this, pdbr); } } else { Mmsg(errmsg, _("Pool record not found in Catalog.\n")); } bdb_unlock(); return ok; } #endif /* * Free restoreobject record (some fields are allocated through malloc) */ void db_free_restoreobject_record(JCR *jcr, ROBJECT_DBR *rr) { if (rr->object) { free(rr->object); } if (rr->object_name) { free(rr->object_name); } if (rr->plugin_name) { free(rr->plugin_name); } rr->object = rr->plugin_name = rr->object_name = NULL; } /* * Get list of Object Ids from search based by provided Object fields * */ bool BDB::bdb_get_plugin_objects_ids(JCR *jcr, OBJECT_DBR *obj_r, db_list_ctx *ids) { POOL_MEM where(PM_MESSAGE); int stat = false; obj_r->create_db_filter(jcr, where.handle()); Mmsg(cmd, "SELECT ObjectId FROM Object %s ORDER BY ObjectId ASC", where.c_str()); ids->reset(); bdb_lock(); if(!bdb_sql_query(cmd, db_list_handler, ids)) { Jmsg(jcr, M_ERROR, 0, _("Getting plugin object ids query %s failed!\n"), cmd); goto bail_out; } stat = true; bail_out: bdb_unlock(); return stat; } /* * Get specified Plugin Object by ObjectId * */ bool BDB::bdb_get_plugin_object_record(JCR *jcr, OBJECT_DBR *obj_r) { SQL_ROW row; POOL_MEM where(PM_MESSAGE); int stat = false; 
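   /* Presumably builds the WHERE clause from whatever search fields were set
    * in obj_r (for example the ObjectId); the result is then used in the
    * SELECT ... FROM Object query just below. */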
obj_r->create_db_filter(jcr, where.handle()); Mmsg(cmd, "SELECT ObjectId, JobId, Path, Filename, PluginName, ObjectCategory, " "ObjectType, ObjectName, ObjectSource, ObjectUUID, ObjectSize, ObjectStatus, ObjectCount " "FROM Object %s", where.c_str()); bdb_lock(); if (QueryDB(jcr, cmd)) { if (sql_num_rows() > 1) { char ed1[30]; Dmsg1(0, _("Error got %s PluginObjects but expected only one!\n"), edit_uint64(sql_num_rows(), ed1)); sql_data_seek(sql_num_rows()-1); goto bail_out; } if ((row = sql_fetch_row()) == NULL) { Dmsg1(100, _("PluginObject with ObjectId=%lu not found.\n"), obj_r->ObjectId); goto bail_out; } else { obj_r->ObjectId = str_to_uint64(row[0]); obj_r->JobId = str_to_uint64(row[1]); pm_strcpy(obj_r->Path, row[2]); pm_strcpy(obj_r->Filename, row[3]); pm_strcpy(obj_r->PluginName, row[4]); bstrncpy(obj_r->ObjectCategory, row[5], sizeof(obj_r->ObjectCategory)); bstrncpy(obj_r->ObjectType, row[6], sizeof(obj_r->ObjectType)); bstrncpy(obj_r->ObjectName, row[7], sizeof(obj_r->ObjectName)); bstrncpy(obj_r->ObjectSource, row[8], sizeof(obj_r->ObjectSource)); bstrncpy(obj_r->ObjectUUID, row[9], sizeof(obj_r->ObjectUUID)); obj_r->ObjectSize = str_to_uint64(row[10]); obj_r->ObjectStatus = row[11] != NULL ? (int)*row[11] : 'U'; obj_r->ObjectCount = str_to_uint64(row[12]); stat = true; } } else { Jmsg(jcr, M_ERROR, 0, _("PluginObject query %s failed!\n"), cmd); } bail_out: bdb_unlock(); return stat; } /* * Get RestoreObject Record * If the RestoreObjectId is non-zero, we get its record * * You must call db_free_restoreobject_record() after db_get_restoreobject_record() * * Returns: false on failure * true on success */ bool BDB::bdb_get_restoreobject_record(JCR *jcr, ROBJECT_DBR *rr) { SQL_ROW row; int stat = false; char ed1[50]; int32_t len; bdb_lock(); Mmsg(cmd, "SELECT ObjectName, PluginName, ObjectType, JobId, ObjectCompression, " "RestoreObject, ObjectLength, ObjectFullLength, FileIndex " "FROM RestoreObject " "WHERE RestoreObjectId=%s", edit_int64(rr->RestoreObjectId, ed1)); /* Using the JobId permits to check the Job name against ACLs and * make sure that the current user is authorized to see the Restore object */ if (rr->JobId) { pm_strcat(cmd, " AND JobId="); pm_strcat(cmd, edit_int64(rr->JobId, ed1)); } else if (rr->JobIds && is_a_number_list(rr->JobIds)) { pm_strcat(cmd, " AND JobId IN ("); pm_strcat(cmd, rr->JobIds); pm_strcat(cmd, ")"); } if (QueryDB(jcr, cmd)) { if (sql_num_rows() > 1) { char ed1[30]; Mmsg1(errmsg, _("Error got %s RestoreObjects but expected only one!\n"), edit_uint64(sql_num_rows(), ed1)); sql_data_seek(sql_num_rows()-1); } if ((row = sql_fetch_row()) == NULL) { Mmsg1(errmsg, _("RestoreObject record \"%d\" not found.\n"), rr->RestoreObjectId); } else { db_free_restoreobject_record(jcr, rr); rr->object_name = bstrdup(row[0]); rr->plugin_name = bstrdup(row[1]); rr->FileType = str_to_uint64(row[2]); rr->JobId = str_to_uint64(row[3]); rr->object_compression = str_to_int64(row[4]); rr->object_len = str_to_uint64(row[6]); rr->object_full_len = str_to_uint64(row[7]); rr->object_index = str_to_uint64(row[8]); bdb_unescape_object(jcr, row[5], /* Object */ rr->object_len, /* Object length */ &cmd, &len); if (rr->object_compression > 0) { int out_len = rr->object_full_len + 100; /* full length */ char *obj = (char *)malloc(out_len); Zinflate(cmd, rr->object_len, obj, out_len); /* out_len is updated */ if (out_len != (int)rr->object_full_len) { Dmsg3(10, "Decompression failed. Len wanted=%d got=%d. 
Object=%s\n", rr->object_full_len, out_len, rr->plugin_name); Mmsg(errmsg, _("Decompression failed. Len wanted=%d got=%d. Object=%s\n"), rr->object_full_len, out_len, rr->plugin_name); } obj[out_len] = 0; rr->object = obj; rr->object_len = out_len; } else { rr->object = (char *)malloc(sizeof(char)*(len+1)); memcpy(rr->object, cmd, len); rr->object[len] = 0; rr->object_len = len; } stat = true; } sql_free_result(); } else { Mmsg(errmsg, _("RestoreObject record not found in Catalog.\n")); } bdb_unlock(); return stat; } /** * Get Client Record * If the ClientId is non-zero, we get its record, * otherwise, we search on the Client Name * * Returns: 0 on failure * 1 on success */ int BDB::bdb_get_client_record(JCR *jcr, CLIENT_DBR *cdbr) { SQL_ROW row; int stat = 0; char ed1[50]; char esc[MAX_ESCAPE_NAME_LENGTH]; bdb_lock(); if (cdbr->ClientId != 0) { /* find by id */ Mmsg(cmd, "SELECT ClientId,Name,Uname,AutoPrune,FileRetention,JobRetention " "FROM Client WHERE Client.ClientId=%s", edit_int64(cdbr->ClientId, ed1)); } else { /* find by name */ bdb_escape_string(jcr, esc, cdbr->Name, strlen(cdbr->Name)); Mmsg(cmd, "SELECT ClientId,Name,Uname,AutoPrune,FileRetention,JobRetention " "FROM Client WHERE Client.Name='%s'", esc); } if (QueryDB(jcr, cmd)) { if (sql_num_rows() > 1) { Mmsg1(errmsg, _("More than one Client!: %s\n"), edit_uint64(sql_num_rows(), ed1)); Jmsg(jcr, M_ERROR, 0, "%s", errmsg); } else if (sql_num_rows() == 1) { if ((row = sql_fetch_row()) == NULL) { Mmsg1(errmsg, _("error fetching row: %s\n"), sql_strerror()); Jmsg(jcr, M_ERROR, 0, "%s", errmsg); } else { cdbr->ClientId = str_to_int64(row[0]); bstrncpy(cdbr->Name, row[1]!=NULL?row[1]:"", sizeof(cdbr->Name)); bstrncpy(cdbr->Uname, row[2]!=NULL?row[2]:"", sizeof(cdbr->Uname)); cdbr->AutoPrune = str_to_int64(row[3]); cdbr->FileRetention = str_to_int64(row[4]); cdbr->JobRetention = str_to_int64(row[5]); stat = 1; } } else { Mmsg(errmsg, _("Client record not found in Catalog.\n")); } sql_free_result(); } else { Mmsg(errmsg, _("Client record not found in Catalog.\n")); } bdb_unlock(); return stat; } /** * Get Counter Record * * Returns: 0 on failure * 1 on success */ bool BDB::bdb_get_counter_record(JCR *jcr, COUNTER_DBR *cr) { SQL_ROW row; char esc[MAX_ESCAPE_NAME_LENGTH]; bdb_lock(); bdb_escape_string(jcr, esc, cr->Counter, strlen(cr->Counter)); Mmsg(cmd, select_counter_values[bdb_get_type_index()], esc); if (QueryDB(jcr, cmd)) { /* If more than one, report error, but return first row */ if (sql_num_rows() > 1) { Mmsg1(errmsg, _("More than one Counter!: %d\n"), sql_num_rows()); Jmsg(jcr, M_ERROR, 0, "%s", errmsg); } if (sql_num_rows() >= 1) { if ((row = sql_fetch_row()) == NULL) { Mmsg1(errmsg, _("error fetching Counter row: %s\n"), sql_strerror()); Jmsg(jcr, M_ERROR, 0, "%s", errmsg); sql_free_result(); bdb_unlock(); return false; } cr->MinValue = str_to_int64(row[0]); cr->MaxValue = str_to_int64(row[1]); cr->CurrentValue = str_to_int64(row[2]); if (row[3]) { bstrncpy(cr->WrapCounter, row[3], sizeof(cr->WrapCounter)); } else { cr->WrapCounter[0] = 0; } sql_free_result(); bdb_unlock(); return true; } sql_free_result(); } else { Mmsg(errmsg, _("Counter record: %s not found in Catalog.\n"), cr->Counter); } bdb_unlock(); return false; } /** * Get FileSet Record * If the FileSetId is non-zero, we get its record, * otherwise, we search on the name * * Returns: 0 on failure * id on success */ int BDB::bdb_get_fileset_record(JCR *jcr, FILESET_DBR *fsr) { SQL_ROW row; int stat = 0; char ed1[50]; char esc[MAX_ESCAPE_NAME_LENGTH]; bdb_lock(); if 
(fsr->FileSetId != 0) { /* find by id */ Mmsg(cmd, "SELECT FileSetId,FileSet,MD5,CreateTime,Content FROM FileSet " "WHERE FileSetId=%s", edit_int64(fsr->FileSetId, ed1)); } else { /* find by name */ bdb_escape_string(jcr, esc, fsr->FileSet, strlen(fsr->FileSet)); Mmsg(cmd, "SELECT FileSetId,FileSet,MD5,CreateTime,Content FROM FileSet " "WHERE FileSet='%s' ORDER BY CreateTime DESC LIMIT 1", esc); } if (QueryDB(jcr, cmd)) { if (sql_num_rows() > 1) { char ed1[30]; Mmsg1(errmsg, _("Error got %s FileSets but expected only one!\n"), edit_uint64(sql_num_rows(), ed1)); sql_data_seek(sql_num_rows()-1); } if ((row = sql_fetch_row()) == NULL) { Mmsg1(errmsg, _("FileSet record \"%s\" not found.\n"), fsr->FileSet); } else { fsr->FileSetId = str_to_int64(row[0]); bstrncpy(fsr->FileSet, row[1]!=NULL?row[1]:"", sizeof(fsr->FileSet)); bstrncpy(fsr->MD5, row[2]!=NULL?row[2]:"", sizeof(fsr->MD5)); bstrncpy(fsr->cCreateTime, row[3]!=NULL?row[3]:"", sizeof(fsr->cCreateTime)); bstrncpy(fsr->Content, row[4]!=NULL?row[4]:"", sizeof(fsr->Content)); stat = fsr->FileSetId; } sql_free_result(); } else { Mmsg(errmsg, _("FileSet record not found in Catalog.\n")); } bdb_unlock(); return stat; } /** * Get the number of Media records * * Returns: -1 on failure * number on success */ int BDB::bdb_get_num_media_records(JCR *jcr) { int stat = 0; bdb_lock(); Mmsg(cmd, "SELECT count(*) from Media"); stat = get_sql_record_max(jcr, this); bdb_unlock(); return stat; } /** * Get the number of Restore Object for a given jobid list */ int BDB::bdb_get_num_restoreobject_records(JCR *jcr, ROBJECT_DBR *rr) { int ret = 0; char ed1[50]; POOL_MEM filter, tmp; SQL_ROW row; /* Using the JobId permits to check the Job name against ACLs and * make sure that the current user is authorized to see the Restore object */ if (rr->JobId) { Mmsg(filter, " JobId=%s", edit_int64(rr->JobId, ed1)); } else if (rr->JobIds && is_a_number_list(rr->JobIds)) { Mmsg(filter, " JobId IN (%s) ", rr->JobIds); } else { return 0; } if (rr->FileType) { Mmsg(tmp, " AND ObjectType=%s ", edit_uint64(rr->FileType, ed1)); pm_strcat(filter, tmp.c_str()); } if (rr->object_name) { Mmsg(tmp, " AND ObjectName='%s' ", rr->object_name); pm_strcat(filter, tmp.c_str()); } if (rr->plugin_name) { Mmsg(tmp, " AND PluginName='%s' ", rr->plugin_name); pm_strcat(filter, tmp.c_str()); } bdb_lock(); Mmsg(cmd, "SELECT COUNT(1) FROM RestoreObject WHERE %s", filter.c_str()); if (rr->limit) { Mmsg(tmp, " LIMIT %d", rr->limit); pm_strcat(cmd, tmp.c_str()); } if (QueryDB(jcr, cmd)) { if (sql_num_rows() > 1) { char ed1[30]; Mmsg1(errmsg, _("Error got %s RestoreObjects count but expected only one!\n"), edit_uint64(sql_num_rows(), ed1)); goto bail_out; } if ((row = sql_fetch_row()) == NULL) { Mmsg0(errmsg, _("No RestoreObject record found.\n")); } else { ret = str_to_int64(row[0]); } } bail_out: bdb_unlock(); return ret; } /** * This function returns a list of all the Media record ids for * the current Pool, the correct Media Type, Recyle, Enabled, StorageId, VolBytes * VolumeName if specified * The caller must free ids if non-NULL. 
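 *
 * A minimal usage sketch (names illustrative, the other MEDIA_DBR fields are
 * assumed already cleared by the caller): only the fields that are set act
 * as filters, and the returned id array must be freed exactly as with
 * bdb_get_client_ids():
 *
 *    int num_ids = 0;
 *    uint32_t *ids = NULL;
 *    mr.PoolId  = pool_id;     // AND PoolId=...
 *    mr.Enabled = 1;           // only enabled volumes (a negative value accepts any)
 *    mr.Recycle = -1;          // a negative value skips the Recycle filter
 *    if (db->bdb_get_media_ids(jcr, &mr, &num_ids, &ids)) {
 *       ...use num_ids entries of ids[]...
 *       if (ids) free(ids);    // allocated by bdb_get_media_ids()
 *    }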
* * Returns false: on failure * true: on success */ bool BDB::bdb_get_media_ids(JCR *jcr, MEDIA_DBR *mr, int *num_ids, uint32_t *ids[]) { SQL_ROW row; int i = 0; uint32_t *id; char ed1[50]; bool ok = false; char buf[MAX_NAME_LENGTH*3]; /* Can contain MAX_NAME_LENGTH*2+1 + AND ....='' */ char esc[MAX_NAME_LENGTH*2+1]; bdb_lock(); *ids = NULL; if (mr->Enabled >= 0) { /* Enabled is valid */ Mmsg(cmd, "SELECT DISTINCT MediaId FROM Media WHERE Enabled=%d ", mr->Enabled); } else { /* We look for all Enabled (0, 1, 2) */ Mmsg(cmd, "SELECT DISTINCT MediaId FROM Media WHERE Enabled >= 0 "); } if (mr->Recycle >= 0) { bsnprintf(buf, sizeof(buf), "AND Recycle=%d ", mr->Recycle); pm_strcat(cmd, buf); } if (*mr->MediaType) { bdb_escape_string(jcr, esc, mr->MediaType, strlen(mr->MediaType)); bsnprintf(buf, sizeof(buf), "AND MediaType='%s' ", esc); pm_strcat(cmd, buf); } if (mr->sid_group) { bsnprintf(buf, sizeof(buf), "AND StorageId IN (%s) ", mr->sid_group); pm_strcat(cmd, buf); } else if (mr->StorageId) { bsnprintf(buf, sizeof(buf), "AND StorageId=%s ", edit_uint64(mr->StorageId, ed1)); pm_strcat(cmd, buf); } if (mr->PoolId) { bsnprintf(buf, sizeof(buf), "AND PoolId=%s ", edit_uint64(mr->PoolId, ed1)); pm_strcat(cmd, buf); } if (mr->VolBytes) { bsnprintf(buf, sizeof(buf), "AND VolBytes > %s ", edit_uint64(mr->VolBytes, ed1)); pm_strcat(cmd, buf); } if (*mr->VolumeName) { bdb_escape_string(jcr, esc, mr->VolumeName, strlen(mr->VolumeName)); bsnprintf(buf, sizeof(buf), "AND VolumeName = '%s' ", esc); pm_strcat(cmd, buf); } if (mr->MediaId) { bsnprintf(buf, sizeof(buf), "AND MediaId = %lld ", (int64_t)mr->MediaId); pm_strcat(cmd, buf); } if (*mr->VolStatus) { bdb_escape_string(jcr, esc, mr->VolStatus, strlen(mr->VolStatus)); bsnprintf(buf, sizeof(buf), "AND VolStatus = '%s' ", esc); pm_strcat(cmd, buf); } /* Filter the volumes with the CacheRetention */ if (mr->CacheRetention) { bsnprintf(buf, sizeof(buf), "AND %s ", prune_cache[bdb_get_type_index()]); pm_strcat(cmd, buf); } Dmsg1(100, "q=%s\n", cmd); if (QueryDB(jcr, cmd)) { *num_ids = sql_num_rows(); if (*num_ids > 0) { id = (uint32_t *)malloc(*num_ids * sizeof(uint32_t)); while ((row = sql_fetch_row()) != NULL) { id[i++] = str_to_uint64(row[0]); } *ids = id; } sql_free_result(); ok = true; } else { Mmsg(errmsg, _("Media id select failed: ERR=%s\n"), sql_strerror()); Jmsg(jcr, M_ERROR, 0, "%s", errmsg); ok = false; } bdb_unlock(); return ok; } /** * This function returns a list of all the DBIds that are returned * for the query. 
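 *
 * A minimal usage sketch (the query text is illustrative): dbid_list manages
 * its own DBId array, so the caller has nothing to free here:
 *
 *    dbid_list ids;
 *    POOL_MEM query(PM_MESSAGE);
 *    Mmsg(query, "SELECT PoolId FROM Pool ORDER BY PoolId ASC");
 *    if (db->bdb_get_query_dbids(jcr, query, ids)) {
 *       for (int i = 0; i < ids.num_ids; i++) {
 *          ...use ids.DBId[i]...
 *       }
 *    }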
* * Returns false: on failure * true: on success */ bool BDB::bdb_get_query_dbids(JCR *jcr, POOL_MEM &query, dbid_list &ids) { SQL_ROW row; int i = 0; bool ok = false; bdb_lock(); ids.num_ids = 0; if (QueryDB(jcr, query.c_str())) { ids.num_ids = sql_num_rows(); if (ids.num_ids > 0) { if (ids.max_ids < ids.num_ids) { free(ids.DBId); ids.DBId = (DBId_t *)malloc(ids.num_ids * sizeof(DBId_t)); } while ((row = sql_fetch_row()) != NULL) { ids.DBId[i++] = str_to_uint64(row[0]); } } sql_free_result(); ok = true; } else { Mmsg(errmsg, _("query dbids failed: ERR=%s\n"), sql_strerror()); Jmsg(jcr, M_ERROR, 0, "%s", errmsg); ok = false; } bdb_unlock(); return ok; } /** * Get Media Record * * Returns: false: on failure * true: on success */ bool BDB::bdb_get_media_record(JCR *jcr, MEDIA_DBR *mr) { SQL_ROW row; char ed1[50]; bool ok = false; char esc[MAX_ESCAPE_NAME_LENGTH]; POOL_MEM filter; bdb_lock(); if (mr->MediaId == 0 && mr->VolumeName[0] == 0) { Mmsg(cmd, "SELECT count(*) from Media"); mr->MediaId = get_sql_record_max(jcr, this); bdb_unlock(); return true; } if (mr->MediaId != 0) { /* find by id */ Mmsg(filter, "WHERE MediaId=%s", edit_int64(mr->MediaId, ed1)); } else { /* find by name */ bdb_escape_string(jcr, esc, mr->VolumeName, strlen(mr->VolumeName)); Mmsg(filter, "WHERE VolumeName='%s'", esc); } Mmsg(cmd, "SELECT MediaId,VolumeName,VolJobs,VolFiles," "VolBlocks,VolBytes,VolABytes,VolHoleBytes,VolHoles,VolMounts," "VolErrors,VolWrites,Media.MaxVolBytes,Media.VolCapacityBytes," "MediaType,VolStatus,Media.PoolId,Media.VolRetention,Media.VolUseDuration,Media.MaxVolJobs," "Media.MaxVolFiles,Media.Recycle,Slot,FirstWritten,LastWritten,InChanger," "EndFile,EndBlock,VolType,VolParts,VolCloudParts,LastPartBytes," "Media.LabelType,LabelDate,StorageId," "Media.Enabled,LocationId,RecycleCount,InitialWrite," "Media.ScratchPoolId,Media.RecyclePoolId,VolReadTime,VolWriteTime,Media.ActionOnPurge," "Media.CacheRetention,Protected,UseProtect,VolEncrypted,Pool.Name " "FROM Media JOIN Pool USING (PoolId) %s", filter.c_str()); if (QueryDB(jcr, cmd)) { char ed1[50]; if (sql_num_rows() > 1) { Mmsg1(errmsg, _("More than one Volume!: %s\n"), edit_uint64(sql_num_rows(), ed1)); Jmsg(jcr, M_ERROR, 0, "%s", errmsg); } else if (sql_num_rows() == 1) { if ((row = sql_fetch_row()) == NULL) { Mmsg1(errmsg, _("error fetching row: %s\n"), sql_strerror()); Jmsg(jcr, M_ERROR, 0, "%s", errmsg); } else { /* return values */ mr->MediaId = str_to_int64(row[0]); bstrncpy(mr->VolumeName, row[1]!=NULL?row[1]:"", sizeof(mr->VolumeName)); mr->VolJobs = str_to_int64(row[2]); mr->VolFiles = str_to_int64(row[3]); mr->VolBlocks = str_to_int64(row[4]); mr->VolBytes = str_to_uint64(row[5]); mr->VolABytes = str_to_uint64(row[6]); mr->VolHoleBytes = str_to_uint64(row[7]); mr->VolHoles = str_to_int64(row[8]); mr->VolMounts = str_to_int64(row[9]); mr->VolErrors = str_to_int64(row[10]); mr->VolWrites = str_to_int64(row[11]); mr->MaxVolBytes = str_to_uint64(row[12]); mr->VolCapacityBytes = str_to_uint64(row[13]); bstrncpy(mr->MediaType, row[14]!=NULL?row[14]:"", sizeof(mr->MediaType)); bstrncpy(mr->VolStatus, row[15]!=NULL?row[15]:"", sizeof(mr->VolStatus)); mr->PoolId = str_to_int64(row[16]); mr->VolRetention = str_to_uint64(row[17]); mr->VolUseDuration = str_to_uint64(row[18]); mr->MaxVolJobs = str_to_int64(row[19]); mr->MaxVolFiles = str_to_int64(row[20]); mr->Recycle = str_to_int64(row[21]); mr->Slot = str_to_int64(row[22]); bstrncpy(mr->cFirstWritten, row[23]!=NULL?row[23]:"", sizeof(mr->cFirstWritten)); mr->FirstWritten = 
(time_t)str_to_utime(mr->cFirstWritten); bstrncpy(mr->cLastWritten, row[24]!=NULL?row[24]:"", sizeof(mr->cLastWritten)); mr->LastWritten = (time_t)str_to_utime(mr->cLastWritten); mr->InChanger = str_to_uint64(row[25]); mr->EndFile = str_to_uint64(row[26]); mr->EndBlock = str_to_uint64(row[27]); mr->VolType = str_to_int64(row[28]); mr->VolParts = str_to_int64(row[29]); mr->VolCloudParts = str_to_int64(row[30]); mr->LastPartBytes = str_to_uint64(row[31]); mr->LabelType = str_to_int64(row[32]); bstrncpy(mr->cLabelDate, row[33]!=NULL?row[33]:"", sizeof(mr->cLabelDate)); mr->LabelDate = (time_t)str_to_utime(mr->cLabelDate); mr->StorageId = str_to_int64(row[34]); mr->Enabled = str_to_int64(row[35]); mr->LocationId = str_to_int64(row[36]); mr->RecycleCount = str_to_int64(row[37]); bstrncpy(mr->cInitialWrite, row[38]!=NULL?row[38]:"", sizeof(mr->cInitialWrite)); mr->InitialWrite = (time_t)str_to_utime(mr->cInitialWrite); mr->ScratchPoolId = str_to_int64(row[39]); mr->RecyclePoolId = str_to_int64(row[40]); mr->VolReadTime = str_to_int64(row[41]); mr->VolWriteTime = str_to_int64(row[42]); mr->ActionOnPurge = str_to_int32(row[43]); mr->CacheRetention = str_to_int64(row[44]); mr->Protected = str_to_int64(row[45]); mr->UseProtect = str_to_int64(row[46]); mr->VolEncrypted = str_to_int64(row[47]); bstrncpy(mr->Pool, row[48], sizeof(mr->Pool)); ok = true; } } else { if (mr->MediaId != 0) { Mmsg1(errmsg, _("Media record with MediaId=%s not found.\n"), edit_int64(mr->MediaId, ed1)); } else { Mmsg1(errmsg, _("Media record for Volume name \"%s\" not found.\n"), mr->VolumeName); } } sql_free_result(); } else { if (mr->MediaId != 0) { Mmsg(errmsg, _("Media record for MediaId=%u not found in Catalog.\n"), mr->MediaId); } else { Mmsg(errmsg, _("Media record for Volume Name \"%s\" not found in Catalog.\n"), mr->VolumeName); } } bdb_unlock(); return ok; } /* Remove all MD5 from a query (can save lot of memory with many files) */ static void strip_md5(char *q) { char *p = q; while ((p = strstr(p, ", MD5"))) { memset(p, ' ', 5 * sizeof(char)); } p = q; while ((p = strstr(p, ", '' AS MD5"))) { memset(p, ' ', 11 * sizeof(char)); } } /** * Find the last "accurate" backup state (that can take deleted files in * account) * 1) Get all files with jobid in list (F subquery) * Get all files in BaseFiles with jobid in list * 2) Take only the last version of each file (Temp subquery) => accurate list * is ok * 3) Join the result to file table to get fileindex, jobid and lstat information * * TODO: See if we can do the SORT only if needed (as an argument) * * In addition, find all objects for the given jobs, and keep all versions. * For Object, the FileIndex is not stored, so the code is prepared but not yet * fonctionnal. 
It is done via an "manual" procedure waiting for FileIndex */ bool BDB::bdb_get_file_list(JCR *jcr, char *jobids, int opts, DB_RESULT_HANDLER *result_handler, void *ctx) { const char *type; if (opts & DBL_ALL_FILES) { type = ""; } else { /* Only non-deleted files */ type = "WHERE FileIndex > 0"; } if (opts & DBL_DELETED) { type = "WHERE FileIndex <= 0"; } if (!*jobids) { bdb_lock(); Mmsg(errmsg, _("ERR=JobIds are empty\n")); bdb_unlock(); return false; } POOL_MEM buf(PM_MESSAGE); POOL_MEM buf2(PM_MESSAGE); POOL_MEM buf3(PM_MESSAGE); if (opts & DBL_USE_DELTA) { Mmsg(buf2, select_recent_version_with_basejob_and_delta[bdb_get_type_index()], jobids, jobids, jobids, jobids); } else { Mmsg(buf2, select_recent_version_with_basejob[bdb_get_type_index()], jobids, jobids, jobids, jobids); } /* During virtual full, we want to include Objects in the BSR */ if (bdb_get_type_index() == SQL_TYPE_SQLITE3) { /* This is a SQLite version of the query that could replace the * pgsql/mysql that is left untouched below. * The problem is that sqlite don't like parenthesis to manage UNION like * in (select * from table1) UNION (select * from table2) * the parenthesis were required because of two "order by" like this: * select * from ( * (select * from tableXX) * UNION * (select * from tableYY order by WW)) * order by WW * but the "before last order by" is not required because it is identical to * the last one and then the parenthesis can be removed to get something like : * select * from * ( * select * from tableXX * UNION * select * from tableYY * ) * order by WW */ if (opts & DBL_USE_OBJ) { Mmsg(buf3, "UNION SELECT ObjectName AS Path, PluginName AS Filename, FileIndex, JobId, '' AS LStat, 0 AS DeltaSeq, '' AS MD5, JobTDate " "FROM Job JOIN RestoreObject USING (JobId) " "WHERE JobId IN (%s) ", jobids); } /* bsr code is optimized for JobId sorted, with Delta, we need to get * them ordered by date. JobTDate and JobId can be mixed if using Copy * or Migration */ Mmsg(buf, "SELECT Path, Filename, FileIndex, JobId, LStat, DeltaSeq, MD5, JobTDate " "FROM (" "SELECT Path.Path, T1.Filename, T1.FileIndex, T1.JobId, LStat, DeltaSeq, MD5, JobTDate " "FROM ( %s ) AS T1 " "JOIN Path ON (Path.PathId = T1.PathId) %s " " %s ) AS U1 ORDER BY JobTDate, FileIndex ASC",/* Return sorted by JobTDate */ /* FileIndex for restore code */ buf2.c_str(), type, buf3.c_str()); } else { if (opts & DBL_USE_OBJ) { Mmsg(buf3, "UNION (SELECT ObjectName AS Path, PluginName AS Filename, FileIndex, JobId, '' AS LStat, 0 AS DeltaSeq, '' AS MD5, JobTDate " "FROM Job JOIN RestoreObject USING (JobId) " "WHERE JobId IN (%s) ORDER BY JobTDate ASC, FileIndex ASC) ", jobids); } /* bsr code is optimized for JobId sorted, with Delta, we need to get * them ordered by date. 
JobTDate and JobId can be mixed if using Copy * or Migration */ Mmsg(buf, "SELECT Path, Filename, FileIndex, JobId, LStat, DeltaSeq, MD5, JobTDate " "FROM ((" "SELECT Path.Path, T1.Filename, T1.FileIndex, T1.JobId, LStat, DeltaSeq, MD5, JobTDate " "FROM ( %s ) AS T1 " "JOIN Path ON (Path.PathId = T1.PathId) %s " ") %s ) AS U1 ORDER BY JobTDate, FileIndex ASC",/* Return sorted by JobTDate */ /* FileIndex for restore code */ buf2.c_str(), type, buf3.c_str()); } if (!(opts & DBL_USE_MD5)) { strip_md5(buf.c_str()); } Dmsg1(50|DT_SQL, "q=%s\n", buf.c_str()); return bdb_big_sql_query(buf.c_str(), result_handler, ctx); } /** * This procedure gets the base jobid list used by jobids, */ bool BDB::bdb_get_used_base_jobids(JCR *jcr, POOLMEM *jobids, db_list_ctx *result) { POOL_MEM buf; Mmsg(buf, "SELECT DISTINCT BaseJobId " " FROM Job JOIN BaseFiles USING (JobId) " " WHERE Job.HasBase = 1 " " AND Job.JobId IN (%s) ", jobids); return bdb_sql_query(buf.c_str(), db_list_handler, result); } /* Mutex used to have global counter on btemp table */ static pthread_mutex_t btemp_mutex = PTHREAD_MUTEX_INITIALIZER; static uint32_t btemp_cur = 1; /** * The decision do change an incr/diff was done before * Full : do nothing * Differential : get the last full id * Incremental : get the last full + last diff + last incr(s) ids * * If you specify jr->StartTime, it will be used to limit the search * in the time. (usually now) * * In some case multiple job can satisfy the requirement (two jobs ran exactly * at the same time or mutiple copy jobs), and the function take the first one. * But when specified, like in the command ".bvfs_get_jobids jobid=XXX" we must * verify that XXX match the requirement and return it if it exists, not any * other "interchangeable jobs". That's what the from_jobid is for. 
If not sure, * set 0 * * TODO: look and merge from ua_restore.c */ bool BDB::bdb_get_accurate_jobids(JCR *jcr, JOB_DBR *jr, uint32_t from_jobid, db_list_ctx *jobids) { bool ret=false; char clientid[50], jobid[50], filesetid[50]; char date[MAX_TIME_LENGTH]; char esc[MAX_ESCAPE_NAME_LENGTH]; POOL_MEM query(PM_MESSAGE), name(PM_FNAME), aux(PM_FNAME); /* Take the current time as upper limit if nothing else specified */ utime_t StartTime = (jr->StartTime)?jr->StartTime:time(NULL); bstrutime(date, sizeof(date), StartTime + 1); jobids->reset(); Dmsg1(100, "from_jobid=%ld hint\n", from_jobid); P(btemp_mutex); bsnprintf(jobid, sizeof(jobid), "0%u", btemp_cur++); V(btemp_mutex); if (jr->Name[0] != 0) { bdb_escape_string(jcr, esc, jr->Name, strlen(jr->Name)); Mmsg(name, " AND Name = '%s' ", esc); aux.strcat(name.c_str()); } /* First, find the last good Full backup for this job/client/fileset */ Mmsg(query, create_temp_accurate_jobids[bdb_get_type_index()], jobid, edit_uint64(jr->ClientId, clientid), date, edit_uint64(jr->FileSetId, filesetid), aux.c_str() ); if (!bdb_sql_query(query.c_str(), NULL, NULL)) { goto bail_out; } if (jr->JobLevel == L_INCREMENTAL || jr->JobLevel == L_VIRTUAL_FULL) { /* Now, find the last differential backup after the last full */ Mmsg(query, "INSERT INTO btemp3%s (JobId, StartTime, EndTime, JobTDate, PurgedFiles) " "SELECT JobId, StartTime, EndTime, JobTDate, PurgedFiles " "FROM Job JOIN FileSet USING (FileSetId) " "WHERE ClientId = %s " "AND Level='D' AND JobStatus IN ('T','W') AND Type='B' " "AND StartTime > (SELECT EndTime FROM btemp3%s ORDER BY EndTime DESC LIMIT 1) " "AND StartTime < '%s' " "AND FileSet.FileSet= (SELECT FileSet FROM FileSet WHERE FileSetId = %s) " " %s " /* Optional name */ "ORDER BY Job.JobTDate DESC LIMIT 1 ", jobid, clientid, jobid, date, filesetid, name.c_str() ); if (!bdb_sql_query(query.c_str(), NULL, NULL)) { goto bail_out; } /* We just have to take all incremental after the last Full/Diff */ Mmsg(query, "INSERT INTO btemp3%s (JobId, StartTime, EndTime, JobTDate, PurgedFiles) " "SELECT JobId, StartTime, EndTime, JobTDate, PurgedFiles " "FROM Job JOIN FileSet USING (FileSetId) " "WHERE ClientId = %s " "AND Level='I' AND JobStatus IN ('T','W') AND Type='B' " "AND StartTime > (SELECT EndTime FROM btemp3%s ORDER BY EndTime DESC LIMIT 1) " "AND StartTime < '%s' " "AND FileSet.FileSet= (SELECT FileSet FROM FileSet WHERE FileSetId = %s) " " %s " "ORDER BY Job.JobTDate DESC ", jobid, clientid, jobid, date, filesetid, name.c_str() ); if (!bdb_sql_query(query.c_str(), NULL, NULL)) { goto bail_out; } } /* build a jobid list ie: 1,2,3,4 */ Mmsg(query, "SELECT JobId FROM btemp3%s ORDER by JobTDate", jobid); if(!bdb_sql_query(query.c_str(), db_list_handler, jobids)) { goto bail_out; } /* FIXME: When we come with a copy (or VF), the list can return * a JobId list that doesn't include the one that was provided. * if the result is also one jobid. 
It is useful when */ Dmsg1(1, "db_get_accurate_jobids=%s\n", jobids->list); ret = true; bail_out: Mmsg(query, "DROP TABLE IF EXISTS btemp3%s", jobid); bdb_sql_query(query.c_str(), NULL, NULL); return ret; } static int db_prior_job_handler(void *ctx, int num_fields, char **row) { JOB_DBR *jr = (JOB_DBR*) ctx; jr->PriorJobId = 0; *jr->PriorJob = 0; if (num_fields == 2) { jr->PriorJobId = str_to_uint64(row[0]); bstrncpy(jr->PriorJob, row[1], sizeof(jr->PriorJob)); } return 0; } /* Function to assign the PriorJob, PriorJobid to the current job after a VirtualFull */ bool BDB::bdb_get_prior_job(JCR *jcr, const char *jobids, JOB_DBR *jr) { bool ret=false; bdb_lock(); Mmsg(cmd, "SELECT PriorJobId, PriorJob FROM Job WHERE JobId IN (%s) ORDER By JobTDate DESC LIMIT 1", jobids); if(!bdb_sql_query(cmd, db_prior_job_handler, jr)) { goto bail_out; } if (jr->PriorJobId == 0) { // If we just copied a Full, we can use it directly Mmsg(cmd, "SELECT JobId, Job FROM Job WHERE JobId IN (%s) ORDER BY JobTDate DESC LIMIT 1", jobids); if(!bdb_sql_query(cmd, db_prior_job_handler, jr)) { goto bail_out; } } Dmsg2(0, "PriorJobId=%lu PriorJob=%s\n", jr->PriorJobId, jr->PriorJob); ret = true; bail_out: bdb_unlock(); return ret; } bool BDB::bdb_get_base_file_list(JCR *jcr, bool use_md5, DB_RESULT_HANDLER *result_handler, void *ctx) { POOL_MEM buf(PM_MESSAGE); Mmsg(buf, "SELECT Path, Name, FileIndex, JobId, LStat, 0 As DeltaSeq, MD5 " "FROM new_basefile%lld ORDER BY JobId, FileIndex ASC", (uint64_t) jcr->JobId); if (!use_md5) { strip_md5(buf.c_str()); } return bdb_sql_query(buf.c_str(), result_handler, ctx); } bool BDB::bdb_get_base_jobid(JCR *jcr, JOB_DBR *jr, JobId_t *jobid) { POOL_MEM query(PM_FNAME); utime_t StartTime; db_int64_ctx lctx; char date[MAX_TIME_LENGTH]; char esc[MAX_ESCAPE_NAME_LENGTH]; bool ret = false; *jobid = 0; lctx.count = 0; lctx.value = 0; StartTime = (jr->StartTime)?jr->StartTime:time(NULL); bstrutime(date, sizeof(date), StartTime + 1); bdb_escape_string(jcr, esc, jr->Name, strlen(jr->Name)); /* we can take also client name, fileset, etc... 
*/ Mmsg(query, "SELECT JobId, Job, StartTime, EndTime, JobTDate, PurgedFiles " "FROM Job " // "JOIN FileSet USING (FileSetId) JOIN Client USING (ClientId) " "WHERE Job.Name = '%s' " "AND Level='B' AND JobStatus IN ('T','W') AND Type='B' " // "AND FileSet.FileSet= '%s' " // "AND Client.Name = '%s' " "AND StartTime<'%s' " "ORDER BY Job.JobTDate DESC LIMIT 1", esc, // edit_uint64(jr->ClientId, clientid), // edit_uint64(jr->FileSetId, filesetid)); date); Dmsg1(10, "db_get_base_jobid q=%s\n", query.c_str()); if (!bdb_sql_query(query.c_str(), db_int64_handler, &lctx)) { goto bail_out; } *jobid = (JobId_t) lctx.value; Dmsg1(10, "db_get_base_jobid=%lld\n", *jobid); ret = true; bail_out: return ret; } /* Get JobIds associated with a volume */ bool BDB::bdb_get_volume_jobids(JCR *jcr, MEDIA_DBR *mr, db_list_ctx *lst) { char ed1[50]; bool ret = false; bdb_lock(); Mmsg(cmd, "SELECT DISTINCT JobId FROM JobMedia WHERE MediaId=%s", edit_int64(mr->MediaId, ed1)); ret = bdb_sql_query(cmd, db_list_handler, lst); bdb_unlock(); return ret; } /* Get JobIds associated with a client */ bool BDB::bdb_get_client_jobids(JCR *jcr, CLIENT_DBR *cr, db_list_ctx *lst) { char ed1[50]; bool ret = false; bdb_lock(); Mmsg(cmd, "SELECT JobId FROM Job WHERE ClientId=%s", edit_int64(cr->ClientId, ed1)); ret = bdb_sql_query(cmd, db_list_handler, lst); bdb_unlock(); return ret; } /** * Get Snapshot Record * * Returns: false: on failure * true: on success */ bool BDB::bdb_get_snapshot_record(JCR *jcr, SNAPSHOT_DBR *sr) { SQL_ROW row; char ed1[50]; bool ok = false; char esc[MAX_ESCAPE_NAME_LENGTH]; POOL_MEM filter1, filter2; if (sr->SnapshotId == 0 && (sr->Name[0] == 0 || sr->Device[0] == 0)) { Dmsg0(10, "No SnapshotId or Name/Device provided\n"); return false; } bdb_lock(); if (sr->SnapshotId != 0) { /* find by id */ Mmsg(filter1, "Snapshot.SnapshotId=%d", sr->SnapshotId); } else if (*sr->Name && *sr->Device) { /* find by name */ bdb_escape_string(jcr, esc, sr->Name, strlen(sr->Name)); Mmsg(filter1, "Snapshot.Name='%s'", esc); bdb_escape_string(jcr, esc, sr->Device, strlen(sr->Device)); Mmsg(filter2, "AND Snapshot.Device='%s'", esc); } else { Dmsg0(10, "No SnapshotId or Name and Device\n"); return false; } Mmsg(cmd, "SELECT SnapshotId, Snapshot.Name, JobId, Snapshot.FileSetId, " "FileSet.FileSet, CreateTDate, CreateDate, " "Client.Name AS Client, Snapshot.ClientId, Volume, Device, Type, Retention, " "Comment FROM Snapshot JOIN Client USING (ClientId) LEFT JOIN FileSet USING (FileSetId) WHERE %s %s", filter1.c_str(), filter2.c_str()); if (QueryDB(jcr, cmd)) { char ed1[50]; if (sql_num_rows() > 1) { Mmsg1(errmsg, _("More than one Snapshot!: %s\n"), edit_uint64(sql_num_rows(), ed1)); Jmsg(jcr, M_ERROR, 0, "%s", errmsg); } else if (sql_num_rows() == 1) { if ((row = sql_fetch_row()) == NULL) { Mmsg1(errmsg, _("error fetching row: %s\n"), sql_strerror()); Jmsg(jcr, M_ERROR, 0, "%s", errmsg); } else { /* return values */ sr->reset(); sr->need_to_free = true; sr->SnapshotId = str_to_int64(row[0]); bstrncpy(sr->Name, row[1], sizeof(sr->Name)); sr->JobId = str_to_int64(row[2]); sr->FileSetId = str_to_int64(row[3]); bstrncpy(sr->FileSet, row[4], sizeof(sr->FileSet)); sr->CreateTDate = str_to_uint64(row[5]); bstrncpy(sr->CreateDate, row[6], sizeof(sr->CreateDate)); bstrncpy(sr->Client, row[7], sizeof(sr->Client)); sr->ClientId = str_to_int64(row[8]); sr->Volume = bstrdup(row[9]); sr->Device = bstrdup(row[10]); bstrncpy(sr->Type, row[11], sizeof(sr->Type)); sr->Retention = str_to_int64(row[12]); bstrncpy(sr->Comment, NPRTB(row[13]), 
sizeof(sr->Comment)); ok = true; } } else { if (sr->SnapshotId != 0) { Mmsg1(errmsg, _("Snapshot record with SnapshotId=%s not found.\n"), edit_int64(sr->SnapshotId, ed1)); } else { Mmsg1(errmsg, _("Snapshot record for Snapshot name \"%s\" not found.\n"), sr->Name); } } sql_free_result(); } else { if (sr->SnapshotId != 0) { Mmsg1(errmsg, _("Snapshot record with SnapshotId=%s not found.\n"), edit_int64(sr->SnapshotId, ed1)); } else { Mmsg1(errmsg, _("Snapshot record for Snapshot name \"%s\" not found.\n"), sr->Name); } } bdb_unlock(); return ok; } /* Job, Level */ static void build_estimate_query(BDB *db, POOL_MEM &query, const char *mode, char *job_esc, char level) { POOL_MEM filter, tmp; char ed1[50]; if (level == 0) { level = 'F'; } /* MySQL doesn't have statistic functions */ if (db->bdb_get_type_index() == SQL_TYPE_POSTGRESQL) { /* postgresql have functions that permit to handle lineal regression * in y=ax + b * REGR_SLOPE(Y,X) = get x * REGR_INTERCEPT(Y,X) = get b * and we need y when x=now() * CORR gives the correlation * (TODO: display progress bar only if CORR > 0.8) */ btime_t now = time(NULL); Mmsg(query, "SELECT temp.jobname AS jobname, " "COALESCE(CORR(value,JobTDate),0) AS corr, " "(%s*REGR_SLOPE(value,JobTDate) " " + REGR_INTERCEPT(value,JobTDate)) AS value, " "AVG(value) AS avg_value, " " COUNT(1) AS nb ", edit_int64(now, ed1)); } else { Mmsg(query, "SELECT jobname AS jobname, " "0.1 AS corr, AVG(value) AS value, AVG(value) AS avg_value, " "COUNT(1) AS nb "); } /* if it's a differential, we need to compare since the last full * * F D D D F D D D F I I I I D I I I * | # # # # | # # * | # # # # # # | # # * | # # # # # # # # | # # # # # # # # # * +----------------- +------------------- */ if (level == L_DIFFERENTIAL) { Mmsg(filter, " AND Job.StartTime > ( " " SELECT StartTime " " FROM Job " " WHERE Job.Name = '%s' " " AND Job.Level = 'F' " " AND Job.JobStatus IN ('T', 'W') " " ORDER BY Job.StartTime DESC LIMIT 1) ", job_esc); } Mmsg(tmp, " FROM ( " " SELECT Job.Name AS jobname, " " %s AS value, " " JobTDate AS jobtdate " " FROM Job INNER JOIN Client USING (ClientId) " " WHERE Job.Name = '%s' " " AND Job.Level = '%c' " " AND Job.JobStatus IN ('T', 'W') " "%s " "ORDER BY StartTime DESC " "LIMIT 4" ") AS temp GROUP BY temp.jobname", mode, job_esc, level, filter.c_str() ); pm_strcat(query, tmp.c_str()); } bool BDB::bdb_get_job_statistics(JCR *jcr, JOB_DBR *jr) { SQL_ROW row; POOL_MEM queryB, queryF, query; char job_esc[MAX_ESCAPE_NAME_LENGTH]; bool ok = false; bdb_lock(); bdb_escape_string(jcr, job_esc, jr->Name, strlen(jr->Name)); build_estimate_query(this, queryB, "JobBytes", job_esc, jr->JobLevel); build_estimate_query(this, queryF, "JobFiles", job_esc, jr->JobLevel); Mmsg(query, "SELECT bytes.corr * 100 AS corr_jobbytes, " /* 0 */ "bytes.value AS jobbytes, " /* 1 */ "bytes.avg_value AS avg_jobbytes, " /* 2 */ "bytes.nb AS nb_jobbytes, " /* 3 */ "files.corr * 100 AS corr_jobfiles, " /* 4 */ "files.value AS jobfiles, " /* 5 */ "files.avg_value AS avg_jobfiles, " /* 6 */ "files.nb AS nb_jobfiles " /* 7 */ "FROM (%s) AS bytes LEFT JOIN (%s) AS files USING (jobname)", queryB.c_str(), queryF.c_str()); Dmsg1(100, "query=%s\n", query.c_str()); if (QueryDB(jcr, query.c_str())) { char ed1[50]; if (sql_num_rows() > 1) { Mmsg1(errmsg, _("More than one Result!: %s\n"), edit_uint64(sql_num_rows(), ed1)); goto bail_out; } ok = true; if ((row = sql_fetch_row()) == NULL) { Mmsg1(errmsg, _("error fetching row: %s\n"), sql_strerror()); } else { jr->CorrJobBytes = str_to_int64(row[0]); 
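/*
 * A small self-contained sketch of the PostgreSQL estimate built above:
 * build_estimate_query() asks the server for an ordinary least-squares fit
 * value = a*JobTDate + b over the last few (JobTDate, JobBytes|JobFiles)
 * samples and evaluates it at x = now, which is what REGR_SLOPE() and
 * REGR_INTERCEPT() compute server side.  The helper below reproduces that
 * arithmetic client side; the function name and the fallback to the plain
 * average (mirroring the non-PostgreSQL branch) are illustrative only.
 */
static double estimate_value_at(const double *x, const double *y, int n, double now)
{
   double sx = 0, sy = 0, sxx = 0, sxy = 0;
   for (int i = 0; i < n; i++) {
      sx  += x[i];
      sy  += y[i];
      sxx += x[i] * x[i];
      sxy += x[i] * y[i];
   }
   double denom = (double)n * sxx - sx * sx;
   if (n < 2 || denom == 0.0) {
      return n > 0 ? sy / n : 0.0;                   /* fall back to the average */
   }
   double a = ((double)n * sxy - sx * sy) / denom;   /* REGR_SLOPE(y, x)     */
   double b = (sy - a * sx) / n;                     /* REGR_INTERCEPT(y, x) */
   return a * now + b;
}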
jr->JobBytes = str_to_int64(row[1]); /* lineal expression with only one job doesn't return a correct value */ if (str_to_int64(row[3]) == 1) { jr->JobBytes = str_to_int64(row[2]); /* Take the AVG value */ } jr->CorrNbJob = str_to_int64(row[3]); /* Number of jobs used in this sample */ jr->CorrJobFiles = str_to_int64(row[4]); jr->JobFiles = str_to_int64(row[5]); if (str_to_int64(row[7]) == 1) { jr->JobFiles = str_to_int64(row[6]); /* Take the AVG value */ } } sql_free_result(); } bail_out: bdb_unlock(); return ok; } /* * List Client/Pool associations. Return a list of Client/Pool in a alist. * [0] Client * [1] Pool * [2] Client * [3] Pool * ... */ bool BDB::bdb_get_client_pool(JCR *jcr, alist *results) { SQL_ROW row; bool ret=false; POOLMEM *where = get_pool_memory(PM_MESSAGE); POOLMEM *tmp = get_pool_memory(PM_MESSAGE); bdb_lock(); /* Build the WHERE part with the current ACLs if any */ pm_strcpy(where, get_acls(DB_ACL_BIT(DB_ACL_CLIENT) | // TODO: See if we let Backup Client here (for pruning) DB_ACL_BIT(DB_ACL_JOB) | DB_ACL_BIT(DB_ACL_POOL), true)); Mmsg(cmd, "SELECT DISTINCT Client.Name, Pool.Name " "FROM Job JOIN Client USING (ClientId) JOIN Pool USING (PoolId) %s", where); Dmsg1(100, "sql=%s\n", cmd); if (QueryDB(jcr, cmd)) { ret = true; while ((row = sql_fetch_row()) != NULL) { results->append(bstrdup(row[0])); /* append client */ results->append(bstrdup(row[1])); /* append pool */ } sql_free_result(); } bdb_unlock(); free_pool_memory(where); free_pool_memory(tmp); return ret; } #endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL */ bacula-15.0.3/src/cats/bdb_postgresql.h0000644000175000017500000000452014771010173017567 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* Postgresql driver specific definitions */ #ifdef __BDB_POSTGRESQL_H_ class BDB_POSTGRESQL: public BDB { private: PGconn *m_db_handle; PGresult *m_result; POOLMEM *m_buf; /* Buffer to manipulate queries */ public: BDB_POSTGRESQL(); ~BDB_POSTGRESQL(); /* Functions that we override */ bool bdb_open_database(JCR *jcr); void bdb_close_database(JCR *jcr); void bdb_thread_cleanup(void); void bdb_escape_string(JCR *jcr, char *snew, const char *old, int len); char *bdb_escape_object(JCR *jcr, char *old, int len); void bdb_unescape_object(JCR *jcr, char *from, int32_t expected_len, POOLMEM **dest, int32_t *len); void bdb_start_transaction(JCR *jcr); void bdb_end_transaction(JCR *jcr); bool bdb_sql_query(const char *query, DB_RESULT_HANDLER *result_handler, void *ctx); bool bdb_big_sql_query(const char *query, DB_RESULT_HANDLER *result_handler, void *ctx); void sql_free_result(void); SQL_ROW sql_fetch_row(void); bool sql_query(const char *query, int flags=0); const char *sql_strerror(void); int sql_num_rows(void); void sql_data_seek(int row); int sql_affected_rows(void); uint64_t sql_insert_autokey_record(const char *query, const char *table_name); void sql_field_seek(int field); SQL_FIELD *sql_fetch_field(void); int sql_num_fields(void); bool sql_field_is_not_null(int field_type); bool sql_field_is_numeric(int field_type); bool sql_batch_start(JCR *jcr); bool sql_batch_end(JCR *jcr, const char *error); bool sql_batch_insert(JCR *jcr, ATTR_DBR *ar); const char *search_op(JCR *jcr, const char *table_col, char *value, POOLMEM **esc, POOLMEM **dest); }; #endif /* __BDB_POSTGRESQL_H_ */ bacula-15.0.3/src/cats/sql.c0000644000175000017500000012075614771010173015361 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Bacula Catalog Database interface routines * * Almost generic set of SQL database interface routines * (with a little more work) * SQL engine specific routines are in mysql.c, postgresql.c, * sqlite.c, ... * * Written by Kern Sibbald, March 2000 * * Note: at one point, this file was changed to class based by a certain * programmer, and other than "wrapping" in a class, which is a trivial * change for a C++ programmer, nothing substantial was done, yet all the * code was recommitted under this programmer's name. Consequently, we * undo those changes here. */ #include "bacula.h" #if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL #include "cats.h" /* Forward referenced subroutines */ void print_dashes(BDB *mdb); void print_result(BDB *mdb); dbid_list::dbid_list() { memset(this, 0, sizeof(dbid_list)); max_ids = 1000; DBId = (DBId_t *)malloc(max_ids * sizeof(DBId_t)); num_ids = num_seen = tot_ids = 0; PurgedFiles = NULL; } dbid_list::~dbid_list() { free(DBId); } /* * Called here to retrieve a resource name (e.g. 
Storage name) from the database */ int db_name_handler(void *ctx, int num_fields, char **row) { char *name = (char *)ctx; // Retrieved name is longer than it should be if (strlen(row[0]) > (MAX_NAME_LENGTH - 1)) { return 1; } bstrncpy(name, row[0], MAX_NAME_LENGTH); return 0; } /* * Called here to retrieve a list of jobid x,y,z from the database */ int db_jobids_handler(void *ctx, int num_fields, char **row) { POOLMEM **ret = (POOLMEM **)ctx; if (row[0]) { if (**ret) { pm_strcat(ret, ","); } pm_strcat(ret, row[0]); } return 0; } /* * Called here to retrieve an string list from the database */ int db_string_list_handler(void *ctx, int num_fields, char **row) { alist **val = (alist **)ctx; if (row[0]) { (*val)->append(bstrdup(row[0])); } return 0; } /* * Called here to retrieve an integer from the database */ int db_int_handler(void *ctx, int num_fields, char **row) { uint32_t *val = (uint32_t *)ctx; Dmsg1(800, "int_handler starts with row pointing at %x\n", row); if (row[0]) { Dmsg1(800, "int_handler finds '%s'\n", row[0]); *val = str_to_int64(row[0]); } else { Dmsg0(800, "int_handler finds zero\n"); *val = 0; } Dmsg0(800, "int_handler finishes\n"); return 0; } /* * Called here to retrieve a 32/64 bit integer from the database. * The returned integer will be extended to 64 bit. */ int db_int64_handler(void *ctx, int num_fields, char **row) { db_int64_ctx *lctx = (db_int64_ctx *)ctx; if (row[0]) { lctx->value = str_to_int64(row[0]); lctx->count++; } return 0; } /* * Called here to retrieve a 32/64 bit integer from the database. * The returned integer will be extended to 64 bit. It can * return multiple values, but the array has to match the number * of fields to be returned. */ int db_mint64_handler(void *ctx, int num_fields, char **row) { int64_t *tab = (int64_t *)ctx; for (int i = 0; i < num_fields; i++) { if (row[i]) { tab[i] = str_to_int64(row[i]); } } return 0; } /* * Called here to retrieve a btime from the database. * The returned integer will be extended to 64 bit. */ int db_strtime_handler(void *ctx, int num_fields, char **row) { db_int64_ctx *lctx = (db_int64_ctx *)ctx; if (row[0]) { lctx->value = str_to_utime(row[0]); lctx->count++; } return 0; } /* * Use to build a comma separated list of values from a query. "10,20,30" */ int db_list_handler(void *ctx, int num_fields, char **row) { db_list_ctx *lctx = (db_list_ctx *)ctx; if (num_fields == 1 && row[0]) { lctx->add(row[0]); } return 0; } /* * specific context passed from bdb_check_max_connections to * db_max_connections_handler. */ struct max_connections_context { BDB *db; uint32_t nr_connections; }; /* * Called here to retrieve max_connections from db */ static int db_max_connections_handler(void *ctx, int num_fields, char **row) { struct max_connections_context *context; uint32_t index; context = (struct max_connections_context *)ctx; switch (context->db->bdb_get_type_index()) { case SQL_TYPE_MYSQL: index = 1; default: index = 0; } if (row[index]) { context->nr_connections = str_to_int64(row[index]); } else { Dmsg0(800, "int_handler finds zero\n"); context->nr_connections = 0; } return 0; } BDB::BDB() { init_acl(); acl_join = get_pool_memory(PM_MESSAGE); acl_where = get_pool_memory(PM_MESSAGE); } BDB::~BDB() { free_acl(); free_pool_memory(acl_join); free_pool_memory(acl_where); } /* Get the WHERE section of a query that permits to respect * the console ACLs. 
* * get_acls(DB_ACL_BIT(DB_ACL_JOB) | DB_ACL_BIT(DB_ACL_CLIENT), true) * -> WHERE Job.Name IN ('a', 'b', 'c') AND Client.Name IN ('d', 'e') * * get_acls(DB_ACL_BIT(DB_ACL_JOB) | DB_ACL_BIT(DB_ACL_CLIENT), false) * -> AND Job.Name IN ('a', 'b', 'c') AND Client.Name IN ('d', 'e') */ char *BDB::get_acls(int tables, bool where /* use WHERE or AND */) { pm_strcpy(acl_where, ""); for (int i=0 ; i < DB_ACL_LAST; i++) { if (tables & DB_ACL_BIT(i)) { pm_strcat(acl_where, get_acl((DB_ACL_t)i, where)); where = acl_where[0] == 0 && where; } } return acl_where; } /* Get the list of the JobId that are accessible for this console * Usually, this function is called in a restore context */ char *BDB::bdb_get_jobids(const char *jobids, POOLMEM **ret, bool append) { if (!ret || !*ret) { return NULL; } if (!append) { // Return empty list if append = false pm_strcpy(ret, ""); } if (!jobids || !*jobids || !is_a_number_list(jobids)) { return *ret; } bdb_lock(); /* Get optional filters for the SQL query */ const char *where = get_acls(DB_ACL_BIT(DB_ACL_JOB) | DB_ACL_BIT(DB_ACL_RCLIENT) | // Clients where we can restore DB_ACL_BIT(DB_ACL_FILESET), false); const char *join = *where ? get_acl_join_filter(DB_ACL_BIT(DB_ACL_RCLIENT) | DB_ACL_BIT(DB_ACL_FILESET)) : ""; /* No filters, no need to run the query */ if (!*where && !*join) { if (*ret[0]) { pm_strcat(ret, ","); } pm_strcat(ret, jobids); goto bail_out; } Mmsg(cmd, "SELECT Job.JobId as JobId " "FROM Job %s WHERE JobId IN (%s%s%s) %s ORDER BY JobTDate ASC", join, *ret /* previous list */, *ret[0] ? "," : "" /* comma to separate from the new list */, jobids, where); /* We start from a fresh list, previous entries will be listed */ pm_strcpy(ret, ""); Dmsg1(50|DT_SQL, "q=%s\n", cmd); if(!bdb_sql_query(cmd, db_jobids_handler, ret)) { goto bail_out; } bail_out: sql_free_result(); bdb_unlock(); return *ret; } /* Create the JOIN string that will help to filter queries results */ char *BDB::get_acl_join_filter(int tables) { POOL_MEM tmp; pm_strcpy(acl_join, ""); if (tables & DB_ACL_BIT(DB_ACL_JOB)) { Mmsg(tmp, " JOIN Job USING (JobId) "); pm_strcat(acl_join, tmp); } if (tables & (DB_ACL_BIT(DB_ACL_CLIENT) | DB_ACL_BIT(DB_ACL_RCLIENT) | DB_ACL_BIT(DB_ACL_BCLIENT) | DB_ACL_BIT(DB_ACL_RBCLIENT))) { Mmsg(tmp, " JOIN Client USING (ClientId) "); pm_strcat(acl_join, tmp); } if (tables & DB_ACL_BIT(DB_ACL_POOL)) { Mmsg(tmp, " JOIN Pool USING (PoolId) "); pm_strcat(acl_join, tmp); } if (tables & DB_ACL_BIT(DB_ACL_PATH)) { Mmsg(tmp, " JOIN Path USING (PathId) "); pm_strcat(acl_join, tmp); } if (tables & DB_ACL_BIT(DB_ACL_LOG)) { Mmsg(tmp, " JOIN Log USING (JobId) "); pm_strcat(acl_join, tmp); } if (tables & DB_ACL_BIT(DB_ACL_FILESET)) { Mmsg(tmp, " LEFT JOIN FileSet USING (FileSetId) "); pm_strcat(acl_join, tmp); } return acl_join; } /* Intialize the ACL list */ void BDB::init_acl() { use_acls = false; for(int i=0; i < DB_ACL_LAST; i++) { acls[i] = NULL; } } /* Free ACL list */ void BDB::free_acl() { for(int i=0; i < DB_ACL_LAST; i++) { free_and_null_pool_memory(acls[i]); } use_acls = false; } /* Get ACL for a given type */ const char *BDB::get_acl(DB_ACL_t type, bool where /* display WHERE or AND */) { if (!acls[type]) { return ""; } /* The acls[type] list of string allowed has a prefix, and we can overwrite * it with the where parameter */ strcpy(acls[type], where?" 
WHERE ":" AND "); acls[type][7] = ' ' ; /* replace \0 by ' ' */ return acls[type]; } /* Keep UAContext ACLs in our structure for further SQL queries */ void BDB::set_acl(JCR *jcr, DB_ACL_t type, alist *list, alist *list2, alist *list3) { const char *key=NULL; const char *keyid=NULL; /* We are in a restricted console mode */ use_acls = true; /* If the list is present, but we authorize everything */ if (list && list->size() == 1 && strcasecmp((char*)list->get(0), "*all*") == 0) { return; } /* If the list is present, but we authorize everything */ if (list2 && list2->size() == 1 && strcasecmp((char*)list2->get(0), "*all*") == 0) { return; } /* If the list is present, but we authorize everything */ if (list3 && list3->size() == 1 && strcasecmp((char*)list3->get(0), "*all*") == 0) { return; } POOLMEM *tmp = get_pool_memory(PM_FNAME); POOLMEM *where = get_pool_memory(PM_FNAME); *where = 0; *tmp = 0; switch(type) { case DB_ACL_JOB: key = "Job.Name"; break; case DB_ACL_BCLIENT: case DB_ACL_CLIENT: case DB_ACL_RCLIENT: case DB_ACL_RBCLIENT: key = "Client.Name"; break; case DB_ACL_FILESET: key = "FileSet.FileSet"; keyid = "FileSet.FileSetId"; break; case DB_ACL_POOL: key = "Pool.Name"; keyid = "Pool.PoolId"; break; default: break; } /* For clients, we can have up to 3 lists */ char *elt; alist *merged_list = New(alist(5, not_owned_by_alist)); if (list) { foreach_alist(elt, list) { merged_list->append(elt); } } if (list2) { foreach_alist(elt, list2) { merged_list->append(elt); } } if (list3) { foreach_alist(elt, list3) { merged_list->append(elt); } } escape_acl_list(jcr, key, &tmp, merged_list); delete merged_list; if (keyid) { Mmsg(where, " AND (%s IS NULL OR %s) ", keyid, tmp); } else { Mmsg(where, " AND %s ", tmp); } acls[type] = where; Dmsg1(50|DT_SQL, "%s\n", where); free_pool_memory(tmp); } /* Convert a ACL list to a SQL IN() / regexp list * key=Client.Name + (test1, test2) * => (Client.Name IN ('test1', 'test2')) * key=Client.Name + (test1, test2*) * => (Client.Name IN ('test1') OR (Client.Name ~ 'test2.*')) */ char *BDB::escape_acl_list(JCR *jcr, const char *key, POOLMEM **escaped_list, alist *lst) { char *elt, *p, *dst; int len; bool have_in=false, have_reg=false; POOL_MEM tmp, tmp2, reg, in; if (lst==NULL || lst->size() == 0) { Mmsg(tmp, "(%s IN (''))", key); pm_strcat(escaped_list, tmp.c_str()); return *escaped_list; } foreach_alist(elt, lst) { if (elt && *elt) { len = strlen(elt); /* Escape + ' ' */ tmp.check_size(4 * len + 2 + 2); tmp2.check_size(4 * len + 2 + 2); if (strchr(elt, '*') != NULL || strchr(elt, '[') != NULL) { /* Replace all * by .* */ dst = tmp2.c_str(); for (p = elt; *p ; p++) { if (*p == '*') { *dst++ = '.'; *dst = '*'; /* Escape regular */ } else if (*p == '.' 
|| *p == '$' || *p == '^' || *p == '+' || *p == '(' || *p == ')' || *p == '|') { *dst++ = '\\'; *dst = *p; } else { *dst = *p; } dst++; } *dst = '\0'; /* Escape the expression, the result is now in "tmp" */ bdb_lock(); bdb_escape_string(jcr, tmp.c_str(), tmp2.c_str(), strlen(tmp2.c_str())); bdb_unlock(); /* Build the SQL part */ Mmsg(tmp2, "(%s %s '%s')", key, regexp_value[bdb_get_type_index()], tmp.c_str()); /* Append the expression to the existing one if any */ if (have_reg) { pm_strcat(reg, " OR "); } pm_strcat(reg, tmp2.c_str()); have_reg = true; } else { /* Escape the string between '' */ pm_strcpy(tmp, "'"); bdb_lock(); bdb_escape_string(jcr, tmp.c_str() + 1 , elt, len); bdb_unlock(); pm_strcat(tmp, "'"); if (have_in) { pm_strcat(in, ","); } pm_strcat(in, tmp.c_str()); have_in = true; } } } /* Check if we have */ pm_strcat(escaped_list, "("); if (have_in) { Mmsg(tmp, "%s IN (%s)", key, in.c_str()); pm_strcat(escaped_list, tmp.c_str()); } if (have_reg) { if (have_in) { pm_strcat(escaped_list, " OR "); } pm_strcat(escaped_list, reg.c_str()); } pm_strcat(escaped_list, ")"); return *escaped_list; } /* * Check catalog max_connections setting */ bool BDB::bdb_check_max_connections(JCR *jcr, uint32_t max_concurrent_jobs) { struct max_connections_context context; /* Without Batch insert, no need to verify max_connections */ if (!batch_insert_available()) return true; context.db = this; context.nr_connections = 0; /* Check max_connections setting */ if (!bdb_sql_query(sql_get_max_connections[bdb_get_type_index()], db_max_connections_handler, &context)) { Jmsg(jcr, M_ERROR, 0, "Can't verify max_connections settings %s", errmsg); return false; } if (context.nr_connections && max_concurrent_jobs && max_concurrent_jobs > context.nr_connections) { Mmsg(errmsg, _("Potential performance problem:\n" "max_connections=%d set for %s database \"%s\" should be larger than Director's " "MaxConcurrentJobs=%d\n"), context.nr_connections, bdb_get_engine_name(), get_db_name(), max_concurrent_jobs); Jmsg(jcr, M_WARNING, 0, "%s", errmsg); return false; } return true; } /* NOTE!!! The following routines expect that the * calling subroutine sets and clears the mutex */ /* Check that the tables correspond to the version we want */ bool BDB::bdb_check_version(JCR *jcr) { uint32_t bacula_db_version = 0; const char *query = "SELECT VersionId FROM Version"; bacula_db_version = 0; if (!bdb_sql_query(query, db_int_handler, (void *)&bacula_db_version)) { return false; } if (bacula_db_version != BDB_VERSION) { Mmsg(errmsg, "Version error for database \"%s\". Wanted %d, got %d\n", get_db_name(), BDB_VERSION, bacula_db_version); return false; } return true; } /* * Utility routine for queries. The database MUST be locked before calling here. 
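 *
 * The typical call sequence, as used throughout this file (sketch only):
 *
 *    bdb_lock();
 *    Mmsg(cmd, "SELECT ... FROM ... WHERE ...");
 *    if (QueryDB(jcr, cmd)) {
 *       while ((row = sql_fetch_row()) != NULL) {
 *          ...use row[]...
 *       }
 *       sql_free_result();
 *    }
 *    bdb_unlock();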
* Returns: 0 on failure * 1 on success */ bool BDB::QueryDB(JCR *jcr, char *cmd, const char *file, int line) { sql_free_result(); if (!sql_query(cmd, QF_STORE_RESULT)) { if (use_acls) { Dmsg2(DT_SQL, "query %s failed:\n%s\n", cmd, sql_strerror()); /* The restricted console we get a basic message */ m_msg(file, line, &errmsg, _("query failed\n")); } else { /* Complete message for non restricted console */ m_msg(file, line, &errmsg, _("query %s failed:\n%s\n"), cmd, sql_strerror()); } if (use_fatal_jmsg()) { j_msg(file, line, jcr, M_FATAL, 0, "%s", errmsg); } if (verbose && !use_acls) { j_msg(file, line, jcr, M_INFO, 0, "%s\n", cmd); } return false; } return true; } /* * Utility routine to do inserts * Returns: 0 on failure * 1 on success */ bool BDB::InsertDB(JCR *jcr, char *cmd, const char *file, int line) { if (!sql_query(cmd)) { if (use_acls) { Dmsg2(DT_SQL, _("insert %s failed:\n%s\n"), cmd, sql_strerror()); m_msg(file, line, &errmsg, _("insert failed\n")); } else { m_msg(file, line, &errmsg, _("insert %s failed:\n%s\n"), cmd, sql_strerror()); } if (use_fatal_jmsg()) { j_msg(file, line, jcr, M_FATAL, 0, "%s", errmsg); } if (verbose && !use_acls) { j_msg(file, line, jcr, M_INFO, 0, "%s\n", cmd); } return false; } int num_rows = sql_affected_rows(); if (num_rows != 1) { char ed1[30]; m_msg(file, line, &errmsg, _("Insertion problem: affected_rows=%s\n"), edit_uint64(num_rows, ed1)); if (verbose) { j_msg(file, line, jcr, M_INFO, 0, "%s\n", cmd); } return false; } changes++; return true; } /* Utility routine for updates. * Returns: false on failure * true on success * * Some UPDATE queries must update record(s), other queries might not update * anything. */ bool BDB::UpdateDB(JCR *jcr, char *cmd, bool can_be_empty, const char *file, int line) { if (!sql_query(cmd)) { if (use_acls) { Dmsg2(DT_SQL, _("update %s failed:\n%s\n"), cmd, sql_strerror()); m_msg(file, line, &errmsg, _("update failed:\n")); } else { m_msg(file, line, &errmsg, _("update %s failed:\n%s\n"), cmd, sql_strerror()); } j_msg(file, line, jcr, M_ERROR, 0, "%s", errmsg); if (verbose && !use_acls) { j_msg(file, line, jcr, M_INFO, 0, "%s\n", cmd); } return false; } int num_rows = sql_affected_rows(); if ((num_rows == 0 && !can_be_empty) || num_rows < 0) { char ed1[30]; Dmsg2(DT_SQL, _("Update failed: affected_rows=%s for %s\n"), edit_uint64(num_rows, ed1), cmd); if (use_acls) { m_msg(file, line, &errmsg, _("Update failed: affected_rows=%s\n"), edit_uint64(num_rows, ed1)); } else { m_msg(file, line, &errmsg, _("Update failed: affected_rows=%s for %s\n"), edit_uint64(num_rows, ed1), cmd); } if (verbose && !use_acls) { // j_msg(file, line, jcr, M_INFO, 0, "%s\n", cmd); } return false; } changes++; return true; } /* Utility routine for deletes * * Returns: -1 on error * n number of rows affected */ int BDB::DeleteDB(JCR *jcr, char *cmd, const char *file, int line) { if (!sql_query(cmd)) { if (use_acls) { Dmsg2(DT_SQL, _("delete %s failed:\n%s\n"), cmd, sql_strerror()); m_msg(file, line, &errmsg, _("delete failed:\n")); } else { m_msg(file, line, &errmsg, _("delete %s failed:\n%s\n"), cmd, sql_strerror()); } j_msg(file, line, jcr, M_ERROR, 0, "%s", errmsg); if (verbose && !use_acls) { j_msg(file, line, jcr, M_INFO, 0, "%s\n", cmd); } return -1; } changes++; return sql_affected_rows(); } /* * Get record max. 
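 *   (typically preceded by something like
 *      Mmsg(mdb->cmd, "SELECT count(*) from Media");
 *    as done in bdb_get_num_media_records() above; a negative return value
 *    means the query or the row fetch failed)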
Query is already in mdb->cmd * No locking done * * Returns: -1 on failure * count on success */ int get_sql_record_max(JCR *jcr, BDB *mdb) { SQL_ROW row; int stat = 0; if (mdb->QueryDB(jcr, mdb->cmd)) { if ((row = mdb->sql_fetch_row()) == NULL) { Mmsg1(&mdb->errmsg, _("error fetching row: %s\n"), mdb->sql_strerror()); stat = -1; } else { stat = str_to_int64(row[0]); } mdb->sql_free_result(); } else { Mmsg1(&mdb->errmsg, _("error fetching row: %s\n"), mdb->sql_strerror()); stat = -1; } return stat; } /* * Given a full filename, split it into its path * and filename parts. They are returned in pool memory * in the mdb structure. */ void split_path_and_file(JCR *jcr, BDB *mdb, const char *afname) { const char *p, *f; /* Find path without the filename. * I.e. everything after the last / is a "filename". * OK, maybe it is a directory name, but we treat it like * a filename. If we don't find a / then the whole name * must be a path name (e.g. c:). */ for (p=f=afname; *p; p++) { if (IsPathSeparator(*p)) { f = p; /* set pos of last slash */ } } if (IsPathSeparator(*f)) { /* did we find a slash? */ f++; /* yes, point to filename */ } else { /* no, whole thing must be path name */ f = p; } /* If filename doesn't exist (i.e. root directory), we * simply create a blank name consisting of a single * space. This makes handling zero length filenames * easier. */ mdb->fnl = p - f; if (mdb->fnl > 0) { mdb->fname = check_pool_memory_size(mdb->fname, mdb->fnl+1); memcpy(mdb->fname, f, mdb->fnl); /* copy filename */ mdb->fname[mdb->fnl] = 0; } else { mdb->fname[0] = 0; mdb->fnl = 0; } mdb->pnl = f - afname; if (mdb->pnl > 0) { mdb->path = check_pool_memory_size(mdb->path, mdb->pnl+1); memcpy(mdb->path, afname, mdb->pnl); mdb->path[mdb->pnl] = 0; } else { Mmsg1(&mdb->errmsg, _("Path length is zero. 
File=%s\n"), afname); Jmsg(jcr, M_FATAL, 0, "%s", mdb->errmsg); mdb->path[0] = 0; mdb->pnl = 0; } Dmsg3(500, "split fname=%s: path=%s file=%s\n", afname, mdb->path, mdb->fname); } /* * Set maximum field length to something reasonable */ static int max_length(int max_length) { int max_len = max_length; /* Sanity check */ if (max_len < 0) { max_len = 2; } else if (max_len > 100) { max_len = 100; } return max_len; } /* * List dashes as part of header for listing SQL results in a table */ void list_dashes(BDB *mdb, DB_LIST_HANDLER *send, void *ctx) { SQL_FIELD *field; int i, j; int len; mdb->sql_field_seek(0); send(ctx, "+"); for (i = 0; i < mdb->sql_num_fields(); i++) { field = mdb->sql_fetch_field(); if (!field) { break; } len = max_length(field->max_length + 2); for (j = 0; j < len; j++) { send(ctx, "-"); } send(ctx, "+"); } send(ctx, "\n"); } /* Small handler to print the last line of a list xxx command */ static void last_line_handler(void *vctx, const char *str) { LIST_CTX *ctx = (LIST_CTX *)vctx; bstrncat(ctx->line, str, sizeof(ctx->line)); } int list_result(void *vctx, int nb_col, char **row) { SQL_FIELD *field; int i, col_len, max_len = 0; char buf[2000], ewc[30]; LIST_CTX *pctx = (LIST_CTX *)vctx; DB_LIST_HANDLER *send = pctx->send; e_list_type type = pctx->type; BDB *mdb = pctx->mdb; void *ctx = pctx->ctx; JCR *jcr = pctx->jcr; if (!pctx->once) { pctx->once = true; Dmsg1(800, "list_result starts looking at %d fields\n", mdb->sql_num_fields()); /* determine column display widths */ mdb->sql_field_seek(0); for (i = 0; i < mdb->sql_num_fields(); i++) { Dmsg1(800, "list_result processing field %d\n", i); field = mdb->sql_fetch_field(); if (!field) { break; } col_len = cstrlen(field->name); if (type == VERT_LIST) { if (col_len > max_len) { max_len = col_len; } } else { if (mdb->sql_field_is_numeric(field->type) && (int)field->max_length > 0) { /* fixup for commas */ field->max_length += (field->max_length - 1) / 3; } if (col_len < (int)field->max_length) { col_len = field->max_length; } if (col_len < 4 && !mdb->sql_field_is_not_null(field->flags)) { col_len = 4; /* 4 = length of the word "NULL" */ } field->max_length = col_len; /* reset column info */ } } pctx->num_rows++; Dmsg0(800, "list_result finished first loop\n"); if (type == VERT_LIST) { goto vertical_list; } if (type == ARG_LIST) { goto arg_list; } if (type == JSON_LIST) { goto json_list; } Dmsg1(800, "list_result starts second loop looking at %d fields\n", mdb->sql_num_fields()); /* Keep the result to display the same line at the end of the table */ list_dashes(mdb, last_line_handler, pctx); send(ctx, pctx->line); send(ctx, "|"); mdb->sql_field_seek(0); for (i = 0; i < mdb->sql_num_fields(); i++) { Dmsg1(800, "list_result looking at field %d\n", i); field = mdb->sql_fetch_field(); if (!field) { break; } max_len = max_length(field->max_length); bsnprintf(buf, sizeof(buf), " %-*s |", max_len, field->name); send(ctx, buf); } send(ctx, "\n"); list_dashes(mdb, send, ctx); } Dmsg1(800, "list_result starts third loop looking at %d fields\n", mdb->sql_num_fields()); mdb->sql_field_seek(0); send(ctx, "|"); for (i = 0; i < mdb->sql_num_fields(); i++) { field = mdb->sql_fetch_field(); if (!field) { break; } max_len = max_length(field->max_length); if (row[i] == NULL) { bsnprintf(buf, sizeof(buf), " %-*s |", max_len, "NULL"); } else if (mdb->sql_field_is_numeric(field->type) && !jcr->gui && is_an_integer(row[i])) { bsnprintf(buf, sizeof(buf), " %*s |", max_len, add_commas(row[i], ewc)); } else { bsnprintf(buf, sizeof(buf), " %-*s |", 
max_len, row[i]); } send(ctx, buf); } send(ctx, "\n"); return 0; vertical_list: Dmsg1(800, "list_result starts vertical list at %d fields\n", mdb->sql_num_fields()); mdb->sql_field_seek(0); for (i = 0; i < mdb->sql_num_fields(); i++) { field = mdb->sql_fetch_field(); if (!field) { break; } if (row[i] == NULL) { bsnprintf(buf, sizeof(buf), " %*s: %s\n", max_len, field->name, "NULL"); } else if (mdb->sql_field_is_numeric(field->type) && !jcr->gui && is_an_integer(row[i])) { bsnprintf(buf, sizeof(buf), " %*s: %s\n", max_len, field->name, add_commas(row[i], ewc)); } else { bsnprintf(buf, sizeof(buf), " %*s: %s\n", max_len, field->name, row[i]); } send(ctx, buf); } send(ctx, "\n"); return 0; arg_list: Dmsg1(800, "list_result starts simple list at %d fields\n", mdb->sql_num_fields()); mdb->sql_field_seek(0); for (i = 0; i < mdb->sql_num_fields(); i++) { field = mdb->sql_fetch_field(); if (!field) { break; } if (row[i] == NULL) { bsnprintf(buf, sizeof(buf), "%s%s=", (i>0?" ":""), field->name); } else { bash_spaces(row[i]); bsnprintf(buf, sizeof(buf), "%s%s=%s ", (i>0?" ":""), field->name, row[i]); } send(ctx, buf); } send(ctx, "\n"); return 0; json_list: POOL_MEM tmp, qval, qkey; bool first=true; Dmsg1(800, "list_result starts json list at %d fields\n", mdb->sql_num_fields()); mdb->sql_field_seek(0); send(ctx, "{"); for (i = 0; i < mdb->sql_num_fields(); i++) { field = mdb->sql_fetch_field(); if (!field) { break; } quote_string(qkey.addr(), field->name); lcase(qkey.c_str()); if (!mdb->sql_field_is_numeric(field->type)) { quote_string(qval.addr(), NPRTB(row[i])); } else { pm_strcpy(qval, row[i]); } Mmsg(tmp, "%s%s: %s", first?"":",", qkey.c_str(), qval.c_str()); send(ctx, tmp.c_str()); first = false; } send(ctx, "}"); return 0; } /* Use this function to start a new JSON list */ void json_list_begin(DB_LIST_HANDLER *send, void *ctx, const char *title) { send(ctx, "{\"type\":\""); send(ctx, title); send(ctx, "\", \"data\":"); } /* Use this function to end a JSON list */ void json_list_end(DB_LIST_HANDLER *send, void *ctx, int errcode, const char *errmsg) { send(ctx, ",\"error\":0, \"errmsg\":\"\"}\n"); } /* * If full_list is set, we list vertically, otherwise, we * list on one line horizontally. 
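/*
 * A minimal standalone sketch of the JSON row emission pattern used in
 * the JSON_LIST branch above: column names are quoted and lower-cased,
 * string values are quoted, numeric values are left bare, and a "first"
 * flag places commas only between fields.  The field names and values
 * below are invented for the example; no Bacula types are used.
 */
#include <stdio.h>
#include <ctype.h>
#include <stdbool.h>

struct field { const char *name; const char *value; bool numeric; };

int main()
{
   struct field row[] = {
      { "JobId",    "1042",          true  },
      { "Name",     "BackupCatalog", false },
      { "JobBytes", "123456789",     true  },
   };
   int nfields = sizeof(row) / sizeof(row[0]);
   bool first = true;

   printf("{");
   for (int i = 0; i < nfields; i++) {
      char key[64];
      int j;
      /* lower-case the column name, as the JSON branch does for keys */
      for (j = 0; row[i].name[j] && j < (int)sizeof(key) - 1; j++) {
         key[j] = tolower((unsigned char)row[i].name[j]);
      }
      key[j] = 0;
      if (!first) {
         printf(",");
      }
      if (row[i].numeric) {
         printf("\"%s\": %s", key, row[i].value);      /* numeric: unquoted */
      } else {
         printf("\"%s\": \"%s\"", key, row[i].value);  /* string: quoted */
      }
      first = false;
   }
   printf("}\n");
   return 0;
}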
* Return number of rows */ int list_result(JCR *jcr, BDB *mdb, const char *title, DB_LIST_HANDLER *send, void *ctx, e_list_type type) { SQL_FIELD *field; SQL_ROW row; int i, col_len, max_len = 0; char buf[2000], ewc[30]; if (type == JSON_LIST) { json_list_begin(send, ctx, title); } Dmsg0(800, "list_result starts\n"); if (mdb->sql_num_rows() == 0) { if (type == JSON_LIST) { send(ctx, "[]"); json_list_end(send, ctx, 0, ""); } else { send(ctx, _("No results to list.\n")); } return mdb->sql_num_rows(); } Dmsg1(800, "list_result starts looking at %d fields\n", mdb->sql_num_fields()); /* determine column display widths */ mdb->sql_field_seek(0); for (i = 0; i < mdb->sql_num_fields(); i++) { Dmsg1(800, "list_result processing field %d\n", i); field = mdb->sql_fetch_field(); if (!field) { break; } col_len = cstrlen(field->name); if (type == VERT_LIST) { if (col_len > max_len) { max_len = col_len; } } else { if (mdb->sql_field_is_numeric(field->type) && (int)field->max_length > 0) { /* fixup for commas */ field->max_length += (field->max_length - 1) / 3; } if (col_len < (int)field->max_length) { col_len = field->max_length; } if (col_len < 4 && !mdb->sql_field_is_not_null(field->flags)) { col_len = 4; /* 4 = length of the word "NULL" */ } field->max_length = col_len; /* reset column info */ } } Dmsg0(800, "list_result finished first loop\n"); if (type == VERT_LIST) { goto vertical_list; } if (type == ARG_LIST) { goto arg_list; } if (type == JSON_LIST) { goto json_list; } Dmsg1(800, "list_result starts second loop looking at %d fields\n", mdb->sql_num_fields()); list_dashes(mdb, send, ctx); send(ctx, "|"); mdb->sql_field_seek(0); for (i = 0; i < mdb->sql_num_fields(); i++) { Dmsg1(800, "list_result looking at field %d\n", i); field = mdb->sql_fetch_field(); if (!field) { break; } max_len = max_length(field->max_length); bsnprintf(buf, sizeof(buf), " %-*s |", max_len, field->name); send(ctx, buf); } send(ctx, "\n"); list_dashes(mdb, send, ctx); Dmsg1(800, "list_result starts third loop looking at %d fields\n", mdb->sql_num_fields()); while ((row = mdb->sql_fetch_row()) != NULL) { mdb->sql_field_seek(0); send(ctx, "|"); for (i = 0; i < mdb->sql_num_fields(); i++) { field = mdb->sql_fetch_field(); if (!field) { break; } max_len = max_length(field->max_length); if (row[i] == NULL) { bsnprintf(buf, sizeof(buf), " %-*s |", max_len, "NULL"); } else if (mdb->sql_field_is_numeric(field->type) && !jcr->gui && is_an_integer(row[i])) { bsnprintf(buf, sizeof(buf), " %*s |", max_len, add_commas(row[i], ewc)); } else { strip_trailing_junk(row[i]); bsnprintf(buf, sizeof(buf), " %-*s |", max_len, row[i]); } send(ctx, buf); } send(ctx, "\n"); } list_dashes(mdb, send, ctx); return mdb->sql_num_rows(); vertical_list: Dmsg1(800, "list_result starts vertical list at %d fields\n", mdb->sql_num_fields()); while ((row = mdb->sql_fetch_row()) != NULL) { mdb->sql_field_seek(0); for (i = 0; i < mdb->sql_num_fields(); i++) { field = mdb->sql_fetch_field(); if (!field) { break; } if (row[i] == NULL) { bsnprintf(buf, sizeof(buf), " %*s: %s\n", max_len, field->name, "NULL"); } else if (mdb->sql_field_is_numeric(field->type) && !jcr->gui && is_an_integer(row[i])) { bsnprintf(buf, sizeof(buf), " %*s: %s\n", max_len, field->name, add_commas(row[i], ewc)); } else { strip_trailing_junk(row[i]); bsnprintf(buf, sizeof(buf), " %*s: %s\n", max_len, field->name, row[i]); } send(ctx, buf); } send(ctx, "\n"); } arg_list: Dmsg1(800, "list_result starts arg list at %d fields\n", mdb->sql_num_fields()); while ((row = mdb->sql_fetch_row()) != 
NULL) { mdb->sql_field_seek(0); for (i = 0; i < mdb->sql_num_fields(); i++) { field = mdb->sql_fetch_field(); if (!field) { break; } if (row[i] == NULL) { bsnprintf(buf, sizeof(buf), "%s%s=", (i>0?" ":""), field->name); } else { bash_spaces(row[i]); bsnprintf(buf, sizeof(buf), "%s%s=%s", (i>0?" ":""), field->name, row[i]); } send(ctx, buf); } send(ctx, "\n"); } return mdb->sql_num_rows(); json_list: Dmsg1(800, "list_result starts json list at %d fields\n", mdb->sql_num_fields()); POOL_MEM tmp, qval, qkey; bool rfirst=true; send(ctx, "["); while ((row = mdb->sql_fetch_row()) != NULL) { bool first=true; send(ctx, rfirst?"{" : ",{"); rfirst = false; mdb->sql_field_seek(0); for (i = 0; i < mdb->sql_num_fields(); i++) { field = mdb->sql_fetch_field(); if (!field) { break; } quote_string(qkey.addr(), field->name); lcase(qkey.c_str()); if (!mdb->sql_field_is_numeric(field->type)) { quote_string(qval.addr(), NPRTB(row[i])); } else { pm_strcpy(qval, row[i]); } Mmsg(tmp, "%s%s: %s", first?"":",", qkey.c_str(), qval.c_str()); send(ctx, tmp.c_str()); first = false; } send(ctx, "}"); } send(ctx, "]"); json_list_end(send, ctx, 0, ""); return mdb->sql_num_rows(); } /* * Open a new connection to mdb catalog. This function is used * by batch and accurate mode. */ bool BDB::bdb_open_batch_connection(JCR *jcr) { bool multi_db; multi_db = batch_insert_available(); if (!jcr->db_batch) { jcr->db_batch = bdb_clone_database_connection(jcr, multi_db); if (!jcr->db_batch) { Mmsg0(&errmsg, _("Could not init database batch connection\n")); Jmsg(jcr, M_FATAL, 0, "%s", errmsg); return false; } if (!jcr->db_batch->bdb_open_database(jcr)) { Mmsg2(&errmsg, _("Could not open database \"%s\": ERR=%s\n"), jcr->db_batch->get_db_name(), jcr->db_batch->bdb_strerror()); Jmsg(jcr, M_FATAL, 0, "%s", errmsg); return false; } } return true; } /* * !!! WARNING !!! Use this function only when bacula is stopped. * ie, after a fatal signal and before exiting the program * Print information about a BDB object. */ void bdb_debug_print(JCR *jcr, FILE *fp) { BDB *mdb = jcr->db; if (!mdb) { return; } fprintf(fp, "BDB=%p db_name=%s db_user=%s connected=%s\n", mdb, NPRTB(mdb->get_db_name()), NPRTB(mdb->get_db_user()), mdb->is_connected() ? "true" : "false"); fprintf(fp, "\tcmd=\"%s\" changes=%i\n", NPRTB(mdb->cmd), mdb->changes); mdb->print_lock_info(fp); } /* Scan a TAG_DBR object to help with SQL queries */ void TAG_DBR::gen_sql(JCR *jcr, BDB *db, const char **table_, /* Table name (Client, Job, Media...) */ const char **name_, /* Name of the record (Name, VolumeName, JobId) */ const char **id_, /* Id of the record (ClientId, MediaId, JobId) */ char *esc, /* Escaped name of the resource */ char *esc_name, /* Escaped name of the tag */ uint64_t *aclbits_, /* ACL used */ uint64_t *aclbits_extra_ /* Extra ACLs (not already included in the join list) */) { db->bdb_lock(); *esc = 0; *esc_name = 0; const char *name = NT_("Name"); const char *table = NULL; const char *id = NULL; uint64_t aclbits=0; /* Used in the WHERE */ uint64_t aclbits_extra=0; /* Used in the JOIN */ if (*Client) { table = "Client"; id = "ClientId"; db->bdb_escape_string(jcr, esc, Client, strlen(Client)); aclbits |= DB_ACL_BIT(DB_ACL_CLIENT); } else if (*Job) { table = "Job"; name = "Name"; id = "JobId"; int len = strlen(Job); /* 01234567890123456789012 */ if (len > 23) { /* Can be a full job name .2020-08-28_15.34.32_03 */ int idx=len-23; if (Job[idx] == '.' 
&& B_ISDIGIT(Job[idx+1]) && B_ISDIGIT(Job[idx+2]) && B_ISDIGIT(Job[idx+3]) && B_ISDIGIT(Job[idx+4]) && Job[idx+5] == '-' && B_ISDIGIT(Job[idx+6]) && B_ISDIGIT(Job[idx+7]) && Job[idx+8] == '-' && B_ISDIGIT(Job[idx+9]) && B_ISDIGIT(Job[idx+10]) && Job[idx+11] == '_' && B_ISDIGIT(Job[idx+12]) && B_ISDIGIT(Job[idx+13]) && Job[idx+14] == '.' && B_ISDIGIT(Job[idx+15]) && B_ISDIGIT(Job[idx+16]) && Job[idx+17] == '.' && B_ISDIGIT(Job[idx+18]) && B_ISDIGIT(Job[idx+19]) && Job[idx+20] == '_' && B_ISDIGIT(Job[idx+21]) && B_ISDIGIT(Job[idx+22]) && B_ISDIGIT(Job[idx+23]) == 0) { name = "Job"; } } db->bdb_escape_string(jcr, esc, Job, strlen(Job)); aclbits |= DB_ACL_BIT(DB_ACL_JOB); } else if (*Volume) { table = "Media"; name = "VolumeName"; id = "MediaId"; db->bdb_escape_string(jcr, esc, Volume, strlen(Volume)); aclbits_extra |= DB_ACL_BIT(DB_ACL_POOL); aclbits |= DB_ACL_BIT(DB_ACL_POOL); } else if (*Pool) { table = "Pool"; id = "PoolId"; db->bdb_escape_string(jcr, esc, Pool, strlen(Pool)); aclbits_extra |= DB_ACL_BIT(DB_ACL_POOL); aclbits |= DB_ACL_BIT(DB_ACL_POOL); } else if (*Object) { table = "Object"; name = "ObjectName"; id = "ObjectId"; db->bdb_escape_string(jcr, esc, Object, strlen(Object)); aclbits |= DB_ACL_BIT(DB_ACL_JOB); aclbits_extra |= DB_ACL_BIT(DB_ACL_JOB); } if (*Name) { db->bdb_escape_string(jcr, esc_name, Name, strlen(Name)); } db->bdb_unlock(); if (JobId > 0) { table = "Job"; name = "JobId"; id = "JobId"; edit_uint64(JobId, esc); aclbits |= DB_ACL_BIT(DB_ACL_JOB); } *table_ = table; *name_ = name; *id_ = id; *aclbits_ = aclbits; *aclbits_extra_ = aclbits_extra; } const char *BDB::search_op(JCR *jcr, const char *table_col, char *value, POOLMEM **esc, POOLMEM **dest) { int len = strlen(value); *esc = check_pool_memory_size(*esc, len*2+1); bdb_escape_string(jcr, *esc, (char*)value, len); Mmsg(dest, " %s ILIKE '%%%s%%'", table_col, value); return *dest; } #ifdef COMMUNITY bool BDB::bdb_check_settings(JCR *jcr, int64_t *starttime, int val, int64_t val2) { /* Implement checks for tuning hints */ return true; } #endif #endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL */ bacula-15.0.3/src/cats/sql_delete.c0000644000175000017500000002073414771010173016676 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Bacula Catalog Database Delete record interface routines * * Written by Kern Sibbald, December 2000-2014 * */ #include "bacula.h" #if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL #include "cats.h" /* ----------------------------------------------------------------------- * * Generic Routines (or almost generic) * * ----------------------------------------------------------------------- */ /* * Delete Pool record, must all associated * Media records are left untouched. The uppper * level should empty the pool first. 
* * Returns: 0 on error * 1 on success * PoolId = number of Pools deleted (should be 1) */ int BDB::bdb_delete_pool_record(JCR *jcr, POOL_DBR *pr) { SQL_ROW row; char esc[MAX_ESCAPE_NAME_LENGTH]; bdb_lock(); bdb_escape_string(jcr, esc, pr->Name, strlen(pr->Name)); Mmsg(cmd, "SELECT PoolId FROM Pool WHERE Name='%s'", esc); Dmsg1(10, "selectpool: %s\n", cmd); pr->PoolId = pr->NumVols = 0; if (QueryDB(jcr, cmd)) { int nrows = sql_num_rows(); if (nrows == 0) { Mmsg(errmsg, _("No pool record %s exists\n"), pr->Name); sql_free_result(); bdb_unlock(); return 0; } else if (nrows != 1) { Mmsg(errmsg, _("Expecting one pool record, got %d\n"), nrows); sql_free_result(); bdb_unlock(); return 0; } if ((row = sql_fetch_row()) == NULL) { Mmsg1(&errmsg, _("Error fetching row %s\n"), sql_strerror()); bdb_unlock(); return 0; } pr->PoolId = str_to_int64(row[0]); sql_free_result(); } /* Delete Pool */ Mmsg(cmd, "DELETE FROM Pool WHERE Pool.PoolId = %d", pr->PoolId); pr->PoolId = DeleteDB(jcr, cmd); Dmsg1(200, "Deleted %d Pool records\n", pr->PoolId); bdb_unlock(); return 1; } #define MAX_DEL_LIST_LEN 1000000 struct s_del_ctx { JobId_t *JobId; int num_ids; /* ids stored */ int max_ids; /* size of array */ int num_del; /* number deleted */ int tot_ids; /* total to process */ }; /* * Called here to make in memory list of JobIds to be * deleted. The in memory list will then be transversed * to issue the SQL DELETE commands. Note, the list * is allowed to get to MAX_DEL_LIST_LEN to limit the * maximum malloc'ed memory. */ static int delete_handler(void *ctx, int num_fields, char **row) { struct s_del_ctx *del = (struct s_del_ctx *)ctx; if (del->num_ids == MAX_DEL_LIST_LEN) { return 1; } if (del->num_ids == del->max_ids) { del->max_ids = (del->max_ids * 3) / 2; del->JobId = (JobId_t *)brealloc(del->JobId, sizeof(JobId_t) * del->max_ids); } del->JobId[del->num_ids++] = (JobId_t)str_to_int64(row[0]); return 0; } /* * This routine will purge (delete) all records * associated with a particular Volume. It will * not delete the media record itself. * TODO: This function is broken and it doesn't purge * File, BaseFiles, Log, ... * We call it from relabel and delete volume=, both ensure * that the volume is properly purged. */ static int do_media_purge(BDB *mdb, MEDIA_DBR *mr) { POOLMEM *query = get_pool_memory(PM_MESSAGE); struct s_del_ctx del; char ed1[50]; int i; del.num_ids = 0; del.tot_ids = 0; del.num_del = 0; del.max_ids = 0; Mmsg(mdb->cmd, "SELECT JobId from JobMedia WHERE MediaId=%lu", mr->MediaId); del.max_ids = mr->VolJobs; if (del.max_ids < 100) { del.max_ids = 100; } else if (del.max_ids > MAX_DEL_LIST_LEN) { del.max_ids = MAX_DEL_LIST_LEN; } del.JobId = (JobId_t *)malloc(sizeof(JobId_t) * del.max_ids); mdb->bdb_sql_query(mdb->cmd, delete_handler, (void *)&del); for (i=0; i < del.num_ids; i++) { Dmsg1(400, "Delete JobId=%d\n", del.JobId[i]); Mmsg(query, "DELETE FROM Job WHERE JobId=%s", edit_int64(del.JobId[i], ed1)); mdb->bdb_sql_query(query, NULL, (void *)NULL); Mmsg(query, "DELETE FROM File WHERE JobId=%s", edit_int64(del.JobId[i], ed1)); mdb->bdb_sql_query(query, NULL, (void *)NULL); Mmsg(query, "DELETE FROM JobMedia WHERE JobId=%s", edit_int64(del.JobId[i], ed1)); mdb->bdb_sql_query(query, NULL, (void *)NULL); Mmsg(query, "DELETE FROM FileMedia WHERE JobId=%s", edit_int64(del.JobId[i], ed1)); mdb->bdb_sql_query(query, NULL, (void *)NULL); } free(del.JobId); free_pool_memory(query); return 1; } /* Delete Media record and all records that * are associated with it. 
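/*
 * A minimal standalone sketch of the JobId collection pattern used by
 * delete_handler() and do_media_purge() above: ids are gathered into a
 * malloc'ed array that starts at an estimated size (at least 100),
 * grows by 3/2 when full, and is capped to bound memory use.  The CAP
 * value, id_list type and sample ids are illustrative stand-ins only.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define CAP 1000000                    /* stands in for MAX_DEL_LIST_LEN */

struct id_list {
   uint32_t *ids;
   int num_ids;                        /* ids stored */
   int max_ids;                        /* size of array */
};

/* Returns 0 when stored, 1 when no more ids can be accepted */
static int add_id(struct id_list *l, uint32_t id)
{
   if (l->num_ids == CAP) {
      return 1;                        /* hard cap reached */
   }
   if (l->num_ids == l->max_ids) {
      int new_max = (l->max_ids * 3) / 2;         /* grow by 3/2, as the handler does */
      uint32_t *p = (uint32_t *)realloc(l->ids, sizeof(uint32_t) * new_max);
      if (!p) {
         return 1;                     /* allocation failed; keep what we have */
      }
      l->ids = p;
      l->max_ids = new_max;
   }
   l->ids[l->num_ids++] = id;
   return 0;
}

int main()
{
   struct id_list l;
   l.max_ids = 100;                    /* same minimum starting size as the purge code */
   l.num_ids = 0;
   l.ids = (uint32_t *)malloc(sizeof(uint32_t) * l.max_ids);
   for (uint32_t id = 1; id <= 1000; id++) {
      add_id(&l, id);
   }
   printf("stored %d ids, array grew to %d slots\n", l.num_ids, l.max_ids);
   free(l.ids);
   return 0;
}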
*/ int BDB::bdb_delete_media_record(JCR *jcr, MEDIA_DBR *mr) { bdb_lock(); if (mr->MediaId == 0 && !bdb_get_media_record(jcr, mr)) { bdb_unlock(); return 0; } /* Do purge if not already purged */ if (strcmp(mr->VolStatus, "Purged") != 0) { /* Delete associated records */ do_media_purge(this, mr); } Mmsg(cmd, "DELETE FROM Media WHERE MediaId=%lu", mr->MediaId); bdb_sql_query(cmd, NULL, (void *)NULL); Mmsg(cmd, "DELETE FROM TagMedia WHERE MediaId=%lu", mr->MediaId); bdb_sql_query(cmd, NULL, (void *)NULL); bdb_unlock(); return 1; } /* * Purge all records associated with a * media record. This does not delete the * media record itself. But the media status * is changed to "Purged". */ int BDB::bdb_purge_media_record(JCR *jcr, MEDIA_DBR *mr) { bdb_lock(); if (mr->MediaId == 0 && !bdb_get_media_record(jcr, mr)) { bdb_unlock(); return 0; } /* Delete associated records */ do_media_purge(this, mr); /* Note, always purge */ /* Mark Volume as purged */ strcpy(mr->VolStatus, "Purged"); if (!bdb_update_media_record(jcr, mr)) { bdb_unlock(); return 0; } bdb_unlock(); return 1; } /* Delete Snapshot record */ int BDB::bdb_delete_snapshot_record(JCR *jcr, SNAPSHOT_DBR *sr) { bdb_lock(); if (sr->SnapshotId == 0 && !bdb_get_snapshot_record(jcr, sr)) { bdb_unlock(); return 0; } Mmsg(cmd, "DELETE FROM Snapshot WHERE SnapshotId=%d", sr->SnapshotId); bdb_sql_query(cmd, NULL, (void *)NULL); bdb_unlock(); return 1; } /* Delete Client record */ int BDB::bdb_delete_client_record(JCR *jcr, CLIENT_DBR *cr) { bdb_lock(); if (cr->ClientId == 0 && !bdb_get_client_record(jcr, cr)) { bdb_unlock(); return 0; } Mmsg(cmd, "DELETE FROM Client WHERE ClientId=%d", cr->ClientId); bdb_sql_query(cmd, NULL, (void *)NULL); Mmsg(cmd, "DELETE FROM TagClient WHERE ClientId=%d", cr->ClientId); bdb_sql_query(cmd, NULL, (void *)NULL); bdb_unlock(); return 1; } /* Delete Tag record */ int BDB::bdb_delete_tag_record(JCR *jcr, TAG_DBR *tag) { char esc[MAX_ESCAPE_NAME_LENGTH]; char esc_name[MAX_ESCAPE_NAME_LENGTH]; uint64_t aclbits, aclbits_extra; const char *name; const char *table; const char *id; bool ret = false; tag->gen_sql(jcr, this, &table, &name, &id, esc, esc_name, &aclbits, &aclbits_extra); bdb_lock(); /* TODO: Need a special kind of ACL */ const char *join = get_acl_join_filter(aclbits_extra); const char *whereand = get_acls(aclbits, false); if (*esc_name) { /* We have a tag name */ if (tag->all) { Mmsg(cmd, "DELETE FROM Tag%s WHERE Tag = '%s'", table, esc_name); } else { Mmsg(cmd, "DELETE FROM Tag%s WHERE Tag = '%s' AND %s IN (SELECT W.%sId FROM %s AS W %s WHERE W.%s = '%s' %s)", table, esc_name, id, table, table, join, name, esc, whereand); } } else { Mmsg(cmd, "DELETE FROM Tag%s WHERE %sId IN (SELECT W.%s FROM %s AS W %s WHERE W.%s = '%s' %s)", table, table, id, table, join, name, esc, whereand); } Dmsg1(DT_SQL|50, "q=%s\n", cmd); ret = bdb_sql_query(cmd, NULL, (void *)NULL); bdb_unlock(); return ret; } #endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL */ bacula-15.0.3/src/cats/create_postgresql_database.in0000644000175000017500000000315014771010173022304 0ustar bsbuildbsbuild#!/bin/sh # # Copyright (C) 2000-2023 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # # shell script to create Bacula database(s) # PATH="@POSTGRESQL_BINDIR@:$PATH" db_name=${db_name:-@db_name@} # # use SQL_ASCII to be able to put any filename into # the database even those created with unusual character sets PSQLVERSION=`psql -d template1 -c 'select version()' $* | awk '/PostgreSQL/ {print $2}' | cut -d '.' 
-f 1,2` # # Note, LC_COLLATE and LC_TYPE are needed on 8.4 and beyond, but are # not implemented in 8.3 or below. # case ${PSQLVERSION} in 6.* | 7.* | 8.[0123]) ENCODING="ENCODING 'SQL_ASCII'" ;; *) ENCODING="ENCODING 'SQL_ASCII' LC_COLLATE 'C' LC_CTYPE 'C'" ;; esac # # Please note: !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # We do not recommend that you use ENCODING 'SQL_UTF8' # It can result in creating filenames in the database that # cannot be seen or restored. # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # # if psql -f - -d template1 $* </dev/null; then echo "Database encoding OK" else echo " " echo "Database encoding bad. Do not use this database" echo " " exit 1 fi bacula-15.0.3/src/cats/drop_postgresql_tables.in0000644000175000017500000000313714771010173021520 0ustar bsbuildbsbuild#!/bin/sh # # Copyright (C) 2000-2023 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # # shell script to delete Bacula tables for PostgreSQL bindir=@POSTGRESQL_BINDIR@ db_name=@db_name@ $bindir/psql -f - -d ${db_name} $* </dev/null 2>/dev/null drop table if exists MalwareMD5; drop table if exists MalwareSHA256; drop table if exists FileEvents; drop table if exists MetaEmail; drop table if exists MetaAttachment; drop table if exists TagJob; drop table if exists TagClient; drop table if exists TagMedia; drop table if exists TagObject; drop table if exists Object; drop table if exists Events; drop table if exists unsavedfiles; drop table if exists basefiles; drop table if exists jobmedia; drop table if exists filemedia; drop table if exists file; drop table if exists job; drop table if exists jobhisto; drop table if exists media; drop table if exists client; drop table if exists pool; drop table if exists fileset; drop table if exists path; drop table if exists counters; drop table if exists version; drop table if exists CDImages; drop table if exists Device; drop table if exists Storage; drop table if exists MediaType; drop table if exists Status; drop table if exists mac; drop table if exists log; drop table if exists Location; drop table if exists locationlog; drop table if exists PathVisibility; drop table if exists PathHierarchy; drop table if exists RestoreObject; drop table if exists Snapshot; END-OF-DATA pstat=$? if test $pstat = 0; then echo "Deletion of Bacula PostgreSQL tables succeeded." else echo "Deletion of Bacula PostgreSQL tables failed." fi exit $pstat bacula-15.0.3/src/cats/drop_bacula_database.in0000755000175000017500000000141514771010173021036 0ustar bsbuildbsbuild#!/bin/sh # # Copyright (C) 2000-2023 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # # Drop Bacula database -- works for whatever is configured, # MySQL, SQLite, PostgreSQL, Ingres # default_db_type=@DEFAULT_DB_TYPE@ # # See if the first argument is a valid backend name. # If so the user overrides the default database backend. # if [ $# -gt 0 ]; then case $1 in sqlite3) db_type=$1 shift ;; mysql) db_type=$1 shift ;; postgresql) db_type=$1 shift ;; *) ;; esac fi # # If no new db_type is gives use the default db_type. 
# if [ -z "${db_type}" ]; then db_type="${default_db_type}" fi echo "Dropping ${db_type} database" @scriptdir@/drop_${db_type}_database $* bacula-15.0.3/src/tools/0000755000175000017500000000000014771010173014611 5ustar bsbuildbsbuildbacula-15.0.3/src/tools/test-cpp.c0000644000175000017500000000344514771010173016522 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* Written by Eric Bollengier March 2021 */ #include "bacula.h" #include "lib/unittests.h" /* These macro are defined in src/baconfig.h */ #if __cplusplus >= 201103L # define bdelete_and_null_auto(a) do{if(a){auto b__ = a; (a)=NULL; delete b__;}} while(0) # define TEST_AUTO { ok(1, "auto available"); } #else # define TEST_AUTO { ok(1, "auto not available"); } #endif # ifdef HAVE_TYPEOF # define bdelete_and_null_typeof(a) do{if(a){typeof(a) b__ = a; (a)=NULL; delete b__;}} while(0) # define TEST_TYPEOF { ok(1, "typeof available"); } # else # define TEST_TYPEOF { ok(1, "typeof not available"); } #endif class obj { public: obj(){}; ~obj(){ok(1, "delete was called properly");} }; int main() { Unittests alist_test("test-cpp"); log("Test C++ Features ..."); TEST_AUTO; TEST_TYPEOF; obj *a = new obj(); obj *b = new obj(); char *c = (char *)malloc(10); bdelete_and_null_auto(a); bdelete_and_null_typeof(b); bfree_and_null(c); is(unittest_get_nb_tests(), 2, "Test if the two delete were done"); ok(c == NULL, "bfree_and_null()"); ok(a == NULL, "bdelete_and_null_auto()"); ok(b == NULL, "bdelete_and_null_typeof()"); return report(); } bacula-15.0.3/src/tools/bpluginfo.c0000644000175000017500000003702214771010173016746 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* Contributed in 2012 by Inteos sp. z o.o. * * Utility tool display various information about plugins * including but not limited to: * - Name and Author of the plugin * - Plugin License * - Description * - API version * - Enabled functions, etc. */ #ifdef working /* currently will not compile KES 21 Nov 2015 */ #include #include #include #include #include #include #ifndef __WIN32__ #include #endif #include "bacula.h" #include "../filed/fd_plugins.h" #include "../dird/dir_plugins.h" // I can't include sd_plugins.h here ... 
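/*
 * A minimal standalone sketch of the symbol-resolution step this tool
 * relies on: dlopen() the plugin shared object, resolve an exported
 * entry point such as loadPlugin() with dlsym(), and report failure if
 * the symbol is missing.  The plugin path below is a placeholder, no
 * Bacula structures are involved, and the sketch is meant to be built
 * separately (e.g. "cc sketch.c -ldl"), not as part of this file.
 */
#include <stdio.h>
#include <dlfcn.h>

int main(int argc, char *argv[])
{
   const char *path = argc > 1 ? argv[1] : "./example-plugin.so";   /* placeholder */
   void *handle = dlopen(path, RTLD_LAZY);
   if (!handle) {
      fprintf(stderr, "cannot load %s: %s\n", path, dlerror());
      return 1;
   }
   /* Look up an exported entry point by name; a missing symbol usually
    * means the file is not the kind of plugin we expected. */
   void *sym = dlsym(handle, "loadPlugin");
   if (!sym) {
      fprintf(stderr, "no loadPlugin symbol: %s\n", dlerror());
      dlclose(handle);
      return 2;
   }
   printf("loadPlugin resolved at %p\n", sym);
   dlclose(handle);
   return 0;
}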
#include "../stored/stored.h" #include "assert_macro.h" extern "C" { typedef int (*loadPlugin) (void *binfo, void *bfuncs, void **pinfo, void **pfuncs); typedef int (*unloadPlugin) (void); } #define DEFAULT_API_VERSION 1 enum plugintype { DIRPLUGIN, FDPLUGIN, SDPLUGIN, ERRORPLUGIN, }; /* * pDirInfo * pInfo * psdInfo */ typedef union _pluginfo pluginfo; union _pluginfo { pDirInfo pdirinfo; pInfo pfdinfo; psdInfo psdinfo; }; /* * pDirFuncs * pFuncs * psdFuncs */ typedef union _plugfuncs plugfuncs; union _plugfuncs { pDirFuncs pdirfuncs; pFuncs pfdfuncs; psdFuncs psdfuncs; }; /* * bDirFuncs * bFuncs * bsdFuncs */ /* * TODO: change to union * typedef union _baculafuncs baculafuncs; union _baculafuncs { bDirFuncs bdirfuncs; bFuncs bfdfuncs; bsdFuncs bsdfuncs; }; */ typedef struct _baculafuncs baculafuncs; struct _baculafuncs { uint32_t size; uint32_t version; int (*registerBaculaEvents) (void *ctx, ...); int (*getBaculaValue) (void *ctx, int var, void *value); int (*setBaculaValue) (void *ctx, int var, void *value); int (*JobMessage) (void *ctx, const char *file, int line, int type, int64_t mtime, const char *fmt, ...); int (*DebugMessage) (void *ctx, const char *file, int line, int level, const char *fmt, ...); void *(*baculaMalloc) (void *ctx, const char *file, int line, size_t size); void (*baculaFree) (void *ctx, const char *file, int line, void *mem); }; /* * bDirInfo * bInfo * bsdInfo */ typedef union _baculainfos baculainfos; union _baculainfos { bDirInfo bdirinfo; bInfo bfdinfo; bsdInfo bsdinfo; }; /* typedef struct _baculainfos baculainfos; struct _baculainfos { uint32_t size; uint32_t version; }; */ typedef struct _progdata progdata; struct _progdata { int verbose; int listinfo; int listfunc; char *pluginfile; void *pluginhandle; int bapiversion; int bplugtype; pluginfo *pinfo; plugfuncs *pfuncs; }; /* memory allocation/deallocation */ #define MALLOC(size) \ (char *) bmalloc ( size ); #define ASSERT_MEMORY(m) \ if ( m == NULL ){ \ printf ( "Error: memory allocation error!\n" ); \ exit (10); \ } #define FREE(ptr) \ if ( ptr != NULL ){ \ bfree ( ptr ); \ ptr = NULL; \ } int registerBaculaEvents(void *ctx, ...) { return 0; }; int getBaculaValue(void *ctx, int var, void *value) { return 0; }; int setBaculaValue(void *ctx, int var, void *value) { return 0; }; int DebugMessage(void *ctx, const char *file, int line, int level, const char *fmt, ...) { #ifdef DEBUGMSG printf("DG: %s:%d %s\n", file, line, fmt); #endif return 0; }; int JobMessage(void *ctx, const char *file, int line, int type, int64_t mtime, const char *fmt, ...) 
{ #ifdef DEBUGMSG printf("JM: %s:%d <%d> %s\n", file, line, type, fmt); #endif return 0; }; void *baculaMalloc(void *ctx, const char *file, int line, size_t size) { return MALLOC(size); }; void baculaFree(void *ctx, const char *file, int line, void *mem) { FREE(mem); }; /* * displays a short help */ void print_help(int argc, char *argv[]) { printf("\n" "Usage: bpluginfo [options] \n" " -v verbose\n" " -i list plugin header information only (default)\n" " -f list plugin functions information only\n" " -a bacula api version (default %d)\n" " -h help screen\n" "\n", DEFAULT_API_VERSION); } /* allocates and resets a main program data variable */ progdata *allocpdata(void) { progdata *pdata; pdata = (progdata *) bmalloc(sizeof(progdata)); ASSERT_MEMORY(pdata); memset(pdata, 0, sizeof(progdata)); return pdata; } /* releases all allocated program data resources */ void freepdata(progdata * pdata) { if (pdata->pluginfile) { FREE(pdata->pluginfile); } FREE(pdata); } /* * parse execution arguments and fills required pdata structure fields * * input: * pdata - pointer to program data structure * argc, argv - execution envinroment variables * output: * pdata - required structure fields * * supported options: * -v verbose flag * -i list plugin header info only (default) * -f list implemented functions only * -a bacula api version (default 1) * -h help screen */ void parse_args(progdata * pdata, int argc, char *argv[]) { int i; char *dirtmp; char *progdir; int api; int s; if (argc < 2) { /* TODO - add a real help screen */ printf("\nNot enough parameters!\n"); print_help(argc, argv); exit(1); } if (argc > 5) { /* TODO - add a real help screen */ printf("\nToo many parameters!\n"); print_help(argc, argv); exit(1); } for (i = 1; i < argc; i++) { if (strcmp(argv[i], "-h") == 0) { /* help screen */ print_help(argc, argv); exit(0); } if (strcmp(argv[i], "-v") == 0) { /* verbose option */ pdata->verbose = 1; continue; } if (strcmp(argv[i], "-f") == 0) { /* functions list */ pdata->listfunc = 1; continue; } if (strcmp(argv[i], "-i") == 0) { /* header list */ pdata->listinfo = 1; continue; } if (strcmp(argv[i], "-a") == 0) { /* bacula api version */ if (i < argc - 1) { s = sscanf(argv[i + 1], "%d", &api); if (s == 1) { pdata->bapiversion = api; i++; continue; } } printf("\nAPI version number required!\n"); print_help(argc, argv); exit(1); } if (!pdata->pluginfile) { if (argv[i][0] != '/') { dirtmp = MALLOC(PATH_MAX); ASSERT_MEMORY(dirtmp); progdir = MALLOC(PATH_MAX); ASSERT_MEMORY(progdir); dirtmp = getcwd(dirtmp, PATH_MAX); strcat(dirtmp, "/"); strcat(dirtmp, argv[i]); if (realpath(dirtmp, progdir) == NULL) { /* error in resolving path */ FREE(progdir); progdir = bstrdup(argv[i]); } pdata->pluginfile = bstrdup(progdir); FREE(dirtmp); FREE(progdir); } else { pdata->pluginfile = bstrdup(argv[i]); } continue; } } } /* * checks a plugin type based on a plugin magic string * * input: * pdata - program data with plugin info structure * output: * int - enum plugintype */ int getplugintype(progdata * pdata) { ASSERT_NVAL_RET_V(pdata, ERRORPLUGIN); pluginfo *pinfo = pdata->pinfo; ASSERT_NVAL_RET_V(pinfo, ERRORPLUGIN); if (pinfo->pdirinfo.plugin_magic && strcmp(pinfo->pdirinfo.plugin_magic, DIR_PLUGIN_MAGIC) == 0) { return DIRPLUGIN; } else if (pinfo->pfdinfo.plugin_magic && strcmp(pinfo->pfdinfo.plugin_magic, FD_PLUGIN_MAGIC) == 0) { return FDPLUGIN; } else if (pinfo->psdinfo.plugin_magic && strcmp(pinfo->psdinfo.plugin_magic, SD_PLUGIN_MAGIC) == 0) { return SDPLUGIN; } else { return ERRORPLUGIN; } } /* * prints any 
available information about a plugin * * input: * pdata - program data with plugin info structure * output: * printed info */ void dump_pluginfo(progdata * pdata) { ASSERT_NVAL_RET(pdata); pluginfo *pinfo = pdata->pinfo; ASSERT_NVAL_RET(pinfo); plugfuncs *pfuncs = pdata->pfuncs; ASSERT_NVAL_RET(pfuncs); switch (pdata->bplugtype) { case DIRPLUGIN: printf("\nPlugin type:\t\tBacula Director plugin\n"); if (pdata->verbose) { printf("Plugin magic:\t\t%s\n", NPRT(pinfo->pdirinfo.plugin_magic)); } printf("Plugin version:\t\t%s\n", pinfo->pdirinfo.plugin_version); printf("Plugin release date:\t%s\n", NPRT(pinfo->pdirinfo.plugin_date)); printf("Plugin author:\t\t%s\n", NPRT(pinfo->pdirinfo.plugin_author)); printf("Plugin licence:\t\t%s\n", NPRT(pinfo->pdirinfo.plugin_license)); printf("Plugin description:\t%s\n", NPRT(pinfo->pdirinfo.plugin_description)); printf("Plugin API version:\t%d\n", pinfo->pdirinfo.version); break; case FDPLUGIN: printf("\nPlugin type:\t\tFile Daemon plugin\n"); if (pdata->verbose) { printf("Plugin magic:\t\t%s\n", NPRT(pinfo->pfdinfo.plugin_magic)); } printf("Plugin version:\t\t%s\n", pinfo->pfdinfo.plugin_version); printf("Plugin release date:\t%s\n", NPRT(pinfo->pfdinfo.plugin_date)); printf("Plugin author:\t\t%s\n", NPRT(pinfo->pfdinfo.plugin_author)); printf("Plugin licence:\t\t%s\n", NPRT(pinfo->pfdinfo.plugin_license)); printf("Plugin description:\t%s\n", NPRT(pinfo->pfdinfo.plugin_description)); printf("Plugin API version:\t%d\n", pinfo->pfdinfo.version); break; case SDPLUGIN: printf("\nPlugin type:\t\tBacula Storage plugin\n"); if (pdata->verbose) { printf("Plugin magic:\t\t%s\n", NPRT(pinfo->psdinfo.plugin_magic)); } printf("Plugin version:\t\t%s\n", pinfo->psdinfo.plugin_version); printf("Plugin release date:\t%s\n", NPRT(pinfo->psdinfo.plugin_date)); printf("Plugin author:\t\t%s\n", NPRT(pinfo->psdinfo.plugin_author)); printf("Plugin licence:\t\t%s\n", NPRT(pinfo->psdinfo.plugin_license)); printf("Plugin description:\t%s\n", NPRT(pinfo->psdinfo.plugin_description)); printf("Plugin API version:\t%d\n", pinfo->psdinfo.version); break; default: printf("\nUnknown plugin type or other Error\n\n"); } } /* * prints any available information about plugin' functions * * input: * pdata - program data with plugin info structure * output: * printed info */ void dump_plugfuncs(progdata * pdata) { ASSERT_NVAL_RET(pdata); plugfuncs *pfuncs = pdata->pfuncs; ASSERT_NVAL_RET(pfuncs); printf("\nPlugin functions:\n"); switch (pdata->bplugtype) { case DIRPLUGIN: if (pdata->verbose) { if (pfuncs->pdirfuncs.newPlugin) { printf(" newPlugin()\n"); } if (pfuncs->pdirfuncs.freePlugin) { printf(" freePlugin()\n"); } } if (pfuncs->pdirfuncs.getPluginValue) { printf(" getPluginValue()\n"); } if (pfuncs->pdirfuncs.setPluginValue) { printf(" setPluginValue()\n"); } if (pfuncs->pdirfuncs.handlePluginEvent) { printf(" handlePluginEvent()\n"); } break; case FDPLUGIN: if (pdata->verbose) { if (pfuncs->pfdfuncs.newPlugin) { printf(" newPlugin()\n"); } if (pfuncs->pfdfuncs.freePlugin) { printf(" freePlugin()\n"); } } if (pfuncs->pfdfuncs.getPluginValue) { printf(" getPluginValue()\n"); } if (pfuncs->pfdfuncs.setPluginValue) { printf(" setPluginValue()\n"); } if (pfuncs->pfdfuncs.handlePluginEvent) { printf(" handlePluginEvent()\n"); } if (pfuncs->pfdfuncs.startBackupFile) { printf(" startBackupFile()\n"); } if (pfuncs->pfdfuncs.endBackupFile) { printf(" endBackupFile()\n"); } if (pfuncs->pfdfuncs.startRestoreFile) { printf(" startRestoreFile()\n"); } if (pfuncs->pfdfuncs.endRestoreFile) { printf(" 
endRestoreFile()\n"); } if (pfuncs->pfdfuncs.pluginIO) { printf(" pluginIO()\n"); } if (pfuncs->pfdfuncs.createFile) { printf(" createFile()\n"); } if (pfuncs->pfdfuncs.setFileAttributes) { printf(" setFileAttributes()\n"); } if (pfuncs->pfdfuncs.checkFile) { printf(" checkFile()\n"); } break; case SDPLUGIN: if (pdata->verbose) { if (pfuncs->psdfuncs.newPlugin) { printf(" newPlugin()\n"); } if (pfuncs->psdfuncs.freePlugin) { printf(" freePlugin()\n"); } } if (pfuncs->psdfuncs.getPluginValue) { printf(" getPluginValue()\n"); } if (pfuncs->psdfuncs.setPluginValue) { printf(" setPluginValue()\n"); } if (pfuncs->psdfuncs.handlePluginEvent) { printf(" handlePluginEvent()\n"); } break; default: printf("\nUnknown plugin type or other Error\n\n"); } } /* * input parameters: * argv[0] [options] * * exit codes: * 0 - success * 1 - cannot load a plugin * 2 - cannot find a loadPlugin function * 3 - cannot find an unloadPlugin function * 10 - not enough memory */ int main(int argc, char *argv[]) { progdata *pdata; loadPlugin loadplugfunc; unloadPlugin unloadplugfunc; baculafuncs bfuncs = { sizeof(bfuncs), 1, registerBaculaEvents, getBaculaValue, setBaculaValue, JobMessage, DebugMessage, baculaMalloc, baculaFree, }; baculainfos binfos; pdata = allocpdata(); parse_args(pdata, argc, argv); binfos.bfdinfo.size = sizeof(binfos); binfos.bfdinfo.version = DEFAULT_API_VERSION; pdata->pluginhandle = dlopen(pdata->pluginfile, RTLD_LAZY); if (pdata->pluginhandle == NULL) { printf("\nCannot load a plugin: %s\n\n", dlerror()); freepdata(pdata); exit(1); } loadplugfunc = (loadPlugin) dlsym(pdata->pluginhandle, "loadPlugin"); if (loadplugfunc == NULL) { printf("\nCannot find loadPlugin function: %s\n", dlerror()); printf("\nWhether the file is a really Bacula plugin?\n\n"); freepdata(pdata); exit(2); } unloadplugfunc = (unloadPlugin) dlsym(pdata->pluginhandle, "unloadPlugin"); if (unloadplugfunc == NULL) { printf("\nCannot find unloadPlugin function: %s\n", dlerror()); printf("\nWhether the file is a really Bacula plugin?\n\n"); freepdata(pdata); exit(3); } if (pdata->bapiversion > 0) { binfos.bdirinfo.version = pdata->bapiversion; } loadplugfunc(&binfos, &bfuncs, (void **)&pdata->pinfo, (void **)&pdata->pfuncs); pdata->bplugtype = getplugintype(pdata); if (!pdata->listfunc) { dump_pluginfo(pdata); } if ((!pdata->listinfo && pdata->listfunc) || pdata->verbose) { dump_plugfuncs(pdata); } printf("\n"); unloadplugfunc(); dlclose(pdata->pluginhandle); freepdata(pdata); return 0; } #endif /* working */ bacula-15.0.3/src/tools/test_tags.c0000644000175000017500000000611514771010173016755 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* Simple tool to test chk_dbglvl() macro */ #include "bacula.h" #include "lib/unittests.h" void *start_heap; int main (int argc, char *argv[]) { Unittests t("tags_test", true, true); start_heap = sbrk(0); setlocale(LC_ALL, ""); bindtextdomain("bacula", LOCALEDIR); textdomain("bacula"); init_stack_dump(); my_name_is(argc, argv, "dmsg"); init_msg(NULL, NULL); daemon_start_time = time(NULL); set_working_directory("/tmp/"); set_thread_concurrency(150); debug_level = 0; debug_level_tags = 0; nok(chk_dbglvl(10), "debug_level < 10"); ok(chk_dbglvl(0), "lvl 0"); nok(chk_dbglvl(DT_NETWORK), "no tag defined"); nok(chk_dbglvl(DT_NETWORK|10), "no tag, debug_level < 10"); debug_level = 10; debug_level_tags = 0; ok(chk_dbglvl(10), "debug_level = 10"); ok(chk_dbglvl(0), "lvl 0"); nok(chk_dbglvl(DT_NETWORK), "no tag defined"); nok(chk_dbglvl(DT_NETWORK|10), "no tag, debug_level = 10"); debug_level = 20; debug_level_tags = 0; ok(chk_dbglvl(10), "debug_level > 10"); ok(chk_dbglvl(0), "lvl 0"); nok(chk_dbglvl(DT_NETWORK), "no tag defined"); nok(chk_dbglvl(DT_NETWORK|10), "no tag, debug_level = 20"); debug_level = 20; debug_level_tags = DT_NETWORK; ok(chk_dbglvl(10), "debug_level > 10"); ok(chk_dbglvl(0), "lvl 0"); ok(chk_dbglvl(DT_NETWORK), "network tag defined"); ok(chk_dbglvl(DT_NETWORK|10), "tag defined, debug_level > lvl"); nok(chk_dbglvl(DT_NETWORK|30), "tag defined, debug_level < lvl"); debug_level = 0; debug_level_tags = 0; ok(debug_parse_tags("network,volume", &debug_level_tags), "parse tags"); nok(chk_dbglvl(10), "debug_level > 10"); ok(chk_dbglvl(0), "lvl 0"); ok(chk_dbglvl(DT_NETWORK), "network tag defined"); nok(chk_dbglvl(DT_NETWORK|10), "tag defined, debug_level > lvl"); ok(debug_parse_tags("!network,!volume", &debug_level_tags), "parse tags"); nok(chk_dbglvl(10), "debug_level > 10"); ok(chk_dbglvl(0), "lvl 0"); nok(chk_dbglvl(DT_NETWORK), "network tag not defined"); nok(chk_dbglvl(DT_NETWORK|10), "no tag, debug_level > lvl"); ok(debug_parse_tags("all,!network", &debug_level_tags), "parse tags"); nok(chk_dbglvl(DT_NETWORK), "network not defined"); ok(chk_dbglvl(DT_VOLUME), "volume tag defined"); ok(chk_dbglvl(DT_SQL), "sql tag defined"); nok(debug_parse_tags("blabla", &debug_level_tags), "bad tags"); ok(debug_parse_tags("", &debug_level_tags), "empty tags"); term_msg(); return report(); } bacula-15.0.3/src/tools/bsmtp.c0000644000175000017500000004571414771010173016115 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* Derived from smtp-orig.c AUTHOR(S) W.Z. Venema Eindhoven University of Technology Department of Mathematics and Computer Science Den Dolech 2, P.O. Box 513, 5600 MB Eindhoven, The Netherlands CREATION DATE Wed Dec 1 14:51:13 MET 1993 LAST UPDATE Fri Aug 11 12:29:23 MET DST 1995 COPYRIGHT Copyright 1993-1995 by Wietse Venema. All rights reserved. Some individual files may be covered by other copyrights. 
This material was originally written and compiled by Wietse Venema at Eindhoven University of Technology, The Netherlands, in 1993, 1994, and 1995. Redistribution and use in source and binary forms, with or without modification, are permitted provided that this entire copyright notice is duplicated in all such copies. This software is provided "as is" and without any expressed or implied warranties, including, without limitation, the implied warranties of merchantibility and fitness for any particular purpose. */ #include "bacula.h" #include "jcr.h" #define MY_NAME "bsmtp" #if defined(HAVE_WIN32) #include #endif #ifndef MAXSTRING #define MAXSTRING 254 #endif enum resolv_type { RESOLV_PROTO_ANY, RESOLV_PROTO_IPV4, RESOLV_PROTO_IPV6 }; static FILE *sfp; static FILE *rfp; static char *from_addr = NULL; static char *cc_addr = NULL; static char *subject = NULL; static char *err_addr = NULL; static const char *mailhost = NULL; static char *reply_addr = NULL; static char *sender_addr = NULL; static char *custom_headers[10]; static int mailport = 25; static char my_hostname[MAXSTRING]; static bool content_utf8 = false; static resolv_type default_resolv_type = RESOLV_PROTO_IPV4; /* * Take input that may have names and other stuff and strip * it down to the mail box address ... i.e. what is enclosed * in < >. Otherwise add < >. */ static char *cleanup_addr(char *addr, char *buf, int buf_len) { char *p, *q; if ((p = strchr(addr, '<')) == NULL) { snprintf(buf, buf_len, "<%s>", addr); } else { /* Copy */ for (q=buf; *p && *p!='>'; ) { *q++ = *p++; } if (*p) { *q++ = *p; } *q = 0; } Dmsg2(100, "cleanup in=%s out=%s\n", addr, buf); return buf; } /* * get_response - examine message from server */ static void get_response(void) { char buf[1000]; Dmsg0(50, "Calling fgets on read socket rfp.\n"); buf[3] = 0; while (fgets(buf, sizeof(buf), rfp)) { int len = strlen(buf); if (len > 0) { buf[len-1] = 0; } if (debug_level >= 10) { fprintf(stderr, "%s <-- %s\n", mailhost, buf); } Dmsg2(10, "%s --> %s\n", mailhost, buf); if (!isdigit((int)buf[0]) || buf[0] > '3') { Pmsg2(0, _("Fatal malformed reply from %s: %s\n"), mailhost, buf); exit(1); } if (buf[3] != '-') { break; } } if (ferror(rfp)) { fprintf(stderr, _("Fatal fgets error: ERR=%s\n"), strerror(errno)); } return; } /* * chat - say something to server and check the response */ static void chat(const char *fmt, ...) { va_list ap; va_start(ap, fmt); vfprintf(sfp, fmt, ap); va_end(ap); if (debug_level >= 10) { fprintf(stdout, "%s --> ", my_hostname); va_start(ap, fmt); vfprintf(stdout, fmt, ap); va_end(ap); } /* Send message to server and parse its response. */ fflush(sfp); if (debug_level >= 10) { fflush(stdout); } get_response(); } /* * usage - explain and bail out */ static void usage() { fprintf(stderr, _("\n" "Usage: %s [-f from] [-h mailhost] [-s subject] [-c copy] [recipient ...]\n" " -4 forces bsmtp to use IPv4 addresses only.\n" #ifdef HAVE_IPV6 " -6 forces bsmtp to use IPv6 addresses only.\n" #endif " -8 set charset to UTF-8\n" " -a use any ip protocol for address resolution\n" " -c set the Cc: field\n" " -d set debug level to \n" " -dt print a timestamp in debug output\n" " -f set the From: field\n" " -h use mailhost:port as the SMTP server\n" " -s set the Subject: field\n" " -r set the Reply-To: field\n" " -S set the Sender: field\n" " -H add a custom line to the mail header\n" " -l set the maximum number of lines to send (default: unlimited)\n" " -? 
print this message.\n" "\n"), MY_NAME); exit(1); } /* * Return the offset west from localtime to UTC in minutes * Same as timezone.tz_minuteswest * Unix tz_offset coded by: Attila Fülöp */ static long tz_offset(time_t lnow, struct tm &tm) { #if defined(HAVE_WIN32) #if defined(HAVE_MINGW) __MINGW_IMPORT long _dstbias; #endif #if defined(MINGW64) # define _tzset tzset #endif /* Win32 code */ long offset; _tzset(); offset = _timezone; if (tm.tm_isdst) { offset += _dstbias; } return offset /= 60; #else /* Unix/Linux code */ struct tm tm_utc; time_t now = lnow; (void)gmtime_r(&now, &tm_utc); tm_utc.tm_isdst = tm.tm_isdst; return (long)difftime(mktime(&tm_utc), now) / 60; #endif } static void get_date_string(char *buf, int buf_len) { time_t now = time(NULL); struct tm tm; char tzbuf[MAXSTRING]; long my_timezone; /* Add RFC822 date */ (void)localtime_r(&now, &tm); my_timezone = tz_offset(now, tm); strftime(buf, buf_len, "%a, %d %b %Y %H:%M:%S", &tm); snprintf(tzbuf, sizeof(tzbuf), " %+2.2ld%2.2u", -my_timezone / 60, (unsigned int)abs(my_timezone) % 60); strcat(buf, tzbuf); /* add +0100 */ strftime(tzbuf, sizeof(tzbuf), " (%Z)", &tm); strcat(buf, tzbuf); /* add (CEST) */ } /********************************************************************* * * Program to send email */ int main (int argc, char *argv[]) { char buf[1000]; int i, ch; unsigned long maxlines, lines; #if defined(HAVE_WIN32) SOCKET s; #else int r; int s = -1; struct passwd *pwd; #endif char *cp, *p; #ifdef HAVE_GETADDRINFO int res; struct addrinfo hints; struct addrinfo *ai, *rp; char mail_port[10]; #else struct hostent *hp; struct sockaddr_in sin; #endif #ifdef HAVE_IPV6 const char *options = "468ac:d:f:h:r:s:l:S:H:?"; #else const char *options = "48ac:d:f:h:r:s:l:S:H:?"; #endif int custom_header_count = 0; int custom_header_max = (sizeof(custom_headers) / sizeof(char *)); setlocale(LC_ALL, "en_US"); bindtextdomain("bacula", LOCALEDIR); textdomain("bacula"); my_name_is(argc, argv, "bsmtp"); maxlines = 0; while ((ch = getopt(argc, argv, options)) != -1) { switch (ch) { case '4': default_resolv_type = RESOLV_PROTO_IPV4; break; #ifdef HAVE_IPV6 case '6': default_resolv_type = RESOLV_PROTO_IPV6; break; #endif case '8': content_utf8 = true; break; case 'a': default_resolv_type = RESOLV_PROTO_ANY; break; case 'c': Dmsg1(20, "cc=%s\n", optarg); cc_addr = optarg; break; case 'd': /* set debug level */ if (*optarg == 't') { dbg_timestamp = true; } else { debug_level = atoi(optarg); if (debug_level <= 0) { debug_level = 1; } } Dmsg1(20, "Debug level = %d\n", debug_level); break; case 'f': /* from */ from_addr = optarg; break; case 'h': /* smtp host */ Dmsg1(20, "host=%s\n", optarg); p = strchr(optarg, ':'); if (p) { *p++ = 0; mailport = atoi(p); } mailhost = optarg; break; case 's': /* subject */ Dmsg1(20, "subject=%s\n", optarg); subject = optarg; break; case 'r': /* reply address */ reply_addr = optarg; break; case 'S': /* sender address */ sender_addr = optarg; break; case 'H': /* custom header "XXXX: YYYYYYY" */ if (custom_header_countai_canonname, sizeof(my_hostname)); freeaddrinfo(ai); #else if ((hp = gethostbyname(my_hostname)) == NULL) { Pmsg2(0, _("Fatal gethostbyname for myself failed \"%s\": ERR=%s\n"), my_hostname, strerror(errno)); exit(1); } bstrncpy(my_hostname, hp->h_name, sizeof(my_hostname)); #endif Dmsg1(20, "My hostname is: %s\n", my_hostname); /* * Determine from address. 
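/*
 * A minimal standalone sketch of the Date: header formatting shown in
 * get_date_string() above.  The original computes the numeric UTC
 * offset itself via tz_offset() for portability; on a modern POSIX
 * system strftime()'s %z and %Z can produce the same "+0100 (CEST)"
 * style suffix directly, which is the simplification used here.
 */
#include <stdio.h>
#include <time.h>

int main()
{
   char buf[128];
   time_t now = time(NULL);
   struct tm *tm = localtime(&now);

   /* %z expands to the +hhmm offset, %Z to the timezone name (e.g. CEST) */
   strftime(buf, sizeof(buf), "%a, %d %b %Y %H:%M:%S %z (%Z)", tm);
   printf("Date: %s\r\n", buf);
   return 0;
}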
*/ if (from_addr == NULL) { #if defined(HAVE_WIN32) DWORD dwSize = UNLEN + 1; LPSTR lpszBuffer = (LPSTR)alloca(dwSize); if (GetUserName(lpszBuffer, &dwSize)) { snprintf(buf, sizeof(buf), "%s@%s", lpszBuffer, my_hostname); } else { snprintf(buf, sizeof(buf), "unknown-user@%s", my_hostname); } #else if ((pwd = getpwuid(getuid())) == 0) { snprintf(buf, sizeof(buf), "userid-%d@%s", (int)getuid(), my_hostname); } else { snprintf(buf, sizeof(buf), "%s@%s", pwd->pw_name, my_hostname); } #endif from_addr = bstrdup(buf); } Dmsg1(20, "From addr=%s\n", from_addr); /* * Connect to smtp daemon on mailhost. */ lookup_host: #ifdef HAVE_GETADDRINFO memset(&hints, 0, sizeof(struct addrinfo)); switch (default_resolv_type) { case RESOLV_PROTO_ANY: hints.ai_family = AF_UNSPEC; break; case RESOLV_PROTO_IPV4: hints.ai_family = AF_INET; break; #ifdef HAVE_IPV6 case RESOLV_PROTO_IPV6: hints.ai_family = AF_INET6; break; #endif default: hints.ai_family = AF_UNSPEC; break; } hints.ai_socktype = SOCK_STREAM; hints.ai_protocol = 0; hints.ai_flags = 0; snprintf(mail_port, sizeof(mail_port), "%d", mailport); if ((res = getaddrinfo(mailhost, mail_port, &hints, &ai)) != 0) { Pmsg2(0, _("Error unknown mail host \"%s\": ERR=%s\n"), mailhost, gai_strerror(res)); if (strcasecmp(mailhost, "localhost")) { Pmsg0(0, _("Retrying connection using \"localhost\".\n")); mailhost = "localhost"; goto lookup_host; } exit(1); } for (rp = ai; rp != NULL; rp = rp->ai_next) { #if defined(HAVE_WIN32) s = WSASocket(rp->ai_family, rp->ai_socktype, rp->ai_protocol, NULL, 0, 0); #else s = socket(rp->ai_family, rp->ai_socktype, rp->ai_protocol); #endif if (s < 0) { continue; } if (connect(s, rp->ai_addr, rp->ai_addrlen) != -1) { break; } close(s); } if (!rp) { Pmsg1(0, _("Failed to connect to mailhost %s\n"), mailhost); exit(1); } freeaddrinfo(ai); #else if ((hp = gethostbyname(mailhost)) == NULL) { Pmsg2(0, _("Error unknown mail host \"%s\": ERR=%s\n"), mailhost, strerror(errno)); if (strcasecmp(mailhost, "localhost") != 0) { Pmsg0(0, _("Retrying connection using \"localhost\".\n")); mailhost = "localhost"; goto lookup_host; } exit(1); } if (hp->h_addrtype != AF_INET) { Pmsg1(0, _("Fatal error: Unknown address family for smtp host: %d\n"), hp->h_addrtype); exit(1); } memset((char *)&sin, 0, sizeof(sin)); memcpy((char *)&sin.sin_addr, hp->h_addr, hp->h_length); sin.sin_family = hp->h_addrtype; sin.sin_port = htons(mailport); #if defined(HAVE_WIN32) if ((s = WSASocket(AF_INET, SOCK_STREAM, 0, NULL, 0, 0)) < 0) { Pmsg1(0, _("Fatal socket error: ERR=%s\n"), strerror(errno)); exit(1); } #else if ((s = socket(AF_INET, SOCK_STREAM, 0)) < 0) { Pmsg1(0, _("Fatal socket error: ERR=%s\n"), strerror(errno)); exit(1); } #endif if (connect(s, (struct sockaddr *)&sin, sizeof(sin)) < 0) { Pmsg2(0, _("Fatal connect error to %s: ERR=%s\n"), mailhost, strerror(errno)); exit(1); } Dmsg0(20, "Connected\n"); #endif #if defined(HAVE_WIN32) int fdSocket = _open_osfhandle(s, _O_RDWR | _O_BINARY); if (fdSocket == -1) { Pmsg1(0, _("Fatal _open_osfhandle error: ERR=%s\n"), strerror(errno)); exit(1); } int fdSocket2 = dup(fdSocket); if ((sfp = fdopen(fdSocket, "wb")) == NULL) { Pmsg1(0, _("Fatal fdopen error: ERR=%s\n"), strerror(errno)); exit(1); } if ((rfp = fdopen(fdSocket2, "rb")) == NULL) { Pmsg1(0, _("Fatal fdopen error: ERR=%s\n"), strerror(errno)); exit(1); } #else if ((r = dup(s)) < 0) { Pmsg1(0, _("Fatal dup error: ERR=%s\n"), strerror(errno)); exit(1); } if ((sfp = fdopen(s, "w")) == 0) { Pmsg1(0, _("Fatal fdopen error: ERR=%s\n"), strerror(errno)); exit(1); } if 
((rfp = fdopen(r, "r")) == 0) { Pmsg1(0, _("Fatal fdopen error: ERR=%s\n"), strerror(errno)); exit(1); } #endif /* * Send SMTP headers. Note, if any of the strings have a < * in them already, we do not enclose the string in < >, otherwise * we do. */ get_response(); /* banner */ chat("HELO %s\r\n", my_hostname); chat("MAIL FROM:%s\r\n", cleanup_addr(from_addr, buf, sizeof(buf))); for (i = 0; i < argc; i++) { char *m = argv[i]; if (m) { /* email1,email2,email3 */ for (char *end = strchr(m, ','); end ; end = strchr(m, ',')) { *end = 0; Dmsg1(20, "rcpt to: %s\n", argv[i]); chat("RCPT TO:%s\r\n", cleanup_addr(m, buf, sizeof(buf))); m = end+1; // Move forward *end = ','; // Get the original string back } if (*m) { // We should have a last one Dmsg1(20, "rcpt to: %s\n", argv[i]); chat("RCPT TO:%s\r\n", cleanup_addr(m, buf, sizeof(buf))); } } } if (cc_addr) { chat("RCPT TO:%s\r\n", cleanup_addr(cc_addr, buf, sizeof(buf))); } Dmsg0(20, "Data\n"); chat("DATA\r\n"); /* * Send header */ fprintf(sfp, "From: %s\r\n", from_addr); Dmsg1(10, "From: %s\r\n", from_addr); if (subject) { fprintf(sfp, "Subject: %s\r\n", subject); Dmsg1(10, "Subject: %s\r\n", subject); } if (reply_addr) { fprintf(sfp, "Reply-To: %s\r\n", reply_addr); Dmsg1(10, "Reply-To: %s\r\n", reply_addr); } if (err_addr) { fprintf(sfp, "Errors-To: %s\r\n", err_addr); Dmsg1(10, "Errors-To: %s\r\n", err_addr); } if (sender_addr) { fprintf(sfp, "Sender: %s\r\n", sender_addr); Dmsg1(10, "Sender: %s\r\n", sender_addr); } else { #if defined(HAVE_WIN32) DWORD dwSize = UNLEN + 1; LPSTR lpszBuffer = (LPSTR)alloca(dwSize); if (GetUserName(lpszBuffer, &dwSize)) { fprintf(sfp, "Sender: %s@%s\r\n", lpszBuffer, my_hostname); Dmsg2(10, "Sender: %s@%s\r\n", lpszBuffer, my_hostname); } else { fprintf(sfp, "Sender: unknown-user@%s\r\n", my_hostname); Dmsg1(10, "Sender: unknown-user@%s\r\n", my_hostname); } #else if ((pwd = getpwuid(getuid())) == 0) { fprintf(sfp, "Sender: userid-%d@%s\r\n", (int)getuid(), my_hostname); Dmsg2(10, "Sender: userid-%d@%s\r\n", (int)getuid(), my_hostname); } else { fprintf(sfp, "Sender: %s@%s\r\n", pwd->pw_name, my_hostname); Dmsg2(10, "Sender: %s@%s\r\n", pwd->pw_name, my_hostname); } #endif } /* Add custom header, if any */ for (i=0; i 0 && ++lines > maxlines) { Dmsg1(20, "skip line because of maxlines limit: %lu\n", maxlines); while (fgets(buf, sizeof(buf), stdin)) { ++lines; } break; } buf[sizeof(buf)-1] = '\0'; buf[strlen(buf)-1] = '\0'; if (buf[0] == '.') { /* add extra . see RFC 2821 4.5.2 */ fputs(".", sfp); } fputs(buf, sfp); fputs("\r\n", sfp); } if (lines > maxlines) { Dmsg1(10, "hit maxlines limit: %lu\n", maxlines); fprintf(sfp, "\r\n\r\n[maximum of %lu lines exceeded, skipped %lu lines of output]\r\n", maxlines, lines-maxlines); } /* * Send SMTP quit command */ chat(".\r\n"); chat("QUIT\r\n"); exit(0); } bacula-15.0.3/src/tools/timelimit.10000644000175000017500000001372314771010173016676 0ustar bsbuildbsbuild.\" Copyright (c) 2001, 2007 - 2010 Peter Pentchev .\" All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. 
.\" .\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND .\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE .\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT .\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" .\" $Ringlet$ .\" .Dd September 29, 2010 .Dt TIMELIMIT 1 .Os .Sh NAME .Nm timelimit .Nd effectively limit the absolute execution time of a process .Sh SYNOPSIS .Nm .Op Fl pq .Op Fl S Ar killsig .Op Fl s Ar warnsig .Op Fl T Ar killtime .Op Fl t Ar warntime .Ar command .Op Ar arguments ... .Sh DESCRIPTION The .Nm utility executes a given .Ar command with the supplied .Ar arguments and terminates the spawned process after a given time with a given signal. If the process exits before the time limit has elapsed, .Nm will silently exit, too. .Pp Options: .Bl -tag -width indent .It Fl p If the child process is terminated by a signal, .Nm propagates this condition, i.e. sends the same signal to itself. This allows the program executing .Nm to determine whether the child process was terminated by a signal or actually exited with an exit code larger than 128. .It Fl q Quiet operation - .Nm does not output diagnostic messages about signals sent to the child process. .It Fl S Ar killsig Specify the number of the signal to be sent to the process .Ar killtime seconds after .Ar warntime has expired. Defaults to 9 (SIGKILL). .It Fl s Ar warnsig Specify the number of the signal to be sent to the process .Ar warntime seconds after it has been started. Defaults to 15 (SIGTERM). .It Fl T Ar killtime Specify the maximum execution time of the process before sending .Ar killsig after .Ar warnsig has been sent. Defaults to 120 seconds. .It Fl t Ar warntime Specify the maximum execution time of the process in seconds before sending .Ar warnsig . Defaults to 3600 seconds. .El .Pp On systems that support the .Xr setitimer 2 system call, the .Ar warntime and .Ar killtime values may be specified in fractional seconds with microsecond precision. .Sh ENVIRONMENT .Bl -tag -width indent .It Ev KILLSIG The .Ar killsig to use if the .Fl S option was not specified. .It Ev KILLTIME The .Ar killtime to use if the .Fl T option was not specified. .It Ev WARNSIG The .Ar warnsig to use if the .Fl s option was not specified. .It Ev WARNTIME The .Ar warntime to use if the .Fl t option was not specified. .El .Sh EXIT STATUS If the child process exits normally, the .Nm utility will pass its exit code on up. If the child process is terminated by a signal and the .Fl p flag was not specified, the .Nm utility's exit status is 128 plus the signal number, similar to .Xr sh 1 . If the .Fl p flag was specified, the .Nm utility will raise the signal itself so that its own parent process may in turn reliably distinguish between a signal and a larger than 128 exit code. 
.Pp In rare cases, the .Nm utility may encounter a system or user error; then, its exit status is one of the standard .Xr sysexits 3 values: .Bl -tag -width indent .It Dv EX_USAGE The command-line parameters and options were incorrectly specified. .It Dv EX_SOFTWARE The .Nm utility itself received an unexpected signal while waiting for the child process to terminate. .It Dv EX_OSERR The .Nm utility was unable to execute the child process, wait for it to terminate, or examine its exit status. .El .Sh EXAMPLES .Pp The following examples are shown as given to the shell: .Pp .Dl timelimit -p /usr/local/bin/rsync rsync://some.host/dir /opt/mirror .Pp Run the rsync program to mirror a WWW or FTP site and kill it if it runs longer than 1 hour (that is 3600 seconds) with SIGTERM. If the rsync process does not exit after receiving the SIGTERM, .Nm issues a SIGKILL 120 seconds after the SIGTERM. If the rsync process is terminated by a signal, .Nm will itself raise this signal. .Pp .Dl tcpserver 0 8888 timelimit -t600 -T300 /opt/services/chat/stats .Pp Start a .Xr tcpserver n process listening on tcp port 8888; each client connection shall invoke an instance of an IRC statistics tool under .Pa /opt/services/chat and kill it after 600 seconds have elapsed. If the stats process is still running after the SIGTERM, it will be killed by a SIGKILL sent 300 seconds later. .Pp .Dl env WARNTIME=4.99 WARNSIG=1 KILLTIME=1.000001 timelimit sh stats.sh .Pp Start a shell script and kill it with a SIGHUP in a little under 5 seconds. If the shell gets stuck and does not respond to the SIGHUP, kill it with the default SIGKILL just a bit over a second afterwards. .Sh SEE ALSO .Xr kill 1 , .Xr rsync 1 , .Xr signal 3 , .Xr tcpserver n .Sh STANDARDS No standards documentation was harmed in the process of creating .Nm . .Sh BUGS Please report any bugs in .Nm to the author. .Sh AUTHOR The .Nm utility was conceived and written by .An Peter Pentchev Aq roam@ringlet.net with contributions and suggestions by .An Karsten W Rohrbach Aq karsten@rohrbach.de , .An Teddy Hogeborn Aq teddy@fukt.bsnet.se , and .An Tomasz Nowak Aq nowak2000@poczta.onet.pl . bacula-15.0.3/src/tools/bregex.c0000644000175000017500000000737014771010173016240 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Test program for testing regular expressions. * * Kern Sibbald, MMVI * */ #include "bacula.h" /* * If you define BACULA_REGEX, bregex will be built with the * Bacula bregex library, which is the same code that we * use on Win32, thus using Linux, you can test your Win32 * expressions. Otherwise, this program will link with the * system library routines. 
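 * In practice you run "bregex -f <datafile>", type one pattern per prompt,
 * and the matching lines (or, with -n, the non-matching ones) are printed,
 * prefixed with their line number unless -l is given.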
*/ //#define BACULA_REGEX #ifdef BACULA_REGEX #include "lib/bregex.h" #else #ifndef HAVE_REGEX_H #include "lib/bregex.h" #else #include #endif #endif static void usage() { fprintf(stderr, "\n" "Usage: bregex [-d debug_level] -f \n" " -f specify file of data to be matched\n" " -l suppress line numbers\n" " -n print lines that do not match\n" " -d set debug level to \n" " -dt print timestamp in debug output\n" " -? print this message.\n" "\n\n"); exit(1); } int main(int argc, char *const *argv) { regex_t preg; char prbuf[500]; char *fname = NULL; int rc, ch; char data[1000]; char pat[500]; FILE *fd; bool match_only = true; int lineno; bool no_linenos = false; setlocale(LC_ALL, ""); bindtextdomain("bacula", LOCALEDIR); textdomain("bacula"); while ((ch = getopt(argc, argv, "d:f:ln?")) != -1) { switch (ch) { case 'd': /* set debug level */ if (*optarg == 't') { dbg_timestamp = true; } else { debug_level = atoi(optarg); if (debug_level <= 0) { debug_level = 1; } } break; case 'f': /* data */ fname = optarg; break; case 'l': no_linenos = true; break; case 'n': match_only = false; break; case '?': default: usage(); } } argc -= optind; argv += optind; if (!fname) { printf("A data file must be specified.\n"); usage(); } OSDependentInit(); for ( ;; ) { printf("Enter regex pattern: "); if (fgets(pat, sizeof(pat)-1, stdin) == NULL) { break; } strip_trailing_newline(pat); if (pat[0] == 0) { exit(0); } rc = regcomp(&preg, pat, REG_EXTENDED); if (rc != 0) { regerror(rc, &preg, prbuf, sizeof(prbuf)); printf("Regex compile error: %s\n", prbuf); continue; } fd = bfopen(fname, "r"); if (!fd) { printf(_("Could not open data file: %s\n"), fname); exit(1); } lineno = 0; while (fgets(data, sizeof(data)-1, fd)) { const int nmatch = 30; regmatch_t pmatch[nmatch]; strip_trailing_newline(data); lineno++; rc = regexec(&preg, data, nmatch, pmatch, 0); if ((match_only && rc == 0) || (!match_only && rc != 0)) { if (no_linenos) { printf("%s\n", data); } else { printf("%5d: %s\n", lineno, data); } } } fclose(fd); regfree(&preg); } exit(0); } bacula-15.0.3/src/tools/bsock_meeting_test.c0000644000175000017500000002144314771010173020631 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ #include "bacula.h" #include "lib/unittests.h" /* Function that reproduce what the director is supposed to do * - Accept the connection from "filedaemon" * - Accept the command "setip" * - Store the socket in the BsockMeeting structure in a global list * * - Accept the connection from "bconsole" * - Do a command that connects to the client * - Get the socket from BsockMeeting list * - do some discussion */ void *start_heap; int port=2000; int nb_job=10; int done=nb_job; int started=0; int connected=0; pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; int nb_send=10; char *remote = (char *)"localhost"; bool quit=false; ilist clients(1000, not_owned_by_alist); BsockMeeting *get_client(const char *name) { lock_guard m(mutex); BsockMeeting *b; int id=0; if (sscanf(name, "client-%d", &id) != 1) { return NULL; } b = (BsockMeeting *)clients.get(id); if (!b) { b = New(BsockMeeting()); clients.put(id, b); } return b; } void set_client(const char *name, BsockMeeting *b) { lock_guard m(mutex); int id=0; if (sscanf(name, "client-%d", &id) != 1) { return; } clients.put(id, b); } void client_close() { BsockMeeting *b; int last = clients.last_index(); for (int i = 0; i <= last ; i++) { b = (BsockMeeting *)clients.get(i); if (b) { delete b; clients.put(i, NULL); } } clients.destroy(); } /* When a client connects */ static void *handle_client_request(void *fdp) { BsockMeeting *proxy; BSOCK *fd = (BSOCK *)fdp; if (fd->recv() > 0) { char *name = fd->msg; Pmsg1(0, "Got connection from %s\n", name); proxy = get_client(name); fd->fsend("OK\n"); proxy->set(fd); } else { free_bsock(fd); return NULL; } P(mutex); connected++; V(mutex); return NULL; } static void *th_server(void *) { static workq_t dir_workq; /* queue of work from Director */ IPADDR *tmp = 0; dlist *lst; lst = New(dlist(tmp, &tmp->link)); init_default_addresses(&lst, port); bnet_thread_server(lst, 100, &dir_workq, handle_client_request); delete lst; return NULL; } #ifdef HAVE_WIN32 #define WD "c:\\program files\\bacula\\working\\" #else #define WD "/tmp/" #endif /* Simulate a console and do some action */ void *th_console(void *arg) { bool quit; int64_t total=0; char ed1[50], ed2[50]; int32_t sig; btime_t timer, end_timer, elapsed; BSOCK *dir=NULL; char *name = (char *) arg; BsockMeeting *proxy = get_client(name); if (proxy == NULL) { Pmsg1(0, "Unable to find %s\n", name); goto bail_out; } Pmsg0(0, "Can go in sleep mode. 
Remove " WD "pause to continue\n"); { struct stat sp; fclose(fopen(WD "pause", "w")); while (stat(WD "pause", &sp) == 0) { bmicrosleep(1, 0); } } dir = proxy->get(30); if (!dir) { Pmsg1(0, "Unable to get socket %s\n", name); goto bail_out; } Pmsg0(0, "send command\n"); dir->fsend("command\n"); timer = get_current_btime(); for (quit=false; dir && !quit;) { /* Read command */ sig = dir->recv(); if (sig < 0) { Pmsg0(0, "Connection terminated\n"); break; /* connection terminated */ } else if (!strncmp(dir->msg, "quit", 4)) { Pmsg0(0, "got quit...\n"); dir->fsend("quit\n"); break; } else { total += dir->msglen; } } end_timer = get_current_btime(); elapsed = (end_timer - timer)/1000; /* 0.001s */ if (elapsed > 0) { printf("got bytes=%sB in %.2fs speed=%sB/s\n", edit_uint64_with_suffix(total, ed1), (float) elapsed/1000, edit_uint64_with_suffix(total/elapsed*1000, ed2)); } bail_out: free_bsock(dir); free(name); return NULL; } /* Simulate a filedaemon */ void *th_filedaemon(void *arg) { BSOCK *sock = NULL; char name[512]; BsockMeeting proxy; bstrncpy(name, (char *)arg, sizeof(name)); free(arg); P(mutex); started++; V(mutex); /* The FD will use a loop to connect */ connect_again: free_bsock(sock); sock = new_bsock(); if (!sock->connect(NULL, 5, 10, 2000, (char *)"*name*", remote, (char *)"*service*", port, 0)) { bmicrosleep(1, 0); goto connect_again; } Pmsg0(0, ">Connected!\n"); /* Do "authentication" */ sock->fsend("%s", name); if (sock->recv() <= 0 || strcmp(sock->msg, "OK\n") != 0) { free_bsock(sock); return NULL; } /* Read command and wait to be used */ proxy.wait_request(sock); if (sock->is_closed()) { goto connect_again; } /* get a command */ if (sock->recv() <= 0) { Pmsg0(0, "got incorrect message. Expecting command\n"); goto connect_again; } /* Do something useful or not */ sock->msg = check_pool_memory_size(sock->msg, 4100); Pmsg1(0, ">Ready to send %u buffers of 4KB\n", nb_send); for (int i = 0; i < nb_send ; i++) { memset(sock->msg, i, 4096); sock->msglen = 4096; sock->msg[sock->msglen] = 0; sock->send(); } Pmsg1(0, ">Send quit command\n", nb_send); sock->fsend("quit\n"); sock->recv(); sock->close(); P(mutex); done--; V(mutex); // goto connect_again; free_bsock(sock); Pmsg4(0, ">done=%u started=%u connected=%u name=%s\n", done, started, connected, name); return NULL; } int main (int argc, char *argv[]) { char ch; int olddone=0; bool server=false; pthread_t server_id, client_id[1000], console_id[1000]; Unittests t("BsockMeeting", true, true); InitWinAPIWrapper(); WSA_Init(); mark_heap(); bindtextdomain("bacula", LOCALEDIR); textdomain("bacula"); init_stack_dump(); init_msg(NULL, NULL); daemon_start_time = time(NULL); set_working_directory((char *)WD); set_thread_concurrency(150); set_trace(0); while ((ch = getopt(argc, argv, "?n:j:r:p:sd:")) != -1) { switch (ch) { case 'j': done = nb_job = MIN(atoi(optarg), 1000); break; case 'n': nb_send = atoi(optarg); break; case 'r': remote = optarg; break; case 'p': port = atoi(optarg); break; case 's': server = true; break; case 'd': debug_level = atoi(optarg); break; case '?': default: Pmsg0(0, "Usage: bsock_meeting_test [-r remote] [-s] [-p port] [-n nb_send] [-j nb_job]\n"); return 0; } } argc -= optind; argv += optind; if (strcmp(remote, "localhost") == 0) { Pmsg1(0, "Start server on port %d\n", port); pthread_create(&server_id, NULL, th_server, NULL); Pmsg0(0, ">starting fake console\n"); for (int i = nb_job - 1; i >= 0 ; i--) { char *tmp = (char *) malloc (32); snprintf(tmp, 32, "client-%d", i); pthread_create(&console_id[i], NULL, th_console, 
tmp); } } if (!server) { Pmsg0(0, ">starting fake clients\n"); for (int i = 0; i < nb_job ; i++) { char *tmp = (char *) malloc (32); snprintf(tmp, 32, "client-%d", i); pthread_create(&client_id[i], NULL, th_filedaemon, tmp); } while (done>=1) { if (done != olddone) { Pmsg3(0, ">done=%u started=%u connected=%u\n", done, started, connected); olddone = done; } sleep(1); } quit=true; sleep(1); } if (strcmp(remote, "localhost") == 0) { Pmsg0(0, "Stop bnet server...\n"); bnet_stop_thread_server(server_id); pthread_join(server_id, NULL); Pmsg0(0, "done.\n"); Pmsg0(0, "Join console threads...\n"); for (int i = 0; i < nb_job ; i++) { pthread_join(console_id[i], NULL); } Pmsg0(0, "done.\n"); } if (!server) { #ifdef pthread_kill #undef pthread_kill #endif for (int i = 0; i < nb_job ; i++) { pthread_kill(client_id[i], SIGUSR2); } Pmsg0(0, "Join client threads...\n"); for (int i = 0; i < nb_job ; i++) { pthread_join(client_id[i], NULL); } Pmsg0(0, "done.\n"); } client_close(); term_last_jobs_list(); dequeue_daemon_messages(NULL); dequeue_messages(NULL); term_msg(); return 0; } bacula-15.0.3/src/tools/xattr_append_test.c0000644000175000017500000000513114771010173020505 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ #include "bacula.h" #include "../lib/unittests.h" #define CIFS_XATTR "system.cifs_acl" #define CIFS_XATTR_LEN 15 int main(int argc, char **argv) { Unittests u("t"); POOL_MEM q; int l, l2; char buf[512]; Mmsg(q, "test1"); l = strlen(q.c_str()); l2 = xattr_list_append(q.addr(), l, CIFS_XATTR, CIFS_XATTR_LEN); ok(l2 == (l+CIFS_XATTR_LEN+1), "Check add string"); Dmsg1(0, "%s\n", asciidump(q.c_str(), l2, buf, sizeof(buf))); l = xattr_list_append(q.addr(), l2, CIFS_XATTR, CIFS_XATTR_LEN); ok(l == l2, "Check string already in (last pos)"); Dmsg1(0, "%s\n", asciidump(q.c_str(), l, buf, sizeof(buf))); l = 5 + 1 + CIFS_XATTR_LEN + 1; memcpy(q.c_str(), "test1\0" CIFS_XATTR, l); l2 = xattr_list_append(q.addr(), l, CIFS_XATTR, CIFS_XATTR_LEN); ok(l == l2, "Check string already in (last pos)"); Dmsg1(0, "%s\n", asciidump(q.c_str(), l2, buf, sizeof(buf))); l = 5 + 1 + 5 + 1; memcpy(q.c_str(), "test1\0test2\0", l); l2 = xattr_list_append(q.addr(), l, CIFS_XATTR, CIFS_XATTR_LEN); ok((l + CIFS_XATTR_LEN + 1) == l2, "Check string added"); Dmsg1(0, "%s\n", asciidump(q.c_str(), l2, buf, sizeof(buf))); l = 5 + 1 + 5 + 1 + CIFS_XATTR_LEN + 1; memcpy(q.c_str(), "test1\0test2\0" CIFS_XATTR, l); l2 = xattr_list_append(q.addr(), l, CIFS_XATTR, CIFS_XATTR_LEN); ok(l == l2, "Check string already in (3rd)"); Dmsg1(0, "%s\n", asciidump(q.c_str(), l2, buf, sizeof(buf))); l = 5 + 1 + 5 + 1 + CIFS_XATTR_LEN + 1 + 5 + 1; memcpy(q.c_str(), "test1\0test2\0" CIFS_XATTR "\0test3\0", l); l2 = xattr_list_append(q.addr(), l, CIFS_XATTR, CIFS_XATTR_LEN); ok(l == l2, "Check string already in (3rd)"); Dmsg1(0, "%s\n", asciidump(q.c_str(), l2, buf, sizeof(buf))); Mmsg(q, ""); l = strlen(q.c_str()); l2 = xattr_list_append(q.addr(), l, CIFS_XATTR, CIFS_XATTR_LEN); ok(l2 == (l+CIFS_XATTR_LEN+1), "Check add string (empty)"); Dmsg1(0, "%s\n", asciidump(q.c_str(), l2, buf, sizeof(buf))); return report(); } bacula-15.0.3/src/tools/bsnapshot.c0000644000175000017500000021677714771010173017002 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #include "bacula.h" #include "lib/ini.h" #if defined(HAVE_SUN_OS) || defined(HAVE_DARWIN_OS) #include #endif // Not used on MacOS, fake declaration #ifdef HAVE_DARWIN_OS #define major(x) x #define minor(x) x #endif #ifdef MAJOR_IN_MKDEV #include #elif defined MAJOR_IN_SYSMACROS #include #endif /* HERE I HAVE TO convert the fprintf into something in message.c */ #define Dmsg(level, ...) do { \ d_msg(__FILE__ , __LINE__, level, __VA_ARGS__); \ } while (0) #define Pmsg(level, ...) 
do { \ if (level <= debug_level) { \ fprintf(stderr, "%s:%d ", __FILE__ , __LINE__ ); \ fprintf(stderr, __VA_ARGS__ ); \ } \ } while (0) /* Use our own implementation of MmsgDX */ #undef MmsgD0 #undef MmsgD1 #undef MmsgD2 #undef MmsgD3 #undef MmsgD4 #undef MmsgD5 #undef MmsgD6 #define MmsgD0(level, msgbuf, fmt) \ { Mmsg(msgbuf, fmt); Dmsg(level, "%s\n", msgbuf); } #define MmsgD1(level, msgbuf, fmt, a1) \ { Mmsg(msgbuf, fmt, a1); Dmsg(level, "%s\n", msgbuf); } #define MmsgD2(level, msgbuf, fmt, a1, a2) \ { Mmsg(msgbuf, fmt, a1, a2); Dmsg(level, "%s\n", msgbuf); } #define MmsgD3(level, msgbuf, fmt, a1, a2, a3) \ { Mmsg(msgbuf, fmt, a1, a2, a3); Dmsg(level, "%s\n", msgbuf); } #define MmsgD4(level, msgbuf, fmt, a1, a2, a3, a4) \ { Mmsg(msgbuf, fmt, a1, a2, a3, a4); Dmsg(level, "%s\n", msgbuf); } #define MmsgD5(level, msgbuf, fmt, a1, a2, a3, a4, a5) \ { Mmsg(msgbuf, fmt, a1, a2, a3, a4, a5); Dmsg(level, "%s\n", msgbuf); } #define MmsgD6(level, msgbuf, fmt, a1, a2, a3, a4, a5, a6) \ { Mmsg(msgbuf, fmt, a1, a2, a3, a4, a5, a6); Dmsg(level, "%s\n", msgbuf); } #define BSNAPSHOT_CONF SYSCONFDIR "/bsnapshot.conf" static int debug_fd = -1; /* file descriptor for the debugging */ static void usage(const char *msg=NULL) { if (msg) { fprintf(stderr, _("ERROR %s\n\n"), msg); } fprintf(stderr, _( "Bacula %s (%s)\n\n" "Usage: bsnapshot\n" " -d level Set debug level\n" " -v Verbose\n" " -s Use sudo\n" " -o logfile send debug to logfile\n" " -V volume volume\n" " -T type volume type\n" " -t check compatibility\n" " -c specify configuration file\n" "\n"), VERSION, LSMDATE); exit(2); } static const char *Months[] = { NULL, "Jan", "Feb", "Mar", "Apr", "Mai", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec" }; /* Skip leading slash(es) */ static bool makedir(char *path) { char *p = path; while (IsPathSeparator(*p)) { p++; } while ((p = first_path_separator(p))) { char save_p; save_p = *p; *p = 0; mkdir(path, 0700); *p = save_p; while (IsPathSeparator(*p)) { p++; } } /* If not having a ending / */ if (!IsPathSeparator(path[strlen(path) - 1])) { mkdir(path, 0700); } return true; } static char *strip_tz(char *line) { if (line) { /* The format returned by LVM is 2022-05-18 09:17:41 +0000 */ char *p = strstr(line, " +"); if (p) { *p = 0; } /* str_to_utime() doesn't understand the +0000 extension */ } return line; } /* Strip trailing junk and " */ void strip_quotes(char *str) { strip_trailing_junk(str); for(char *p = str; *p ; p++) { if (*p == '"') { *p = ' '; } } } static void set_trace_file(const char *path) { char dt[MAX_TIME_LENGTH]; if (debug_fd != -1) { close(debug_fd); } debug_fd = open(path, O_RDWR | O_CREAT | O_APPEND, 0600); if (debug_fd != -1) { Dmsg(10, "Starting bsnapshot %s\n", bstrftime(dt, MAX_TIME_LENGTH, time(NULL))); // redirect Dmsg() emitted by Bacula's libraries code set_trace_for_tools(debug_fd); } else { perror(path); /* report the error to stderr */ } } /* Small function to avoid double // in path name */ static void path_concat(POOLMEM *&dest, const char *path1, const char *path2, const char *path3) { int last; last = pm_strcpy(dest, path1); last = MAX(last - 1, 0); /* Check if the last char of dest is / and the first of path2 is / */ if (dest[last] == '/') { if (path2[0] == '/') { dest[last] = 0; } } else { if (path2[0] != '/') { pm_strcat(dest, "/"); } } last = pm_strcat(dest, path2); last = MAX(last - 1, 0); if (path3) { if (dest[last] == '/') { if (path3[0] == '/') { dest[last] = 0; } } else { if (path3[0] != '/') { pm_strcat(dest, "/"); } } pm_strcat(dest, path3); } } static struct 
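/*
 * Purely illustrative bsnapshot.conf using the items registered below
 * (key=value lines; the values shown here are examples, not shipped defaults):
 *
 *   trace=/opt/bacula/working/bsnapshot.log
 *   debug=10
 *   sudo=yes
 *   retry=3
 *   snapshot_dir=.snapshots
 *   fail_job_on_error=yes
 *   lvm_snapshot_size=/dev/ubuntu-vg/root:15%
 *   mountopts=/dev/ubuntu-vg/home:nouuid
 */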
ini_items bsnap_cfg[] = { // name handler comment required default { "trace", ini_store_str, "", 0, NULL}, { "debug", ini_store_int32, "", 0, NULL}, { "sudo", ini_store_bool, "", 0, NULL}, { "disabled", ini_store_bool, "", 0, "no"}, { "retry", ini_store_int32, "", 0, "3"}, { "lvm_snapshot_size", ini_store_alist_str,"", 0, NULL}, { "skip_volume", ini_store_alist_str,"", 0, NULL}, { "snapshot_dir", ini_store_str, "", 0, NULL}, { "fail_job_on_error", ini_store_bool, "", 0, "yes"}, { "mountopts", ini_store_alist_str, "", 0, NULL}, { NULL, NULL, NULL, 0, NULL} }; class arguments { public: char *action; /* list, create, delete... */ char *volume; /* snapshot device */ char *device; /* original device name */ char *name; /* snapshot name */ char *mountpoint; /* device mountpoint */ char *snapmountpoint; /* snapshot mountpoint */ char *type; /* snapshot type */ char *fstype; /* filesystem type */ char *working; /* working directory */ const char *snapdir; /* .snapshot */ const char *sudo; /* prepend sudo to commands */ int verbose; int retry; /* retry some operations */ bool disabled; /* disabled by config file */ bool fail_job_on_error; /* Fail job on snapshot error */ ConfigFile ini; /* Configuration file */ POOL_MEM config_file; /* Path to a config file */ arguments(): action(getenv("SNAPSHOT_ACTION")), volume(getenv("SNAPSHOT_VOLUME")), device(getenv("SNAPSHOT_DEVICE")), name( getenv("SNAPSHOT_NAME")), mountpoint(getenv("SNAPSHOT_MOUNTPOINT")), snapmountpoint(getenv("SNAPSHOT_SNAPMOUNTPOINT")), type( getenv("SNAPSHOT_TYPE")), fstype(getenv("SNAPSHOT_FSTYPE")), working(getenv("SNAPSHOT_WORKING")), snapdir(".snapshots"), sudo(""), verbose(0), retry(3), disabled(false), fail_job_on_error(true) { struct stat sp; ini.register_items(bsnap_cfg, sizeof(struct ini_items)); if (stat(BSNAPSHOT_CONF, &sp) == 0) { Dmsg(10, "conf=%s\n", BSNAPSHOT_CONF); pm_strcpy(config_file, BSNAPSHOT_CONF); } }; ~arguments() { }; bool validate() { int pos; if (!action) { return false; } if (strcmp(config_file.c_str(), "") != 0) { Dmsg(10, "Reading configuration from %s\n", config_file.c_str()); if (!ini.parse(config_file.c_str())) { printf("status=1 error=\"Unable to parse %s\"\n", config_file.c_str()); return false; } pos = ini.get_item("debug"); if (ini.items[pos].found && debug_level == 0) { debug_level = ini.items[pos].val.int32val; } pos = ini.get_item("trace"); if (ini.items[pos].found) { set_trace_file(ini.items[pos].val.strval); } pos = ini.get_item("sudo"); if (ini.items[pos].found && ini.items[pos].val.boolval) { sudo = "sudo "; } pos = ini.get_item("snapshot_dir"); if (ini.items[pos].found) { snapdir = ini.items[pos].val.strval; } pos = ini.get_item("retry"); if (ini.items[pos].found) { retry = ini.items[pos].val.int32val; } pos = ini.get_item("disabled"); if (ini.items[pos].found) { disabled = ini.items[pos].val.boolval; } pos = ini.get_item("fail_job_on_error"); if (ini.items[pos].found) { fail_job_on_error = ini.items[pos].val.boolval; } } return true; }; }; class snapshot { public: const char *type; /* snapshot type, btrfs, zfs, etc.. 
*/ POOLMEM *cmd; /* buffer to edit a command */ POOLMEM *path; /* buffer to edit volume path */ POOLMEM *fname; /* used for split_path_and_filename */ POOLMEM *errmsg; /* buffer to edit error message */ POOLMEM *mountopts; /* mount options for the current FS */ arguments *arg; /* program argument */ int pnl; /* path length */ int fnl; /* fname length */ snapshot(arguments *a, const char *t): type(t), cmd(get_pool_memory(PM_NAME)), path(get_pool_memory(PM_NAME)), fname(get_pool_memory(PM_NAME)), errmsg(get_pool_memory(PM_NAME)), mountopts(get_pool_memory(PM_FNAME)), arg(a), pnl(0), fnl(0) { cmd[0] = path[0] = fname[0] = errmsg[0] = mountopts[0] = '\0'; }; virtual ~snapshot() { free_pool_memory(cmd); free_pool_memory(path); free_pool_memory(fname); free_pool_memory(errmsg); free_pool_memory(mountopts); }; /* Get mount options, look in config file if needed */ char *get_mountopts(char *dev, POOLMEM *&output) { char *tmp, *defs=NULL; alist *lst; int pos = arg->ini.get_item("mountopts"); pm_strcpy(output, " -o ro "); if (!arg->ini.items[pos].found) { return output; } lst = arg->ini.items[pos].val.alistval; if (lst) { /* /dev/ubuntu-vg/root:uid=eric * /dev/ubuntu-vg/home:nouuid,uid=eric * uid=eric, */ foreach_alist(tmp, lst) { char *p = strchr(tmp, ':'); if (tmp[0] != '/' || !p) { defs = tmp; /* will be the default */ continue; } /* Check the device name */ if (strncmp(tmp, dev, p - tmp) != 0) { continue; } Mmsg(output, " -o \"ro,%s\" ", p+1); return output; } if (defs) { Mmsg(output, " -o \"ro,%s\" ", defs); } } return output; }; /* Basically, we check parameters here that are * common to all backends */ virtual int mount() { Dmsg(10, "[%s] Doing mount command\n", type); if (!arg->volume || !arg->name || !arg->device || !arg->mountpoint) { Dmsg(10, "volume=%s name=%s device=%s mountpoint=%s\n", NPRT(arg->volume), NPRT(arg->name), NPRT(arg->device), NPRT(arg->mountpoint)); return 0; } return 1; }; virtual int unmount() { Dmsg(10, "[%s] Doing unmount command on %s\n", type, NPRT(arg->snapmountpoint)); if (!arg->snapmountpoint) { Dmsg(10, "snapmountpoint=%s\n", NPRT(arg->snapmountpoint)); return 0; } return 1; }; virtual int support() { Dmsg(10, "[%s] Doing support on %s (%s)\n", type, NPRT(arg->mountpoint), NPRT(arg->device)); if (!arg->fstype || !arg->mountpoint || !arg->device) { Dmsg(10, "fstype=%s mountpoint=%s device=%s\n", NPRT(arg->fstype), NPRT(arg->mountpoint), NPRT(arg->device)); return 0; } return 1; }; virtual int check() { Dmsg(10, "[%s] Doing check on %s\n", type, NPRT(arg->mountpoint)); if (!arg->mountpoint) { Dmsg(10, "mountpoint=%s\n", NPRT(arg->mountpoint)); return 0; } return 1; }; virtual int create() { Dmsg(10, "[%s] Doing create %s\n", type, NPRT(arg->mountpoint)); if (!arg->mountpoint || !arg->name || !arg->device) { Dmsg(10, "mountpoint=%s name=%s device=%s\n", NPRT(arg->mountpoint), NPRT(arg->name), NPRT(arg->device)); return 0; } return 1; }; virtual int del() { Dmsg(10, "[%s] Doing del %s\n", type, NPRT(arg->volume)); if (!arg->volume || !arg->name) { Dmsg(10, "volume=%s name=%s\n", NPRT(arg->volume), NPRT(arg->name)); return 0; } return 1; }; virtual int list() { Dmsg(10, "[%s] Doing list on %s\n", type, NPRT(arg->device)); if (!arg->type || !arg->device || !arg->mountpoint) { return 0; } return 1; }; virtual int subvolumes() { Dmsg(10, "[%s] Doing subvolumes %s\n", type, NPRT(arg->mountpoint)); if (!arg->fstype || !arg->device || !arg->mountpoint) { return 0; } return 1; }; /* Function used in create() to know if we mark the error as FATAL */ int get_error_code() { 
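/* Status convention used by the backends below: status=1 means success,
 * status=0 a fatal error, status=2 a non-fatal error; which error code
 * create() reports depends on fail_job_on_error. */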
Dmsg1(0, "get_error_code = %d\n", (int)arg->fail_job_on_error); /* 1 is OK */ if (arg->fail_job_on_error) { return 0; /* Fatal */ } return 2; /* Error */ }; }; /****************************************************************/ /* MacOS Implementation * - tmutil localsnapshot -- take a snapshot, name with a timestamp * Created local snapshot with date: 2021-07-02-170400 * - tmutil listlocalsnapshots -- list snapshots * Snapshots for volume group containing disk /: * com.apple.TimeMachine.2021-07-02-170400.local * - tmutil deletelocalsnapshots [ | ] * Need to specify the snapshot date * - tmutil listlocalsnapshotdates * Snapshot dates for all disks: * 2021-07-02-171341 * - mount_apfs -s com.apple.TimeMachine.2021-07-02-170400.local / /private/tmp/1 * - tmutil deletelocalsnapshots /private/tmp/1 */ /* apfs backend */ class apfs: public snapshot { public: apfs(arguments *arg): snapshot(arg, "apfs") { arg->snapdir = "/private/tmp/.apfs-snapshot"; }; int mount() { if (!snapshot::mount()) { return 0; } pm_strcpy(fname, arg->mountpoint); for(char *p = fname; *p; p++) { if (*p == '/') { *p = '_'; // Need a better function to escape } } Mmsg(path, "%s-%s-%s", arg->snapdir, arg->name, fname); if (!makedir(path)) { printf("status=%d error=\"Unable to create mountpoint directory %s errno=%d\n", get_error_code(), path, errno); return 0; } MmsgD4(10, cmd, "%smount_apfs -s \"com.apple.TimeMachine.%s.local\" \"%s\" \"%s\"", arg->sudo, arg->volume, arg->mountpoint, path); if (run_program(cmd, 60, errmsg)) { Dmsg(10, "Unable to mount snapshot %s %s\n", path, errmsg); strip_quotes(errmsg); printf("status=%d error=\"Unable to mount snapshot %s\"\n", get_error_code(), errmsg); return 0; } fprintf(stdout, "status=1 snapmountpoint=\"%s\"\n", path); return 1; }; int unmount() { int ret, retry = arg->retry; if (!snapshot::unmount()) { return 0; } MmsgD2(10, cmd, "%sumount \"%s\"", arg->sudo, arg->snapmountpoint); do { ret = run_program(cmd, 60, errmsg); if (ret != 0) { Dmsg(10, "Unable to unmount the directory. ERR=%s\n", errmsg); sleep(3); } } while (ret != 0 && retry-- > 0); if (ret != 0) { Dmsg(10, "Unable to mount volume. 
ERR=%s\n", errmsg); strip_quotes(errmsg); printf("status=0 error=\"Unable to umount the device %s\"\n", errmsg); return 0; } retry = arg->retry; do { Dmsg(10, "Trying to delete mountpoint %s\n", arg->snapmountpoint); if ((ret = rmdir(arg->snapmountpoint)) != 0) { sleep(3); } } while (retry-- > 0 && ret != 0); if (ret != 0) { berrno be; Dmsg(10, "Unable to delete mountpoint after unmount\n"); printf("error=\"Unable to delete mountpoint after unmount errno=%s\"", be.bstrerror(errno)); } printf("status=1\n"); return 1; }; int support() { struct stat sp; if (!snapshot::support()) { return 0; } if (!arg->working) { printf("status=0 type=apfs error=\"Unable to use SNAPSHOT_WORKING\"\n"); return 0; } Mmsg(path, "%s/snapshotdb", arg->working); if (stat(path, &sp) < 0) { if (!makedir(path)) { printf("status=%d error=\"Unable to create working database directory %s errno=%d\n", get_error_code(), arg->mountpoint, errno); return 0; } } printf("status=1 device=\"%s\" type=apfs\n", arg->mountpoint); return 1; }; int check() { char *p; int m1, m2, m3; struct stat sp; if (!snapshot::check()) { return 0; } if (!arg->working) { printf("status=0 type=apfs error=\"Unable to use SNAPSHOT_WORKING\"\n"); return 0; } Mmsg(path, "%s/snapshotdb", arg->working); if (stat(path, &sp) < 0) { if (!makedir(path)) { printf("status=%d error=\"Unable to create working database directory %s errno=%d\n", get_error_code(), arg->mountpoint, errno); return 0; } } MmsgD0(10, cmd, "sw_vers"); if (run_program_full_output(cmd, 60, errmsg)) { printf("status=0 type=apfs error=\"Unable to run sw_vers\"\n"); return 0; } p = strstr(errmsg, "ProductVersion:"); if (!p) { printf("status=0 type=apfs error=\"Unable to locate ProductVersion in sw_vers output\"\n"); printf("%s\n", errmsg); return 0; } if (scan_string(p, "ProductVersion: %d.%d.%d", &m1, &m2, &m3) != 3) { printf("status=0 type=apfs error=\"Unable to decode sw_vers output\"\"\n"); return 0; } if (m1 < 10 || (m1 == 10 && m2 < 15)) { printf("status=0 type=apfs error=\"Old version of MacOS detected %d.%d.%d\"\n", m1, m2, m3); return 0; } printf("status=1 type=apfs\n"); return 1; }; int create() { char ed1[50]; struct stat sp; /* We have SNAPSHOT_NAME, DEVICE, TYPE, FSTYPE, MOUNTPOINT, ACTION */ if (!snapshot::create()) { return 0; } /* TODO: We need to keep an association between SNAPSHOT_NAME and macos * snapshot name. Replace with joblist for example. 
* * The snapshot is taken for all volumes at the same time */ Mmsg(path, "%s/snapshotdb/%s", arg->working, arg->name); if (stat(path, &sp) == 0) { FILE *fp = bfopen(path, "r"); if (!fp) { printf("status=0 error=\"Unable to get information about snapshot\n"); return 0; } bfgets(path, fp); fclose(fp); strip_trailing_junk(path); printf("status=1 volume=\"%s\" createtdate=%s type=apfs\n", path, edit_uint64(sp.st_ctime, ed1)); return 1; } /* Doesn't exist yet */ MmsgD1(10, cmd, "%stmutil localsnapshot", arg->sudo); if (run_program_full_output(cmd, 60, errmsg)) { strip_quotes(errmsg); printf("status=%d error=\"Unable to create snapshot %s\"\n", get_error_code(), errmsg); return 0; } char *p = strstr(errmsg, "Created local snapshot with date:"); if (p) { strip_trailing_junk(p); p += strlen("Created local snapshot with date:"); skip_spaces(&p); pm_strcpy(fname, p); } else { printf("status=%d error=\"Unable to create snapshot %s\"\n", get_error_code(), errmsg); return 0; } FILE *fp = bfopen(path, "w"); if (!fp) { berrno be; printf("status=0 error=\"Unable to store information about snapshot in %s errno=%s\"\n", path, be.bstrerror()); return 0; } fprintf(fp, "%s\n", fname); fclose(fp); printf("status=1 volume=\"%s\" createtdate=%s type=apfs\n", fname, edit_uint64(time(NULL), ed1)); return 1; }; int del() { if (!snapshot::del()) { return 0; } Mmsg(path, "%s/snapshotdb/%s", arg->working, arg->name); unlink(path); MmsgD2(10, cmd, "%stmutil deletelocalsnapshots \"%s\"", arg->sudo, arg->volume); if (run_program(cmd, 300, errmsg)) { // TODO: The snapshot is global, so when we delete one it's over Dmsg(10, "Unable to delete snapshot %s\n", errmsg); strip_quotes(errmsg); printf("status=0 type=apfs error=\"%s\"\n", errmsg); return 0; } printf("status=1\n"); return 1; }; /* * bacula@baculas-Mac-mini 1 % tmutil listlocalsnapshotdates * Snapshot dates for all disks: * 2021-07-02-173507 */ int list() { char *p, *end; if (!snapshot::list()) { return 0; } MmsgD2(10, cmd, "%stmutil listlocalsnapshots \"%s\"", arg->sudo, arg->mountpoint); if (run_program_full_output(cmd, 300, errmsg)) { Dmsg(10, "Unable to list snapshot %s\n", errmsg); strip_quotes(errmsg); printf("status=0 type=apfs error=\"%s\"\n", errmsg); return 0; } for (p = errmsg; p && *p ;) { /* Replace final \n by \0 to have strstr() happy */ end = strchr(p, '\n'); /* If end=NULL, we are at the end of the buffer (without trailing \n) */ if (end) { *end = 0; } int i1, i2, i3, i4; if (scan_string(p, "com.apple.TimeMachine.%d-%d-%d-%d.local", &i1, &i2, &i3, &i4) == 4) { char ed1[50]; edit_uint64(i4, ed1); printf("name=\"%s\" volume=\"%d-%02d-%02d-%06d\" device=\"%s\" createdate=\"%d-%02d-%02d %c%c:%c%c:%c%c\" type=\"apfs\"\n", p, i1, i2, i3, i4, arg->device, i1, i2, i3, ed1[0], ed1[1], ed1[2], ed1[3], ed1[4], ed1[5]); // TODO: decode date from snapshot name } else { Dmsg(10, "Skip %s\n", p); } if (end) { *end = '\n'; end++; } p = end; } return 1; }; }; /****************************************************************/ /* Structure used to sort subvolumes with btrfs backend */ struct vols { rblink link; int64_t id; int count; char uuid[MAX_NAME_LENGTH]; char puuid[MAX_NAME_LENGTH]; char otime[MAX_NAME_LENGTH]; char path[1]; }; int vols_compare_id(void *item1, void *item2) { vols *vol1 = (vols *) item1; vols *vol2 = (vols *) item2; if (vol1->id > vol2->id) { return 1; } else if (vol1->id < vol2->id) { return -1; } else { return 0; } } int vols_compare_uuid(void *item1, void *item2) { vols *vol1 = (vols *) item1; vols *vol2 = (vols *) item2; return 
strcmp(vol1->uuid, vol2->uuid); } /* btrfs backend */ class btrfs: public snapshot { public: btrfs(arguments *arg): snapshot(arg, "btrfs") {}; /* With BTRFS, the volume is already mounted */ int mount() { if (!snapshot::mount()) { return 0; } split_path_and_filename(arg->volume, &path, &pnl, &fname, &fnl); fprintf(stdout, "status=1 snapmountpoint=\"%s\" snapdirectory=\"%s\"\n", arg->volume, path); return 1; }; int unmount() { if (!snapshot::unmount()) { return 0; } printf("status=1\n"); return 1; }; int support() { if (!snapshot::support()) { return 0; } /* If the fstype is btrfs, snapshots are supported */ /* MmsgD2(10, cmd, "%sbtrfs filesystem label \"%s\"", arg->sudo, arg->mountpoint); if (run_program(cmd, 60, errmsg)) { printf("status=0 type=btrfs\n"); return 0; } Dmsg(0, "output=%s\n", errmsg); */ printf("status=1 device=\"%s\" type=btrfs\n", arg->mountpoint); return 1; }; int check() { if (!snapshot::check()) { return 0; } return 1; }; int create() { utime_t createdate = 0; char ed1[50]; if (!snapshot::create()) { return 0; } Mmsg(path, "%s/%s", arg->mountpoint, arg->snapdir); if (!makedir(path)) { printf("status=%d error=\"Unable to create mountpoint directory %s errno=%d\n", get_error_code(), arg->mountpoint, errno); return 0; } Dmsg(10, "mountpoint=%s snapdir=%s name=%s\n", arg->mountpoint, arg->snapdir, arg->name); path_concat(path, arg->mountpoint, arg->snapdir, arg->name); Dmsg(10, "path=%s\n", path); /* Create the actual btrfs snapshot */ MmsgD3(10, cmd, "%sbtrfs subvolume snapshot -r \"%s\" \"%s\"", arg->sudo, arg->mountpoint, path); if (run_program(cmd, 60, errmsg)) { Dmsg(10, "Unable to create snapshot %s %s\n", arg->mountpoint, errmsg); strip_quotes(errmsg); printf("status=%d error=\"Unable to create snapshot %s\"\n", get_error_code(), errmsg); return 0; } /* On SLES12 btrfs 3.16, commands on "/" returns "doesn't belong to btrfs mount point" */ MmsgD2(10, cmd, "%sbtrfs subvolume show \"%s\"", arg->sudo, path); if (run_program_full_output(cmd, 60, errmsg)) { Dmsg(10, "Unable to display snapshot stats %s %s\n", arg->mountpoint, errmsg); } else { /* TODO: Check that btrfs subvolume show is reporting "Creation time:" */ char *p = strstr(errmsg, "Creation time:"); if (p) { p += strlen("Creation time:"); skip_spaces(&p); createdate = str_to_utime(p); } else { Dmsg(10, "Unable to find Creation time on %s %s\n", arg->mountpoint, errmsg); } } if (!createdate) { createdate = time(NULL); } printf("status=1 volume=\"%s\" createtdate=%s type=btrfs\n", path, edit_uint64(createdate, ed1)); return 1; }; int del() { if (!snapshot::del()) { return 0; } MmsgD2(10, cmd, "%sbtrfs subvolume delete \"%s\"", arg->sudo, arg->volume); if (run_program(cmd, 300, errmsg)) { Dmsg(10, "Unable to delete snapshot %s\n", errmsg); strip_quotes(errmsg); printf("status=0 type=btrfs error=\"%s\"\n", errmsg); return 0; } printf("status=1\n"); return 1; }; /* btrfs subvolume list -u -q -s /tmp/regress/btrfs * ID 259 gen 52 top level 5 parent_uuid - uuid baf4b5d7-28d0-9b4a-856e-36e6fd4fbc96 path .snapshots/aaa */ int list() { char *p, *p2, *end, *path; char id[50], day[50], hour[50]; struct vols *v = NULL, *v2; rblist *lst; if (!snapshot::list()) { return 0; } MmsgD2(10, cmd, "%sbtrfs subvolume list -u -q -o -s \"%s\"", arg->sudo, arg->mountpoint); if (run_program_full_output(cmd, 300, errmsg)) { Dmsg(10, "Unable to list snapshot %s\n", errmsg); strip_quotes(errmsg); printf("status=0 type=btrfs error=\"%s\"\n", errmsg); return 0; } lst = New(rblist(v, &v->link)); /* ID 259 gen 52 top level 5 parent_uuid - uuid 
baf4b5d7-28d0-9b4a-856e-36e6fd4fbc96 path .snapshots/aaa */ for (p = errmsg; p && *p ;) { Dmsg(20, "getting subvolumes from %s", p); /* Replace final \n by \0 to have strstr() happy */ end = strchr(p, '\n'); /* If end=NULL, we are at the end of the buffer (without trailing \n) */ if (end) { *end = 0; } /* Each line is supposed to start with "ID", and end with "path" */ bool ok = false; if (sscanf(p, "ID %49s ", id) == 1) { /* We found ID, look for path */ p2 = strstr(p, "path "); if (p2) { path = p2 + strlen("path "); v = (struct vols*) malloc(sizeof (vols) + strlen(path) + 1); *v->otime = *v->uuid = *v->puuid = 0; v->id = str_to_int64(id); v->count = 0; strcpy(v->path, path); p2 = strstr(p, "otime"); if (p2 && sscanf(p2, "otime %49s %49s", day, hour) == 2) { bsnprintf(v->otime, sizeof(v->otime), "%s %s", day, hour); } p2 = strstr(p, "parent_uuid "); if (p2 && sscanf(p2, "parent_uuid %127s", v->puuid) == 1) { p2 = strstr(p, " uuid "); if (p2 && sscanf(p2, " uuid %127s", v->uuid) == 1) { v2 = (struct vols *)lst->insert(v, vols_compare_uuid); if (v2 != v) { v2->count++; free(v); } ok = true; /* Replace final \n by \0 to have strstr() happy */ Dmsg(10, "puuid=%s uuid=%s path=%s\n", v2->puuid, v2->uuid, v2->path); } } } } if (!ok) { Dmsg(10, "Unable to decode \"%s\" line\n", p); } if (end) { *end = '\n'; end++; } /* If end==NULL, we stop */ p = end; } foreach_rblist(v, lst) { char *name = v->path; int len = strlen(arg->snapdir); if ((p = strstr(v->path, arg->snapdir))) { name = p + len + ((arg->snapdir[len-1] == '/') ? 0 : 1); } printf("volume=\"%s%s%s\" name=\"%s\" device=\"%s\" createdate=\"%s\" type=\"btrfs\"\n", arg->mountpoint, arg->mountpoint[strlen(arg->mountpoint) - 1] == '/' ? "": "/", v->path, name, arg->mountpoint, v->otime ); } delete lst; return 1; }; void scan_subvolumes(char *buf, rblist *lst) { char *p, *end; char id[50]; bool ok; struct vols *elt1 = NULL, *elt2 = NULL; /* btrfs subvolume list /var/lib/pacman/ * ID 349 gen 383 top level 5 path test * ID 354 gen 391 cgen 391 top level 5 otime 2014-11-05 17:49:07 path .snapshots/aa */ for (p = buf; p && *p ;) { Dmsg(20, "getting subvolumes from %s", p); /* Replace final \n by \0 to have strstr() happy */ end = strchr(p, '\n'); /* If end=NULL, we are at the end of the buffer (without trailing \n) */ if (end) { *end = 0; } /* Each line is supposed to start with "ID", and end with "path" */ ok = (sscanf(p, "ID %49s ", id) == 1); if (ok) { /* We found ID, look for path */ p = strstr(p, "path "); if (p) { p += strlen("path "); elt1 = (struct vols *) malloc(sizeof(struct vols) + strlen(p) + 1); elt1->id = str_to_int64(id); elt1->count = 0; strcpy(elt1->path, p); Dmsg(10, "Found path %s for id %s\n", elt1->path, id); elt2 = (struct vols *)lst->insert(elt1, vols_compare_id); if (elt2 != elt1) { elt2->count++; free(elt1); } } else { Dmsg(10, "Unable to find the path in this line\n"); } } else { Dmsg(10, "Unable to decode %s line\n", p); } if (end) { *end = '\n'; end++; } /* If end==NULL, we stop */ p = end; } }; /* List subvolumes, they may not be listed by mount */ int subvolumes() { rblist *lst; struct stat sp; struct vols *elt1 = NULL; char ed1[50]; MmsgD2(10, cmd, "%sbtrfs subvolume show \"%s\"", arg->sudo, arg->mountpoint); if (run_program_full_output(cmd, 300, errmsg)) { Dmsg(10, "Unable to get information %s\n", errmsg); strip_quotes(errmsg); printf("status=0 type=btrfs error=\"%s\"\n", errmsg); return 0; } /* * [eric@zog8 /tmp/regress]$ sudo btrfs sub show /tmp/regress/btrfs/ * / * Name: * UUID: 
7b715434-bb3c-44ce-abcb-803e6bfcf78a * Parent UUID: - * Received UUID: - * Creation time: 2022-05-18 09:43:15 +0200 * Subvolume ID: 5 * Generation: 82 * Gen at creation: 0 * Parent ID: 0 * Top level ID: 0 * Flags: - * Send transid: 0 * Send time: 2022-05-18 09:43:15 +0200 * Receive transid: 0 * Receive time: - * Snapshot(s): */ /* TODO: Very week way to analyse FS */ char *p = strstr(errmsg, "Parent ID:"); if (p) { p += strlen("Parent ID:"); skip_spaces(&p); if (*p != '0') { Dmsg(10, "%s Not btrfs root (Parent ID > 0)\n", arg->mountpoint); printf("status=0 type=btrfs error=\"Not btrfs root fs\"\n"); return 0; } } else if (!strstr(errmsg, "is btrfs root")) { Dmsg(10, "%s Not btrfs root\n", arg->mountpoint); printf("status=0 type=btrfs error=\"Not btrfs root fs\"\n"); return 0; } MmsgD2(10, cmd, "%sbtrfs subvolume list -s \"%s\"", arg->sudo, arg->mountpoint); if (run_program_full_output(cmd, 300, errmsg)) { Dmsg(10, "Unable to list snapshot snapshot %s\n", errmsg); strip_quotes(errmsg); printf("status=0 type=btrfs error=\"%s\"\n", errmsg); return 0; } lst = New(rblist(elt1, &elt1->link)); scan_subvolumes(errmsg, lst); MmsgD2(10, cmd, "%sbtrfs subvolume list \"%s\"", arg->sudo, arg->mountpoint); if (run_program_full_output(cmd, 300, errmsg)) { Dmsg(10, "Unable to list subvolume %s\n", errmsg); strip_quotes(errmsg); printf("status=0 type=btrfs error=\"%s\"\n", errmsg); delete lst; return 0; } scan_subvolumes(errmsg, lst); foreach_rblist(elt1, lst) { if (elt1->count > 0) { /* Looks to be a snapshot, we saw two entries */ continue; } path_concat(path, arg->mountpoint, elt1->path, NULL); if (stat(path, &sp) == 0) { printf("dev=%s mountpoint=\"%s\" fstype=btrfs\n", edit_uint64(sp.st_dev, ed1), path); } else { Dmsg(10, "Unable to stat %s (%s)\n", elt1->path, path); } } delete lst; return 1; }; }; /* Create pool * zpool create pool /dev/device * zfs create pool/eric * zfs set mountpoint=/mnt test/eric * zfs mount pool/eric */ /* zfs backend */ class zfs: public snapshot { public: zfs(arguments *arg): snapshot(arg, "zfs") { arg->snapdir = ".zfs/snapshot"; }; /* With ZFS, the volume is already mounted * but on linux https://github.com/zfsonlinux/zfs/issues/173 * we need to use the mount command. 
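 * Once "zfs set snapdir=visible" has been applied, the snapshot contents
 * appear under <mountpoint>/.zfs/snapshot/<name>, which is the path that
 * mount() below reports as snapmountpoint.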
* TODO: Adapt the code for solaris */ int mount() { struct stat sp; if (!snapshot::mount()) { return 0; } path_concat(path, arg->mountpoint, arg->snapdir, arg->name); if (stat(path, &sp) != 0) { /* See if we can change the snapdir attribute */ MmsgD2(10, cmd, "%szfs set snapdir=visible \"%s\"", arg->sudo, arg->device); if (run_program(cmd, 60, errmsg)) { Dmsg(10, "Unable to change the snapdir attribute %s %s\n", arg->device, errmsg); strip_quotes(errmsg); printf("status=0 error=\"Unable to mount snapshot %s\"\n", errmsg); return 0; } if (stat(path, &sp) != 0) { Dmsg(10, "Unable to get the snapdir %s %s\n", arg->snapdir, arg->device); strip_quotes(errmsg); printf("status=0 error=\"Unable to mount snapshot, no snapdir %s\"\n", arg->snapdir); return 0; } } #if 0 /* On linux, this function is broken for now */ makedir(path); MmsgD4(10, cmd, "%smount %s -t %s \"%s\" \"%s\"", arg->sudo, get_mountopts(arg->device, mountopts), arg->fstype, arg->volume, path); if (run_program(cmd, 60, errmsg)) { Dmsg(10, "Unable to create mount snapshot %s %s\n", arg->volume, errmsg); strip_quotes(errmsg); printf("status=0 error=\"Unable to mount snapshot %s\"\n", errmsg); return 0; } #endif fprintf(stdout, "status=1 snapmountpoint=\"%s\" snapdirectory=\"%s/%s\"\n", path, arg->mountpoint, arg->snapdir); return 1; }; /* No need to unmount something special */ int unmount() { printf("status=1\n"); return 1; }; int support() { if (!snapshot::support()) { return 0; } MmsgD2(10, cmd, "%szfs list -H -o name \"%s\"", arg->sudo, arg->mountpoint); if (run_program(cmd, 60, errmsg)) { Dmsg(10, "Unable to get device %s %s\n", arg->mountpoint, errmsg); strip_quotes(errmsg); printf("status=0 error=\"Unable to get device %s\"\n", errmsg); return 0; } strip_trailing_junk(errmsg); /* If the fstype is zfs, snapshots are supported */ printf("status=1 device=\"%s\" type=zfs\n", errmsg); return 1; }; int create() { char ed1[50]; if (!snapshot::create()) { return 0; } Mmsg(path, "%s@%s", arg->device, arg->name); /* Create the actual zfs snapshot */ MmsgD2(10, cmd, "%szfs snapshot \"%s\"", arg->sudo, path); if (run_program(cmd, 60, errmsg)) { Dmsg(10, "Unable to create snapshot %s %s\n", arg->device, errmsg); strip_quotes(errmsg); printf("status=%d error=\"Unable to create snapshot %s\"\n", get_error_code(), errmsg); return 0; } MmsgD2(10, cmd, "%szfs get -p creation \"%s\"", arg->sudo, path); if (run_program_full_output(cmd, 60, errmsg)) { Dmsg(10, "Unable to display snapshot stats %s %s\n", arg->device, errmsg); strip_quotes(errmsg); printf("status=%d error=\"Unable to get snapshot info %s\"\n", get_error_code(), errmsg); return 0; } /* TODO: Check that zfs get is reporting "creation" time */ Mmsg(cmd, "NAME PROPERTY VALUE SOURCE\n%s creation %%s", path); if (sscanf(errmsg, cmd, ed1) == 1) { Dmsg(10, "Found CreateTDate=%s\n", ed1); printf("status=1 volume=\"%s\" createtdate=%s type=zfs\n", path, ed1); } else { printf("status=1 volume=\"%s\" createtdate=%s type=zfs\n", path, edit_uint64(time(NULL), ed1)); } return 1; }; int del() { if (!snapshot::del()) { return 0; } MmsgD2(10, cmd, "%szfs destroy \"%s\"", arg->sudo, arg->volume); if (run_program(cmd, 300, errmsg)) { Dmsg(10, "Unable to delete snapshot %s\n", errmsg); strip_quotes(errmsg); printf("status=0 type=zfs error=\"%s\"\n", errmsg); return 0; } printf("status=1\n"); return 1; }; /* zfs list -t snapshot * test/eric@snap1 17K - 21K - * test/eric@snap2 17K - 21K - * * it is possible to change fields to display with -o */ int list() { POOL_MEM buf2; if (!snapshot::list()) { return 
0; } MmsgD1(10, cmd, "%szfs list -t snapshot -H -o name,used,creation", arg->sudo); /* rpool@basezone_snap00 0 Fri Mar 6 9:55 2015 */ if (run_program_full_output(cmd, 60, errmsg)) { Dmsg(10, "Unable to list snapshot %s\n", errmsg); strip_quotes(errmsg); printf("status=0 error=\"Unable to list snapshot %s\"\n", errmsg); return 0; } int i = 1, Day, Year, Hour, Min; char DayW[50], Month[50], CreateDate[250]; const char *buf[4]; buf[0] = errmsg; for (char *p = errmsg; p && *p ; p++) { if (*p == '\n') { *p = 0; /* Flush the current one */ if (!arg->device || strcmp(arg->device, buf[0]) == 0) { if (sscanf(buf[3], "%49s %49s %d %d:%d %d", DayW, Month, &Day, &Hour, &Min, &Year) == 6) { /* Get a clean iso format */ for (int j=1; j <= 12 ; j++) { if (strcmp(Month, Months[j]) == 0) { bsnprintf(Month, sizeof(Month), "%02d", j); } } bsnprintf(CreateDate, sizeof(CreateDate), "%d-%s-%02d %02d:%02d:00", Year, Month, Day, Hour, Min); buf[3] = CreateDate; } printf("volume=\"%s@%s\" name=\"%s\" device=\"%s\" size=\"%s\" " "createdate=\"%s\" status=1 error=\"\" type=\"zfs\"\n", buf[0], buf[1], buf[1], buf[0], buf[2], buf[3]); } else { Dmsg(10, "Do not list %s@%s\n", buf[0], buf[1]); } i = 1; buf[0] = p+1; buf[1] = buf[2] = buf[3] = ""; } else if ((*p == '\t' || *p == '@') && i < 4) { buf[i++] = p+1; *p = 0; } } return 1; }; }; /* Structure of the LVS output */ typedef struct { const char *name; int pos; } Header; /* -1 is mandatory, -2 is optionnal */ static Header lvs_header[] = { /* KEEP FIRST */ {"Path", -1}, /* Volume Path: /dev/ubuntu-vg/root */ {"DMPath",-2}, /* Device mapper Path /dev/mapper/ubuntu--vg-root */ {"LV", -1}, /* Volume Name: root */ {"Attr", -1}, /* Attributes: -wi-ao--- */ {"KMaj", -1}, /* Kernel Major: 252 */ {"KMin", -1}, /* Kernel Minor: 0 */ {"LSize", -1}, /* Size (b) */ {"#Seg", -1}, /* Number of segments */ {"Origin",-1}, {"OSize", -1}, {"Snap%", -1}, {"Time", -1}, /* Creation date (Keep it above CTime) */ {"CTime", -2}, /* Creation date (Keep it below Time) */ {NULL, -1} }; static Header vgs_header[] = { /* KEEP FIRST */ {"VG", -1}, /* VG Name: vgroot */ {"VSize", -1}, /* Size */ {"VFree", -1}, /* Space left */ {"#Ext", -1}, /* Nb Ext */ {"Free", -1}, /* Nb Ext free */ {"Ext", -1}, /* Ext size */ {NULL, -1} }; /* LVM backend, not finished */ class lvm: public snapshot { public: alist *lvs, *vgs; int lvs_nbelt, vgs_nbelt; lvm(arguments *arg): snapshot(arg, "lvm"), lvs(NULL), vgs(NULL), lvs_nbelt(0), vgs_nbelt(0) {}; ~lvm() { free_header(lvs, lvs_nbelt); free_header(vgs, vgs_nbelt); }; void free_header(alist *lst, int nbelt) { if (lst) { char **current; /* cleanup at the end */ foreach_alist(current, lst) { for (int j=0; j < nbelt ; j++) { Dmsg(150, "current[%d] = %s\n", j, current[j]); free(current[j]); } free(current); } delete lst; } }; char *get_vg_from_lv_path(char *path, char *vg, int max) { char *p; if (!path) { return NULL; } /* Make a copy of the path */ bstrncpy(vg, path, max); path = vg; if (strncmp(path, "/dev/", 5) != 0) { Dmsg(10, "Strange path %s\n", path); return NULL; } path += 5; /* skip /dev/ */ /* End the string at the last / */ p = strchr(path, '/'); if (!p) { Dmsg(10, "Strange end of path %s\n", path); return NULL; } *p = 0; return path; }; /* Report the space available on VG */ int64_t get_space_available(char *lv) { char buf[512]; char *vgname = get_vg_from_lv_path(get_lv_value(lv, "Path"), buf, sizeof(buf)); if (vgname) { char *s = get_vg_value(vgname, "VFree"); if (s) { return str_to_int64(s); } else { Dmsg(10, "Unable to get VFree\n"); } } else { Dmsg(10, 
"Unable to get VG from %s\n", lv); } return -1; }; /* return vg_ssd-pacman */ char *get_lv_from_dm(char *dm, POOLMEM **ret, uint32_t *major, uint32_t *minor) { struct stat sp; char *p, *start; uint32_t maj, min; /* Looks to be a device mapper, need to convert the name */ if (strncmp(dm, "/dev/dm", strlen("/dev/dm")) != 0) { return NULL; } if (stat(dm, &sp) < 0) { return NULL; } MmsgD1(10, cmd, "%sdmsetup ls", arg->sudo); if (run_program_full_output(cmd, 60, errmsg)) { Dmsg(10, "Unable to query dmsetup %s\n", errmsg); return NULL; } /* vg_ssd-pacman-real (254:1) * vg_ssd-pacman (254:0) * or * vg_ssd-pacman-real (254, 1) * vg_ssd-pacman-real (254, 1) */ *ret = check_pool_memory_size(*ret, strlen(errmsg)+1); for (start = p = errmsg; *p ; p++) { if (*p == '\n') { *p = 0; if (sscanf(start, "%s (%d:%d)", *ret, &maj, &min) == 3 || sscanf(start, "%s (%d, %d)", *ret, &maj, &min) == 3) { if (maj == major(sp.st_rdev) && min == minor(sp.st_rdev)) { return *ret; } } start = p+1; } } return NULL; }; /* The LV path from name or dmpath */ char **get_lv(char *lv) { char **elt = NULL, *dm = NULL; int path = get_value_pos(lvs_header, "Path"); int dmpath = get_value_pos(lvs_header, "DMPath"); int kmaj = get_value_pos(lvs_header, "KMaj"); int kmin = get_value_pos(lvs_header, "KMin"); uint32_t min = 0, maj = 0; POOLMEM *buf = get_pool_memory(PM_FNAME); if (!lv || (path < 0 && dmpath < 0)) { Dmsg(10, "Unable to get LV parameters\n"); goto bail_out; } dm = get_lv_from_dm(lv, &buf, &maj, &min); Dmsg(50, "%s = get_lv_from_dm(%s, %s, %d, %d)\n", dm, lv, buf, maj, min); /* HERE: Need to loop over LVs */ foreach_alist(elt, lvs) { if (path > 0 && strcmp(NPRT(elt[path]), lv) == 0) { goto bail_out; } if (dmpath > 0 && strcmp(NPRT(elt[dmpath]), lv) == 0) { goto bail_out; } /* Try by Minor/Major if comming from device mapper */ if ((maj && kmaj && str_to_uint64(elt[kmaj]) == maj) && (min && kmin && str_to_uint64(elt[kmin]) == min)) { goto bail_out; } /* Find if /dev/mapper/vg_ssd-pacman matches vg_ssd-pacman */ if (dm && dmpath && strlen(elt[dmpath]) > strlen("/dev/mapper/")) { if (strcmp(elt[dmpath] + strlen("/dev/mapper/"), dm) == 0) { goto bail_out; } } /* Special case for old LVM where mapper path doesn't exist */ if (dmpath < 0 && strncmp("/dev/mapper/", lv, 12) == 0) { POOLMEM *buf2 = get_memory(strlen(elt[path])*2+10); pm_strcpy(buf2, "/dev/mapper/"); char *d = buf2 + 12; /* Skip /dev/mapper/ */ bool ret = false; /* Keep the same path, but escape - to -- and / to - */ for (char *p = elt[path]+5; *p ; p++) { if (*p == '-') { *d++ = *p; } /* Escape / to - if needed */ *d++ = (*p == '/') ? 
'-' : *p; } *d = 0; ret = (strcmp(buf2, lv) == 0); free_pool_memory(buf2); if (ret) { goto bail_out; } } } Dmsg(10, "%s not found in lv list\n", lv); return NULL; /* not found */ bail_out: if (buf) { free_pool_memory(buf); } return elt; }; /* Report LV Size in bytes */ int64_t get_lv_size(char *name) { char **elt = get_lv(arg->device); int sp; if (!elt) { return -1; } sp = get_value_pos(lvs_header, "LSize"); /* Check if we have enough space on the VG */ return str_to_int64(elt[sp]); }; char *get_lv_value(char *name, const char *value) { return get_value(lvs_header, lvs_nbelt, lvs, name, value); }; int get_value_pos(Header *header, const char *value) { for (int i = 0; header[i].name ; i++) { if (strcmp(header[i].name, value) == 0) { return header[i].pos; } } return -1; /* not found */ }; /* Return an element value */ char *get_value(Header *header, int nbelt, alist *lst, char *name, const char *value) { char **elt; int pos = get_value_pos(header, value); int id = header[0].pos; /* position name */ if (pos < 0 || id == -1) { return NULL; } /* Loop over elements we have, and return the value that is asked */ foreach_alist(elt, lst) { if (strcmp(NPRT(elt[id]), name) == 0) { return elt[pos]; } } return NULL; }; /* Return a parameter for a VolumeGroup */ char *get_vg_value(char *vg, const char *value) { return get_value(vgs_header, vgs_nbelt, vgs, vg, value); }; /* Get snapshot size, look in config file if needed */ int64_t get_lvm_snapshot_size(char *lv) { char *tmp, **elt; uint64_t s, size; int sp; alist *lst; int pos = arg->ini.get_item("lvm_snapshot_size"); if (!arg->ini.items[pos].found) { return -1; /* Nothing specified, stop here */ } lst = arg->ini.items[pos].val.alistval; if (lst) { /* /dev/ubuntu-vg/root:100M * /dev/ubuntu-vg/home:10% * /dev/ubuntu-vg/var:200GB */ foreach_alist(tmp, lst) { char *p = strchr(tmp, ':'); /* Check the LV name */ if (p && strncmp(tmp, lv, p - tmp) != 0) { continue; } /* This is a percent */ if (strchr(p+1, '%') != NULL) { Dmsg(10, "Found a %%\n"); s = str_to_int64(p+1); /* Compute the requested size */ sp = get_value_pos(lvs_header, "LSize"); elt = get_lv(lv); size = str_to_int64(elt[sp]); return s * size / 100; } /* It might be a size */ if (size_to_uint64(p+1, strlen(p+1), &s)) { char ed1[50]; Dmsg(10, "Found size %s\n", edit_uint64(s, ed1)); return s; } Dmsg(10, "Unable to use %s\n", tmp); return -1; } } return -1; }; int create() { char ed1[50], ed2[50]; char *name, *ts, buf[128], *lvname; int64_t size, ssize, maxsize; btime_t t; if (!snapshot::create()) { return 0; } if (!parse_lvs_output() || !parse_vgs_output()) { printf("status=%d error=\"Unable parse lvs or vgs output\"\n", get_error_code()); return 0; } path_concat(path, arg->mountpoint, arg->snapdir, arg->name); if (!makedir(path)) { printf("status=%d error=\"Unable to create mountpoint directory %s errno=%d\n", get_error_code(), arg->mountpoint, errno); return 0; } name = get_lv_value(arg->device, "LV"); size = get_lv_size(arg->device); if (size < 0) { printf("status=%d error=\"Unable to get lv size\"\n", get_error_code()); return 0; } ssize = get_lvm_snapshot_size(arg->device); if (ssize > 0) { size = ssize; } else { size = size / 10; /* Ask to get 10% */ } size = (size / 512L) * 512L; lvname = get_lv_value(arg->device, "Path"); maxsize = get_space_available(lvname); Dmsg(10, "maxsize=%s size=%s\n", edit_int64(maxsize, ed1), edit_int64(size, ed2)); if (maxsize < 0) { printf("status=%d error=\"Unable to detect maxsize\" type=lvm\n", get_error_code()); return 0; } if (size > maxsize) { 
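         /* the requested snapshot size does not fit in the free space
          * reported by vgs (VFree); report both figures and give up */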
printf("status=%d error=\"Not enough space left on VG %sB, " "%sB is required\" type=lvm\n", get_error_code(), edit_uint64_with_suffix(maxsize, ed1), edit_uint64_with_suffix(size, ed2)); return 0; } /* TODO: Need to get the volume name and add the snapshot * name at the end */ MmsgD5(10, cmd, "%s lvcreate -s -n \"%s_%s\" -L %lldb \"%s\"", arg->sudo, name, arg->name, size, arg->device); if (run_program(cmd, 60, errmsg)) { Dmsg(10, "Unable to create snapshot %s %s\n", arg->name, errmsg); strip_quotes(errmsg); printf("status=0 error=\"Unable to create snapshot %s\"\n", errmsg); return 0; } if (!parse_lvs_output()) { Dmsg(10, "Unable to parse lvm output after snapshot creation\n"); printf("status=0 error=\"Unable to parse lvs\"\n"); return 0; } Mmsg(cmd, "%s_%s", arg->device, arg->name); ts = get_lv_value(cmd, "CTime"); if (!ts) { ts = get_lv_value(cmd, "Time"); if (!ts) { Dmsg(10, "Unable to find snapshot time in lvs output\n"); bstrutime(buf, sizeof(buf), time(NULL)); ts = buf; } } strip_tz(ts); t = str_to_utime(ts); Dmsg(10, "status=1 volume=\"%s_%s\" createtdate=\"%s\" createdate=\"%s\" type=lvm\n", arg->device, arg->name, edit_uint64(t, ed1), ts); printf("status=1 volume=\"%s_%s\" createtdate=\"%s\" type=lvm\n", arg->device, arg->name, edit_uint64(t, ed1)); return 1; }; int del() { if (!snapshot::del()) { return 0; } MmsgD2(10, cmd, "%slvremove -f \"%s\"", arg->sudo, arg->volume); if (run_program(cmd, 60, errmsg)) { Dmsg(10, "Unable to delete snapshot %s %s\n", arg->name, errmsg); strip_quotes(errmsg); printf("status=0 error=\"Unable to delete snapshot %s\"\n", errmsg); return 0; } printf("status=1\n"); return 1; }; int check() { if (!snapshot::check()) { return 0; } parse_vgs_output(); for (int i = 0; vgs_header[i].name ; i++) { if (vgs_header[i].pos == -1) { Dmsg(10, "Unable to use the output of the vgs command. %s is missing\n", vgs_header[i].name); printf("status=0 error=\"Unable to use output of vgs command." " %s is missing.\"\n", vgs_header[i].name); return 0; } } parse_lvs_output(); for (int i = 0; lvs_header[i].name ; i++) { /* Copy CTime to Time if any */ if (strcmp(lvs_header[i].name, "Time") == 0 && lvs_header[i].pos < 0) { lvs_header[i].pos = lvs_header[i+1].pos; } if (lvs_header[i].pos == -1) { Dmsg(10, "Unable to use the output of the lvs command. %s is missing\n", lvs_header[i].name); printf("status=0 error=\"Unable to use output of lvs command." " %s is missing.\"\n", lvs_header[i].name); return 0; } } return 1; }; void strip_double_slashes(char *fname) { char *p = fname; while (p && *p) { p = strpbrk(p, "/\\"); if (p != NULL) { if (IsPathSeparator(p[1])) { strcpy(p, p+1); } p++; } } }; int mount() { if (!snapshot::mount()) { return 0; } path_concat(path, arg->mountpoint, arg->snapdir, arg->name); if (!makedir(path)) { printf("status=0 error=\"Unable to create mount point %s errno=%d\"\n", path, errno); return 0; } MmsgD4(10, cmd, "%smount %s \"%s\" \"%s\"", arg->sudo, get_mountopts(arg->device, mountopts), arg->volume, path); if (run_program(cmd, 60, errmsg) != 0) { Dmsg(10, "Unable to mount volume. 
ERR=%s\n", errmsg); strip_quotes(errmsg); printf("status=0 error=\"Unable to mount the device %s\"\n", errmsg); return 0; } Dmsg(10, "status=1 snapmountpoint=\"%s\" snapdirectory=\"%s/%s\"\n", path, arg->mountpoint, arg->snapdir); printf("status=1 snapmountpoint=\"%s\" snapdirectory=\"%s/%s\"\n", path, arg->mountpoint, arg->snapdir); return 1; }; int unmount() { int ret, retry = arg->retry; if (!snapshot::unmount()) { return 0; } MmsgD2(10, cmd, "%sumount \"%s\"", arg->sudo, arg->snapmountpoint); do { ret = run_program(cmd, 60, errmsg); if (ret != 0) { Dmsg(10, "Unable to unmount the directory. ERR=%s\n", errmsg); sleep(3); } } while (ret != 0 && retry-- > 0); if (ret != 0) { Dmsg(10, "Unable to mount volume. ERR=%s\n", errmsg); strip_quotes(errmsg); printf("status=0 error=\"Unable to umount the device %s\"\n", errmsg); return 0; } retry = arg->retry; do { Dmsg(10, "Trying to delete mountpoint %s\n", arg->snapmountpoint); if ((ret = rmdir(arg->snapmountpoint)) != 0) { sleep(3); } } while (retry-- > 0 && ret != 0); if (ret != 0) { berrno be; Dmsg(10, "Unable to delete mountpoint after unmount\n"); printf("error=\"Unable to delete mountpoint after unmount errno=%s\"", be.bstrerror(errno)); } printf(" status=1\n"); return 1; }; /* TODO: Here we need to check LVM settings */ int support() { char **elt; int mp; if (!snapshot::support()) { return 0; } if (!check()) { return 0; } elt = get_lv(arg->device); if (!elt) { Dmsg(10, "Not detected as LVM\n"); printf("status=0 error=\"Not detected as LVM\"\n"); return 0; } mp = get_value_pos(lvs_header ,"Path"); printf("status=1 device=\"%s\" type=lvm\n", elt[mp]); return 1; }; /* count the number of column in the output */ int count_col(char *l, char sep) { int nb=0; for (char *p = l ; *p ; p++) { if (*p == sep) { nb++; } } return nb; }; /* Decode the Attr field */ int decode_attr(char *l) { /* * Volume type: (m)irrored, (M)irrored without initial sync, * (o)rigin, (O)rigin with merging snapshot, (r)aid, (R)aid * without initial sync, (s)napshot, merging (S)napshot, * (p)vmove, (v)irtual, mirror or raid (i)mage, mirror or raid * (I)mage out-of-sync, mirror (l)og device, under (c)onversion, * thin (V)olume, (t)hin pool, (T)hin pool data, raid or thin * pool m(e)tadata */ return 0; }; bool parse_vgs_output() { MmsgD1(10, cmd, "%s vgs -o vg_all --separator=; --units b --nosuffix", arg->sudo); if (vgs) { free_header(vgs, vgs_nbelt); vgs_nbelt=0; } vgs = New(alist(10, not_owned_by_alist)); if (!parse_output(cmd, vgs, &vgs_nbelt, vgs_header)) { return false; } return true; }; bool parse_lvs_output() { MmsgD1(10, cmd, "%s lvs -o lv_all --separator=; --units b --nosuffix", arg->sudo); if (lvs) { free_header(lvs, lvs_nbelt); lvs_nbelt=0; } lvs = New(alist(10, not_owned_by_alist)); if (!parse_output(cmd, lvs, &lvs_nbelt, lvs_header)) { return false; } return true; }; /* Function to parse LVM command output */ bool parse_output(char *cmd, alist *ret, int *ret_nbelt, Header *hdr) { char *p; int i=0; int pos=0; int nbelt=0; char buf[2048]; /* Size for a single line */ bool header_done=false; if (run_program_full_output(cmd, 60, errmsg)) { strip_quotes(errmsg); Dmsg(10, "Unable to run lvs. 
ERR=%s\n", errmsg); return false; } char **current = NULL; for (p = errmsg; *p ; p++) { if (*p == ';' || *p == '\n') { /* We have a separator, handle current value */ buf[i]=0; if (!header_done) { Dmsg(300, "header=%s\n", buf); nbelt++; /* Keep the number of element in the line */ /* Find if we need this value, and where to store it */ for (int j=0; hdr[j].name ; j++) { if (strcasecmp(buf, hdr[j].name) == 0) { Dmsg(150, "header[%d]=%s\n", pos, buf); hdr[j].pos = pos; break; } } } else { if (pos == 0) { /* First item, need to allocate new array */ current = (char **)malloc(nbelt * sizeof(char *) + 1); memset(current, 0, nbelt * sizeof(char *) + 1); ret->append(current); } /* Keep the current value */ current[pos] = bstrdup(buf); } i = 0; if (*p == '\n') { pos = 0; header_done = true; } else { pos++; } } else if (i < (int)sizeof(buf)) { buf[i++] = *p; } else { Dmsg(10, "Output too big !!! %s\n", errmsg); return false; } } *ret_nbelt = nbelt; return true; }; int list() { char **elt, **elt2 = NULL, ed1[50]; const char *err = NULL; int p_attr, p_path, p_origin, p_time, p_ctime, p_size; POOLMEM *p, *f, *d; int fnl, pnl, status; btime_t t; char *c_time=NULL; if (!snapshot::list()) { return false; } if (!parse_lvs_output()) { return false; } p_attr = get_value_pos(lvs_header, "Attr"); p_path = get_value_pos(lvs_header, "Path"); p_time = get_value_pos(lvs_header, "Time"); p_ctime = get_value_pos(lvs_header, "CTime"); p_size = get_value_pos(lvs_header, "Snap%"); p_origin = get_value_pos(lvs_header, "Origin"); if (p_time < 0 && p_ctime < 0) { printf("status=1 error=\"Unable to get snapshot Time from lvs command\"\n"); return false; } if (p_origin < 0) { printf("status=1 error=\"Unable to get snapshot Origin from lvs command\"\n"); return false; } p = get_pool_memory(PM_FNAME); f = get_pool_memory(PM_FNAME); d = get_pool_memory(PM_FNAME); elt2 = get_lv(arg->device); /* TODO: We need to get the device name from the mount point */ foreach_alist(elt, lvs) { char *attr = elt[p_attr]; /* swi-a-s-- */ if (attr[0] == 's') { if (attr[4] == 'I') { /* 5 State: (a)ctive, (s)uspended, (I)nvalid snapshot, invalid (S)uspended * snapshot, snapshot (m)erge failed, suspended snapshot (M)erge * failed, mapped (d)evice present without tables, mapped device * present with (i)nactive table, (X) unknown */ status = 0; err = "Invalid snapshot"; } else { status = 1; err = ""; } split_path_and_filename(elt[p_path], &p, &pnl, &f, &fnl); Mmsg(d, "%s%s", p, elt[p_origin]); if ((!arg->device || strcmp(arg->device, d) == 0) || (elt2 && strcmp(elt2[p_path], d) == 0)) { /* On LVM, the name is LV_SnapshotName, we can strip the LV_ if we find it */ Mmsg(p, "%s_", d); /* /dev/mapper/vg_ssd/test_ */ if (strncmp(p, elt[p_path], strlen(p)) == 0) { pm_strcpy(f, elt[p_path] + strlen(p));/* test_MySnapshot_2020.. 
=> MySnapshot_2020 */ } if (p_ctime > 0) { c_time = elt[p_ctime]; } else { c_time = elt[p_time]; } strip_tz(c_time); t = str_to_utime(c_time); printf("volume=\"%s\" device=\"%s\" name=\"%s\" createtdate=\"%s\" size=\"%s\" " "status=%d error=\"%s\" type=lvm\n", elt[p_path], d, f, edit_uint64(t, ed1), elt[p_size], status, err); } } } free_pool_memory(p); free_pool_memory(f); free_pool_memory(d); return true; }; }; /* The simulator is using a simple symlink */ class simulator: public snapshot { public: simulator(arguments *arg): snapshot(arg, "simulator") {}; int mount() { if (!snapshot::mount()) { return 0; } split_path_and_filename(arg->volume, &path, &pnl, &fname, &fnl); printf("status=1 snapmountpoint=\"%s\" snapdirectory=\"%s\"\n", arg->volume, path); return 1; }; int unmount() { printf("status=1\n"); return 1; }; int support() { if (!snapshot::support()) { return 0; } if (access(arg->mountpoint, W_OK) != 0) { printf("status=0 device=\"%s\" type=simulator " "error=\"Unable to access mountpoint\"\n", arg->mountpoint); return 0; } printf("status=1 device=\"%s\" type=simulator\n", arg->mountpoint); return 1; }; int create() { char ed1[50]; utime_t now; if (!snapshot::create()) { return 0; } Mmsg(path, "%s/%s", arg->mountpoint, arg->snapdir); makedir(path); now = time(NULL); MmsgD2(10, cmd, "ln -vsf \"%s\" \"%s\"", arg->mountpoint, path); if (run_program(cmd, 60, errmsg)) { Dmsg(10, "Unable to create symlink. ERR=%s\n", errmsg); strip_quotes(errmsg); printf("status=%d error=\"Unable to umount the device %s\"\n", get_error_code(), errmsg); } printf("status=1 volume=\"%s\" createtdate=%s type=simulator\n", path, edit_uint64(now, ed1)); return 1; }; int del() { int ret; if (!snapshot::del()) { return 0; } ret = unlink(arg->volume); printf("status=%d\n", (ret == 0)? 
1 : 0); return 1; }; }; snapshot *detect_snapshot_backend(arguments *arg) { if (arg->type) { if (strcasecmp(arg->type, "btrfs") == 0) { return new btrfs(arg); } else if (strcasecmp(arg->type, "lvm") == 0) { return new lvm(arg); } else if (strcasecmp(arg->type, "simulator") == 0) { return new simulator(arg); } else if (strcasecmp(arg->type, "zfs") == 0) { return new zfs(arg); } else if (strcasecmp(arg->type, "apfs") == 0) { return new apfs(arg); } } if (arg->fstype) { if (strcasecmp(arg->fstype, "btrfs") == 0) { return new btrfs(arg); } else if (strcasecmp(arg->fstype, "tmpfs") == 0) { return new simulator(arg); } else if (strcasecmp(arg->fstype, "apfs") == 0) { return new apfs(arg); /* TODO: Need to find something smarter here */ } else if (strcasecmp(arg->fstype, "ext4") == 0) { return new lvm(arg); } else if (strcasecmp(arg->fstype, "xfs") == 0) { return new lvm(arg); } else if (strcasecmp(arg->fstype, "ext3") == 0) { return new lvm(arg); } else if (strcasecmp(arg->fstype, "zfs") == 0 || strcasecmp(arg->fstype, "fuse.zfs") == 0) { return new zfs(arg); } } Dmsg(10, "Backend not found\n"); return NULL; } /* defined in jcr.c */ void create_jcr_key(); int main(int argc, char **argv) { snapshot *snap; arguments arg; char ch; int ret=0; struct stat sp; set_trace_file("/dev/null"); setlocale(LC_ALL, ""); setenv("LANG", "C", true); bindtextdomain("bacula", LOCALEDIR); textdomain("bacula"); lmgr_init_thread(); OSDependentInit(); init_stack_dump(); my_name_is(argc, argv, "bsnapshot"); create_jcr_key(); while ((ch = getopt(argc, argv, "?d:vc:so:V:T:t")) != -1) { switch (ch) { case 'd': /* set debug level */ debug_level = atoi(optarg); if (debug_level <= 0) { debug_level = 1; } break; case 'v': arg.verbose++; break; case 's': /* use sudo */ arg.sudo = "sudo "; break; case 'c': /* config file */ pm_strcpy(arg.config_file, optarg); if (stat(optarg, &sp) < 0) { Pmsg(000, "Unable to access %s. ERR=%s\n",optarg, strerror(errno)); usage(_("Unable to open -p argument for reading")); } break; case 'o': /* where to send the debug output */ set_trace_file(optarg); break; case 't': arg.action = (char *)"check"; break; case 'V': /* set volume name */ arg.volume = optarg; break; case 'T': /* device type */ arg.type = optarg; break; default: usage(); } } argc -= optind; argv += optind; if (!arg.validate()) { usage(); } if (arg.disabled) { Dmsg(10, "disabled from config file\n"); exit (1); } snap = detect_snapshot_backend(&arg); if (!snap) { printf("status=0 error=\"Unable to detect snapshot backend\"\n"); exit(0); } start_watchdog(); if (strcasecmp(arg.action, "mount") == 0) { ret = snap->mount(); } else if (strcasecmp(arg.action, "support") == 0) { ret = snap->support(); } else if (strcasecmp(arg.action, "create") == 0) { ret = snap->create(); } else if (strcasecmp(arg.action, "delete") == 0) { ret = snap->del(); } else if (strcasecmp(arg.action, "subvolumes") == 0) { ret = snap->subvolumes(); } else if (strcasecmp(arg.action, "list") == 0) { ret = snap->list(); } else if (strcasecmp(arg.action, "check") == 0) { ret = snap->check(); } else if (strcasecmp(arg.action, "unmount") == 0) { ret = snap->unmount(); } delete snap; stop_watchdog(); close_memory_pool(); lmgr_cleanup_main(); Dmsg(10, "exit code = %d\n", (ret == 1) ? 0 : 1); return (ret == 1)? 0 : 1; } bacula-15.0.3/src/tools/grow.c0000644000175000017500000000257014771010173015737 0ustar bsbuildbsbuild/* By John Walker written ages ago. Create a sparse file. 
Beat denial of service floggers to death by persuading them to download a HOW_BIG pseudo GIF file which is actually a holey file occupying trivial space on our server. Make: make gigaslam Run: ./gigaslam Output: a file named gigaslam.gif that contains something like 16K bytes (i.e. 2-8K blocks), but appears to be 1GB in length because the second block is written at a 1GB address. Be careful what you do with this file as not all programs know how to deal with sparse files. Tweaked by Kern Sibbald, July 2007 to grow a file to a specified size. */ #ifdef __GNUC__ #ifndef _GNU_SOURCE #define _GNU_SOURCE #endif #ifndef _FILE_OFFSET_BITS #define _FILE_OFFSET_BITS 64 #endif #endif #include "bacula.h" int main(int argc, char *argv[]) { off_t howBig; FILE *fp; if (argc != 3) { Pmsg0(0, "Calling sequence: grow \n"); exit(1); } howBig = str_to_int64(argv[2]); fp = fopen(argv[1], "r+"); if (!fp) { berrno be; Pmsg2(0, "Could not open %s for write. ERR=%s\n", argv[1], be.bstrerror()); exit(1); } char trailer[] = "xxxxxxx\n"; fseeko(fp, howBig - strlen(trailer), SEEK_SET); fwrite(trailer, strlen(trailer), 1, fp); fclose(fp); return 0; } bacula-15.0.3/src/tools/gigaslam.c0000644000175000017500000000232414771010173016542 0ustar bsbuildbsbuild/* By John Walker written ages ago. Create a sparse file. Beat denial of service floggers to death by persuading them to download a HOW_BIG pseudo GIF file which is actually a holey file occupying trivial space on our server. Make: make gigaslam Run: ./gigaslam Output: a file named gigaslam.gif that contains something like 16K bytes (i.e. 2-8K blocks), but appears to be 1GB in length because the second block is written at a 1GB address. Be careful what you do with this file as not all programs know how to deal with sparse files. */ #define HOW_BIG 1000000000ll #ifdef __GNUC__ #ifndef _GNU_SOURCE #define _GNU_SOURCE #endif #ifndef _FILE_OFFSET_BITS #define _FILE_OFFSET_BITS 64 #endif #endif #include #include #include int main(int argc, char *const *argv) { FILE *fp = fopen("gigaslam.gif", "w"); char header[] = "\n
\n"; htmlbuf += "\n" + curSvc + " "; /* rest of string is marked as pre-formatted (here trimming should * be avoided, to preserve original formatting) */ QString msg(field.mid(colon+2)); if (msg.startsWith( tr("Error:")) ) { /* FIXME: should really be translated ? */ /* error msg, use a specific class */ htmlbuf += "
" + msg + "
";
            } else {
               htmlbuf += msg;
            }
         } else {
            /* non standard string, place as-is */
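            /* print the timestamp only when it changes: a time equal to the
             * previous line's is cleared so it is not repeated */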
            if (curTime == lastTime) {
               curTime.clear();
            } else {
               lastTime = curTime;
            }
//          htmlbuf += "
"; htmlbuf += "\n" + field ; } } /* foreach resultline */ htmlbuf += ""; /* full text ready. Here a custom sheet is used to align columns */ QString logSheet(".err {color:#FF0000;}"); textJobLog->document()->setDefaultStyleSheet(logSheet); textJobLog->document()->setHtml(htmlbuf); textJobLog->moveCursor(QTextCursor::Start); } /* if results from query */ } void Job::storeBwLimit(int val) { m_bwlimit = val; } void Job::updateRunInfo() { QString cmd; QStringList results; QStringList lst; bool parseit=false; #ifdef xxx /* This doesn't seem like the right thing to do */ if (m_bwlimit >= 100) { cmd = QString("setbandwidth limit=" + QString::number(m_bwlimit) + " jobid=" + m_jobId); m_console->dir_cmd(cmd, results); results.clear(); m_bwlimit = 0; } #endif cmd = QString(".status client=\"" + m_client + "\" running"); /* * JobId 5 Job backup.2010-12-21_09.28.17_03 is running. * VSS Full Backup Job started: 21-Dec-10 09:28 * Files=4 Bytes=610,976 Bytes/sec=87,282 Errors=0 * Files Examined=4 * Processing file: /tmp/regress/build/po/de.po * SDReadSeqNo=5 fd=5 * * Or * JobId=5 * Job=backup.2010-12-21_09.28.17_03 * VSS=1 * Files=4 * Bytes=610976 * */ QRegExp jobline("(JobId) (\\d+) Job "); QRegExp itemline("([\\w /]+)[:=]\\s*(.+)"); QRegExp filesline("Files: Examined=([\\d,]+) Backed up=([\\d,])"); QRegExp oldline("Files=([\\d,]+) Bytes=([\\d,]+) Bytes/sec=([\\d,]+) Errors=([\\d,]+)"); QRegExp restoreline("Files: Restored=([\\d,]+) Expected=([\\d,]+) Completed=([\\d,]+)%"); QRegExp restoreline2("Files Examined=([\\d,]+) Expected Files=([\\d,]+) Percent Complete=([\\d,]+)"); QString com(","); QString empty(""); if (m_console->dir_cmd(cmd, results)) { foreach (QString mline, results) { foreach (QString line, mline.split("\n")) { line = line.trimmed(); if (oldline.indexIn(line) >= 0) { if (parseit) { lst = oldline.capturedTexts(); label_JobErrors->setText(lst[4]); label_Speed->setText(convertBytesSI(lst[3].replace(com, empty).toULongLong())+"/s"); label_JobFiles->setText(lst[1]); label_JobBytes->setText(convertBytesSI(lst[2].replace(com, empty).toULongLong())); } continue; } else if (filesline.indexIn(line) >= 0) { if (parseit) { lst = filesline.capturedTexts(); // Will also catch Backed up label_FilesExamined->setText(lst[1]); } continue; // TODO: Need to be fixed // } else if (restoreline2.indexIn(line) >= 0) { // if (parseit) { // lst = filesline.capturedTexts(); // label_FilesExamined->setText(lst[1]); // Can also handle Expected and Completed // } // continue; } else if (jobline.indexIn(line) >= 0) { lst = jobline.capturedTexts(); lst.removeFirst(); } else if (itemline.indexIn(line) >= 0) { lst = itemline.capturedTexts(); lst.removeFirst(); } else { if (mainWin->m_miscDebug) Pmsg1(0, "bad line=%s\n", line.toUtf8().data()); continue; } if (lst.count() < 2) { if (mainWin->m_miscDebug) Pmsg2(0, "bad line=%s count=%d\n", line.toUtf8().data(), lst.count()); } if (lst[0] == "JobId") { if (lst[1] == m_jobId) { parseit = true; } else { parseit = false; } } if (!parseit) { continue; } // } else if (lst[0] == "Job") { // grpRun->setTitle(lst[1]); // // } else if (lst[0] == "VSS") { // } else if (lst[0] == "Level") { // Info->setText(lst[1]); // // } else if (lst[0] == "JobType" || lst[0] == "Type") { // // } else if (lst[0] == "JobStarted" || lst[0] == "StartTime") { // Started->setText(lst[1]); #ifdef xxx if (lst[0] == "Bwlimit") { int val = lst[1].toInt(); if (val > 0) { chk_Bwlimit->setChecked(true); spin_Bwlimit->setEnabled(true); spin_Bwlimit->setValue(lst[1].toInt()/1024); } else { 
chk_Bwlimit->setEnabled(false); spin_Bwlimit->setEnabled(false); spin_Bwlimit->setValue(0); } #endif if (lst[0] == "Errors") { label_JobErrors->setText(lst[1]); } else if (lst[0] == "Bytes/sec") { label_Speed->setText(convertBytesSI(lst[1].toULongLong())+"/s"); } else if (lst[0] == "Files" || lst[0] == "JobFiles") { label_JobFiles->setText(lst[1]); } else if (lst[0] == "Bytes" || lst[0] == "JobBytes") { label_JobBytes->setText(convertBytesSI(lst[1].toULongLong())); } else if (lst[0] == "Examined") { label_FilesExamined->setText(lst[1]); } else if (lst[0] == "Files Examined") { label_FilesExamined->setText(lst[1]); } else if (lst[0] == "Processing file") { label_CurrentFile->setText(lst[1]); } } } } } /* * Populate the text in the window */ void Job::populateForm() { QString stat, err; char buf[256]; QString query = "SELECT JobId, Job.Name, Level, Client.Name, Pool.Name, FileSet," "SchedTime, StartTime, EndTime, EndTime-StartTime AS Duration, " "JobBytes, JobFiles, JobErrors, JobStatus, PurgedFiles " "FROM Job JOIN Client USING (ClientId) " "LEFT JOIN Pool ON (Job.PoolId = Pool.PoolId) " "LEFT JOIN FileSet ON (Job.FileSetId = FileSet.FileSetId)" "WHERE JobId=" + m_jobId; QStringList results; if (m_console->sql_cmd(query, results)) { QString resultline, duration; QStringList fieldlist; foreach (resultline, results) { // should have only one result fieldlist = resultline.split("\t"); if (fieldlist.size() != 15) { Pmsg1(000, "Unexpected line %s", resultline.toUtf8().data()); continue; } QStringListIterator fld(fieldlist); label_JobId->setText(fld.next()); label_Name->setText(fld.next()); label_Level->setText(job_level_to_str(fld.next()[0].toLatin1())); m_client = fld.next(); label_Client->setText(m_client); label_Pool->setText(fld.next()); label_FileSet->setText(fld.next()); label_SchedTime->setText(fld.next()); label_StartTime->setText(fld.next()); label_EndTime->setText(fld.next()); duration = fld.next(); /* * Note: if we have a negative duration, it is because the EndTime * is zero (i.e. the Job is still running). 
We should use * duration = StartTime - current_time */ if (duration.left(1) == "-") { duration = "0.0"; } label_Duration->setText(duration); label_JobBytes->setText(convertBytesSI(fld.next().toULongLong())); label_JobFiles->setText(fld.next()); err = fld.next(); label_JobErrors->setText(err); stat = fld.next(); if (stat == "T" && err.toInt() > 0) { stat = "W"; } if (stat == "R") { pbDelete->setVisible(false); pbCancel->setVisible(true); grpRun->setVisible(true); if (!m_timer) { m_timer = new QTimer(this); connect(m_timer, SIGNAL(timeout()), this, SLOT(populateAll())); m_timer->start(30000); } updateRunInfo(); } else { pbDelete->setVisible(true); pbCancel->setVisible(false); grpRun->setVisible(false); if (m_timer) { m_timer->stop(); delete m_timer; m_timer = NULL; } } label_JobStatus->setPixmap(QPixmap(":/images/" + stat + ".png")); jobstatus_to_ascii_gui(stat[0].toLatin1(), buf, sizeof(buf)); stat = buf; label_JobStatus->setToolTip(stat); chkbox_PurgedFiles->setCheckState(fld.next().toInt()?Qt::Checked:Qt::Unchecked); } } } void Job::populateVolumes() { QString query = "SELECT DISTINCT VolumeName, InChanger, Slot " "FROM Job JOIN JobMedia USING (JobId) JOIN Media USING (MediaId) " "WHERE JobId=" + m_jobId + " ORDER BY VolumeName "; if (mainWin->m_sqlDebug) Pmsg1(0, "Query cmd : %s\n",query.toUtf8().data()); QStringList results; if (m_console->sql_cmd(query, results)) { QString resultline; QStringList fieldlist; list_Volume->clear(); foreach (resultline, results) { // should have only one result fieldlist = resultline.split("\t"); QStringListIterator fld(fieldlist); // QListWidgetItem(QIcon(":/images/inchanger" + fld.next() + ".png"), // fld.next(), list_Volume); list_Volume->addItem(fld.next()); } } } //QListWidgetItem ( const QIcon & icon, const QString & text, QListWidget * parent = 0, int type = Type ) bacula-15.0.3/src/qt-console/job/job.ui0000644000175000017500000005620714771010173017432 0ustar bsbuildbsbuild JobForm 0 0 975 631 Form Cancel :/images/A.png:/images/A.png true Delete :/images/purge.png:/images/purge.png true false View errors for this Job Errors :/images/zoom.png:/images/zoom.png true false Media :/images/zoom.png:/images/zoom.png true false History :/images/zoom.png:/images/zoom.png true 0 0 Run again :/images/R.png:/images/R.png true false Read doc true false FileSet :/images/zoom.png:/images/zoom.png true false Stats :/images/zoom.png:/images/zoom.png true Refresh :/images/view-refresh.png:/images/view-refresh.png true Qt::Horizontal 40 20 0 0 230 180 230 180 Basic Information JobId: 2 Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse Job Name: Test Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse Level: VirtualFull Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse Client: client-fd Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse FileSet: TheFileSet Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse Pool: ThePool Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse 0 0 160 180 160 180 Status Status: :/images/T.png false Errors: 0 Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse Files: 1,924 Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse Bytes: 109 MB Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse Purged: false true 0 0 260 180 260 180 Times Sched Time: 2009-07-31 00:10:00 Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse Start Time: 2009-07-31 00:10:00 Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse End Time: 2009-07-31 00:20:00 Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse Duration: 00:10:00 
Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse 0 0 170 181 170 181 170 180 Volume Used 0 0 149 140 149 140 149 140 Vol0001 :/images/inflag1.png:/images/inflag1.png Qt::Horizontal 40 20 true 500 0 Running Information Speed: Files Examined: Current File: 250 0 /var/www/bacula/spool 100,000 100 MB/s false kB/s 100 200000 100 200000 Bandwidth Limit: Qt::Horizontal 40 20 <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal;"> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:'DejaVu Sans'; font-size:10pt;"></p></body></html> chk_Bwlimit clicked(bool) spin_Bwlimit setEnabled(bool) 51 324 302 328 bacula-15.0.3/src/qt-console/PAGES0000644000175000017500000000401314771010173016315 0ustar bsbuildbsbuildEach page should have the following in the constructor /* this is the text identifying the page in the tree widget */ m_name = "Restore"; /* this sets values for the class based on the extern mainWin-> it sets m_parent (the stacked widget), m_console (from current console) creates the page selector tree widget and sets it's name and inserts itself in the double direction hashes */ pgInitialize(parentwidgetitem); or pgInitialize(); which will have the director as parent in page selector /* is this window always present or can the user remove it */ m_closeable = true; /* put the page in the stack widget */ dockPage(); /* bring the page to the top of the widget by "selecting" the tree widget item in the page selector */ setCurrent(); Watch out for console not being connected yet gotchya's. The console is not yet connected when the first base set of widgets are created on the stack. Use a function like populate() to place objects in the window. Call populate from virtual function void ClassName::currentStackItem(). Embed inside condition similar to if(!m_populated) {} so as to populate only the first time the page comes to the front. The major features that the pages class provides: dockPage, undockPage and togglePageDocking for docking. closeEvent to redock an undocked page when it is closed. virtual functions PgSeltreeWidgetClicked() and currentStackItem() to give pages the opportunity to have behaviours on events closeStackPage() to delete both the page object and it's page selector widget. consoleCommand(QString &command) to execute a console command setTitle() for setting the title of a window that will display when undocked. setCurrent() for making the page and tree widget item of an object selected and in the front of the stack. Closing Use the function closeStackPage() to close from within the class. Otherwise, if there are pointers which need to be deleted, use a destructor. The m_closeable page member will determine whether the option to close will appear in context menu of page selector. bacula-15.0.3/src/qt-console/bat_conf.cpp0000644000175000017500000002510614771010173020020 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. 
You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Main configuration file parser for Bacula User Agent * some parts may be split into separate files such as * the schedule configuration (sch_config.c). * * Note, the configuration file parser consists of three parts * * 1. The generic lexical scanner in lib/lex.c and lib/lex.h * * 2. The generic config scanner in lib/parse_config.c and * lib/parse_config.h. * These files contain the parser code, some utility * routines, and the common store routines (name, int, * string). * * 3. The daemon specific file, which contains the Resource * definitions as well as any specific store routines * for the resource records. * * Kern Sibbald, January MM, September MM * */ #include "bacula.h" #include "bat_conf.h" /* Define the first and last resource ID record * types. Note, these should be unique for each * daemon though not a requirement. */ int32_t r_first = R_FIRST; int32_t r_last = R_LAST; RES_HEAD **res_head; /* Forward referenced subroutines */ /* We build the current resource here as we are * scanning the resource configuration definition, * then move it to allocated memory when the resource * scan is complete. */ #if defined(_MSC_VER) extern "C" URES res_all; /* declare as C to avoid name mangling by visual c */ #endif URES res_all; int32_t res_all_size = sizeof(res_all); /* Definition of records permitted within each * resource with the routine to process the record * information. 
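 *
 * As an illustration only, a minimal Director resource in bat.conf that
 * the dir_items table below would accept could look like this (the name,
 * address and password are example values):
 *
 *   Director {
 *     Name = bacula-dir
 *     DIRport = 9101
 *     Address = localhost
 *     Password = "console-password"
 *   }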
*/ static RES_ITEM dir_items[] = { {"name", store_name, ITEM(dir_res.hdr.name), 0, ITEM_REQUIRED, 0}, {"description", store_str, ITEM(dir_res.hdr.desc), 0, 0, 0}, {"dirport", store_pint32, ITEM(dir_res.DIRport), 0, ITEM_DEFAULT, 9101}, {"address", store_str, ITEM(dir_res.address), 0, ITEM_REQUIRED, 0}, {"password", store_password, ITEM(dir_res.password), 0, 0, 0}, {"tlsauthenticate",store_bool, ITEM(dir_res.tls_authenticate), 0, 0, 0}, {"tlsenable", store_bool, ITEM(dir_res.tls_enable), 0, 0, 0}, {"tlspskenable", store_bool, ITEM(dir_res.tls_psk_enable), 0, ITEM_DEFAULT, true}, {"tlsrequire", store_bool, ITEM(dir_res.tls_require), 0, 0, 0}, {"tlscacertificatefile", store_dir, ITEM(dir_res.tls_ca_certfile), 0, 0, 0}, {"tlscacertificatedir", store_dir, ITEM(dir_res.tls_ca_certdir), 0, 0, 0}, {"tlscertificate", store_dir, ITEM(dir_res.tls_certfile), 0, 0, 0}, {"tlskey", store_dir, ITEM(dir_res.tls_keyfile), 0, 0, 0}, {"heartbeatinterval", store_time, ITEM(dir_res.heartbeat_interval), 0, ITEM_DEFAULT, 5 * 60}, {NULL, NULL, {0}, 0, 0, 0} }; static RES_ITEM con_items[] = { {"name", store_name, ITEM(con_res.hdr.name), 0, ITEM_REQUIRED, 0}, {"description", store_str, ITEM(con_res.hdr.desc), 0, 0, 0}, {"password", store_password, ITEM(con_res.password), 0, ITEM_REQUIRED, 0}, {"tlsauthenticate",store_bool, ITEM(con_res.tls_authenticate), 0, 0, 0}, {"tlsenable", store_bool, ITEM(con_res.tls_enable), 0, 0, 0}, {"tlspskenable", store_bool, ITEM(con_res.tls_psk_enable), 0, ITEM_DEFAULT, true}, {"tlsrequire", store_bool, ITEM(con_res.tls_require), 0, 0, 0}, {"tlscacertificatefile", store_dir, ITEM(con_res.tls_ca_certfile), 0, 0, 0}, {"tlscacertificatedir", store_dir, ITEM(con_res.tls_ca_certdir), 0, 0, 0}, {"tlscertificate", store_dir, ITEM(con_res.tls_certfile), 0, 0, 0}, {"tlskey", store_dir, ITEM(con_res.tls_keyfile), 0, 0, 0}, {"heartbeatinterval", store_time, ITEM(con_res.heartbeat_interval), 0, ITEM_DEFAULT, 5 * 60}, {"director", store_str, ITEM(con_res.director), 0, 0, 0}, {"CommCompression", store_bool, ITEM(con_res.comm_compression), 0, ITEM_DEFAULT, true}, {NULL, NULL, {0}, 0, 0, 0} }; static RES_ITEM con_font_items[] = { {"name", store_name, ITEM(con_font.hdr.name), 0, ITEM_REQUIRED, 0}, {"description", store_str, ITEM(con_font.hdr.desc), 0, 0, 0}, {"font", store_str, ITEM(con_font.fontface), 0, 0, 0}, {NULL, NULL, {0}, 0, 0, 0} }; /* * This is the master resource definition. * It must have one item for each of the resources. */ RES_TABLE resources[] = { {"director", dir_items, R_DIRECTOR}, {"console", con_items, R_CONSOLE}, {"consolefont", con_font_items, R_CONSOLE_FONT}, {NULL, NULL, 0} }; /* Dump contents of resource */ void dump_resource(int type, RES *ares, void sendit(void *sock, const char *fmt, ...), void *sock) { RES *next; URES *res = (URES *)ares; bool recurse = true; if (res == NULL) { printf(_("No record for %d %s\n"), type, res_to_str(type)); return; } if (type < 0) { /* no recursion */ type = - type; recurse = false; } switch (type) { case R_DIRECTOR: printf(_("Director: name=%s address=%s DIRport=%d\n"), ares->name, res->dir_res.address, res->dir_res.DIRport); break; case R_CONSOLE: printf(_("Console: name=%s\n"), ares->name); break; case R_CONSOLE_FONT: printf(_("ConsoleFont: name=%s font face=%s\n"), ares->name, NPRT(res->con_font.fontface)); break; default: printf(_("Unknown resource type %d\n"), type); } if (recurse) { next = GetNextRes(0, (RES *)res); if (next) { dump_resource(type, next, sendit, sock); } } } /* * Free memory of resource. 
* NB, we don't need to worry about freeing any references * to other resources as they will be freed when that * resource chain is traversed. Mainly we worry about freeing * allocated strings (names). */ void free_resource(RES *sres, int type) { URES *res = (URES *)sres; if (res == NULL) return; /* common stuff -- free the resource name */ if (res->dir_res.hdr.name) { free(res->dir_res.hdr.name); } if (res->dir_res.hdr.desc) { free(res->dir_res.hdr.desc); } switch (type) { case R_DIRECTOR: if (res->dir_res.address) { free(res->dir_res.address); } if (res->dir_res.tls_ctx) { free_tls_context(res->dir_res.tls_ctx); } if (res->dir_res.psk_ctx) { free_psk_context(res->dir_res.psk_ctx); } if (res->dir_res.tls_ca_certfile) { free(res->dir_res.tls_ca_certfile); } if (res->dir_res.tls_ca_certdir) { free(res->dir_res.tls_ca_certdir); } if (res->dir_res.tls_certfile) { free(res->dir_res.tls_certfile); } if (res->dir_res.tls_keyfile) { free(res->dir_res.tls_keyfile); } break; case R_CONSOLE: if (res->con_res.password) { free(res->con_res.password); } if (res->con_res.tls_ctx) { free_tls_context(res->con_res.tls_ctx); } if (res->con_res.psk_ctx) { free_psk_context(res->con_res.psk_ctx); } if (res->con_res.tls_ca_certfile) { free(res->con_res.tls_ca_certfile); } if (res->con_res.tls_ca_certdir) { free(res->con_res.tls_ca_certdir); } if (res->con_res.tls_certfile) { free(res->con_res.tls_certfile); } if (res->con_res.tls_keyfile) { free(res->con_res.tls_keyfile); } if (res->con_res.director) { free(res->con_res.director); } break; case R_CONSOLE_FONT: if (res->con_font.fontface) { free(res->con_font.fontface); } break; default: printf(_("Unknown resource type %d\n"), type); } /* Common stuff again -- free the resource, recurse to next one */ if (res) { free(res); } } /* Save the new resource by chaining it into the head list for * the resource. If this is pass 2, we update any resource * pointers (currently only in the Job resource). */ bool save_resource(CONFIG *config, int type, RES_ITEM *items, int pass) { int rindex = type - r_first; int i, size = 0; int error = 0; /* * Ensure that all required items are present */ for (i=0; items[i].name; i++) { if (items[i].flags & ITEM_REQUIRED) { if (!bit_is_set(i, res_all.dir_res.hdr.item_present)) { Mmsg(config->m_errmsg, _("\"%s\" directive is required in \"%s\" resource, but not found.\n"), items[i].name, resources[rindex].name); return false; } } } /* During pass 2, we looked up pointers to all the resources * referrenced in the current resource, , now we * must copy their address from the static record to the allocated * record. */ if (pass == 2) { switch (type) { /* Resources not containing a resource */ case R_DIRECTOR: break; case R_CONSOLE: case R_CONSOLE_FONT: break; default: Emsg1(M_ERROR, 0, _("Unknown resource type %d\n"), type); error = 1; break; } /* Note, the resoure name was already saved during pass 1, * so here, we can just release it. 
*/ if (res_all.dir_res.hdr.name) { free(res_all.dir_res.hdr.name); res_all.dir_res.hdr.name = NULL; } if (res_all.dir_res.hdr.desc) { free(res_all.dir_res.hdr.desc); res_all.dir_res.hdr.desc = NULL; } return true; } /* The following code is only executed during pass 1 */ switch (type) { case R_DIRECTOR: size = sizeof(DIRRES); break; case R_CONSOLE_FONT: size = sizeof(CONFONTRES); break; case R_CONSOLE: size = sizeof(CONRES); break; default: printf(_("Unknown resource type %d\n"), type); error = 1; break; } /* Common */ if (!error) { if (!config->insert_res(rindex, size)) { return false; } } return true; } bool parse_bat_config(CONFIG *config, const char *configfile, int exit_code) { config->init(configfile, NULL, exit_code, (void *)&res_all, res_all_size, r_first, r_last, resources, &res_head); return config->parse_config(); } bacula-15.0.3/src/qt-console/fileset/0000755000175000017500000000000014771010173017170 5ustar bsbuildbsbuildbacula-15.0.3/src/qt-console/fileset/fileset.cpp0000644000175000017500000002071514771010173021334 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * FileSet Class * * Dirk Bartley, March 2007 * */ #include "bat.h" #include #include #include "fileset/fileset.h" #include "util/fmtwidgetitem.h" FileSet::FileSet() : Pages() { setupUi(this); m_name = tr("FileSets"); pgInitialize(); QTreeWidgetItem* thisitem = mainWin->getFromHash(this); thisitem->setIcon(0,QIcon(QString::fromUtf8(":images/system-file-manager.png"))); /* tableWidget, FileSet Tree Tree Widget inherited from ui_fileset.h */ m_populated = false; m_checkcurwidget = true; m_closeable = false; readSettings(); /* add context sensitive menu items specific to this classto the page * selector tree. m_contextActions is QList of QActions */ m_contextActions.append(actionRefreshFileSet); } FileSet::~FileSet() { writeSettings(); } /* * The main meat of the class!! The function that querries the director and * creates the widgets with appropriate values. 
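 *
 * The catalog query assembled below has roughly this shape (the two
 * fileset names are examples; the real list comes from
 * m_console->fileset_list):
 *
 *   SELECT FileSet AS Name, FileSetId AS Id, CreateTime
 *   FROM FileSet
 *   WHERE FileSetId IN (SELECT MAX(FileSetId) FROM FileSet
 *                       WHERE FileSet IN ('Full Set','Catalog')
 *                       GROUP BY FileSet)
 *   ORDER BY FileSet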
*/ void FileSet::populateTable() { Freeze frz(*tableWidget); /* disable updating*/ m_checkcurwidget = false; tableWidget->clear(); m_checkcurwidget = true; QStringList headerlist = (QStringList() << tr("FileSet Name") << tr("FileSet Id") << tr("Create Time")); tableWidget->setColumnCount(headerlist.count()); tableWidget->setHorizontalHeaderLabels(headerlist); tableWidget->horizontalHeader()->setHighlightSections(false); tableWidget->verticalHeader()->hide(); tableWidget->setSelectionBehavior(QAbstractItemView::SelectRows); tableWidget->setSelectionMode(QAbstractItemView::SingleSelection); tableWidget->setSortingEnabled(false); /* rows move on insert if sorting enabled */ QString fileset_comsep(""); bool first = true; QStringList notFoundList = m_console->fileset_list; foreach(QString filesetName, m_console->fileset_list) { if (first) { fileset_comsep += "'" + filesetName + "'"; first = false; } else fileset_comsep += ",'" + filesetName + "'"; } int row = 0; tableWidget->setRowCount(m_console->fileset_list.count()); if (fileset_comsep != "") { /* Set up query QString and header QStringList */ QString query(""); query += "SELECT FileSet AS Name, FileSetId AS Id, CreateTime" " FROM FileSet" " WHERE FileSetId IN (SELECT MAX(FileSetId) FROM FileSet WHERE"; query += " FileSet IN (" + fileset_comsep + ")"; query += " GROUP BY FileSet) ORDER BY FileSet"; QStringList results; if (mainWin->m_sqlDebug) { Pmsg1(000, "FileSet query cmd : %s\n",query.toUtf8().data()); } if (m_console->sql_cmd(query, results)) { QStringList fieldlist; /* Iterate through the record returned from the query */ foreach (QString resultline, results) { fieldlist = resultline.split("\t"); if (fieldlist.size() != 3) { Pmsg1(000, "Unexpected line %s", resultline.toUtf8().data()); continue; } /* remove this fileSet from notFoundList */ int indexOf = notFoundList.indexOf(fieldlist[0]); if (indexOf != -1) { notFoundList.removeAt(indexOf); } TableItemFormatter item(*tableWidget, row); /* Iterate through fields in the record */ QStringListIterator fld(fieldlist); int col = 0; /* name */ item.setTextFld(col++, fld.next()); /* id */ item.setNumericFld(col++, fld.next()); /* creation time */ item.setTextFld(col++, fld.next()); row++; } } } foreach(QString filesetName, notFoundList) { TableItemFormatter item(*tableWidget, row); item.setTextFld(0, filesetName); row++; } /* set default sorting */ tableWidget->sortByColumn(headerlist.indexOf(tr("FileSet Name")), Qt::AscendingOrder); tableWidget->setSortingEnabled(true); /* Resize rows and columns */ tableWidget->resizeColumnsToContents(); tableWidget->resizeRowsToContents(); /* make read only */ int rcnt = tableWidget->rowCount(); int ccnt = tableWidget->columnCount(); for(int r=0; r < rcnt; r++) { for(int c=0; c < ccnt; c++) { QTableWidgetItem* item = tableWidget->item(r, c); if (item) { item->setFlags(Qt::ItemFlags(item->flags() & (~Qt::ItemIsEditable))); } } } m_populated = true; } /* * When the treeWidgetItem in the page selector tree is singleclicked, Make sure * The tree has been populated. 
*/ void FileSet::PgSeltreeWidgetClicked() { if (!m_populated) { populateTable(); createContextMenu(); } if (!isOnceDocked()) { dockPage(); } } /* * Added to set the context menu policy based on currently active treeWidgetItem * signaled by currentItemChanged */ void FileSet::tableItemChanged(QTableWidgetItem *currentwidgetitem, QTableWidgetItem *previouswidgetitem) { /* m_checkcurwidget checks to see if this is during a refresh, which will segfault */ if (m_checkcurwidget && currentwidgetitem) { int currentRow = currentwidgetitem->row(); QTableWidgetItem *currentrowzeroitem = tableWidget->item(currentRow, 0); m_currentlyselected = currentrowzeroitem->text(); /* The Previous item */ if (previouswidgetitem) { /* avoid a segfault if first time */ tableWidget->removeAction(actionStatusFileSetInConsole); tableWidget->removeAction(actionShowJobs); } if (m_currentlyselected.length() != 0) { /* set a hold variable to the fileset name in case the context sensitive * menu is used */ tableWidget->addAction(actionStatusFileSetInConsole); tableWidget->addAction(actionShowJobs); } } } /* * Setup a context menu * Made separate from populate so that it would not create context menu over and * over as the tree is repopulated. */ void FileSet::createContextMenu() { tableWidget->setContextMenuPolicy(Qt::ActionsContextMenu); tableWidget->addAction(actionRefreshFileSet); connect(tableWidget, SIGNAL( currentItemChanged(QTableWidgetItem *, QTableWidgetItem *)), this, SLOT(tableItemChanged(QTableWidgetItem *, QTableWidgetItem *))); /* connect to the action specific to this pages class */ connect(actionRefreshFileSet, SIGNAL(triggered()), this, SLOT(populateTable())); connect(actionStatusFileSetInConsole, SIGNAL(triggered()), this, SLOT(consoleShowFileSet())); connect(actionShowJobs, SIGNAL(triggered()), this, SLOT(showJobs())); } /* * Function responding to actionListJobsofFileSet which calls mainwin function * to create a window of a list of jobs of this fileset. */ void FileSet::consoleShowFileSet() { QString cmd("show fileset=\""); cmd += m_currentlyselected + "\""; consoleCommand(cmd); } /* * Virtual function which is called when this page is visible on the stack */ void FileSet::currentStackItem() { if (!m_populated) { populateTable(); /* Create the context menu for the fileset table */ createContextMenu(); } } /* * Save user settings associated with this page */ void FileSet::writeSettings() { QSettings settings(m_console->m_dir->name(), "bat"); settings.beginGroup("FileSet"); settings.setValue("geometry", saveGeometry()); settings.endGroup(); } /* * Read and restore user settings associated with this page */ void FileSet::readSettings() { QSettings settings(m_console->m_dir->name(), "bat"); settings.beginGroup("FileSet"); restoreGeometry(settings.value("geometry").toByteArray()); settings.endGroup(); } /* * Create a JobList object pre-populating a fileset */ void FileSet::showJobs() { QTreeWidgetItem *parentItem = mainWin->getFromHash(this); mainWin->createPageJobList("", "", "", m_currentlyselected, parentItem); } bacula-15.0.3/src/qt-console/fileset/fileset.h0000644000175000017500000000260014771010173020772 0ustar bsbuildbsbuild#ifndef _FILESET_H_ #define _FILESET_H_ /* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. 
You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Dirk Bartley, March 2007 */ #if QT_VERSION >= 0x050000 #include #else #include #endif #include "ui_fileset.h" #include "console.h" #include "pages.h" class FileSet : public Pages, public Ui::FileSetForm { Q_OBJECT public: FileSet(); ~FileSet(); virtual void PgSeltreeWidgetClicked(); virtual void currentStackItem(); public slots: void tableItemChanged(QTableWidgetItem *, QTableWidgetItem *); private slots: void populateTable(); void consoleShowFileSet(); void showJobs(); private: void writeSettings(); void readSettings(); void createContextMenu(); QString m_currentlyselected; bool m_populated; bool m_checkcurwidget; }; #endif /* _FILESET_H_ */ bacula-15.0.3/src/qt-console/fileset/fileset.ui0000644000175000017500000000261514771010173021166 0ustar bsbuildbsbuild FileSetForm 0 0 341 277 FileSet Tree :/images/view-refresh.png Refresh FileSet List Requery the director for the list of storage objects. :/images/status-console.png Show FileSet In Console :/images/emblem-system.png ShowJobs bacula-15.0.3/src/qt-console/main.ui0000644000175000017500000004003414771010173017021 0ustar bsbuildbsbuild Kern Sibbald MainForm 0 0 785 660 bat - Bacula Admin Tool :/images/bat.png:/images/bat.png Bacula Administration Tool It's a Dock widget to allow page selection 9 6 -1 0 0 785 25 Settings &Help &File Current Status Current Status 51 39 Tool Bar Qt::Horizontal TopToolBarArea false 0 1 112 179 524287 524287 Qt::StrongFocus false QDockWidget::DockWidgetFloatable|QDockWidget::DockWidgetMovable Qt::AllDockWidgetAreas 1 1 1 0 0 16777215 16777215 0 0 Qt::StrongFocus Selects panel window Use items in this tree to select what window is in panel true 1 Command: Click here to enter command &Quit Ctrl+Q :/images/bat.png:/images/bat.png &About bat :/images/copy.png:/images/copy.png &Copy :/images/cut.png:/images/cut.png Cu&t :/images/new.png:/images/new.png new :/images/open.png:/images/open.png open :/images/paste.png:/images/paste.png &Paste :/images/print.png:/images/print.png &Print Print :/images/save.png:/images/save.png &Save Save (not implemented) :/images/disconnected.png:/images/disconnected.png Connect Connect/disconnect :/images/label.png:/images/label.png Label Label a Volume Label a Volume :/images/restore.png:/images/restore.png Restore Restore Files false :/images/run.png:/images/run.png Run Job Run Job Run Job Run a Job false :/images/estimate-job.png:/images/estimate-job.png Estimate Job Estimate Job Estimate Job Estimate a Job :/images/status-console.png:/images/status-console.png Status Dir Status Dir Query status of director in console Query status of director in console true &Select Font ... :/images/up.png:/images/up.png Undock Window Undock Current Window :/images/up.png:/images/up.png ToggleDock Toggle Dock Status Toggle Dock Status :/images/unmark.png:/images/unmark.png Close Page Close The Current Page :/images/mail-message-new.png:/images/mail-message-new.png Messages Display any messages queued at the director false &Preferences ... 
Set Preferences Set Preferences :/images/help-browser.png:/images/help-browser.png bat &Help false :/images/browse.png:/images/browse.png Browse Browse Cataloged Files Browse Cataloged Files :/images/applications-graphics.png:/images/applications-graphics.png JobPlot Plot Job Files and Bytes Plot Job Files and Bytes :/images/status.png:/images/status.png Status Dir Page Director Status Page :/images/mark.png:/images/mark.png Repop Lists :/images/mark.png:/images/mark.png Reload and Repop :/images/go-jump.png:/images/go-jump.png back Previous Page Previous Page bacula-15.0.3/src/qt-console/console/0000755000175000017500000000000014771010173017177 5ustar bsbuildbsbuildbacula-15.0.3/src/qt-console/console/console.cpp0000644000175000017500000005661414771010173021361 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Console Class * * Written by Kern Sibbald, January MMVII * */ #include "bat.h" #include "console.h" #include "restore.h" #include "select.h" #include "run/run.h" Console::Console(QTabWidget *parent) : Pages() { QFont font; m_name = tr("Console"); m_messages_pending = false; m_parent = parent; m_closeable = false; m_console = this; m_warningPrevent = false; m_dircommCounter = 0; /* * Create a connection to the Director and put it in a hash table */ m_dircommHash.insert(m_dircommCounter, new DirComm(this, m_dircommCounter)); setupUi(this); m_textEdit = textEdit; /* our console screen */ m_cursor = new QTextCursor(m_textEdit->document()); mainWin->actionConnect->setIcon(QIcon(":images/disconnected.png")); m_timer = NULL; m_contextActions.append(actionStatusDir); m_contextActions.append(actionConsoleHelp); m_contextActions.append(actionRequestMessages); m_contextActions.append(actionConsoleReload); connect(actionStatusDir, SIGNAL(triggered()), this, SLOT(status_dir())); connect(actionConsoleHelp, SIGNAL(triggered()), this, SLOT(consoleHelp())); connect(actionConsoleReload, SIGNAL(triggered()), this, SLOT(consoleReload())); connect(actionRequestMessages, SIGNAL(triggered()), this, SLOT(messages())); } Console::~Console() { } void Console::startTimer() { m_timer = new QTimer(this); QWidget::connect(m_timer, SIGNAL(timeout()), this, SLOT(poll_messages())); m_timer->start(mainWin->m_checkMessagesInterval*30000); } void Console::stopTimer() { if (m_timer) { QWidget::disconnect(m_timer, SIGNAL(timeout()), this, SLOT(poll_messages())); m_timer->stop(); delete m_timer; m_timer = NULL; } } /* slot connected to the timer * requires preferences of check messages and operates at interval */ void Console::poll_messages() { int conn; /* Do not poll if notifier off */ if (!mainWin->m_notify) { return; } /* * Note if we call getDirComm here, we continuously consume * file descriptors. 
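 * For that reason the poll only proceeds when findDirComm() returns an
 * already-open, idle connection; otherwise it simply waits for the next
 * timer tick.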
*/ if (!findDirComm(conn)) { /* find a free DirComm */ return; /* try later */ } DirComm *dircomm = m_dircommHash.value(conn); if (mainWin->m_checkMessages && dircomm->m_at_main_prompt && hasFocus() && !mainWin->getWaitState()){ dircomm->write(".messages"); displayToPrompt(conn); messagesPending(false); } } /* * Connect to Director. This does not connect to the director, dircomm does. * This creates the first and possibly 2nd dircomm instance */ void Console::connect_dir() { DirComm *dircomm = m_dircommHash.value(0); if (!m_console->m_dir) { mainWin->set_status( tr("No Director found.")); return; } m_textEdit = textEdit; /* our console screen */ if (dircomm->connect_dir()) { if (mainWin->m_connDebug) { Pmsg1(000, "DirComm 0 Seems to have Connected %s\n", m_dir->name()); } beginNewCommand(0); } mainWin->set_status(_("Connected")); startTimer(); /* start message timer */ } /* * A function created to separate out the population of the lists * from the Console::connect_dir function */ void Console::populateLists(bool /*forcenew*/) { int conn; if (!getDirComm(conn)) { if (mainWin->m_connDebug) Pmsg0(000, "call newDirComm\n"); if (!newDirComm(conn)) { Emsg1(M_INFO, 0, "Failed to connect to %s for populateLists.\n", m_dir->name()); return; } } populateLists(conn); notify(conn, true); } void Console::populateLists(int conn) { job_list.clear(); restore_list.clear(); client_list.clear(); fileset_list.clear(); messages_list.clear(); pool_list.clear(); storage_list.clear(); type_list.clear(); level_list.clear(); volstatus_list.clear(); mediatype_list.clear(); dir_cmd(conn, ".jobs", job_list); dir_cmd(conn, ".jobs type=R", restore_list); dir_cmd(conn, ".clients", client_list); dir_cmd(conn, ".filesets", fileset_list); dir_cmd(conn, ".msgs", messages_list); dir_cmd(conn, ".pools", pool_list); dir_cmd(conn, ".storage", storage_list); dir_cmd(conn, ".types", type_list); dir_cmd(conn, ".levels", level_list); dir_cmd(conn, ".volstatus", volstatus_list); dir_cmd(conn, ".mediatypes", mediatype_list); dir_cmd(conn, ".locations", location_list); if (mainWin->m_connDebug) { QString dbgmsg = QString("jobs=%1 clients=%2 filesets=%3 msgs=%4 pools=%5 storage=%6 types=%7 levels=%8 conn=%9 %10\n") .arg(job_list.count()).arg(client_list.count()).arg(fileset_list.count()).arg(messages_list.count()) .arg(pool_list.count()).arg(storage_list.count()).arg(type_list.count()).arg(level_list.count()) .arg(conn).arg(m_dir->name()); Pmsg1(000, "%s", dbgmsg.toUtf8().data()); } job_list.sort(); client_list.sort(); fileset_list.sort(); messages_list.sort(); pool_list.sort(); storage_list.sort(); type_list.sort(); level_list.sort(); } /* * Overload function for dir_cmd with a QString * Ease of use */ bool Console::dir_cmd(QString &cmd, QStringList &results) { return dir_cmd(cmd.toUtf8().data(), results); } /* * Overload function for dir_cmd, this is if connection is not worried about */ bool Console::dir_cmd(const char *cmd, QStringList &results) { int conn; if (getDirComm(conn)) { dir_cmd(conn, cmd, results); return true; } else { Pmsg1(000, "dir_cmd failed to connect to %s\n", m_dir->name()); return false; } } /* * Send a command to the Director, and return the * results in a QStringList. 
*/ bool Console::dir_cmd(int conn, const char *cmd, QStringList &results) { mainWin->waitEnter(); DirComm *dircomm = m_dircommHash.value(conn); int stat; bool prev_notify = is_notify_enabled(conn); if (mainWin->m_connDebug) { QString dbgmsg = QString("dir_cmd conn %1 %2 %3\n").arg(conn).arg(m_dir->name()).arg(cmd); Pmsg1(000, "%s", dbgmsg.toUtf8().data()); } if (prev_notify) { notify(conn, false); } dircomm->write(cmd); while ((stat = dircomm->read()) > 0 && dircomm->is_in_command()) { if (mainWin->m_displayAll) display_text(dircomm->msg()); strip_trailing_junk(dircomm->msg()); results << dircomm->msg(); } if (stat > 0 && mainWin->m_displayAll) display_text(dircomm->msg()); if (prev_notify) { notify(conn, true); /* turn it back on */ } discardToPrompt(conn); mainWin->waitExit(); return true; /* ***FIXME*** return any command error */ } /* * OverLoads for sql_cmd */ bool Console::sql_cmd(int &conn, QString &query, QStringList &results) { return sql_cmd(conn, query.toUtf8().data(), results, false); } bool Console::sql_cmd(QString &query, QStringList &results) { int conn; if (!getDirComm(conn)) { return false; } return sql_cmd(conn, query.toUtf8().data(), results, true); } bool Console::sql_cmd(const char *query, QStringList &results) { int conn; if (!getDirComm(conn)) { return false; } return sql_cmd(conn, query, results, true); } /* * Send an sql query to the Director, and return the * results in a QStringList. */ bool Console::sql_cmd(int &conn, const char *query, QStringList &results, bool donotify) { DirComm *dircomm = m_dircommHash.value(conn); int stat; POOL_MEM cmd(PM_MESSAGE); bool prev_notify = is_notify_enabled(conn); if (!is_connectedGui()) { return false; } if (mainWin->m_connDebug) Pmsg2(000, "sql_cmd conn %i %s\n", conn, query); if (donotify) { dircomm->notify(false); } mainWin->waitEnter(); pm_strcpy(cmd, ".sql query=\""); pm_strcat(cmd, query); pm_strcat(cmd, "\""); dircomm->write(cmd.c_str()); while ((stat = dircomm->read()) > 0) { bool first = true; if (mainWin->m_displayAll) { display_text(dircomm->msg()); display_text("\n"); } strip_trailing_junk(dircomm->msg()); bool doappend = true; if (first) { QString dum = dircomm->msg(); if ((dum.left(6) == "*None*")) doappend = false; } if (doappend) { results << dircomm->msg(); } first = false; } if (donotify && prev_notify) { dircomm->notify(true); } discardToPrompt(conn); mainWin->waitExit(); if (donotify && prev_notify) { dircomm->notify(true); } return !mainWin->isClosing(); /* return false if closing */ } /* * Overloads for * Sending a command to the Director */ int Console::write_dir(const char *msg) { int conn; if (getDirComm(conn)) { write_dir(conn, msg); } return conn; } int Console::write_dir(const char *msg, bool dowait) { int conn; if (getDirComm(conn)) { write_dir(conn, msg, dowait); } return conn; } void Console::write_dir(int conn, const char *msg) { write_dir(conn, msg, true); } /* * Send a command to the Director */ void Console::write_dir(int conn, const char *msg, bool dowait) { DirComm *dircomm = m_dircommHash.value(conn); if (dircomm->m_sock) { mainWin->set_status(_("Processing command ...")); if (dowait) mainWin->waitEnter(); dircomm->write(msg); if (dowait) mainWin->waitExit(); } else { mainWin->set_status( tr(" Director not connected. 
Click on connect button.")); mainWin->actionConnect->setIcon(QIcon(":images/disconnected.png")); QBrush redBrush(Qt::red); QTreeWidgetItem *item = mainWin->getFromHash(this); item->setForeground(0, redBrush); dircomm->m_at_prompt = false; dircomm->m_at_main_prompt = false; } } /* * get_job_defaults overload */ bool Console::get_job_defaults(struct job_defaults &job_defs) { int conn; getDirComm(conn); return get_job_defaults(conn, job_defs, true); } bool Console::get_job_defaults(int &conn, struct job_defaults &job_defs) { return get_job_defaults(conn, job_defs, true); } /* * Send a job name to the director, and read all the resulting * defaults. */ bool Console::get_job_defaults(int &conn, struct job_defaults &job_defs, bool donotify) { QString scmd; int stat; char *def; bool prev_notify = is_notify_enabled(conn); bool rtn = false; DirComm *dircomm = m_dircommHash.value(conn); if (donotify) { dircomm->notify(false); } beginNewCommand(conn); bool prevWaitState = mainWin->getWaitState(); if (!prevWaitState) { mainWin->waitEnter(); } if (mainWin->m_connDebug) { Pmsg2(000, "job_defaults conn %i %s\n", conn, m_dir->name()); } scmd = QString(".defaults job=\"%1\"").arg(job_defs.job_name); dircomm->write(scmd); while ((stat = dircomm->read()) > 0) { if (mainWin->m_displayAll) display_text(dircomm->msg()); def = strchr(dircomm->msg(), '='); if (!def) { continue; } /* Pointer to default value */ *def++ = 0; strip_trailing_junk(def); if (strcmp(dircomm->msg(), "job") == 0) { if (strcmp(def, job_defs.job_name.toUtf8().data()) != 0) { goto bail_out; } continue; } if (strcmp(dircomm->msg(), "pool") == 0) { job_defs.pool_name = def; continue; } if (strcmp(dircomm->msg(), "messages") == 0) { job_defs.messages_name = def; continue; } if (strcmp(dircomm->msg(), "client") == 0) { job_defs.client_name = def; continue; } if (strcmp(dircomm->msg(), "storage") == 0) { job_defs.store_name = def; continue; } if (strcmp(dircomm->msg(), "where") == 0) { job_defs.where = def; continue; } if (strcmp(dircomm->msg(), "level") == 0) { job_defs.level = def; continue; } if (strcmp(dircomm->msg(), "type") == 0) { job_defs.type = def; continue; } if (strcmp(dircomm->msg(), "fileset") == 0) { job_defs.fileset_name = def; continue; } if (strcmp(dircomm->msg(), "catalog") == 0) { job_defs.catalog_name = def; continue; } if (strcmp(dircomm->msg(), "enabled") == 0) { job_defs.enabled = *def == '1' ? 
true : false; continue; } } rtn = true; /* Fall through wanted */ bail_out: if (donotify && prev_notify) { notify(conn, true); } if (!prevWaitState) { mainWin->waitExit(); } return rtn; } /* * Save user settings associated with this console */ void Console::writeSettings() { QFont font = get_font(); QSettings settings(m_dir->name(), "bat"); settings.beginGroup("Console"); settings.setValue("consoleFont", font.family()); settings.setValue("consolePointSize", font.pointSize()); settings.setValue("consoleFixedPitch", font.fixedPitch()); settings.endGroup(); } /* * Read and restore user settings associated with this console */ void Console::readSettings() { QFont font = get_font(); QSettings settings(m_dir->name(), "bat"); settings.beginGroup("Console"); font.setFamily(settings.value("consoleFont", "Courier").value()); font.setPointSize(settings.value("consolePointSize", 10).toInt()); font.setFixedPitch(settings.value("consoleFixedPitch", true).toBool()); settings.endGroup(); m_textEdit->setFont(font); } /* * Set the console textEdit font */ void Console::set_font() { bool ok; QFont font = QFontDialog::getFont(&ok, QFont(m_textEdit->font()), this); if (ok) { m_textEdit->setFont(font); } } /* * Get the console text edit font */ const QFont Console::get_font() { return m_textEdit->font(); } /* * Slot for responding to status dir button on button bar */ void Console::status_dir() { QString cmd("status dir"); consoleCommand(cmd); } /* * Slot for responding to messages button on button bar * Here we want to bring the console to the front so use pages' consoleCommand */ void Console::messages() { QString cmd(".messages"); consoleCommand(cmd); messagesPending(false); } /* * Put text into the console window */ void Console::display_textf(const char *fmt, ...) { va_list arg_ptr; char buf[1000]; va_start(arg_ptr, fmt); bvsnprintf(buf, sizeof(buf), fmt, arg_ptr); va_end(arg_ptr); display_text(buf); } void Console::display_text(const QString buf) { if (mainWin->isClosing()) return; if (buf.size() != 0) { m_cursor->insertText(buf); update_cursor(); } } void Console::display_text(const char *buf) { if (mainWin->isClosing()) return; if (*buf != 0) { m_cursor->insertText(buf); update_cursor(); } } void Console::display_html(const QString buf) { if (mainWin->isClosing()) return; if (buf.size() != 0) { m_cursor->insertHtml(buf); update_cursor(); } } /* Position cursor to end of screen */ void Console::update_cursor() { m_textEdit->moveCursor(QTextCursor::End); m_textEdit->ensureCursorVisible(); } void Console::beginNewCommand(int conn) { DirComm *dircomm = m_dircommHash.value(conn); if (dircomm->m_at_main_prompt) { return; } for (int i=0; i < 3; i++) { dircomm->write("."); while (dircomm->read() > 0) { if (mainWin->m_commDebug) Pmsg2(000, "begin new command loop %i %s\n", i, m_dir->name()); if (mainWin->m_displayAll) display_text(dircomm->msg()); } if (dircomm->m_at_main_prompt) { break; } } //display_text("\n"); } void Console::displayToPrompt(int conn) { DirComm *dircomm = m_dircommHash.value(conn); int stat = 0; QString buf; if (mainWin->m_commDebug) Pmsg1(000, "DisplaytoPrompt %s\n", m_dir->name()); while (!dircomm->m_at_prompt) { if ((stat=dircomm->read()) > 0) { buf += dircomm->msg(); if (buf.size() >= 8196 || m_messages_pending) { display_text(buf); buf.clear(); messagesPending(false); } } } display_text(buf); if (mainWin->m_commDebug) Pmsg2(000, "endDisplaytoPrompt=%d %s\n", stat, m_dir->name()); } void Console::discardToPrompt(int conn) { DirComm *dircomm = m_dircommHash.value(conn); int stat = 0; if 
(mainWin->m_commDebug) Pmsg1(000, "discardToPrompt %s\n", m_dir->name()); if (mainWin->m_displayAll) { displayToPrompt(conn); } else { while (!dircomm->m_at_prompt) { stat = dircomm->read(); if (stat < 0) { break; } } } if (mainWin->m_commDebug) { Pmsg2(000, "endDiscardToPrompt conn=%i %s\n", conn, m_dir->name()); } } QString Console::returnFromPrompt(int conn) { DirComm *dircomm = m_dircommHash.value(conn); QString text(""); int stat = 0; text = ""; dircomm->read(); text += dircomm->msg(); if (mainWin->m_commDebug) Pmsg1(000, "returnFromPrompt %s\n", m_dir->name()); while (!dircomm->m_at_prompt) { if ((stat=dircomm->read()) > 0) { text += dircomm->msg(); } } if (mainWin->m_commDebug) Pmsg2(000, "endreturnFromPrompt=%d %s\n", stat, m_dir->name()); return text; } /* * When the notifier is enabled, read_dir() will automatically be * called by the Qt event loop when ever there is any output * from the Director, and read_dir() will then display it on * the console. * * When we are in a bat dialog, we want to control *all* output * from the Director, so we set notify to off. * m_console->notifiy(false); */ /* dual purpose function to turn notify off and return a connection */ int Console::notifyOff() { int conn = 0; if (getDirComm(conn)) { notify(conn, false); } return conn; } /* knowing a connection, turn notify off or on */ bool Console::notify(int conn, bool enable) { DirComm *dircomm = m_dircommHash.value(conn); if (dircomm) { return dircomm->notify(enable); } else { return false; } } /* knowing a connection, return notify state */ bool Console::is_notify_enabled(int conn) const { DirComm *dircomm = m_dircommHash.value(conn); if (dircomm) { return dircomm->is_notify_enabled(); } else { return false; } } void Console::setDirectorTreeItem(QTreeWidgetItem *item) { m_directorTreeItem = item; } void Console::setDirRes(DIRRES *dir) { m_dir = dir; } /* * To have the ability to get the name of the director resource. 
*/ void Console::getDirResName(QString &name_returned) { name_returned = m_dir->name(); } /* Slot for responding to page selectors status help command */ void Console::consoleHelp() { QString cmd("help"); consoleCommand(cmd); } /* Slot for responding to page selectors reload bacula-dir.conf */ void Console::consoleReload() { QString cmd("reload"); consoleCommand(cmd); } /* For suppressing .messages * This may be rendered not needed if the multiple connections feature gets working */ bool Console::hasFocus() { if (mainWin->tabWidget->currentIndex() == mainWin->tabWidget->indexOf(this)) return true; else return false; } /* For adding feature to have the gui's messages button change when * messages are pending */ bool Console::messagesPending(bool pend) { bool prev = m_messages_pending; m_messages_pending = pend; mainWin->setMessageIcon(); return prev; } /* terminate all existing connections */ void Console::terminate() { foreach(DirComm* dircomm, m_dircommHash) { dircomm->terminate(); } m_console->stopTimer(); } /* Maybe this should be checking the list, for the moment lets check 0 which should be connected */ bool Console::is_connectedGui() { if (is_connected(0)) { return true; } else { QString message = tr("Director is currently disconnected\nPlease reconnect!"); QMessageBox::warning(this, "Bat", message, QMessageBox::Ok ); return false; } } int Console::read(int conn) { DirComm *dircomm = m_dircommHash.value(conn); return dircomm->read(); } char *Console::msg(int conn) { DirComm *dircomm = m_dircommHash.value(conn); return dircomm->msg(); } int Console::write(int conn, const QString msg) { DirComm *dircomm = m_dircommHash.value(conn); mainWin->waitEnter(); int ret = dircomm->write(msg); mainWin->waitExit(); return ret; } int Console::write(int conn, const char *msg) { DirComm *dircomm = m_dircommHash.value(conn); mainWin->waitEnter(); int ret = dircomm->write(msg); mainWin->waitExit(); return ret; } /* This checks to see if any is connected */ bool Console::is_connected() { bool connected = false; foreach(DirComm* dircomm, m_dircommHash) { if (dircomm->is_connected()) return true; } return connected; } /* knowing the connection id, is it connected */ bool Console::is_connected(int conn) { DirComm *dircomm = m_dircommHash.value(conn); return dircomm->is_connected(); } /* * Need a connection. Check existing connections or create one */ bool Console::getDirComm(int &conn) { if (findDirComm(conn)) { return true; } if (mainWin->m_connDebug) Pmsg0(000, "call newDirComm\n"); return newDirComm(conn); } /* * Try to find a free (unused but established) connection * KES: Note, I think there is a problem here because for * some reason, the notifier is often turned off on file * descriptors that seem to me to be available. That means * that we do not use a free descriptor and thus we will create * a new connection that is maybe not necessary. Someone needs * to look into whether or not notify() is correctly turned on * when we are back at the command prompt and idle. 
* */ bool Console::findDirComm(int &conn) { QHash::const_iterator iter = m_dircommHash.constBegin(); while (iter != m_dircommHash.constEnd()) { DirComm *dircomm = iter.value(); if (dircomm->m_at_prompt && dircomm->m_at_main_prompt && dircomm->is_notify_enabled()) { conn = dircomm->m_conn; return true; } if (mainWin->m_connDebug) { Pmsg4(000, "currentDirComm=%d at_prompt=%d at_main=%d && notify=%d\n", dircomm->m_conn, dircomm->m_at_prompt, dircomm->m_at_main_prompt, dircomm->is_notify_enabled()); } ++iter; } return false; } /* * Create a new connection */ bool Console::newDirComm(int &conn) { m_dircommCounter++; if (mainWin->m_connDebug) { Pmsg2(000, "newDirComm=%i to: %s\n", m_dircommCounter, m_dir->name()); } DirComm *dircomm = new DirComm(this, m_dircommCounter); m_dircommHash.insert(m_dircommCounter, dircomm); bool success = dircomm->connect_dir(); if (mainWin->m_connDebug) { if (success) { Pmsg2(000, "newDirComm=%i Connected %s\n", m_dircommCounter, m_dir->name()); } else { Emsg2(M_ERROR, 0, "DirComm=%i. Unable to connect to %s\n", m_dircommCounter, m_dir->name()); } } if (!success) { m_dircommHash.remove(m_dircommCounter); delete dircomm; m_dircommCounter--; } conn = m_dircommCounter; return success; } bacula-15.0.3/src/qt-console/console/console.h0000644000175000017500000001073714771010173021022 0ustar bsbuildbsbuild#ifndef _CONSOLE_H_ #define _CONSOLE_H_ /* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * Kern Sibbald, January 2007 */ #if QT_VERSION >= 0x050000 #include #else #include #endif #include "pages.h" #include "ui_console.h" #include "bcomm/dircomm.h" #ifndef MAX_NAME_LENGTH #define MAX_NAME_LENGTH 128 #endif /* * Structure for obtaining the defaults for a job */ struct job_defaults { QString job_name; QString pool_name; QString messages_name; QString client_name; QString store_name; QString where; QString level; QString type; QString fileset_name; QString catalog_name; bool enabled; }; //class DIRRES; //class BSOCK; //class JCR; //class CONRES; class Console : public Pages, public Ui::ConsoleForm { Q_OBJECT friend class DirComm; public: Console(QTabWidget *parent); ~Console(); int read(int conn); char *msg(int conn); void discardToPrompt(int conn); int write(int conn, const char *msg); int write(int conn, QString msg); int notifyOff(); // enables/disables socket notification - returns the previous state bool notify(int conn, bool enable); // enables/disables socket notification - returns the previous state bool is_notify_enabled(int conn) const; bool getDirComm(int &conn); bool findDirComm(int &conn); void displayToPrompt(int conn); QString returnFromPrompt(int conn); bool dir_cmd(int conn, const char *cmd, QStringList &results); bool dir_cmd(const char *cmd, QStringList &results); bool dir_cmd(QString &cmd, QStringList &results); bool sql_cmd(const char *cmd, QStringList &results); bool sql_cmd(QString &cmd, QStringList &results); bool sql_cmd(int &conn, QString &cmd, QStringList &results); bool sql_cmd(int &conn, const char *cmd, QStringList &results, bool donotify); int write_dir(const char *buf); int write_dir(const char *buf, bool dowait); void write_dir(int conn, const char *buf); void write_dir(int conn, const char *buf, bool dowait); void getDirResName(QString &); void setDirRes(DIRRES *dir); void writeSettings(); void readSettings(); void setDirectorTreeItem(QTreeWidgetItem *); void terminate(); bool is_messagesPending() { return m_messages_pending; }; bool is_connected(); bool is_connected(int conn); QTreeWidgetItem *directorTreeItem() { return m_directorTreeItem; }; void startTimer(); void display_text(const char *buf); void display_text(const QString buf); void display_textf(const char *fmt, ...); void display_html(const QString buf); bool get_job_defaults(struct job_defaults &); bool get_job_defaults(int &conn, struct job_defaults &); const QFont get_font(); void beginNewCommand(int conn); void populateLists(bool forcenew); private: bool get_job_defaults(int &conn, struct job_defaults &, bool donotify); void update_cursor(void); void stopTimer(); bool is_connectedGui(); bool newDirComm(int &conn); void populateLists(int conn); public: QStringList job_list; QStringList restore_list; QStringList client_list; QStringList fileset_list; QStringList messages_list; QStringList pool_list; QStringList storage_list; QStringList type_list; QStringList level_list; QStringList volstatus_list; QStringList mediatype_list; QStringList location_list; public slots: void connect_dir(); void status_dir(void); void messages(void); void set_font(void); void poll_messages(void); void consoleHelp(); void consoleReload(); public: DIRRES *m_dir; /* so various pages can reference it */ bool m_warningPrevent; private: QTextEdit *m_textEdit; QTextCursor *m_cursor; QTreeWidgetItem *m_directorTreeItem; bool m_messages_pending; QTimer *m_timer; bool messagesPending(bool pend); bool hasFocus(); QHash m_dircommHash; int m_dircommCounter; }; #endif /* _CONSOLE_H_ */ 
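The Console class declared above is the hub of bat's multi-connection model: pages obtain a free DirComm slot with getDirComm(), issue Director "dot" commands through dir_cmd(), and then hand the socket back to the Qt notifier, much as Console::populateLists() does. The following is a minimal usage sketch, not part of the distribution; it assumes the normal bat build environment (console.h, the Qt headers and an already connected Console instance), and list_defined_jobs is a hypothetical name used only for illustration.

#include <QStringList>
#include "console.h"

/* Hypothetical helper, for illustration only: list the jobs defined
 * in the Director using the public Console API declared above. */
static void list_defined_jobs(Console *console)
{
   int conn;
   QStringList results;

   /* Reuse a free, established DirComm connection or open a new one */
   if (!console->getDirComm(conn)) {
      return;                           /* Director not reachable */
   }

   /* Issue a catalog "dot" command; dir_cmd returns one line per job */
   if (console->dir_cmd(conn, ".jobs", results)) {
      foreach (QString job, results) {
         console->display_text(job + "\n");
      }
   }

   /* Give control of this connection back to the Qt socket notifier */
   console->notify(conn, true);
}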
bacula-15.0.3/src/qt-console/console/console.ui0000644000175000017500000000611514771010173021203 0ustar bsbuildbsbuild ConsoleForm 0 0 432 456 Console 9 6 7 7 200 0 0 0 1 0 Qt::StrongFocus false Qt::ScrollBarAsNeeded QTextEdit::AutoNone false QTextEdit::NoWrap true :/images/status.png StatusDir :/images/utilities-terminal.png Console Help :/images/utilities-terminal.png Request Messages :/images/utilities-terminal.png Reload bacula-dir.conf bacula-15.0.3/src/qt-console/External-qt-console0000644000175000017500000000205014771010173021321 0ustar bsbuildbsbuild# This file provides information about the External dependencies required by # Bacula. # # There are four fields delimited by |. Only the first two fields are # required. The other two are used when the top level directory of the # archive is not the same as the file name with any suffixes removed. # # Field 1 is the name of the dependency. It is used to define the # name of the three variables which are assigned the values of fields 2 to 4. # # Field 2 is the URL of the archive. It is assigned to the variable # URL_[field1]. # # Field 3 is the top directory of the archive or the name of a directory that # must be created and the archive extracted into it. It is assigned to the # variable DIR_[field1]. # # Field 4 indicates if the directory specified in field 3 must be created # first and the archive extracted into it. It is assigned to the variable # MKD_[field1] # QWT|http://www.bacula.org/depkgs/qwt-5.0.2.tar.bz2|qwt-5.0.2|1 # # Original location # #QWT|http://superb-west.dl.sourceforge.net/sourceforge/qwt/qwt-5.0.2.tar.bz2|qwt-5.0.2|1 bacula-15.0.3/src/qt-console/joblog/0000755000175000017500000000000014771010173017011 5ustar bsbuildbsbuildbacula-15.0.3/src/qt-console/joblog/joblog.ui0000644000175000017500000000415414771010173020630 0ustar bsbuildbsbuild JobLogForm 0 0 432 456 Job Log 9 6 7 7 200 0 0 0 1 0 Qt::StrongFocus false Qt::ScrollBarAsNeeded QTextEdit::AutoNone false QTextEdit::NoWrap true bacula-15.0.3/src/qt-console/joblog/joblog.cpp0000644000175000017500000001142314771010173020772 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * JobLog Class * * Dirk Bartley, March 2007 * */ #include "bat.h" #include "joblog.h" JobLog::JobLog(QString &jobId, QTreeWidgetItem *parentTreeWidgetItem) : Pages() { setupUi(this); pgInitialize(tr("JobLog"), parentTreeWidgetItem); QTreeWidgetItem* thisitem = mainWin->getFromHash(this); thisitem->setIcon(0,QIcon(QString::fromUtf8(":images/joblog.png"))); m_cursor = new QTextCursor(textEdit->document()); m_jobId = jobId; getFont(); populateText(); dockPage(); setCurrent(); } void JobLog::getFont() { QFont font = textEdit->font(); QString dirname; m_console->getDirResName(dirname); QSettings settings(dirname, "bat"); settings.beginGroup("Console"); font.setFamily(settings.value("consoleFont", "Courier").value()); font.setPointSize(settings.value("consolePointSize", 10).toInt()); font.setFixedPitch(settings.value("consoleFixedPitch", true).toBool()); settings.endGroup(); textEdit->setFont(font); } /* * Populate the text in the window */ void JobLog::populateText() { QString query; query = "SELECT Time, LogText FROM Log WHERE JobId='" + m_jobId + "' order by Time"; /* This could be a log item */ if (mainWin->m_sqlDebug) { Pmsg1(000, "Log query cmd : %s\n", query.toUtf8().data()); } QStringList results; if (m_console->sql_cmd(query, results)) { if (!results.size()) { QMessageBox::warning(this, tr("Bat"), tr("There were no results!\n" "It is possible you may need to add \"catalog = all\" " "to the Messages resource for this job.\n"), QMessageBox::Ok); return; } QString jobstr("JobId "); /* FIXME: should this be translated ? */ jobstr += m_jobId; QString htmlbuf("" + tr("Log records for job %1").arg(m_jobId) ); htmlbuf += "
\n"; char trailer[] = "\n"; off_t howBig = HOW_BIG; fwrite(header, sizeof header, 1, fp); fseeko(fp, howBig - strlen(trailer), 0); fwrite(trailer, strlen(trailer), 1, fp); fclose(fp); return 0; } bacula-15.0.3/src/tools/testls.c0000644000175000017500000001631614771010173016302 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Test program for listing files during regression testing * Links have their permissions and time bashed since they cannot * be set by Bacula. * * Kern Sibbald, MM * */ #include "bacula.h" #include "findlib/find.h" /* Dummy functions */ int generate_job_event(JCR *jcr, const char *event) { return 1; } void generate_plugin_event(JCR *jcr, bEventType eventType, void *value) { } /* Global variables */ int attrs = 0; static JCR *jcr; static int num_files = 0; static int print_file(JCR *jcr, FF_PKT *ff, bool); static void print_ls_output(char *fname, char *link, int type, struct stat *statp); static int count_files(JCR *jcr, FF_PKT *ff, bool top_level); static void usage() { fprintf(stderr, _( "\n" "Usage: testls [-d debug_level] [-] [pattern1 ...]\n" " -a print extended attributes (Win32 debug)\n" " -d set debug level to \n" " -dt print timestamp in debug output\n" " -e specify file of exclude patterns\n" " -i specify file of include patterns\n" " -q quiet, don't print filenames (debug)\n" " - read pattern(s) from stdin\n" " -? 
print this message.\n" "\n" "Patterns are file inclusion -- normally directories.\n" "Debug level >= 1 prints each file found.\n" "Debug level >= 10 prints path/file for catalog.\n" "Errors always printed.\n" "Files/paths truncated is number with len > 255.\n" "Truncation is only in catalog.\n" "\n")); exit(1); } int main(int argc, char *const *argv) { FF_PKT *ff; char name[1000]; bool quiet = false; int i, ch; char *inc = NULL; char *exc = NULL; FILE *fd; setlocale(LC_ALL, ""); bindtextdomain("bacula", LOCALEDIR); textdomain("bacula"); lmgr_init_thread(); while ((ch = getopt(argc, argv, "ad:e:i:q?")) != -1) { switch (ch) { case 'a': /* print extended attributes *debug* */ attrs = 1; break; case 'd': /* set debug level */ if (*optarg == 't') { dbg_timestamp = true; } else { debug_level = atoi(optarg); if (debug_level <= 0) { debug_level = 1; } } break; case 'e': /* exclude patterns */ exc = optarg; break; case 'i': /* include patterns */ inc = optarg; break; case 'q': quiet = true; break; case '?': default: usage(); } } argc -= optind; argv += optind; jcr = new_jcr(sizeof(JCR), NULL); ff = init_find_files(); if (argc == 0 && !inc) { add_fname_to_include_list(ff, 0, "/"); /* default to / */ } else { for (i=0; i < argc; i++) { if (strcmp(argv[i], "-") == 0) { while (fgets(name, sizeof(name)-1, stdin)) { strip_trailing_junk(name); add_fname_to_include_list(ff, 0, name); } continue; } add_fname_to_include_list(ff, 0, argv[i]); } } if (inc) { fd = fopen(inc, "rb"); if (!fd) { printf(_("Could not open include file: %s\n"), inc); exit(1); } while (fgets(name, sizeof(name)-1, fd)) { strip_trailing_junk(name); add_fname_to_include_list(ff, 0, name); } fclose(fd); } if (exc) { fd = fopen(exc, "rb"); if (!fd) { printf(_("Could not open exclude file: %s\n"), exc); exit(1); } while (fgets(name, sizeof(name)-1, fd)) { strip_trailing_junk(name); add_fname_to_exclude_list(ff, name); } fclose(fd); } if (quiet) { match_files(jcr, ff, count_files); } else { match_files(jcr, ff, print_file); } printf(_("Files seen = %d\n"), num_files); term_include_exclude_files(ff); term_find_files(ff); free_jcr(jcr); term_last_jobs_list(); /* free jcr chain */ close_memory_pool(); lmgr_cleanup_main(); sm_dump(false); exit(0); } static int count_files(JCR *jcr, FF_PKT *ff, bool top_level) { num_files++; return 1; } static int print_file(JCR *jcr, FF_PKT *ff, bool top_level) { switch (ff->type) { case FT_LNKSAVED: case FT_REGE: case FT_REG: case FT_LNK: case FT_DIREND: case FT_SPEC: print_ls_output(ff->fname, ff->link, ff->type, &ff->statp); break; case FT_DIRBEGIN: break; case FT_NOACCESS: printf(_("Err: Could not access %s: %s\n"), ff->fname, strerror(errno)); break; case FT_NOFOLLOW: printf(_("Err: Could not follow ff->link %s: %s\n"), ff->fname, strerror(errno)); break; case FT_NOSTAT: printf(_("Err: Could not stat %s: %s\n"), ff->fname, strerror(errno)); break; case FT_NOCHG: printf(_("Skip: File not saved. No change. %s\n"), ff->fname); break; case FT_ISARCH: printf(_("Err: Attempt to backup archive. Not saved. %s\n"), ff->fname); break; case FT_NORECURSE: printf(_("Recursion turned off. Directory not entered. %s\n"), ff->fname); break; case FT_NOFSCHG: printf(_("Skip: File system change prohibited. Directory not entered. 
%s\n"), ff->fname); break; case FT_NOOPEN: printf(_("Err: Could not open directory %s: %s\n"), ff->fname, strerror(errno)); break; default: printf(_("Err: Unknown file ff->type %d: %s\n"), ff->type, ff->fname); break; } num_files++; return 1; } static void print_ls_output(char *fname, char *link, int type, struct stat *statp) { char buf[2000]; char ec1[30]; char *p, *f; int n; if (type == FT_LNK) { statp->st_mtime = 0; statp->st_mode |= 0777; } p = encode_mode(statp->st_mode, buf); n = sprintf(p, " %2d ", (uint32_t)statp->st_nlink); p += n; n = sprintf(p, "%-4d %-4d", (int)statp->st_uid, (int)statp->st_gid); p += n; n = sprintf(p, "%10.10s ", edit_uint64(statp->st_size, ec1)); p += n; if (S_ISCHR(statp->st_mode) || S_ISBLK(statp->st_mode)) { n = sprintf(p, "%4x ", (int)statp->st_rdev); } else { n = sprintf(p, " "); } p += n; p = encode_time(statp->st_mtime, p); *p++ = ' '; /* Copy file name */ for (f=fname; *f && (p-buf) < (int)sizeof(buf); ) *p++ = *f++; if (type == FT_LNK) { *p++ = '-'; *p++ = '>'; *p++ = ' '; /* Copy link name */ for (f=link; *f && (p-buf) < (int)sizeof(buf); ) *p++ = *f++; } *p++ = '\n'; *p = 0; fputs(buf, stdout); } bool python_set_prog(JCR*, char const*) { return false; } bacula-15.0.3/src/tools/joblist.c0000644000175000017500000001513114771010173016424 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ #include "bacula.h" #include "lib/unittests.h" #include "filed/fd_plugins.h" #include "findlib/bfile.h" /* Pointers to Bacula functions */ static const char *working="./"; #define USE_JOB_LIST #define UNITTESTS #include "plugins/fd/fd_common.h" void *start_heap; int main (int argc, char *argv[]) { Unittests t("joblist_test"); POOL_MEM tmp; const char *k = "netapp:/vol/vol1"; const char *k2 = "netapp:/vol/vol2"; const char *k3 = "netapp:/vol/vol2xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"; const char *k4 = "netapp:/vol/vol2xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxz"; char key1[128], key2[128], key3[128], key4[128], key5[128], jobF[128], jobI[128], jobI2[128], jobD[128], jobI3[128]; strcpy(key1, "ID=704ba403-fa5d-45f9-8681-859eec950723 DATE=1:15f18c5c-3824-40d2-8843-e6d161f77504:15f18c5c-3824-40d2-8843-e6d161f77504"); strcpy(key2, "ID=704ba403-fa5d-45f9-8681-859eec950723 DATE=2:15f18c5c-3824-40d2-8843-e6d161f77504:15f18c5c-3824-40d2-8843-e6d161f77504"); strcpy(key3, "ID=704ba403-fa5d-45f9-8681-859eec950723 DATE=3:15f18c5c-3824-40d2-8843-e6d161f77504:15f18c5c-3824-40d2-8843-e6d161f77504"); strcpy(key4, "ID=704ba403-fa5d-45f9-8681-859eec950723 DATE=4:15f18c5c-3824-40d2-8843-e6d161f77504:15f18c5c-3824-40d2-8843-e6d161f77504"); strcpy(key5, "ID=704ba403-fa5d-45f9-8681-859eec950723 DATE=5:15f18c5c-3824-40d2-8843-e6d161f77504:15f18c5c-3824-40d2-8843-e6d161f77504"); strcpy(jobF, "Test1.2020-03-12_09.45.31_24"); strcpy(jobI, "Test1.2020-03-12_09.45.31_25"); strcpy(jobI2, "Test1.2020-03-12_09.45.31_26"); strcpy(jobD, "Test1.2020-03-12_09.45.31_27"); strcpy(jobI3, "Test1.2020-03-12_09.45.31_28"); if (argc != 1) { exit (1); } unlink("test.dat"); { joblist job_history(NULL, k, jobF, NULL, 'F'); job_history.set_base("test.dat"); job_history.store_job(key1); job_history.prune_jobs(NULL, NULL, NULL); } { joblist job_history(NULL, k, jobI, jobF, 'I'); job_history.set_base("test.dat"); ok(job_history.find_job(jobF, tmp.handle()), "Find previous job"); ok(strcmp(tmp.c_str(), key1) == 0, "test key"); job_history.prune_jobs(NULL, NULL, NULL); job_history.store_job(key2); } { joblist job_history(NULL, k, jobI2, jobI, 'I'); job_history.set_base("test.dat"); ok(job_history.find_job(jobF, tmp.handle()), "Find previous job"); ok(strcmp(tmp.c_str(), key1) == 0, "test key"); ok(job_history.find_job(jobI, tmp.handle()), "Find previous job"); ok(strcmp(tmp.c_str(), key2) == 0, "test key"); job_history.prune_jobs(NULL, NULL, NULL); job_history.store_job(key3); } { joblist job_history(NULL, k, jobD, jobF, 'D'); job_history.set_base("test.dat"); ok(job_history.find_job(jobF, tmp.handle()), "Find previous job"); ok(strcmp(tmp.c_str(), key1) == 0, "test key"); job_history.prune_jobs(NULL, NULL, NULL); job_history.store_job(key4); } { joblist job_history(NULL, k, jobI3, jobD, 'I'); job_history.set_base("test.dat"); ok(job_history.find_job(jobF, tmp.handle()), "Find previous job"); ok(strcmp(tmp.c_str(), key1) == 0, "test key"); job_history.prune_jobs(NULL, NULL, NULL); job_history.store_job(key5); nok(job_history.find_job(jobI, tmp.handle()), "Find pruned job"); } { joblist job_history(NULL, k2, jobF, NULL, 'F'); job_history.set_base("test.dat"); 
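      /* Second key (k2): replay the same Full/Incremental/Differential
       * job chain that was just run for k, this time against a different
       * volume key, to check that the two histories kept in the same
       * test.dat file do not interfere with each other. */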
job_history.store_job(key1); job_history.prune_jobs(NULL, NULL, NULL); } { joblist job_history(NULL, k2, jobI, jobF, 'I'); job_history.set_base("test.dat"); ok(job_history.find_job(jobF, tmp.handle()), "Find previous job"); ok(strcmp(tmp.c_str(), key1) == 0, "test key"); job_history.prune_jobs(NULL, NULL, NULL); job_history.store_job(key2); } { joblist job_history(NULL, k2, jobI2, jobI, 'I'); job_history.set_base("test.dat"); ok(job_history.find_job(jobF, tmp.handle()), "Find previous job"); ok(strcmp(tmp.c_str(), key1) == 0, "test key"); ok(job_history.find_job(jobI, tmp.handle()), "Find previous job"); ok(strcmp(tmp.c_str(), key2) == 0, "test key"); job_history.prune_jobs(NULL, NULL, NULL); job_history.store_job(key3); } { joblist job_history(NULL, k2, jobD, jobF, 'D'); job_history.set_base("test.dat"); ok(job_history.find_job(jobF, tmp.handle()), "Find previous job"); ok(strcmp(tmp.c_str(), key1) == 0, "test key"); job_history.prune_jobs(NULL, NULL, NULL); job_history.store_job(key4); } { joblist job_history(NULL, k2, jobI3, jobD, 'I'); job_history.set_base("test.dat"); ok(job_history.find_job(jobF, tmp.handle()), "Find previous job"); ok(strcmp(tmp.c_str(), key1) == 0, "test key"); job_history.prune_jobs(NULL, NULL, NULL); job_history.store_job(key5); nok(job_history.find_job(jobI, tmp.handle()), "Find pruned job"); } { joblist job_history(NULL, k3, jobF, NULL, 'F'); job_history.set_base("test.dat"); job_history.store_job(key1); job_history.prune_jobs(NULL, NULL, NULL); } { joblist job_history(NULL, k3, jobI, jobF, 'I'); job_history.set_base("test.dat"); ok(job_history.find_job(jobF, tmp.handle()), "Find previous very long job"); ok(strcmp(tmp.c_str(), key1) == 0, "test key"); job_history.prune_jobs(NULL, NULL, NULL); job_history.store_job(key2); } { joblist job_history(NULL, k4, jobI, jobF, 'I'); job_history.set_base("test.dat"); nok(job_history.find_job(jobF, tmp.handle()), "Find previous very long job"); } return report(); } bacula-15.0.3/src/tools/fstype.c0000644000175000017500000001072514771010173016274 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Program for determining file system type * * Written by Preben 'Peppe' Guldberg, December MMIV */ #include "bacula.h" #include "findlib/find.h" static void usage() { fprintf(stderr, _( "\n" "Usage: fstype [-v] path ...\n" "\n" " Print the file system type for each file/directory argument given.\n" " The following options are supported:\n" "\n" " -l print all file system types in mtab.\n" " -m print full entries in mtab.\n" " -v print both path and file system type of each argument.\n" " -? 
print this message.\n" "\n")); exit(1); } struct mtab_item { rblink link; uint64_t dev; char fstype[1]; }; /* Compare two device types */ static int compare_mtab_items(void *item1, void *item2) { mtab_item *mtab1, *mtab2; mtab1 = (mtab_item *)item1; mtab2 = (mtab_item *)item2; if (mtab1->dev < mtab2->dev) return -1; if (mtab1->dev > mtab2->dev) return 1; return 0; } void print_mtab_item(void *user_ctx, struct stat *st, const char *fstype, const char *mountpoint, const char *mntopts, const char *fsname) { fprintf(stderr, "dev=%llx fstype=%s mountpoint=%s mntopts=%s\n", (unsigned long long)st->st_dev, fstype, mountpoint, mntopts); } static void add_mtab_item(void *user_ctx, struct stat *st, const char *fstype, const char *mountpoint, const char *mntopts, const char *fsname) { rblist *mtab_list = (rblist *)user_ctx; mtab_item *item, *ritem; int len = strlen(fstype) + 1; item = (mtab_item *)malloc(sizeof(mtab_item) + len); item->dev = (uint64_t)st->st_dev; bstrncpy(item->fstype, fstype, len); //fprintf(stderr, "Add dev=%lx fstype=%s\n", item->dev, item->fstype); ritem = (mtab_item *)mtab_list->insert((void *)item, compare_mtab_items); if (ritem != item) { fprintf(stderr, "Problem!! Returned item not equal added item\n"); } //fprintf(stderr, "dev=%p fstype=%s mountpoint=%s mntopts=%s\n", // ((void *)st->st_dev), fstype, mountpoint, mntopts); } int main (int argc, char *const *argv) { char fs[1000]; bool verbose = false; bool list = false; bool mtab = false; int status = 0; int ch, i; setlocale(LC_ALL, ""); bindtextdomain("bacula", LOCALEDIR); textdomain("bacula"); while ((ch = getopt(argc, argv, "lmv?")) != -1) { switch (ch) { case 'l': list = true; break; case 'm': mtab = true; /* list mtab */ break; case 'v': verbose = true; break; case '?': default: usage(); } } argc -= optind; argv += optind; OSDependentInit(); if (mtab) { read_mtab(print_mtab_item, NULL); status = 1; goto get_out; } if (list) { rblist *mtab_list; mtab_item *item; mtab_list = New(rblist()); read_mtab(add_mtab_item, mtab_list); fprintf(stderr, "Size of mtab=%d\n", mtab_list->size()); foreach_rblist(item, mtab_list) { fprintf(stderr, "Found dev=%llx fstype=%s\n", (unsigned long long)item->dev, item->fstype); } delete mtab_list; goto get_out; } if (argc < 1) { usage(); } for (i = 0; i < argc; --argc, ++argv) { FF_PKT ff_pkt; memset((void *)&ff_pkt, 0, sizeof(ff_pkt)); ff_pkt.fname = ff_pkt.link = *argv; if (lstat(ff_pkt.fname, &ff_pkt.statp) != 0) { fprintf(stderr, "lstat of %s failed.\n", ff_pkt.fname); status = 1; break; } if (fstype(&ff_pkt, fs, sizeof(fs))) { if (verbose) { printf("%s: %s\n", *argv, fs); } else { puts(fs); } } else { fprintf(stderr, _("%s: unknown file system type\n"), *argv); status = 1; } } get_out: exit(status); } bacula-15.0.3/src/tools/bpipe-test.c0000644000175000017500000000613014771010173017031 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ #include "bacula.h" #include "lib/unittests.h" char *run_cmd(char *command, POOLMEM **out) { **out=0; if (run_program_full_output(command, 0, *out) != 0) { berrno be; Jmsg(NULL, M_ERROR, 0, _("Can't run command %s. ERR=%s\n"), command, be.bstrerror(errno)); if (**out) { Dmsg1(0, "%s", *out); } return NULL; } strip_trailing_junk(*out); return *out; } void *th1(void *arg) { const char *ret; POOLMEM *q = get_pool_memory(PM_FNAME); run_cmd((char *)"echo toto", &q); if (strcmp(q, "toto") == 0) { ret = "ok"; } else { ret = "not ok"; } free_pool_memory(q); return (void *)ret; } int main(int argc, char **argv) { pthread_t ids[1000]; void *ret; Unittests t("bpipe_test", true, true); debug_level = 400; //t.configure(TEST_QUIET); init_stack_dump(); my_name_is(argc, argv, "bpipe-test"); init_msg(NULL, NULL); daemon_start_time = time(NULL); setup_daemon_message_queue(); start_watchdog(); for (int i = 0 ; i < 990 ; i++) { pthread_create(&(ids[i]), NULL, th1, NULL); pthread_create(&(ids[i+1]), NULL, th1, NULL); pthread_create(&(ids[i+2]), NULL, th1, NULL); pthread_join(ids[i], &ret); ok(strcmp((char *)ret, "ok") == 0, "Checking thread output"); pthread_join(ids[++i], &ret); ok(strcmp((char *)ret, "ok") == 0, "Checking thread output"); pthread_join(ids[++i], &ret); ok(strcmp((char *)ret, "ok") == 0, "Checking thread output"); } FILE *fp = fopen("tmp/a.pl", "w"); if (fp) { fprintf(fp, "#!/usr/bin/perl -w\n" "use strict;\n" "sleep 1;\n" "print STDERR \"error\\n\";" "print \"ok\\n\";\n"); fclose(fp); chmod("tmp/a.pl", 0700); char buf[512]; BPIPE *p = open_bpipe((char *)"./tmp/a.pl", 0, "re"); close_epipe(p); fgets(buf, sizeof(buf), p->rfd); int ret = close_bpipe(p); isnt(ret, 0, "checking bpipe output status (re)"); is(buf, "ok\n", "checking bpipe output string (re)"); p = open_bpipe((char *)"./tmp/a.pl", 0, "rE"); fgets(buf, sizeof(buf), p->rfd); ret = close_bpipe(p); is(ret, 0, "checking bpipe output status (rE)"); is(buf, "ok\n", "checking bpipe output string (rE)"); } else { ok(0, "Unable to open tmp/a.sh for tests"); } free_daemon_message_queue(); stop_watchdog(); term_msg(); close_memory_pool(); return report(); } bacula-15.0.3/src/tools/breaddir.c0000644000175000017500000000300514771010173016527 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ #include #include #include "../bacula.h" #include "../lib/unittests.h" int breaddir(DIR *dirp, POOLMEM *&d_name); void *th1(void *) { int nb=0; intptr_t ret; POOL_MEM p; while (nb++ < 100) { DIR *dir = opendir("."); if (dir) { while ((ret = breaddir(dir, p.addr())) == 0) { /* nop */ } if (ret > 0) { berrno be; Dmsg1(0, "errno=%s\n", be.bstrerror()); } closedir(dir); } } return (void *)ret; } int main() { void *ret; pthread_t t1[100]; Unittests t("breaddir_test", true, true); t.configure(TEST_QUIET); for (int i=0; i < 100; i++) { pthread_create(&(t1[i]), NULL, th1, NULL); } for (int i=0; i < 100; i++) { pthread_join(t1[i], &ret); ok((intptr_t)ret == -1, "Check the readdir() value"); } return report(); } bacula-15.0.3/src/tools/cats_test.c0000644000175000017500000005554714771010173016766 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Program to test CATS DB routines * * */ #define _BDB_PRIV_INTERFACE_ #include "bacula.h" #include "cats/cats.h" #include "cats/bvfs.h" #include "findlib/find.h" /* Local variables */ static BDB *db; static const char *file = "COPYRIGHT"; //static DBId_t fnid=0; static const char *db_name = "bacula"; static const char *db_user = "bacula"; static const char *db_password = ""; static const char *db_host = NULL; static const char *db_address = NULL; static int db_port = 0; static int64_t pid = 0; static JCR *jcr=NULL; #define PLINE "\n============================================================\n" static void usage() { fprintf(stderr, _( PROG_COPYRIGHT "\n%sVersion: %s (%s)\n" " -d set debug level to \n" " -dt print timestamp in debug output\n" " -n specify the database name (default bacula)\n" " -u specify database user name (default bacula)\n" " -P specify database host (default NULL)\n" " -w specify working directory\n" " -p specify path\n" " -f specify file\n" " -l maximum tuple to fetch\n" " -q print only errors\n" " -v verbose\n" " -? 
print this message\n\n"), 2011, "", VERSION, BDATE); exit(1); } bool print_ok=true; int _err=0; int _wrn=0; int _nb=0; bool _warn(const char *file, int l, const char *op, int value, const char *label) { bool ret=false; _nb++; if (!value) { _wrn++; printf("WRN %.30s %s:%i on %s\n", label, file, l, op); } else { ret=true; printf("OK %.30s\n", label); } return ret; } #define warn(x, label) _warn(__FILE__, __LINE__, #x, (x), label) bool _ok(const char *file, int l, const char *op, int value, const char *label) { bool ret=false; _nb++; if (!value) { _err++; printf("ERR %.30s %s:%i on %s\n", label, file, l, op); } else { ret=true; if (print_ok) { printf("OK %.30s\n", label); } } return ret; } #define ok(x, label) _ok(__FILE__, __LINE__, #x, (x), label) bool _nok(const char *file, int l, const char *op, int value, const char *label) { bool ret=false; _nb++; if (value) { _err++; printf("ERR %.30s %s:%i on !%s\n", label, file, l, op); } else { ret = true; if (print_ok) { printf("OK %.30s\n", label); } } return ret; } #define nok(x, label) _nok(__FILE__, __LINE__, #x, (x), label) int report() { printf("Result %i/%i OK\n", _nb - _err, _nb); return _err>0; } static void cmp_pool(POOL_DBR &pr, POOL_DBR &pr2) { ok(pr.MaxVols == pr2.MaxVols, " Check Pool MaxVols"); ok(pr.UseOnce == pr2.UseOnce, " Check Pool UseOnce"); ok(pr.UseCatalog == pr2.UseCatalog, " Check Pool UseCatalog"); ok(pr.AcceptAnyVolume == pr2.AcceptAnyVolume," Check Pool AcceptAnyVolume"); ok(pr.AutoPrune == pr2.AutoPrune, " Check Pool AutoPrune"); ok(pr.Recycle == pr2.Recycle, " Check Pool Recycle"); ok(pr.VolRetention == pr2.VolRetention , " Check Pool VolRetention"); ok(pr.VolUseDuration == pr2.VolUseDuration, " Check Pool VolUseDuration"); ok(pr.MaxVolJobs == pr2.MaxVolJobs, " Check Pool MaxVolJobs"); ok(pr.MaxVolFiles == pr2.MaxVolFiles, " Check Pool MaxVolFiles"); ok(pr.MaxVolBytes == pr2.MaxVolBytes, " Check Pool MaxVolBytes"); ok(!strcmp(pr.PoolType, pr2.PoolType), " Check Pool PoolType"); ok(pr.LabelType == pr2.LabelType, " Check Pool LabelType"); ok(!strcmp(pr.LabelFormat, pr2.LabelFormat), " Check Pool LabelFormat"); ok(pr.RecyclePoolId == pr2.RecyclePoolId, " Check Pool RecyclePoolId"); ok(pr.ScratchPoolId == pr2.ScratchPoolId, " Check Pool ScratchPoolId"); ok(pr.ActionOnPurge == pr2.ActionOnPurge, " Check Pool ActionOnPurge"); } static void cmp_client(CLIENT_DBR &cr, CLIENT_DBR &cr2) { ok(!strcmp(cr2.Name, cr.Name), " Check Client Name"); ok(!strcmp(cr2.Uname, cr.Uname), " Check Client Uname"); ok(cr.AutoPrune == cr2.AutoPrune, " Check Client Autoprune"); ok(cr.JobRetention == cr2.JobRetention, " Check Client JobRetention"); ok(cr.FileRetention == cr2.FileRetention," Check Client FileRetention"); } static void cmp_job(JOB_DBR &jr, JOB_DBR &jr2) { ok(jr.VolSessionId == jr2.VolSessionId, " Check VolSessionId"); ok(jr.VolSessionTime == jr2.VolSessionTime, " Check VolSessionTime"); ok(jr.PoolId == jr2.PoolId, " Check PoolId"); ok(jr.StartTime == jr2.StartTime, " Check StartTime"); ok(jr.EndTime == jr2.EndTime, " Check EndTime"); ok(jr.JobFiles == jr2.JobFiles, " Check JobFiles"); ok(jr.JobBytes == jr2.JobBytes, " Check JobBytes"); ok(jr.JobTDate == jr2.JobTDate, " Check JobTDate"); ok(!strcmp(jr.Job, jr2.Job), " Check Job"); ok(jr.JobStatus == jr2.JobStatus, " Check JobStatus"); ok(jr.JobType == jr2.JobType, " Check Type"); ok(jr.JobLevel == jr2.JobLevel, " Check Level"); ok(jr.ClientId == jr2.ClientId, " Check ClientId"); ok(!strcmp(jr.Name, jr2.Name), " Check Name"); ok(jr.PriorJobId == jr2.PriorJobId, " Check PriorJobId"); 
ok(jr.RealEndTime == jr2.RealEndTime, " Check RealEndTime"); ok(jr.JobId == jr2.JobId, " Check JobId"); ok(jr.FileSetId == jr2.FileSetId, " Check FileSetId"); ok(jr.SchedTime == jr2.SchedTime, " Check SchedTime"); ok(jr.RealEndTime == jr2.RealEndTime, " Check RealEndTime"); ok(jr.ReadBytes == jr2.ReadBytes, " Check ReadBytes"); ok(jr.HasBase == jr2.HasBase, " Check HasBase"); ok(jr.PurgedFiles == jr2.PurgedFiles, " Check PurgedFiles"); } #define aPATH "/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" #define aFILE "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" static int list_files(void *ctx, int nb_col, char **row) { uint32_t *k = (uint32_t*) ctx; (*k)++; ok(nb_col > 4, "Check result columns"); ok(!strcmp(row[0], aPATH aPATH aPATH aPATH "/"), "Check path"); ok(!strcmp(row[1], aFILE aFILE ".txt"), "Check filename"); ok(str_to_int64(row[2]) == 10, "Check FileIndex"); ok(str_to_int64(row[3]) == jcr->JobId, "Check JobId"); return 1; } static int count_col(void *ctx, int nb_col, char **row) { *((int32_t*) ctx) = nb_col; return 1; } /* number of thread started */ int main (int argc, char *argv[]) { int ch; char *path=NULL, *client=NULL; uint64_t limit=0; bool clean=false; bool full_test=false; int dbtype; uint32_t j; char temp[20]; POOLMEM *buf = get_pool_memory(PM_FNAME); POOLMEM *buf2 = get_pool_memory(PM_FNAME); POOLMEM *buf3 = get_pool_memory(PM_FNAME); setlocale(LC_ALL, ""); bindtextdomain("bacula", LOCALEDIR); textdomain("bacula"); init_stack_dump(); pid = getpid(); Pmsg0(0, "Starting cats_test tool" PLINE); my_name_is(argc, argv, ""); init_msg(NULL, NULL); OSDependentInit(); while ((ch = getopt(argc, argv, "qh:c:l:d:n:P:Su:vFw:?p:f:T")) != -1) { switch (ch) { case 'q': print_ok = false; break; case 'd': /* debug level */ if (*optarg == 't') { dbg_timestamp = true; } else { debug_level = atoi(optarg); if (debug_level <= 0) { debug_level = 1; } } break; case 'l': limit = str_to_int64(optarg); break; case 'c': client = optarg; break; case 'h': db_host = optarg; break; case 'n': db_name = optarg; break; case 'w': working_directory = optarg; break; case 'u': db_user = optarg; break; case 'P': db_password = optarg; break; case 'v': verbose++; break; case 'p': path = optarg; break; case 'F': full_test = true; break; case 'f': file = optarg; break; case 'T': clean = true; break; case '?': default: usage(); } } argc -= optind; argv += optind; if (argc != 0) { Pmsg0(0, _("Wrong number of arguments: \n")); usage(); } /* TODO: * - Open DB * - With errors * - With good info * - With multiple thread in // * - Test cats.h * - Test all sql_cmds.c * - Test all sql.c (db_) * - Test all sql_create.c * - Test db_handler */ jcr = new_jcr(sizeof(JCR), NULL); jcr->setJobType(JT_CONSOLE); jcr->setJobLevel(L_NONE); jcr->JobStatus = JS_Running; bstrncpy(jcr->Job, "**dummy**", sizeof(jcr->Job)); jcr->JobId = pid; /* this is JobId on tape */ jcr->start_time = jcr->sched_time = time(NULL); /* Test DB connexion */ Pmsg1(0, PLINE "Test DB connection \"%s\"" PLINE, db_name); if (full_test) { db = db_init_database(jcr /* JCR */, NULL /* dbi driver */, db_name, db_user, db_password, db_address, db_port + 100, NULL /* db_socket */, db_ssl_mode, db_ssl_key, db_ssl_cert, db_ssl_ca, db_ssl_capath, db_ssl_cipher, 0 /* mult_db_connections */, false); ok(db != NULL, "Test bad connection"); if (!db) { report(); exit (1); } nok(db_open_database(jcr, db), "Open bad Database"); db_close_database(jcr, db); } db = db_init_database(jcr /* JCR */, NULL /* dbi driver */, db_name, db_user, db_password, 
db_address, db_port, NULL /* db_socket */, db_ssl_mode, db_ssl_key, db_ssl_cert, db_ssl_ca, db_ssl_capath, db_ssl_cipher, false /* mult_db_connections */, false); ok(db != NULL, "Test db connection"); if (!db) { report(); exit (1); } if (!ok(db_open_database(jcr, db), "Open Database")) { Pmsg1(000, _("Could not open database \"%s\".\n"), db_name); Jmsg(jcr, M_FATAL, 0, _("Could not open, database \"%s\".\n"), db_name); Jmsg(jcr, M_FATAL, 0, _("%s"), db_strerror(db)); Pmsg1(000, "%s", db_strerror(db)); db_close_database(jcr, db); report(); exit (1); } dbtype = db_get_type_index(db); /* Check if the SQL library is thread-safe */ ok(check_tables_version(jcr, db), "Check table version"); ok(db_sql_query(db, "SELECT VersionId FROM Version", db_int_handler, &j), "SELECT VersionId"); ok(UPDATE_DB(jcr, db, (char*)"UPDATE Version SET VersionId = 1"), "Update VersionId"); nok(check_tables_version(jcr, db), "Check table version"); Mmsg(buf, "UPDATE Version SET VersionId = %d", j); ok(UPDATE_DB(jcr, db, buf), "Restore VersionId"); if (dbtype != SQL_TYPE_SQLITE3) { ok(db_check_max_connections(jcr, db, 1), "Test min Max Connexion"); nok(db_check_max_connections(jcr, db, 10000), "Test max Max Connexion"); } ok(db_open_batch_connexion(jcr, db), "Opening batch connection"); db_close_database(jcr, jcr->db_batch); jcr->db_batch = NULL; /* ---------------------------------------------------------------- */ uint32_t storageid=0; ok(db_sql_query(db, "SELECT MIN(StorageId) FROM Storage", db_int_handler, &storageid), "Get StorageId"); ok(storageid > 0, "Check StorageId"); if (!storageid) { Pmsg0(0, "Please, run REGRESS_DEBUG=1 tests/bacula-backup-test before this test"); exit (1); } /* ---------------------------------------------------------------- */ Pmsg0(0, PLINE "Doing Basic SQL tests" PLINE); ok(db_sql_query(db, "SELECT 1,2,3,4,5", count_col, &j), "Count 5 rows"); ok(j == 5, "Check number of columns"); ok(db_sql_query(db, "SELECT 1,2,3,4,5,'a','b','c','d','e'", count_col, &j), "Count 10 rows"); ok(j == 10, "Check number of columns"); bsnprintf(temp, sizeof(temp), "t%lld", pid); ok(db_sql_query(db, "SELECT 2", db_int_handler, &j), "Good SELECT query"); ok(db_sql_query(db, "SELECT 1 FROM Media WHERE VolumeName='missing'", db_int_handler, &j), "Good empty SELECT query"); db_int64_ctx i64; i64.value = 0; i64.count = 0; ok(db_sql_query(db, "SELECT 1",db_int64_handler, &i64),"db_int64_handler"); ok(i64.value == 1, "Check db_int64_handler return"); db_list_ctx lctx; ok(db_sql_query(db, "SELECT FileId FROM File ORDER By FileId LIMIT 10", db_list_handler, &lctx), "db_list_ctx"); ok(lctx.count == 10, "Check db_list_ctx count "); ok(!strcmp(lctx.list, "1,2,3,4,5,6,7,8,9,10"), "Check db_list_ctx list"); nok(db_sql_query(db, "blabla", db_int_handler, &j), "Bad query"); Mmsg(buf, "CREATE Table %s (a int)", temp); ok(db_sql_query(db, buf, NULL, NULL), "CREATE query"); Mmsg(buf, "INSERT INTO %s (a) VALUES (1)", temp); ok(INSERT_DB(jcr, db, buf), "INSERT query"); ok(INSERT_DB(jcr, db, buf), "INSERT query"); ok(sql_affected_rows(db) == 1, "Check sql_affected_rows"); Mmsg(buf, "INSERT INTO aaa%s (a) VALUES (1)", temp); nok(INSERT_DB(jcr, db, buf), "Bad INSERT query"); ok(sql_affected_rows(db) == 0, "Check sql_affected_rows"); Mmsg(buf, "UPDATE %s SET a = 2", temp); ok(UPDATE_DB(jcr, db, buf), "UPDATE query"); ok(sql_affected_rows(db) == 2, "Check sql_affected_rows"); Mmsg(buf, "UPDATE %s SET a = 2 WHERE a = 1", temp); nok(UPDATE_DB(jcr, db, buf), "Empty UPDATE query"); Mmsg(buf, "UPDATE aaa%s SET a = 2", temp); 
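   /*
    * Added sketch (not in the original source): every result handler passed to
    * db_sql_query() here follows the same callback shape as count_col() and
    * list_files() above -- an opaque context pointer, the column count, and the
    * row values as strings, invoked once per row.  The hypothetical handler
    * below sums the first column; it stays inside #if 0 so nothing in this
    * test changes at run time.
    */
#if 0
   static int sum_first_col(void *ctx, int nb_col, char **row)
   {
      int64_t *total = (int64_t *)ctx;
      if (nb_col > 0 && row[0]) {
         *total += str_to_int64(row[0]);  /* str_to_int64() is already used in this file */
      }
      return 0;                           /* handlers in this file return 0 or 1 */
   }
   /* int64_t total = 0;
      ok(db_sql_query(db, "SELECT JobBytes FROM Job", sum_first_col, &total), "Sum JobBytes"); */
#endif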
nok(UPDATE_DB(jcr, db, buf), "Bad UPDATE query"); Mmsg(buf, "DELETE FROM %s", temp); ok(DELETE_DB(jcr, db, buf), "DELETE query"); nok(DELETE_DB(jcr, db, buf), "Empty DELETE query"); /* TODO bug ? */ Mmsg(buf, "DELETE FROM aaa%s", temp); ok(DELETE_DB(jcr, db, buf), "Bad DELETE query"); /* TODO bug ? */ Mmsg(buf, "DROP TABLE %s", temp); ok(QUERY_DB(jcr, db, buf), "DROP query"); nok(QUERY_DB(jcr, db, buf), "Empty DROP query"); /* ---------------------------------------------------------------- */ strcpy(buf, "This string should be 'escaped'"); db_escape_string(jcr, db, buf2, buf, strlen(buf)); ok((strlen(buf) + 2) == strlen(buf2),"Quoted string should be longer"); Mmsg(buf, "INSERT INTO Path (Path) VALUES ('%lld-%s')", pid, buf2); ok(db_sql_query(db, buf, NULL, NULL), "Inserting quoted string"); /* ---------------------------------------------------------------- */ Pmsg0(0, PLINE "Doing Job tests" PLINE); JOB_DBR jr, jr2; memset(&jr, 0, sizeof(jr)); memset(&jr2, 0, sizeof(jr2)); jr.JobId = 1; ok(db_get_job_record(jcr, db, &jr), "Get Job record for JobId=1"); ok(jr.JobFiles > 10, "Check number of files"); jr.JobId = (JobId_t)pid; Mmsg(buf, "%s-%lld", jr.Job, pid); strcpy(jr.Job, buf); ok(db_create_job_record(jcr, db, &jr), "Create Job record"); ok(db_update_job_start_record(jcr, db, &jr), "Update Start Record"); ok(db_update_job_end_record(jcr, db, &jr), "Update End Record"); jr2.JobId = jr.JobId; ok(db_get_job_record(jcr, db, &jr2), "Get Job record by JobId"); cmp_job(jr, jr2); memset(&jr2, 0, sizeof(jr2)); strcpy(jr2.Job, jr.Job); ok(db_get_job_record(jcr, db, &jr2), "Get Job record by Job name"); cmp_job(jr, jr2); memset(&jr2, 0, sizeof(jr2)); jr2.JobId = 99999; nok(db_get_job_record(jcr, db, &jr2), "Get non existing Job record (JobId)"); memset(&jr2, 0, sizeof(jr2)); strcpy(jr2.Job, "test"); nok(db_get_job_record(jcr, db, &jr2), "Get non existing Job record (Job)"); /* ---------------------------------------------------------------- */ ATTR_DBR ar; memset(&ar, 0, sizeof(ar)); Mmsg(buf2, aPATH aPATH aPATH aPATH "/" aFILE aFILE ".txt"); ar.fname = buf2; Mmsg(buf3, "gD ImIZ IGk B Po Po A A A JY BNNvf5 BNKzS7 BNNuwC A A C"); ar.attr = buf3; ar.FileIndex = 10; ar.Stream = STREAM_UNIX_ATTRIBUTES; ar.FileType = FT_REG; jcr->JobId = ar.JobId = jr.JobId; jcr->JobStatus = JS_Running; ok(db_create_attributes_record(jcr, db, &ar), "Inserting Filename"); ok(db_write_batch_file_records(jcr), "Commit batch session"); Mmsg(buf, "SELECT FileIndex FROM File WHERE JobId=%lld",(int64_t)jcr->JobId); ok(db_sql_query(db, buf, db_int_handler, &j), "Get Inserted record"); ok(j == ar.FileIndex, "Check FileIndex"); Mmsg(buf, "SELECT COUNT(1) FROM File WHERE JobId=%lld",(int64_t)jcr->JobId); ok(db_sql_query(db, buf, db_int_handler, &j), "List records"); ok(j == 1, "Check batch session records"); j = 0; Mmsg(buf, "%lld", (uint64_t)jcr->JobId); ok(db_get_file_list(jcr, jcr->db_batch, buf, DBL_NONE, list_files, &j), "List files with db_get_file_list()"); ok(j == 1, "Check db_get_file_list results"); /* ---------------------------------------------------------------- */ Pmsg0(0, PLINE "Doing Client tests" PLINE); CLIENT_DBR cr, cr2; memset(&cr, 0, sizeof(cr)); memset(&cr2, 0, sizeof(cr2)); cr.AutoPrune = 1; cr.FileRetention = 10; cr.JobRetention = 15; bsnprintf(cr.Name, sizeof(cr.Name), "client-%lld-fd", pid); bsnprintf(cr.Uname, sizeof(cr.Uname), "uname-%lld", pid); ok(db_create_client_record(jcr, db, &cr), "db_create_client_record()"); ok(cr.ClientId > 0, "Check ClientId"); cr2.ClientId = cr.ClientId; /* Save it */ 
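   /*
    * Added note (assumption based on the lookups exercised below): the client
    * searches in this section rely on db_get_client_record() matching on
    * ClientId when it is non-zero and on Name otherwise, which is why the
    * test clears the structure (or the id) between lookups.  A minimal
    * lookup-by-name sketch, guarded out so the test flow is unchanged:
    */
#if 0
   CLIENT_DBR by_name;
   memset(&by_name, 0, sizeof(by_name));
   bstrncpy(by_name.Name, "some-client-fd", sizeof(by_name.Name)); /* hypothetical name */
   if (db_get_client_record(jcr, db, &by_name)) {
      Pmsg1(0, "Found ClientId=%lu\n", (unsigned long)by_name.ClientId);
   }
#endif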
cr.ClientId = 0; Pmsg0(0, "Search client by ClientId\n"); ok(db_create_client_record(jcr, db, &cr),"Should get the client record"); ok(cr.ClientId == cr2.ClientId, "Check if ClientId is the same"); ok(db_get_client_record(jcr, db, &cr2), "Search client by ClientId"); cmp_client(cr, cr2); Pmsg0(0, "Search client by Name\n"); memset(&cr2, 0, sizeof(cr2)); strcpy(cr2.Name, cr.Name); ok(db_get_client_record(jcr, db, &cr2),"Search client by Name"); cmp_client(cr, cr2); Pmsg0(0, "Search non existing client by Name\n"); memset(&cr2, 0, sizeof(cr2)); bsnprintf(cr2.Name, sizeof(cr2.Name), "hollow-client-%lld-fd", pid); nok(db_get_client_record(jcr, db, &cr2), "Search non existing client"); ok(cr2.ClientId == 0, "Check ClientId after failed search"); cr.AutoPrune = 0; strcpy(cr.Uname, "NewUname"); ok(db_update_client_record(jcr, db, &cr), "Update Client record"); memset(&cr2, 0, sizeof(cr2)); cr2.ClientId = cr.ClientId; ok(db_get_client_record(jcr, db, &cr2),"Search client by ClientId"); cmp_client(cr, cr2); int nb, i; uint32_t *ret_ids; ok(db_get_client_ids(jcr, db, &nb, &ret_ids), "Get Client Ids"); ok(nb > 0, "Should find at least 1 Id"); for (i = 0; i < nb; i++) { if (ret_ids[i] == cr2.ClientId) { break; } } ok(i < nb, "Check if ClientId was found"); /* ---------------------------------------------------------------- */ Pmsg0(0, PLINE "Doing Pool tests" PLINE); POOL_DBR pr, pr2; memset(&pr, 0, sizeof(pr)); memset(&pr2, 0, sizeof(pr2)); bsnprintf(pr.Name, sizeof(pr.Name), "pool-%lld", pid); pr.MaxVols = 10; pr.UseOnce = 0; pr.UseCatalog = true; pr.AcceptAnyVolume = true; pr.AutoPrune = true; pr.Recycle = true; pr.VolRetention = 1000; pr.VolUseDuration = 1000; pr.MaxVolJobs = 100; pr.MaxVolFiles = 1000; pr.MaxVolBytes = 1000000; strcpy(pr.PoolType, "Backup"); pr.LabelType = 0; pr.LabelFormat[0] = 0; pr.RecyclePoolId = 0; pr.ScratchPoolId = 0; pr.ActionOnPurge = 1; ok(db_create_pool_record(jcr, db, &pr), "db_create_pool_record()"); ok(pr.PoolId > 0, "Check PoolId"); pr2.PoolId = pr.PoolId; pr.PoolId = 0; Pmsg0(0, "Search pool by PoolId\n"); nok(db_create_pool_record(jcr, db, &pr),"Can't create pool twice"); ok(db_get_pool_numvols(jcr, db, &pr2), "Search pool by PoolId"); cmp_pool(pr, pr2); pr2.MaxVols++; pr2.AutoPrune = false; pr2.Recycle = false; pr2.VolRetention++; pr2.VolUseDuration++; pr2.MaxVolJobs++; pr2.MaxVolFiles++; pr2.MaxVolBytes++; strcpy(pr2.PoolType, "Restore"); strcpy(pr2.LabelFormat, "VolFormat"); pr2.RecyclePoolId = 0; pr2.ScratchPoolId = 0; pr2.ActionOnPurge = 2; ok(db_update_pool_record(jcr, db, &pr2), "Update Pool record"); memset(&pr, 0, sizeof(pr)); pr.PoolId = pr2.PoolId; ok(db_get_pool_numvols(jcr, db, &pr), "Search pool by PoolId"); cmp_pool(pr, pr2); ok(db_delete_pool_record(jcr, db, &pr), "Delete Pool"); nok(db_delete_pool_record(jcr, db, &pr), "Delete non existing Pool"); nok(db_update_pool_record(jcr, db, &pr), "Update non existing Pool"); ok(db_create_pool_record(jcr, db, &pr), "Recreate Pool"); /* ---------------------------------------------------------------- */ Pmsg0(0, PLINE "Doing Media tests" PLINE); MEDIA_DBR mr, mr2; memset(&mr, 0, sizeof(mr)); memset(&mr2, 0, sizeof(mr2)); bsnprintf(mr.VolumeName, sizeof(mr.VolumeName), "media-%lld", pid); bsnprintf(mr.MediaType, sizeof(mr.MediaType), "type-%lld", pid); /* from set_pool_dbr_defaults_in_media_dbr(&mr, &pr); */ mr.PoolId = pr.PoolId; bstrncpy(mr.VolStatus, NT_("Append"), sizeof(mr.VolStatus)); mr.Recycle = pr.Recycle; mr.VolRetention = pr.VolRetention; mr.VolUseDuration = pr.VolUseDuration; mr.ActionOnPurge = 
pr.ActionOnPurge; mr.RecyclePoolId = pr.RecyclePoolId; mr.MaxVolJobs = pr.MaxVolJobs; mr.MaxVolFiles = pr.MaxVolFiles; mr.MaxVolBytes = pr.MaxVolBytes; mr.LabelType = pr.LabelType; mr.Enabled = 1; mr.VolCapacityBytes = 1000; mr.Slot = 1; mr.VolBytes = 1000; mr.InChanger = 1; mr.VolReadTime = 10000; mr.VolWriteTime = 99999; mr.StorageId = 0; mr.DeviceId = 0; mr.LocationId = 0; mr.ScratchPoolId = 0; mr.RecyclePoolId = 0; ok(db_create_media_record(jcr, db, &mr), "Create Media"); nok(db_create_media_record(jcr, db, &mr), "Create Media twice"); /* ---------------------------------------------------------------- */ Pmsg0(0, PLINE "Doing ... tests" PLINE); db_close_database(jcr, db); report(); free_pool_memory(buf); free_pool_memory(buf2); free_pool_memory(buf3); return 0; } bacula-15.0.3/src/tools/testfind.c0000644000175000017500000004442114771010173016602 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Test program for find files * * Kern Sibbald, MM * */ #include "bacula.h" #include "dird/dird.h" #include "findlib/find.h" #include "ch.h" #if defined(HAVE_WIN32) #define isatty(fd) (fd==0) #endif /* Dummy functions */ int generate_job_event(JCR *jcr, const char *event) { return 1; } void generate_plugin_event(JCR *jcr, bEventType eventType, void *value) { } extern bool parse_dir_config(CONFIG *config, const char *configfile, int exit_code); /* Global variables */ static int num_files = 0; static int max_file_len = 0; static int max_path_len = 0; static int trunc_fname = 0; static int trunc_path = 0; static int attrs = 0; static CONFIG *config; static JCR *jcr; static int print_file(JCR *jcr, FF_PKT *ff, bool); static void count_files(FF_PKT *ff); static bool copy_fileset(FF_PKT *ff, JCR *jcr); static void set_options(findFOPTS *fo, const char *opts); static void usage() { fprintf(stderr, _( "\n" "Usage: testfind [-d debug_level] [-] [pattern1 ...]\n" " -a print extended attributes (Win32 debug)\n" " -d set debug level to \n" " -dt print timestamp in debug output\n" " -c specify config file containing FileSet resources\n" " -f specify which FileSet to use\n" " -? 
print this message.\n" "\n" "Patterns are used for file inclusion -- normally directories.\n" "Debug level >= 1 prints each file found.\n" "Debug level >= 10 prints path/file for catalog.\n" "Errors are always printed.\n" "Files/paths truncated is the number of files/paths with len > 255.\n" "Truncation is only in the catalog.\n" "\n")); exit(1); } int main (int argc, char *const *argv) { FF_PKT *ff; const char *configfile = "bacula-dir.conf"; const char *fileset_name = "Windows-Full-Set"; int ch, hard_links; OSDependentInit(); setlocale(LC_ALL, ""); bindtextdomain("bacula", LOCALEDIR); textdomain("bacula"); lmgr_init_thread(); while ((ch = getopt(argc, argv, "ac:d:f:?")) != -1) { switch (ch) { case 'a': /* print extended attributes *debug* */ attrs = 1; break; case 'c': /* set debug level */ configfile = optarg; break; case 'd': /* set debug level */ if (*optarg == 't') { dbg_timestamp = true; } else { debug_level = atoi(optarg); if (debug_level <= 0) { debug_level = 1; } } break; case 'f': /* exclude patterns */ fileset_name = optarg; break; case '?': default: usage(); } } argc -= optind; argv += optind; config = New(CONFIG()); parse_dir_config(config, configfile, M_ERROR_TERM); MSGS *msg; foreach_res(msg, R_MSGS) { init_msg(NULL, msg); } jcr = new_jcr(sizeof(JCR), NULL); jcr->fileset = (FILESET *)GetResWithName(R_FILESET, fileset_name); if (jcr->fileset == NULL) { fprintf(stderr, "%s: Fileset not found\n", fileset_name); FILESET *var; fprintf(stderr, "Valid FileSets:\n"); foreach_res(var, R_FILESET) { fprintf(stderr, " %s\n", var->hdr.name); } exit(1); } ff = init_find_files(); copy_fileset(ff, jcr); find_files(jcr, ff, print_file, NULL); free_jcr(jcr); if (config) { delete config; config = NULL; } term_last_jobs_list(); /* Clean up fileset */ findFILESET *fileset = ff->fileset; if (fileset) { int i, j, k; /* Delete FileSet Include lists */ for (i=0; iinclude_list.size(); i++) { findINCEXE *incexe = (findINCEXE *)fileset->include_list.get(i); for (j=0; jopts_list.size(); j++) { findFOPTS *fo = (findFOPTS *)incexe->opts_list.get(j); for (k=0; kregex.size(); k++) { regfree((regex_t *)fo->regex.get(k)); } fo->regex.destroy(); fo->regexdir.destroy(); fo->regexfile.destroy(); fo->wild.destroy(); fo->wilddir.destroy(); fo->wildfile.destroy(); fo->wildbase.destroy(); fo->fstype.destroy(); fo->drivetype.destroy(); } incexe->opts_list.destroy(); incexe->name_list.destroy(); } fileset->include_list.destroy(); /* Delete FileSet Exclude lists */ for (i=0; iexclude_list.size(); i++) { findINCEXE *incexe = (findINCEXE *)fileset->exclude_list.get(i); for (j=0; jopts_list.size(); j++) { findFOPTS *fo = (findFOPTS *)incexe->opts_list.get(j); fo->regex.destroy(); fo->regexdir.destroy(); fo->regexfile.destroy(); fo->wild.destroy(); fo->wilddir.destroy(); fo->wildfile.destroy(); fo->wildbase.destroy(); fo->fstype.destroy(); fo->drivetype.destroy(); } incexe->opts_list.destroy(); incexe->name_list.destroy(); } fileset->exclude_list.destroy(); free(fileset); } ff->fileset = NULL; hard_links = term_find_files(ff); printf(_("\n" "Total files : %d\n" "Max file length: %d\n" "Max path length: %d\n" "Files truncated: %d\n" "Paths truncated: %d\n" "Hard links : %d\n"), num_files, max_file_len, max_path_len, trunc_fname, trunc_path, hard_links); term_msg(); close_memory_pool(); lmgr_cleanup_main(); sm_dump(false); exit(0); } static int print_file(JCR *jcr, FF_PKT *ff, bool top_level) { switch (ff->type) { case FT_LNKSAVED: if (debug_level == 1) { printf("%s\n", ff->fname); } else if (debug_level > 1) { 
printf("Lnka: %s -> %s\n", ff->fname, ff->link); } break; case FT_REGE: if (debug_level == 1) { printf("%s\n", ff->fname); } else if (debug_level > 1) { printf("Empty: %s\n", ff->fname); } count_files(ff); break; case FT_REG: if (debug_level == 1) { printf("%s\n", ff->fname); } else if (debug_level > 1) { printf(_("Reg: %s\n"), ff->fname); } count_files(ff); break; case FT_LNK: if (debug_level == 1) { printf("%s\n", ff->fname); } else if (debug_level > 1) { printf("Lnk: %s -> %s\n", ff->fname, ff->link); } count_files(ff); break; case FT_DIRBEGIN: return 1; case FT_NORECURSE: case FT_NOFSCHG: case FT_INVALIDFS: case FT_INVALIDDT: case FT_DIREND: if (debug_level) { char errmsg[100] = ""; if (ff->type == FT_NORECURSE) { bstrncpy(errmsg, _("\t[will not descend: recursion turned off]"), sizeof(errmsg)); } else if (ff->type == FT_NOFSCHG) { bstrncpy(errmsg, _("\t[will not descend: file system change not allowed]"), sizeof(errmsg)); } else if (ff->type == FT_INVALIDFS) { bstrncpy(errmsg, _("\t[will not descend: disallowed file system]"), sizeof(errmsg)); } else if (ff->type == FT_INVALIDDT) { bstrncpy(errmsg, _("\t[will not descend: disallowed drive type]"), sizeof(errmsg)); } printf("%s%s%s\n", (debug_level > 1 ? "Dir: " : ""), ff->fname, errmsg); } ff->type = FT_DIREND; count_files(ff); break; case FT_SPEC: if (debug_level == 1) { printf("%s\n", ff->fname); } else if (debug_level > 1) { printf("Spec: %s\n", ff->fname); } count_files(ff); break; case FT_NOACCESS: printf(_("Err: Could not access %s: %s\n"), ff->fname, strerror(errno)); break; case FT_NOFOLLOW: printf(_("Err: Could not follow ff->link %s: %s\n"), ff->fname, strerror(errno)); break; case FT_NOSTAT: printf(_("Err: Could not stat %s: %s\n"), ff->fname, strerror(errno)); break; case FT_NOCHG: printf(_("Skip: File not saved. No change. %s\n"), ff->fname); break; case FT_ISARCH: printf(_("Err: Attempt to backup archive. Not saved. %s\n"), ff->fname); break; case FT_NOOPEN: printf(_("Err: Could not open directory %s: %s\n"), ff->fname, strerror(errno)); break; default: printf(_("Err: Unknown file ff->type %d: %s\n"), ff->type, ff->fname); break; } if (attrs) { char attr[200]; encode_attribsEx(NULL, attr, ff); if (*attr != 0) { printf("AttrEx=%s\n", attr); } // set_attribsEx(NULL, ff->fname, NULL, NULL, ff->type, attr); } return 1; } static void count_files(FF_PKT *ar) { int fnl, pnl; char *l, *p; char file[MAXSTRING]; char spath[MAXSTRING]; num_files++; /* Find path without the filename. * I.e. everything after the last / is a "filename". * OK, maybe it is a directory name, but we treat it like * a filename. If we don't find a / then the whole name * must be a path name (e.g. c:). */ for (p=l=ar->fname; *p; p++) { if (IsPathSeparator(*p)) { l = p; /* set pos of last slash */ } } if (IsPathSeparator(*l)) { /* did we find a slash? */ l++; /* yes, point to filename */ } else { /* no, whole thing must be path name */ l = p; } /* If filename doesn't exist (i.e. root directory), we * simply create a blank name consisting of a single * space. This makes handling zero length filenames * easier. 
*/ fnl = p - l; if (fnl > max_file_len) { max_file_len = fnl; } if (fnl > 255) { printf(_("===== Filename truncated to 255 chars: %s\n"), l); fnl = 255; trunc_fname++; } if (fnl > 0) { bstrncpy(file, l, fnl); /* copy filename */ file[fnl] = 0; } else { file[0] = ' '; /* blank filename */ file[1] = 0; } pnl = l - ar->fname; if (pnl > max_path_len) { max_path_len = pnl; } if (pnl > 255) { printf(_("========== Path name truncated to 255 chars: %s\n"), ar->fname); pnl = 255; trunc_path++; } bstrncpy(spath, ar->fname, pnl); spath[pnl] = 0; if (pnl == 0) { spath[0] = ' '; spath[1] = 0; printf(_("========== Path length is zero. File=%s\n"), ar->fname); } if (debug_level >= 10) { printf(_("Path: %s\n"), spath); printf(_("File: %s\n"), file); } } bool python_set_prog(JCR*, char const*) { return false; } static bool copy_fileset(FF_PKT *ff, JCR *jcr) { FILESET *jcr_fileset = jcr->fileset; int num; bool include = true; findFILESET *fileset; findFOPTS *current_opts; fileset = (findFILESET *)bmalloc(sizeof(findFILESET)); ff->fileset = fileset; fileset->state = state_none; fileset->include_list.init(1, true); fileset->exclude_list.init(1, true); for ( ;; ) { if (include) { num = jcr_fileset->num_includes; } else { num = jcr_fileset->num_excludes; } for (int i=0; iinclude_items[i]; /* New include */ fileset->incexe = (findINCEXE *)bmalloc(sizeof(findINCEXE)); fileset->incexe->opts_list.init(1, true); fileset->incexe->name_list.init(0, 0); fileset->include_list.append(fileset->incexe); } else { ie = jcr_fileset->exclude_items[i]; /* New exclude */ fileset->incexe = (findINCEXE *)bmalloc(sizeof(findINCEXE)); fileset->incexe->opts_list.init(1, true); fileset->incexe->name_list.init(0, 0); fileset->exclude_list.append(fileset->incexe); } for (j=0; jnum_opts; j++) { FOPTS *fo = ie->opts_list[j]; current_opts = (findFOPTS *)bmalloc(sizeof(findFOPTS)); fileset->incexe->current_opts = current_opts; fileset->incexe->opts_list.append(current_opts); current_opts->regex.init(1, true); current_opts->regexdir.init(1, true); current_opts->regexfile.init(1, true); current_opts->wild.init(1, true); current_opts->wilddir.init(1, true); current_opts->wildfile.init(1, true); current_opts->wildbase.init(1, true); current_opts->fstype.init(1, true); current_opts->drivetype.init(1, true); set_options(current_opts, fo->opts); for (k=0; kregex.size(); k++) { // fd->fsend("R %s\n", fo->regex.get(k)); current_opts->regex.append(bstrdup((const char *)fo->regex.get(k))); } for (k=0; kregexdir.size(); k++) { // fd->fsend("RD %s\n", fo->regexdir.get(k)); current_opts->regexdir.append(bstrdup((const char *)fo->regexdir.get(k))); } for (k=0; kregexfile.size(); k++) { // fd->fsend("RF %s\n", fo->regexfile.get(k)); current_opts->regexfile.append(bstrdup((const char *)fo->regexfile.get(k))); } for (k=0; kwild.size(); k++) { current_opts->wild.append(bstrdup((const char *)fo->wild.get(k))); } for (k=0; kwilddir.size(); k++) { current_opts->wilddir.append(bstrdup((const char *)fo->wilddir.get(k))); } for (k=0; kwildfile.size(); k++) { current_opts->wildfile.append(bstrdup((const char *)fo->wildfile.get(k))); } for (k=0; kwildbase.size(); k++) { current_opts->wildbase.append(bstrdup((const char *)fo->wildbase.get(k))); } for (k=0; kfstype.size(); k++) { current_opts->fstype.append(bstrdup((const char *)fo->fstype.get(k))); } for (k=0; kdrivetype.size(); k++) { current_opts->drivetype.append(bstrdup((const char *)fo->drivetype.get(k))); } } for (j=0; jname_list.size(); j++) { fileset->incexe->name_list.append(bstrdup((const char 
*)ie->name_list.get(j))); } } if (!include) { /* If we just did excludes */ break; /* all done */ } include = false; /* Now do excludes */ } return true; } static void set_options(findFOPTS *fo, const char *opts) { int j; const char *p; for (p=opts; *p; p++) { switch (*p) { case 'a': /* alway replace */ case '0': /* no option */ break; case 'e': fo->flags |= FO_EXCLUDE; break; case 'f': fo->flags |= FO_MULTIFS; break; case 'h': /* no recursion */ fo->flags |= FO_NO_RECURSION; break; case 'H': /* no hard link handling */ fo->flags |= FO_NO_HARDLINK; break; case 'i': fo->flags |= FO_IGNORECASE; break; case 'M': /* MD5 */ fo->flags |= FO_MD5; break; case 'n': fo->flags |= FO_NOREPLACE; break; case 'p': /* use portable data format */ fo->flags |= FO_PORTABLE; break; case 'R': /* Resource forks and Finder Info */ fo->flags |= FO_HFSPLUS; case 'r': /* read fifo */ fo->flags |= FO_READFIFO; break; case 'S': switch(*(p + 1)) { case ' ': /* Old director did not specify SHA variant */ fo->flags |= FO_SHA1; break; case '1': fo->flags |= FO_SHA1; p++; break; #ifdef HAVE_SHA2 case '2': fo->flags |= FO_SHA256; p++; break; case '3': fo->flags |= FO_SHA512; p++; break; #endif default: /* Automatically downgrade to SHA-1 if an unsupported * SHA variant is specified */ fo->flags |= FO_SHA1; p++; break; } break; case 's': fo->flags |= FO_SPARSE; break; case 'm': fo->flags |= FO_MTIMEONLY; break; case 'k': fo->flags |= FO_KEEPATIME; break; case 'A': fo->flags |= FO_ACL; break; case 'V': /* verify options */ /* Copy Verify Options */ for (j=0; *p && *p != ':'; p++) { fo->VerifyOpts[j] = *p; if (j < (int)sizeof(fo->VerifyOpts) - 1) { j++; } } fo->VerifyOpts[j] = 0; break; case 'w': fo->flags |= FO_IF_NEWER; break; case 'W': fo->flags |= FO_ENHANCEDWILD; break; case 'Z': /* compression */ p++; /* skip Z */ if (*p >= '0' && *p <= '9') { fo->flags |= FO_COMPRESS; fo->Compress_algo = COMPRESS_GZIP; fo->Compress_level = *p - '0'; } else if (*p == 'o') { fo->flags |= FO_COMPRESS; fo->Compress_algo = COMPRESS_LZO1X; fo->Compress_level = 1; /* not used with LZO */ } Dmsg2(200, "Compression alg=%d level=%d\n", fo->Compress_algo, fo->Compress_level); break; case 'X': fo->flags |= FO_XATTR; break; default: Emsg1(M_ERROR, 0, _("Unknown include/exclude option: %c\n"), *p); break; } } } bacula-15.0.3/src/tools/bwild.c0000644000175000017500000000572614771010173016070 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Test program for testing wild card expressions * * Kern Sibbald, MMVI * */ #include "bacula.h" #include "lib/fnmatch.h" static void usage() { fprintf(stderr, "\n" "Usage: bwild [-d debug_level] -f \n" " -f specify file of data to be matched\n" " -i use case insenitive match\n" " -l suppress line numbers\n" " -n print lines that do not match\n" " -? 
print this message.\n" "\n\n"); exit(1); } int main(int argc, char *const *argv) { char *fname = NULL; int rc, ch; char data[1000]; char pat[500]; FILE *fd; bool match_only = true; int lineno; bool no_linenos = false; int ic = 0; setlocale(LC_ALL, ""); bindtextdomain("bacula", LOCALEDIR); textdomain("bacula"); while ((ch = getopt(argc, argv, "d:f:iln?")) != -1) { switch (ch) { case 'd': /* set debug level */ debug_level = atoi(optarg); if (debug_level <= 0) { debug_level = 1; } break; case 'f': /* data */ fname = optarg; break; case 'i': /* ignore case */ ic = FNM_CASEFOLD; break; case 'l': no_linenos = true; break; case 'n': match_only = false; break; case '?': default: usage(); } } argc -= optind; argv += optind; if (!fname) { printf("A data file must be specified.\n"); usage(); } OSDependentInit(); for ( ;; ) { printf("Enter a wild-card: "); if (fgets(pat, sizeof(pat)-1, stdin) == NULL) { break; } strip_trailing_newline(pat); if (pat[0] == 0) { exit(0); } fd = bfopen(fname, "r"); if (!fd) { printf(_("Could not open data file: %s\n"), fname); exit(1); } lineno = 0; while (fgets(data, sizeof(data)-1, fd)) { strip_trailing_newline(data); lineno++; rc = fnmatch(pat, data, ic); if ((match_only && rc == 0) || (!match_only && rc != 0)) { if (no_linenos) { printf("%s\n", data); } else { printf("%5d: %s\n", lineno, data); } } } fclose(fd); } exit(0); } bacula-15.0.3/src/tools/bbatch.c0000644000175000017500000002317014771010173016203 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * * Program to test batch mode * * Eric Bollengier, March 2007 * */ /* to create datafile for i in $(seq 10000 99999) ; do j=$((($i % 1000) + 555)) echo "$i;/tmp/totabofds$j/fiddddle${j}$i;xxxLSTATxxxx;xxxxxxxMD5xxxxxx" done > dat1 or j=0 find / | while read a; do j=$(($j+1)) echo "$j;$a;xxxLSTATxxxx;xxxxxxxMD5xxxxxx" done > dat1 */ #include "bacula.h" #include "stored/stored.h" #include "findlib/find.h" #include "cats/cats.h" /* Forward referenced functions */ static void *do_batch(void *); /* Local variables */ static BDB *db; static const char *db_name = "bacula"; static const char *db_user = "bacula"; static const char *db_password = ""; static const char *db_host = NULL; static const char *db_ssl_mode = NULL; static const char *db_ssl_key = NULL; static const char *db_ssl_cert = NULL; static const char *db_ssl_ca = NULL; static const char *db_ssl_capath = NULL; static const char *db_ssl_cipher = NULL; static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; static void usage() { fprintf(stderr, _( PROG_COPYRIGHT "\n%sVersion: %s (%s)\n" "Example : bbatch -w /path/to/workdir -h localhost -f dat1 -f dat -f datx\n" " will start 3 thread and load dat1, dat and datx in your catalog\n" "See bbatch.c to generate datafile\n\n" "Usage: bbatch [ options ] -w working/dir -f datafile\n" " -b with batch mode\n" " -B without batch mode\n" " -d set debug level to \n" " -dt print timestamp in debug output\n" " -n specify the database name (default bacula)\n" " -u specify database user name (default bacula)\n" " -P specify database host (default NULL)\n" " -k path name to the key file (default NULL)\n" " -e path name to the certificate file (default NULL)\n" " -a path name to the CA certificate file (default NULL)\n" " -w specify working directory\n" " -r call restore code with given jobids\n" " -v verbose\n" " -f specify data file\n" " -? 
print this message\n\n"), 2001, "", VERSION, BDATE); exit(1); } /* number of thread started */ int nb=0; static int list_handler(void *ctx, int num_fields, char **row) { uint64_t *a = (uint64_t*) ctx; (*a)++; return 0; } int main (int argc, char *argv[]) { int ch; bool use_batch_insert = true; char *restore_list=NULL; setlocale(LC_ALL, ""); bindtextdomain("bacula", LOCALEDIR); textdomain("bacula"); init_stack_dump(); lmgr_init_thread(); char **files = (char **) malloc (10 * sizeof(char *)); int i; my_name_is(argc, argv, "bbatch"); init_msg(NULL, NULL); OSDependentInit(); while ((ch = getopt(argc, argv, "bBh:o:k:e:a:c:d:n:P:Su:vf:w:r:?")) != -1) { switch (ch) { case 'r': restore_list=bstrdup(optarg); break; case 'B': use_batch_insert = true; break; case 'b': use_batch_insert = false; break; case 'd': /* debug level */ if (*optarg == 't') { dbg_timestamp = true; } else { debug_level = atoi(optarg); if (debug_level <= 0) { debug_level = 1; } } break; case 'h': db_host = optarg; break; case 'o': db_ssl_mode = optarg; break; case 'k': db_ssl_key = optarg; break; case 'e': db_ssl_cert = optarg; break; case 'a': db_ssl_ca = optarg; break; case 'n': db_name = optarg; break; case 'w': working_directory = optarg; break; case 'u': db_user = optarg; break; case 'P': db_password = optarg; break; case 'v': verbose++; break; case 'f': if (nb < 10 ) { files[nb++] = optarg; } break; case '?': default: usage(); } } argc -= optind; argv += optind; if (argc != 0) { Pmsg0(0, _("Wrong number of arguments: \n")); usage(); } if (restore_list) { uint64_t nb_file=0; btime_t start, end; /* To use the -r option, the catalog should already contains records */ if ((db = db_init_database(NULL, NULL, db_name, db_user, db_password, db_host, 0, NULL, db_ssl_mode, db_ssl_key, db_ssl_cert, db_ssl_ca, db_ssl_capath, db_ssl_cipher, false, !use_batch_insert)) == NULL) { Emsg0(M_ERROR_TERM, 0, _("Could not init Bacula database\n")); } if (!db_open_database(NULL, db)) { Emsg0(M_ERROR_TERM, 0, db_strerror(db)); } start = get_current_btime(); db_get_file_list(NULL, db, restore_list, DBL_NONE, list_handler, &nb_file); end = get_current_btime(); Pmsg3(0, _("Computing file list for jobid=%s files=%lld secs=%d\n"), restore_list, nb_file, (uint32_t)btime_to_unix(end-start)); free(restore_list); return 0; } if (use_batch_insert) { printf("With Batch Insert mode\n"); } else { printf("Without Batch Insert mode\n"); } i = nb; while (--i >= 0) { pthread_t thid; JCR *bjcr = new_jcr(sizeof(JCR), NULL); bjcr->bsr = NULL; bjcr->VolSessionId = 1; bjcr->VolSessionTime = (uint32_t)time(NULL); bjcr->NumReadVolumes = 0; bjcr->NumWriteVolumes = 0; bjcr->JobId = getpid(); bjcr->setJobType(JT_CONSOLE); bjcr->setJobLevel(L_FULL); bjcr->JobStatus = JS_Running; bjcr->where = bstrdup(files[i]); bjcr->job_name = get_pool_memory(PM_FNAME); pm_strcpy(bjcr->job_name, "Dummy.Job.Name"); bjcr->client_name = get_pool_memory(PM_FNAME); pm_strcpy(bjcr->client_name, "Dummy.Client.Name"); bstrncpy(bjcr->Job, "bbatch", sizeof(bjcr->Job)); bjcr->fileset_name = get_pool_memory(PM_FNAME); pm_strcpy(bjcr->fileset_name, "Dummy.fileset.name"); bjcr->fileset_md5 = get_pool_memory(PM_FNAME); pm_strcpy(bjcr->fileset_md5, "Dummy.fileset.md5"); if ((db = db_init_database(NULL, NULL, db_name, db_user, db_password, db_host, 0, NULL, db_ssl_mode, db_ssl_key, db_ssl_cert, db_ssl_ca, db_ssl_capath, db_ssl_cipher, false, false)) == NULL) { Emsg0(M_ERROR_TERM, 0, _("Could not init Bacula database\n")); } if (!db_open_database(NULL, db)) { Emsg0(M_ERROR_TERM, 0, db_strerror(db)); } 
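   /*
    * Clarifying note (added, with one worked example): bjcr->where set above is
    * the path of one -f datafile.  As described in the header comment, every
    * line of that file is a semicolon-separated record
    *
    *    FileIndex;filename;lstat;digest
    *    e.g. 10001;/tmp/totabofds556/fiddddle55610001;xxxLSTATxxxx;xxxxxxxMD5xxxxxx
    *
    * which fill_attr() below walks, splitting on ';' and pointing the ATTR_DBR
    * fields at the pieces.  The hand-filled equivalent of that example record
    * is sketched here inside #if 0, so bbatch itself is unchanged.
    */
#if 0
   ATTR_DBR example;
   memset(&example, 0, sizeof(example));
   example.Stream    = STREAM_UNIX_ATTRIBUTES;     /* fill_attr() sets this for every record */
   example.JobId     = getpid();                   /* fill_attr() uses the bbatch pid        */
   example.FileIndex = 10001;
   example.fname     = (char *)"/tmp/totabofds556/fiddddle55610001";
   example.attr      = (char *)"xxxLSTATxxxx";
   example.Digest    = (char *)"xxxxxxxMD5xxxxxx";
#endif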
Dmsg0(200, "Database opened\n"); if (verbose) { Pmsg2(000, _("Using Database: %s, User: %s\n"), db_name, db_user); } bjcr->db = db; pthread_create(&thid, NULL, do_batch, bjcr); } while (nb > 0) { bmicrosleep(1,0); } return 0; } static void fill_attr(ATTR_DBR *ar, char *data) { char *p; char *b; int index=0; ar->Stream = STREAM_UNIX_ATTRIBUTES; ar->JobId = getpid(); for(p = b = data; *p; p++) { if (*p == ';') { *p = '\0'; switch (index) { case 0: ar->FileIndex = str_to_int64(b); break; case 1: ar->fname = b; break; case 2: ar->attr = b; break; case 3: ar->Digest = b; break; } index++; b = ++p; } } } static void *do_batch(void *jcr) { JCR *bjcr = (JCR *)jcr; char data[1024]; int lineno = 0; struct ATTR_DBR ar; memset(&ar, 0, sizeof(ar)); btime_t begin = get_current_btime(); char *datafile = bjcr->where; FILE *fd = fopen(datafile, "r"); if (!fd) { Emsg1(M_ERROR_TERM, 0, _("Error opening datafile %s\n"), datafile); } while (fgets(data, sizeof(data)-1, fd)) { strip_trailing_newline(data); lineno++; if (verbose && ((lineno % 5000) == 1)) { printf("\r%i", lineno); } fill_attr(&ar, data); if (!db_create_attributes_record(bjcr, bjcr->db, &ar)) { Emsg0(M_ERROR_TERM, 0, _("Error while inserting file\n")); } } fclose(fd); db_write_batch_file_records(bjcr); btime_t end = get_current_btime(); P(mutex); char ed1[200], ed2[200]; printf("\rbegin = %s, end = %s\n", edit_int64(begin, ed1),edit_int64(end, ed2)); printf("Insert time = %sms\n", edit_int64((end - begin) / 10000, ed1)); printf("Create %u files at %.2f/s\n", lineno, (lineno / ((float)((end - begin) / 1000000)))); nb--; V(mutex); pthread_exit(NULL); return NULL; } bacula-15.0.3/src/tools/windows-env-test.c0000644000175000017500000000670214771010173020217 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #include "bacula.h" #include "lib/unittests.h" char *run_cmd(char *command, POOLMEM **out, char *env[]) { **out=0; if (run_program_full_output(command, 0, *out, env) != 0) { berrno be; Dmsg1(0, "run_program_full_output done. %s", *out); Jmsg(NULL, M_ERROR, 0, _("Can't run command %s. 
ERR=%s\n"), command, be.bstrerror(errno)); if (**out) { Dmsg1(0, "%s", *out); } return NULL; } strip_trailing_junk(*out); return *out; } void *test1(char *env[]) { const char *ret; POOLMEM *q = get_pool_memory(PM_FNAME); run_cmd((char *)"set HY_ENV_VM_NAME", &q, env); Dmsg1(0, "test1 returns %s\n", q); if (strcmp(q, "HY_ENV_VM_NAME=NULL") == 0) { ret = "ok"; } else { ret = "not ok"; } free_pool_memory(q); return (void *)ret; } void *test2(char *env[]) { const char *ret; POOLMEM *q = get_pool_memory(PM_FNAME); run_cmd((char *)"set HY_ENV_JOB_NAME", &q, env); Dmsg1(0, "test2 returns %s\n", q); if (strcmp(q, "HY_ENV_JOB_NAME=hyperv-backup.2022-02-18_13.42.54_15") == 0) { ret = "ok"; } else { ret = "not ok"; } free_pool_memory(q); return (void *)ret; } void *test3(char *env[]) { const char *ret; POOLMEM *q = get_pool_memory(PM_FNAME); run_cmd((char *)"set HY_ENV_REF_JOB_NAME", &q, env); Dmsg1(0, "test3 returns %s\n", q); if (strcmp(q, "HY_ENV_REF_JOB_NAME=hyperv-backup.2022-02-17_17.58.09_21") == 0) { ret = "ok"; } else { ret = "not ok"; } free_pool_memory(q); return (void *)ret; } int main(int argc, char **argv) { void *ret; Unittests t("windows_env_test", true, true); t.configure(TEST_QUIET); init_stack_dump(); my_name_is(argc, argv, "windows_env_test"); init_msg(NULL, NULL); daemon_start_time = time(NULL); setup_daemon_message_queue(); start_watchdog(); debug_level = 500; char *_env[4]; POOLMEM *env_item1 = get_pool_memory(PM_FNAME); Mmsg(env_item1, "HY_ENV_VM_NAME=NULL"); _env[0] = env_item1; POOLMEM *env_item2 = get_pool_memory(PM_FNAME); Mmsg(env_item2, "HY_ENV_JOB_NAME=hyperv-backup.2022-02-18_13.42.54_15"); _env[1] = env_item2; POOLMEM *env_item3 = get_pool_memory(PM_FNAME); Mmsg(env_item3, "HY_ENV_REF_JOB_NAME=hyperv-backup.2022-02-17_17.58.09_21"); _env[2] = env_item3; _env[3] = NULL; ret = test1(_env); Dmsg1(0, "test1. ret = %s\n", (char *)ret); ok(strcmp((char *)ret, "ok") == 0, "Checking test1"); ret = test2(_env); ok(strcmp((char *)ret, "ok") == 0, "Checking test2"); ret = test3(_env); ok(strcmp((char *)ret, "ok") == 0, "Checking test3"); free_pool_memory(env_item3); free_pool_memory(env_item2); free_pool_memory(env_item1); free_daemon_message_queue(); stop_watchdog(); term_msg(); close_memory_pool(); return report(); }bacula-15.0.3/src/tools/bpipe_test.c0000644000175000017500000000251414771010173017115 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ #include "bacula.h" #include "../lib/unittests.h" int main(int argc, char **argv) { Unittests u("t"); char buf[512] = {0}; BPIPE *bp; FILE *fp; fp = fopen("toto.sh", "w"); fprintf(fp, "#!/bin/sh\necho hello\nread a\necho $a\n"); fclose(fp); chmod("toto.sh", 0755); bp = open_bpipe("./toto.sh", 0, "rw"); ok(bp != NULL, "open_bpipe"); fgets(buf, sizeof(buf), bp->rfd); ok(strcmp(buf, "hello\n") == 0, "fgets on bpipe"); printf("%s", buf); fprintf(bp->wfd, "titi\n"); fflush(bp->wfd); fgets(buf, sizeof(buf), bp->rfd); ok(strcmp(buf, "titi\n") == 0, "fgets on bpipe"); printf("%s", buf); ok(close_bpipe(bp) == 0, "closing bpipe"); return report(); } bacula-15.0.3/src/tools/debug-tags.c0000644000175000017500000000275614771010173017011 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* Written by Eric Bollengier */ #include "bacula.h" #include "lib/unittests.h" void test_list(int64_t tags, int nb, const char *format) { alist tlist(10, not_owned_by_alist); debug_get_tags_list(&tlist, tags); is(tlist.size(), nb, "Parse simple tags back"); POOL_MEM tmp; debug_get_tags(tmp.handle(), tags); is(tmp.c_str(), format, "Check tags format"); } int main(int argc, char **argv) { int64_t tags=0; Unittests alist_test("debug-tags"); log("Test debug tags feature"); ok(debug_parse_tags("network,snapshot", &tags), "Parse simple tags"); is(tags, DT_NETWORK|DT_SNAPSHOT, "Check simple parsing"); test_list(tags, 2, "network,snapshot"); ok(debug_parse_tags("network,!snapshot", &tags), "Parse tag deletion"); is(tags, DT_NETWORK, "Check tag deletion"); test_list(tags, 1, "network"); return report(); } bacula-15.0.3/src/tools/Makefile.in0000644000175000017500000003061214771010173016660 0ustar bsbuildbsbuild# # Copyright (C) 2000-2023 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # # Bacula Tools Makefile # @MCOMMON@ TOKYOCABINET_INC = @TOKYOCABINET_INC@ TOKYOCABINET_LIBS = @TOKYOCABINET_LIBS@ srcdir = . VPATH = . .PATH: . # one up basedir = .. # top dir topdir = ../.. 
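# Added note: the @...@ tokens in this Makefile (e.g. @MCOMMON@, @TOKYOCABINET_INC@,
# @OBJLIST@, @DIR_TOOLS@) are placeholders that ./config.status fills in when the
# Makefile is regenerated from Makefile.in -- see the "Makefile:" rule further down.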
# this dir relative to top dir thisdir = src/tools DEBUG=@DEBUG@ ZLIBS=@ZLIBS@ DB_LIBS=@DB_LIBS@ first_rule: all dummy: # GETTEXT_LIBS = @LIBINTL@ FINDOBJS = testfind.o ../dird/dird_conf.o ../dird/inc_conf.o ../dird/run_conf.o ../dird/ua_acl.o # these are the objects that are changed by the .configure process EXTRAOBJS = @OBJLIST@ DIRCONFOBJS = ../dird/dird_conf.o ../dird/run_conf.o ../dird/inc_conf.o ../dird/ua_acl.o NODIRTOOLS = bsmtp bjoblist DIRTOOLS = bsmtp dbcheck drivetype fstype testfind testls bregex bwild bbatch bregtest bvfs_test bjoblist TOOLS = $(@DIR_TOOLS@) INSNODIRTOOLS = bsmtp INSDIRTOOLS = bsmtp dbcheck bwild bregex INSTOOLS = $(INS@DIR_TOOLS@) @TOOLS_INSTALL@ INSTTESTS = breaddir_test bpipe_test tags_test xattr_append_test crypto_test .SUFFIXES: .c .o .PHONY: .DONTCARE: # inference rules .c.o: @echo "Compiling $<" $(NO_ECHO)$(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) $< #------------------------------------------------------------------------- all: Makefile $(TOOLS) gigaslam grow @echo "==== Make of tools is good ====" @echo " " ../lib/unittests.o: ../lib/unittests.c (cd ../lib ; make unittests.o) test-cpp: test-cpp.o ../lib/unittests.o $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L../lib -o $@ test-cpp.o ../lib/unittests.o -lbac $(DLIB) $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) debug-tags_test: debug-tags.o ../lib/unittests.o $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L../lib -o $@ debug-tags.o ../lib/unittests.o -lbac $(DLIB) $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) $@ $(DESTDIR)$(sbindir)/ fs-io-error: fs-io-error.c $(CXX) -Wall fs-io-error.c -D_FILE_OFFSET_BITS=64 -lfuse -o fs-io-error breaddir_test: breaddir.o ../lib/unittests.o $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L../lib -o $@ breaddir.o ../lib/unittests.o -lbac $(DLIB) $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) $@ $(DESTDIR)$(sbindir)/ joblist_test: joblist.o make -C ../lib/ alist_test $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L../lib -L../findlib -o $@ joblist.o ../lib/unittests.o -lbac -lbacfind $(DLIB) $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) $@ $(DESTDIR)$(sbindir)/ bjoblist: bjoblist.o $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L../lib -L../findlib -o $@ bjoblist.o -lbac -lbacfind $(DLIB) $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) bsock_meeting_test: bsock_meeting_test.o ../lib/unittests.o $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L../lib -o $@ bsock_meeting_test.o ../lib/unittests.o -lbac $(DLIB) $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) xattr_append_test: xattr_append_test.o ../lib/unittests.o $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L../lib -o $@ xattr_append_test.o ../lib/unittests.o -lbac $(DLIB) $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) bpipe_test: bpipe_test.o ../lib/unittests.o $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L../lib -o $@ bpipe_test.o ../lib/unittests.o -lbac $(DLIB) $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) $@ $(DESTDIR)$(sbindir)/ crypto_test: crypto_test.o ../lib/unittests.o $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L../lib -o $@ crypto_test.o ../lib/unittests.o -lbac $(DLIB) $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) bsparse: bsparse.c $(CXX) -Wall -o $@ bsparse.c bsmtp: Makefile bsmtp.o ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L../lib -o $@ bsmtp.o -lbac -lm $(DLIB) $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) bsnapshot: Makefile bsnapshot.o ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L../lib -o $@ bsnapshot.o 
../lib/ini$(DEFAULT_OBJECT_TYPE) -lbac -lm $(DLIB) $(LIBS) $(GETTEXT_LIBS) bregtest: Makefile bregtest.o ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L../lib -o $@ bregtest.o -lbac -lm $(DLIB) $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) hdtest: Makefile hdtest.o ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L../lib -o $@ hdtest.o -lbac -lm $(DLIB) $(LIBS) thread: Makefile thread.o $(CXX) $(LDFLAGS) -o $@ thread.o -lpthread dbcheck: Makefile dbcheck.o ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) ../lib/libbaccfg$(DEFAULT_ARCHIVE_TYPE) \ ../cats/libbacsql$(DEFAULT_ARCHIVE_TYPE) ../cats/libbaccats$(DEFAULT_ARCHIVE_TYPE) $(DIRCONFOBJS) $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L../lib -L../cats -L../findlib -o $@ dbcheck.o $(DIRCONFOBJS) \ -lbaccats -lbacsql -lbaccfg -lbac -lbacfind -lm $(ZLIBS) $(DB_LIBS) $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) fstype: Makefile fstype.o ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) ../findlib/libbacfind$(DEFAULT_ARCHIVE_TYPE) $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L../lib -L../findlib -o $@ fstype.o -lbacfind -lbac -lm \ $(DLIB) $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) bnet_test: Makefile bnet_test.o ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) ../findlib/libbacfind$(DEFAULT_ARCHIVE_TYPE) $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L../lib -L../findlib -o $@ bnet_test.o -lbacfind -lbac -lm \ $(DLIB) $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) block_crypto_test: Makefile block_crypto_test.o ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) ../lib/unittests.o $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L../lib -o $@ block_crypto_test.o ../lib/unittests.o -lbac -lm \ $(DLIB) $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) $@ $(DESTDIR)$(sbindir)/ tags_test: Makefile test_tags.o ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) ../lib/unittests.o $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L../lib -o $@ test_tags.o ../lib/unittests.o -lbac \ $(DLIB) $(LIBS) $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) $@ $(DESTDIR)$(sbindir)/ drivetype: Makefile drivetype.o ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) ../findlib/libbacfind$(DEFAULT_ARCHIVE_TYPE) $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L../lib -L../findlib -o $@ drivetype.o -lbacfind -lbac -lm \ $(DLIB) $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) dird_conf.o: ../dird/dird_conf.c $(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) $< run_conf.o: ../dird/run_conf.c $(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) $< inc_conf.o: ../dird/inc_conf.c $(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) $< timelimit: timelimit.o ${CC} ${DEFS} ${DEBUG} -pipe -DHAVE_ERRNO_H -DHAVE_SETITIMER -DHAVE_SIGACTION -c timelimit.c ${CC} -o timelimit timelimit.o testfind: Makefile ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) ../lib/libbaccfg$(DEFAULT_ARCHIVE_TYPE) \ ../findlib/libbacfind$(DEFAULT_ARCHIVE_TYPE) $(FINDOBJS) $(LIBTOOL_LINK) $(CXX) -g $(LDFLAGS) -o $@ $(FINDOBJS) -L. -L../lib -L../findlib \ $(DLIB) -lbacfind -lbaccfg -lbac -lm $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) testls: Makefile ../findlib/libbacfind$(DEFAULT_ARCHIVE_TYPE) ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) testls.o $(LIBTOOL_LINK) $(CXX) -g $(LDFLAGS) -L. -L../lib -L../findlib -o $@ testls.o \ $(DLIB) -lbacfind -lbac -lm $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) bregex: Makefile ../findlib/libbacfind$(DEFAULT_ARCHIVE_TYPE) ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) bregex.o $(LIBTOOL_LINK) $(CXX) -g $(LDFLAGS) -L. 
-L../lib -o $@ bregex.o \ $(DLIB) -lbac -lm $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) bwild: Makefile ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) bwild.o $(LIBTOOL_LINK) $(CXX) -g $(LDFLAGS) -L. -L../lib -o $@ bwild.o \ $(DLIB) -lbac -lm $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) bbatch: Makefile ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) ../cats/libbacsql$(DEFAULT_ARCHIVE_TYPE) \ ../cats/libbaccats$(DEFAULT_ARCHIVE_TYPE) bbatch.o $(LIBTOOL_LINK) $(CXX) -g $(LDFLAGS) -L../cats -L. -L../lib -L../findlib -o $@ bbatch.o \ -lbaccats -lbacsql -lbac -lbacfind -lm $(ZLIBS) $(DB_LIBS) $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) bvfs_test: Makefile ../findlib/libbacfind$(DEFAULT_ARCHIVE_TYPE) ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) \ ../cats/libbacsql$(DEFAULT_ARCHIVE_TYPE) ../cats/libbaccats$(DEFAULT_ARCHIVE_TYPE) bvfs_test.o $(LIBTOOL_LINK) $(CXX) -g $(LDFLAGS) -L../cats -L. -L../lib -L../findlib -o $@ bvfs_test.o \ -lbaccats -lbacsql -lbacfind -lbac -lm $(ZLIBS) $(DB_LIBS) $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) # Turn on cats_test when it builds correctly #cats_test.o: cats_test.c # echo "Compiling $<" # $(NO_ECHO)$(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) $< #cats_test: Makefile ../findlib/libbacfind$(DEFAULT_ARCHIVE_TYPE) ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) ../cats/libbacsql$(DEFAULT_ARCHIVE_TYPE) cats_test.o # $(LIBTOOL_LINK) $(CXX) -g $(LDFLAGS) -L../cats -L. -L../lib -L../findlib -o $@ cats_test.o \ # -lbaccats -lbacsql -lbacfind -lbac -lm $(DB_LIBS) $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) gigaslam.o: gigaslam.c $(CXX) $(CFLAGS) -c $(CPPFLAGS) $< gigaslam: gigaslam.o $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -o $@ gigaslam.o grow: Makefile grow.o ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L../lib -o $@ grow.o -lbac -lm $(DLIB) $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) bpluginfo.o: bpluginfo.c $(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) -I../filed -I../dird -I../stored $(DINCLUDE) $(CFLAGS) $< bpluginfo: Makefile bpluginfo.o $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L../lib -o $@ bpluginfo.o -lbac $(DLIB) $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) TESTDEDUPOBJS = ../stored/bitarray.o ../stored/dedupengine.o test-dedup.o test-dedup.o: test-dedup.c $(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) $(TOKYOCABINET_INC) -I$(srcdir) -I$(basedir) -I../stored -I../lib $(DINCLUDE) $(CFLAGS) $< test-dedup: Makefile $(TESTDEDUPOBJS) ../lib/libbaccfg$(DEFAULT_ARCHIVE_TYPE) ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -static -L../lib -o $@ $(TESTDEDUPOBJS) $(ZLIBS) \ -lbaccfg -lbac -lm $(TOKYOCABINET_LIBS) $(DLIB) $(LIBS) $(WRAPLIBS) \ $(GETTEXT_LIBS) $(OPENSSL_LIBS) $(CAP_LIBS) Makefile: $(srcdir)/Makefile.in $(topdir)/config.status cd $(topdir) \ && CONFIG_FILES=$(thisdir)/$@ CONFIG_HEADERS= $(SHELL) ./config.status libtool-clean: @$(RMF) -r .libs _libs clean: libtool-clean @$(RMF) core core.* a.out *.o *.bak *~ *.intpro *.extpro 1 2 3 fs-io-error @$(RMF) $(DIRTOOLS) realclean: clean @$(RMF) tags distclean: realclean if test $(srcdir) = .; then $(MAKE) realclean; fi (cd $(srcdir); $(RMF) Makefile) devclean: realclean if test $(srcdir) = .; then $(MAKE) realclean; fi (cd $(srcdir); $(RMF) Makefile) install-debug-tags_test: debug-tags_test $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) debug-tags_test $(DESTDIR)$(sbindir)/ install-fs-io-error: fs-io-error $(INSTALL_PROGRAM) fs-io-error $(DESTDIR)$(sbindir)/fs-io-error install-bjoblist: bjoblist $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) bjoblist $(DESTDIR)$(sbindir)/ install-bsnapshot: bsnapshot 
$(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) bsnapshot $(DESTDIR)$(sbindir)/bsnapshot installall: $(TOOLS) timelimit @for tool in ${TOOLS} timelimit ; do \ $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) $$tool $(DESTDIR)$(sbindir)/$$tool ; \ done # chattr +i $(DESTDIR)$(sbindir)/bsmtp # chmod 755 $(DESTDIR)$(sbindir)/bsmtp install-cdp: $(MAKE) -C cdp-client install # Allow non-root execution of bsmtp for non-root Directors install: $(INSTOOLS) @for tool in ${INSTOOLS} ; do \ $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM_ALL) $$tool $(DESTDIR)$(sbindir)/$$tool ; \ done # chattr +i $(DESTDIR)$(sbindir)/bsmtp install-unittests: $(INSTTESTS) @for tool in ${INSTTESTS} ; do \ $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) $$tool $(DESTDIR)$(sbindir)/$$tool ; \ done uninstall: @for tool in ${INSTOOLS} ${INSTTESTS} ; do \ $(RMF) $(DESTDIR)$(sbindir)/$$tool ; \ done # Semi-automatic generation of dependencies: # Use gcc -MM because X11 `makedepend' doesn't work on all systems # and it also includes system headers. # `semi'-automatic since dependencies are generated at distribution time. depend: @$(MV) Makefile Makefile.bak @$(SED) "/^# DO NOT DELETE:/,$$ d" Makefile.bak > Makefile @$(ECHO) "# DO NOT DELETE: nice dependency list follows" >> Makefile @$(CXX) -S -M $(CPPFLAGS) -I$(srcdir) -I$(basedir) -I../stored *.c >> Makefile @if test -f Makefile ; then \ $(RMF) Makefile.bak; \ else \ $(MV) Makefile.bak Makefile; \ echo " ===== Something went wrong in make depend ====="; \ fi # ----------------------------------------------------------------------- # DO NOT DELETE: nice dependency list follows bacula-15.0.3/src/tools/dbcheck.c0000755000175000017500000013466314771010173016360 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * * Program to check a Bacula database for consistency and to * make repairs * * Kern E. 
Sibbald, August 2002 * */ #include "bacula.h" #include "cats/cats.h" #include "lib/runscript.h" #include "dird/dird_conf.h" extern bool parse_dir_config(CONFIG *config, const char *configfile, int exit_code); typedef struct s_id_ctx { int64_t *Id; /* ids to be modified */ int num_ids; /* ids stored */ int max_ids; /* size of array */ int num_del; /* number deleted */ int tot_ids; /* total to process */ } ID_LIST; typedef struct s_name_ctx { char **name; /* list of names */ int num_ids; /* ids stored */ int max_ids; /* size of array */ int num_del; /* number deleted */ int tot_ids; /* total to process */ } NAME_LIST; /* Global variables */ static bool fix = false; static bool batch = false; static bool timestamps = false; static BDB *db; static ID_LIST id_list; static NAME_LIST name_list; static char buf[20000]; static bool quit = false; static CONFIG *config; static const char *idx_tmp_name; static uint64_t nb_changes = 300000; #define MAX_ID_LIST_LEN 10000000 /* Forward referenced functions */ static int make_id_list(const char *query, ID_LIST *id_list); static int delete_id_list(const char *query, ID_LIST *id_list); static int make_name_list(const char *query, NAME_LIST *name_list); static void print_name_list(NAME_LIST *name_list); static void free_name_list(NAME_LIST *name_list); static char *get_cmd(const char *prompt); static void eliminate_duplicate_paths(); static void eliminate_orphaned_jobmedia_records(); static void eliminate_orphaned_file_records(); static void eliminate_orphaned_path_records(); static void eliminate_orphaned_fileset_records(); static void eliminate_orphaned_client_records(); static void eliminate_orphaned_job_records(); static void eliminate_admin_records(); static void eliminate_restore_records(); static void eliminate_verify_records(); static void eliminate_orphaned_object_records(); static void eliminate_orphaned_meta_records(); static void eliminate_orphaned_fileevents_records(); static void repair_bad_paths(); static void do_interactive_mode(); static bool yes_no(const char *prompt); static bool check_idx(const char *col_name); static bool create_tmp_idx(const char *idx_name, const char *table_name, const char *col_name); static bool drop_tmp_idx(const char *idx_name, const char *table_name); static int check_idx_handler(void *ctx, int num_fields, char **row); /* Wrapper for printf function which can add timestamp to the logs if user requested it */ static void printf_tstamp(char const * const format, ...) { va_list args; va_start(args, format); /* Add timestamp prefix if requested by the user */ if (timestamps) { char buf[64]; utime_t mtime = time(NULL); bstrftimes(buf, sizeof(buf), mtime); printf("%s ", buf); } vprintf(format, args); va_end(args); } static void usage() { fprintf(stderr, PROG_COPYRIGHT "\n%sVersion: %s (%s)\n\n" "Usage: dbcheck [-c config ] [-B] [-C catalog name] [-d debug_level] [] []\n" " -b batch mode\n" " -C catalog name in the director conf file\n" " -c Director conf filename\n" " -B print catalog configuration and exit\n" " -d set debug level to \n" " -n custom number of records selected at a time for large pruning operations done in a loop (orphan paths/files, ...)\n" " -dt print a timestamp in debug output\n" " -t print a timestamp for each log line\n" " -f fix inconsistencies\n" " -v verbose\n" " -? 
print this message\n" "\n", 2002, BDEMO, VERSION, BDATE); exit(1); } int main (int argc, char *argv[]) { int ch; const char *user, *password, *db_name, *dbhost; const char *dbsslmode = NULL, *dbsslkey = NULL, *dbsslcert = NULL, *dbsslca = NULL; const char *dbsslcapath = NULL, *dbsslcipher = NULL; int dbport = 0; bool print_catalog=false; char *configfile = NULL; char *catalogname = NULL; char *endptr; setlocale(LC_ALL, ""); bindtextdomain("bacula", LOCALEDIR); textdomain("bacula"); lmgr_init_thread(); my_name_is(argc, argv, "dbcheck"); init_msg(NULL, NULL); /* setup message handler */ memset(&id_list, 0, sizeof(id_list)); memset(&name_list, 0, sizeof(name_list)); while ((ch = getopt(argc, argv, "bc:C:d:fvB?n:t")) != -1) { switch (ch) { case 'n': /* Number of changes we can make in one run */ nb_changes = str_to_uint64(optarg); break; case 'B': print_catalog = true; /* get catalog information from config */ break; case 'b': /* batch */ batch = true; break; case 'C': /* CatalogName */ catalogname = optarg; break; case 'c': /* configfile */ configfile = optarg; break; case 'd': /* debug level */ if (*optarg == 't') { dbg_timestamp = true; } else { debug_level = atoi(optarg); if (debug_level <= 0) { debug_level = 1; } } break; case 'f': /* fix inconsistencies */ fix = true; break; case 't': /* Add timestamps to logs */ timestamps = true; break; case 'v': verbose++; break; case '?': default: usage(); } } argc -= optind; argv += optind; OSDependentInit(); if (configfile) { CAT *catalog = NULL; int found = 0; if (argc > 0) { Pmsg0(0, _("Warning skipping the additional parameters for working directory/dbname/user/password/host.\n")); } config = New(CONFIG()); parse_dir_config(config, configfile, M_ERROR_TERM); LockRes(); foreach_res(catalog, R_CATALOG) { if (catalogname && !strcmp(catalog->hdr.name, catalogname)) { ++found; break; } else if (!catalogname) { // stop on first if no catalogname is given ++found; break; } } UnlockRes(); if (!found) { if (catalogname) { Pmsg2(0, _("Error can not find the Catalog name[%s] in the given config file [%s]\n"), catalogname, configfile); } else { Pmsg1(0, _("Error there is no Catalog section in the given config file [%s]\n"), configfile); } exit(1); } else { DIRRES *director; LockRes(); director = (DIRRES *)GetNextRes(R_DIRECTOR, NULL); UnlockRes(); if (!director) { Pmsg0(0, _("Error no Director resource defined.\n")); exit(1); } set_working_directory(director->working_directory); /* Print catalog information and exit (-B) */ if (print_catalog) { POOLMEM *catalog_details = get_pool_memory(PM_MESSAGE); db = db_init_database(NULL, catalog->db_driver, catalog->db_name, catalog->db_user, catalog->db_password, catalog->db_address, catalog->db_port, catalog->db_socket, catalog->db_ssl_mode, catalog->db_ssl_key, catalog->db_ssl_cert, catalog->db_ssl_ca, catalog->db_ssl_capath, catalog->db_ssl_cipher, catalog->mult_db_connections, catalog->disable_batch_insert); if (db) { printf("%sdb_type=%s\nworking_dir=%s\n", catalog->display(catalog_details), db_get_engine_name(db), working_directory); db_close_database(NULL, db); } free_pool_memory(catalog_details); exit(0); } db_name = catalog->db_name; user = catalog->db_user; password = catalog->db_password; dbhost = catalog->db_address; if (dbhost && dbhost[0] == 0) { dbhost = NULL; } dbport = catalog->db_port; dbsslmode = catalog->db_ssl_mode; dbsslkey = catalog->db_ssl_key; dbsslcert = catalog->db_ssl_cert; dbsslca = catalog->db_ssl_ca; dbsslcapath = catalog->db_ssl_capath; dbsslcipher = catalog->db_ssl_cipher; } } else { 
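      /*
       * No -c <config> given: the database connection settings are taken from
       * the positional arguments parsed below, in the order
       *   <working_dir> [<db_name> [<user> [<password> [<host> [<port>]]]]]
       * (the parser below also recognizes optional SSL mode/key/cert/CA
       * arguments).  Only <working_dir> is required; db_name defaults to
       * "bacula", user defaults to the db_name and the password defaults to
       * an empty string.  A purely illustrative invocation (hypothetical
       * paths and credentials) would be:
       *   dbcheck -f /opt/bacula/working bacula bacula "" localhost 5432
       */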
if (argc > 6) { Pmsg0(0, _("Wrong number of arguments.\n")); usage(); } if (argc < 1) { Pmsg0(0, _("Working directory not supplied.\n")); usage(); } /* This is needed by SQLite to find the db */ working_directory = argv[0]; db_name = "bacula"; user = db_name; password = ""; dbhost = NULL; if (argc >= 2) { db_name = argv[1]; user = db_name; if (argc >= 3) { user = argv[2]; if (argc >= 4) { password = argv[3]; if (argc >= 5) { dbhost = argv[4]; if (argc >= 6) { errno = 0; dbport = strtol(argv[5], &endptr, 10); if (*endptr != '\0') { Pmsg0(0, _("Database port must be a numeric value.\n")); exit(1); } else if (errno == ERANGE) { Pmsg0(0, _("Database port must be a int value.\n")); exit(1); } if (argc >= 7) { dbsslmode = argv[6]; if (argc >= 8) { dbsslkey = argv[7]; dbsslcert = argv[8]; if (argc == 10) { dbsslca = argv[9]; } /* if (argc == 10) */ } /* if (argc >= 8) */ } /* if (argc >= 7) */ } /* if (argc >= 6) */ } /* if (argc >= 5) */ } /* if (argc >= 4) */ } /* if (argc >= 3) */ } /* if (argc >= 2) */ } /* Open database */ db = db_init_database(NULL, NULL, db_name, user, password, dbhost, dbport, NULL, dbsslmode, dbsslkey, dbsslcert, dbsslca, dbsslcapath, dbsslcipher, false, false); if (!db || !db_open_database(NULL, db)) { Emsg1(M_FATAL, 0, "%s", db_strerror(db)); return 1; } /* Drop temporary index idx_tmp_name if it already exists */ drop_tmp_idx("idxPIchk", "File"); if (batch) { repair_bad_paths(); eliminate_duplicate_paths(); eliminate_orphaned_jobmedia_records(); eliminate_orphaned_file_records(); eliminate_orphaned_path_records(); eliminate_orphaned_fileset_records(); eliminate_orphaned_client_records(); eliminate_orphaned_job_records(); eliminate_admin_records(); eliminate_restore_records(); eliminate_orphaned_meta_records(); eliminate_orphaned_object_records(); eliminate_orphaned_fileevents_records(); } else { do_interactive_mode(); } /* Drop temporary index idx_tmp_name */ drop_tmp_idx("idxPIchk", "File"); if (db) db_close_database(NULL, db); close_msg(NULL); term_msg(); lmgr_cleanup_main(); return 0; } static void do_interactive_mode() { const char *cmd; printf(_("Hello, this is the database check/correct program.\n")); if (fix) printf(_("Modify database is on.")); else printf(_("Modify database is off.")); if (verbose) printf(_(" Verbose is on.\n")); else printf(_(" Verbose is off.\n")); printf(_("Please select the function you want to perform.\n")); while (!quit) { if (fix) { printf(_("\n" " 1) Toggle modify database flag\n" " 2) Toggle verbose flag\n" " 3) Repair bad Path records\n" " 4) Eliminate duplicate Path records\n" " 5) Eliminate orphaned Jobmedia records\n" " 6) Eliminate orphaned File records\n" " 7) Eliminate orphaned Path records\n" " 8) Eliminate orphaned FileSet records\n" " 9) Eliminate orphaned Client records\n" " 10) Eliminate orphaned Meta/FileEvents data records\n" " 11) Eliminate orphaned Job records\n" " 12) Eliminate all Admin records\n" " 13) Eliminate all Restore records\n" " 14) Eliminate all Verify records\n" " 15) All (3-14)\n" " 16) Quit\n")); } else { printf(_("\n" " 1) Toggle modify database flag\n" " 2) Toggle verbose flag\n" " 3) Check for bad Path records\n" " 4) Check for duplicate Path records\n" " 5) Check for orphaned Jobmedia records\n" " 6) Check for orphaned File records\n" " 7) Check for orphaned Path records\n" " 8) Check for orphaned FileSet records\n" " 9) Check for orphaned Client records\n" " 10) Check all orphaned Meta/FileEvents data records\n" " 11) Check for orphaned Job records\n" " 12) Check for all Admin records\n" " 13) 
Check for all Restore records\n" " 14) Check all Verify records\n" " 15) All (3-14)\n" " 16) Quit\n")); } cmd = get_cmd(_("Select function number: ")); if (cmd) { int item = atoi(cmd); switch (item) { case 1: fix = !fix; if (fix) printf(_("Database will be modified.\n")); else printf(_("Database will NOT be modified.\n")); break; case 2: verbose = verbose?0:1; if (verbose) printf(_(" Verbose is on.\n")); else printf(_(" Verbose is off.\n")); break; case 3: repair_bad_paths(); break; case 4: eliminate_duplicate_paths(); break; case 5: eliminate_orphaned_jobmedia_records(); break; case 6: eliminate_orphaned_file_records(); break; case 7: eliminate_orphaned_path_records(); break; case 8: eliminate_orphaned_fileset_records(); break; case 9: eliminate_orphaned_client_records(); break; case 10: eliminate_orphaned_meta_records(); eliminate_orphaned_fileevents_records(); break; case 11: eliminate_orphaned_job_records(); break; case 12: eliminate_admin_records(); break; case 13: eliminate_restore_records(); break; case 14: eliminate_verify_records(); break; case 15: repair_bad_paths(); eliminate_duplicate_paths(); eliminate_orphaned_jobmedia_records(); eliminate_orphaned_file_records(); eliminate_orphaned_path_records(); eliminate_orphaned_fileset_records(); eliminate_orphaned_client_records(); eliminate_orphaned_meta_records(); eliminate_orphaned_fileevents_records(); eliminate_orphaned_job_records(); eliminate_admin_records(); eliminate_restore_records(); eliminate_verify_records(); break; case 16: quit = true; break; } } } } static int print_name_handler(void *ctx, int num_fields, char **row) { if (row[0]) { printf_tstamp("%s\n", row[0]); } return 0; } static int get_name_handler(void *ctx, int num_fields, char **row) { POOLMEM *buf = (POOLMEM *)ctx; if (row[0]) { pm_strcpy(&buf, row[0]); } return 0; } static int print_job_handler(void *ctx, int num_fields, char **row) { printf_tstamp(_("JobId=%s Name=\"%s\" StartTime=%s\n"), NPRT(row[0]), NPRT(row[1]), NPRT(row[2])); return 0; } static int print_jobmedia_handler(void *ctx, int num_fields, char **row) { printf_tstamp(_("Orphaned JobMediaId=%s JobId=%s Volume=\"%s\"\n"), NPRT(row[0]), NPRT(row[1]), NPRT(row[2])); return 0; } static int print_file_handler(void *ctx, int num_fields, char **row) { printf_tstamp(_("Orphaned FileId=%s JobId=%s Volume=\"%s\"\n"), NPRT(row[0]), NPRT(row[1]), NPRT(row[2])); return 0; } static int print_fileset_handler(void *ctx, int num_fields, char **row) { printf_tstamp(_("Orphaned FileSetId=%s FileSet=\"%s\" MD5=%s\n"), NPRT(row[0]), NPRT(row[1]), NPRT(row[2])); return 0; } static int print_client_handler(void *ctx, int num_fields, char **row) { printf_tstamp(_("Orphaned ClientId=%s Name=\"%s\"\n"), NPRT(row[0]), NPRT(row[1])); return 0; } /* * Called here with each id to be added to the list */ static int id_list_handler(void *ctx, int num_fields, char **row) { ID_LIST *lst = (ID_LIST *)ctx; if (lst->num_ids == MAX_ID_LIST_LEN) { return 1; } if (lst->num_ids == lst->max_ids) { if (lst->max_ids == 0) { lst->max_ids = 10000; lst->Id = (int64_t *)bmalloc(sizeof(int64_t) * lst->max_ids); } else { lst->max_ids = (lst->max_ids * 3) / 2; lst->Id = (int64_t *)brealloc(lst->Id, sizeof(int64_t) * lst->max_ids); } } lst->Id[lst->num_ids++] = str_to_int64(row[0]); return 0; } /* * Construct record id list */ static int make_id_list(const char *query, ID_LIST *id_list) { id_list->num_ids = 0; id_list->num_del = 0; id_list->tot_ids = 0; if (!db_sql_query(db, query, id_list_handler, (void *)id_list)) { printf_tstamp("%s", 
db_strerror(db)); return 0; } return 1; } /* * Delete all entries in the list */ static int delete_id_list(const char *query, ID_LIST *id_list) { char ed1[50]; db_start_transaction(NULL, db); for (int i=0; i < id_list->num_ids; i++) { bsnprintf(buf, sizeof(buf), query, edit_int64(id_list->Id[i], ed1)); if (verbose) { printf_tstamp(_("Deleting: %s\n"), buf); } db_sql_query(db, buf, NULL, NULL); } db_end_transaction(NULL, db); return 1; } /* * Called here with each name to be added to the list */ static int name_list_handler(void *ctx, int num_fields, char **row) { NAME_LIST *name = (NAME_LIST *)ctx; if (name->num_ids == MAX_ID_LIST_LEN) { return 1; } if (name->num_ids == name->max_ids) { if (name->max_ids == 0) { name->max_ids = 10000; name->name = (char **)bmalloc(sizeof(char *) * name->max_ids); } else { name->max_ids = (name->max_ids * 3) / 2; name->name = (char **)brealloc(name->name, sizeof(char *) * name->max_ids); } } name->name[name->num_ids++] = bstrdup(row[0]); return 0; } /* * Construct name list */ static int make_name_list(const char *query, NAME_LIST *name_list) { name_list->num_ids = 0; name_list->num_del = 0; name_list->tot_ids = 0; if (!db_sql_query(db, query, name_list_handler, (void *)name_list)) { printf_tstamp("%s", db_strerror(db)); return 0; } return 1; } /* * Print names in the list */ static void print_name_list(NAME_LIST *name_list) { for (int i=0; i < name_list->num_ids; i++) { printf_tstamp("%s\n", name_list->name[i]); } } /* * Free names in the list */ static void free_name_list(NAME_LIST *name_list) { for (int i=0; i < name_list->num_ids; i++) { free(name_list->name[i]); } name_list->num_ids = 0; } static void eliminate_duplicate_paths() { const char *query; char esc_name[5000]; printf_tstamp(_("Checking for duplicate Path entries.\n")); /* Make list of duplicated names */ query = "SELECT Path, count(Path) as Count FROM Path " "GROUP BY Path HAVING count(Path) > 1"; if (!make_name_list(query, &name_list)) { exit(1); } printf_tstamp(_("Found %d duplicate Path records.\n"), name_list.num_ids); if (name_list.num_ids && verbose && yes_no(_("Print them? (yes/no): "))) { print_name_list(&name_list); } if (quit) { return; } if (fix) { /* Loop through list of duplicate names */ for (int i=0; i 1) { printf_tstamp("%s\n", buf); } if (!make_id_list(buf, &id_list)) { exit(1); } if (verbose) { printf_tstamp(_("Found %d for: %s\n"), id_list.num_ids, name_list.name[i]); } /* Force all records to use the first id then delete the other ids */ for (int j=1; j 1) { printf_tstamp("%s\n", buf); } db_sql_query(db, buf, NULL, NULL); bsnprintf(buf, sizeof(buf), "DELETE FROM Path WHERE PathId=%s", ed2); if (verbose > 2) { printf_tstamp("%s\n", buf); } db_sql_query(db, buf, NULL, NULL); } } } free_name_list(&name_list); } static void eliminate_orphaned_jobmedia_records() { POOL_MEM query; const char *q = "SELECT JobMedia.JobMediaId,Job.JobId FROM JobMedia " "LEFT OUTER JOIN Job ON (JobMedia.JobId=Job.JobId) " "WHERE Job.JobId IS NULL LIMIT %llu"; Mmsg(query, q, nb_changes); printf_tstamp(_("Checking for orphaned JobMedia entries.\n")); if (!make_id_list(query.c_str(), &id_list)) { exit(1); } /* Loop doing 300000 at a time */ while (id_list.num_ids != 0) { printf_tstamp(_("Found %d orphaned JobMedia records.\n"), id_list.num_ids); if (id_list.num_ids && verbose && yes_no(_("Print them? 
(yes/no): "))) { for (int i=0; i < id_list.num_ids; i++) { char ed1[50]; bsnprintf(buf, sizeof(buf), "SELECT JobMedia.JobMediaId,JobMedia.JobId,Media.VolumeName FROM JobMedia,Media " "WHERE JobMedia.JobMediaId=%s AND Media.MediaId=JobMedia.MediaId", edit_int64(id_list.Id[i], ed1)); if (!db_sql_query(db, buf, print_jobmedia_handler, NULL)) { printf_tstamp("%s\n", db_strerror(db)); } } } if (quit) { return; } if (fix && id_list.num_ids > 0) { printf_tstamp(_("Deleting %d orphaned JobMedia records.\n"), id_list.num_ids); delete_id_list("DELETE FROM JobMedia WHERE JobMediaId=%s", &id_list); } else { break; /* get out if not updating db */ } if (!make_id_list(query.c_str(), &id_list)) { exit(1); } } } static void eliminate_orphaned_file_records() { POOL_MEM query; const char *q = "SELECT File.FileId,Job.JobId FROM File " "LEFT OUTER JOIN Job ON (File.JobId=Job.JobId) " "WHERE Job.JobId IS NULL LIMIT %llu"; Mmsg(query, q, nb_changes); printf_tstamp(_("Checking for orphaned File entries. This may take some time!\n")); if (verbose > 1) { printf_tstamp("%s\n", query.c_str()); } if (!make_id_list(query.c_str(), &id_list)) { exit(1); } /* Loop doing 300000 at a time */ while (id_list.num_ids != 0) { printf_tstamp(_("Found %d orphaned File records.\n"), id_list.num_ids); if (name_list.num_ids && verbose && yes_no(_("Print them? (yes/no): "))) { for (int i=0; i < id_list.num_ids; i++) { char ed1[50]; bsnprintf(buf, sizeof(buf), "SELECT File.FileId,File.JobId,File.Filename FROM File " "WHERE File.FileId=%s", edit_int64(id_list.Id[i], ed1)); if (!db_sql_query(db, buf, print_file_handler, NULL)) { printf_tstamp("%s\n", db_strerror(db)); } } } if (quit) { return; } if (fix && id_list.num_ids > 0) { printf_tstamp(_("Deleting %d orphaned File records.\n"), id_list.num_ids); delete_id_list("DELETE FROM File WHERE FileId=%s", &id_list); } else { break; /* get out if not updating db */ } if (!make_id_list(query.c_str(), &id_list)) { exit(1); } } } static void eliminate_orphaned_object_records() { POOL_MEM query; const char *q = "SELECT Object.ObjectId,Job.JobId FROM Object " "LEFT OUTER JOIN Job ON (Object.JobId=Job.JobId) " "WHERE Job.JobId IS NULL LIMIT %llu"; Mmsg(query, q, nb_changes); printf_tstamp(_("Checking for orphaned Object entries.\n")); if (verbose > 1) { printf_tstamp("%s\n", query.c_str()); } if (!make_id_list(query.c_str(), &id_list)) { exit(1); } /* Loop doing 300000 at a time */ while (id_list.num_ids != 0) { printf_tstamp(_("Found %d orphaned Object records.\n"), id_list.num_ids); if (name_list.num_ids && verbose && yes_no(_("Print them? 
(yes/no): "))) { for (int i=0; i < id_list.num_ids; i++) { char ed1[50]; bsnprintf(buf, sizeof(buf), "SELECT Object.ObjectId.JobId,Object.ObjectName FROM Object " "WHERE Object.ObjectId=%s", edit_int64(id_list.Id[i], ed1)); if (!db_sql_query(db, buf, print_file_handler, NULL)) { printf_tstamp("%s\n", db_strerror(db)); } } } if (quit) { return; } if (fix && id_list.num_ids > 0) { printf_tstamp(_("Deleting %d orphaned Object records.\n"), id_list.num_ids); delete_id_list("DELETE FROM Object WHERE ObjectId=%s", &id_list); } else { break; /* get out if not updating db */ } if (!make_id_list(query.c_str(), &id_list)) { exit(1); } } } static void eliminate_orphaned_meta_records() { const char *q; db_int64_ctx lctx1, lctx2; printf_tstamp(_("Checking for orphaned Meta entries.\n")); q = "SELECT COUNT(1) FROM MetaEmail WHERE JobId NOT IN (SELECT DISTINCT JobId from Job)"; if (!db_sql_query(db, q, db_int64_handler, &lctx1)) { printf_tstamp("%s\n", db_strerror(db)); } printf_tstamp(_("Found %lld orphaned MetaEmail records.\n"), lctx1.value); q = "SELECT COUNT(1) FROM MetaAttachment WHERE JobId NOT IN (SELECT DISTINCT JobId from Job)"; if (!db_sql_query(db, q, db_int64_handler, &lctx2)) { printf_tstamp("%s\n", db_strerror(db)); } printf_tstamp(_("Found %lld orphaned MetaAttachment records.\n"), lctx2.value); if (fix && lctx1.count > 0) { printf_tstamp(_("Deleting %lld orphaned MetaEmail records.\n"), lctx1.value); q = "DELETE FROM MetaEmail WHERE JobId NOT IN (SELECT DISTINCT JobId from Job)"; if (!db_sql_query(db, q, NULL, NULL)) { printf_tstamp("%s\n", db_strerror(db)); } } if (fix && lctx2.count > 0) { printf_tstamp(_("Deleting %lld orphaned MetaEmail records.\n"), lctx1.value); q = "DELETE FROM MetaAttachment WHERE JobId NOT IN (SELECT DISTINCT JobId from Job)"; if (!db_sql_query(db, q, NULL, NULL)) { printf_tstamp("%s\n", db_strerror(db)); } } } static void eliminate_orphaned_fileevents_records() { const char *q; db_int64_ctx lctx1; printf_tstamp(_("Checking for orphaned FileEvents entries.\n")); q = "SELECT COUNT(1) FROM FileEvents WHERE JobId NOT IN (SELECT DISTINCT JobId from Job)"; if (!db_sql_query(db, q, db_int64_handler, &lctx1)) { printf_tstamp("%s\n", db_strerror(db)); } printf_tstamp(_("Found %lld orphaned FileEvents records.\n"), lctx1.value); if (fix && lctx1.count > 0) { printf_tstamp(_("Deleting %lld orphaned FileEvents records.\n"), lctx1.value); q = "DELETE FROM FileEvents WHERE JobId NOT IN (SELECT DISTINCT JobId from Job)"; if (!db_sql_query(db, q, NULL, NULL)) { printf_tstamp("%s\n", db_strerror(db)); } } } static void eliminate_orphaned_path_records() { db_int64_ctx lctx; lctx.count=0; db_sql_query(db, "SELECT 1 FROM Job WHERE HasCache=1 LIMIT 1", db_int64_handler, &lctx); printf_tstamp(_("Found %lld orphaned MetaEmail records.\n"), lctx.value); /* The BVFS code uses Path records that are not in the File table, for * example if a Job has /home/test/ BVFS will need to create a Path record / * and /home/ to work correctly */ if (lctx.count == 1) { printf_tstamp(_("To prune orphaned Path entries, it is necessary to clear the BVFS Cache first with the bconsole \".bvfs_clear_cache yes\" command.\n")); return; } idx_tmp_name = NULL; /* Check the existence of the required "one column" index */ if (!check_idx("PathId")) { if (yes_no(_("Create temporary index? 
(yes/no): "))) { /* create temporary index PathId */ create_tmp_idx("idxPIchk", "File", "PathId"); } } POOL_MEM query; const char *q = "SELECT DISTINCT Path.PathId,File.PathId FROM Path " "LEFT OUTER JOIN File ON (Path.PathId=File.PathId) " "WHERE File.PathId IS NULL LIMIT %llu"; Mmsg(query, q, nb_changes); printf_tstamp(_("Checking for orphaned Path entries. This may take some time!\n")); if (verbose > 1) { printf_tstamp("%s\n", query.c_str()); } if (!make_id_list(query.c_str(), &id_list)) { exit(1); } /* Loop doing 300000 at a time */ while (id_list.num_ids != 0) { printf_tstamp(_("Found %d orphaned Path records.\n"), id_list.num_ids); if (id_list.num_ids && verbose && yes_no(_("Print them? (yes/no): "))) { for (int i=0; i < id_list.num_ids; i++) { char ed1[50]; bsnprintf(buf, sizeof(buf), "SELECT Path FROM Path WHERE PathId=%s", edit_int64(id_list.Id[i], ed1)); db_sql_query(db, buf, print_name_handler, NULL); } } if (quit) { return; } if (fix && id_list.num_ids > 0) { printf_tstamp(_("Deleting %d orphaned Path records.\n"), id_list.num_ids); delete_id_list("DELETE FROM Path WHERE PathId=%s", &id_list); } else { break; /* get out if not updating db */ } if (!make_id_list(query.c_str(), &id_list)) { exit(1); } } /* Drop temporary index idx_tmp_name */ drop_tmp_idx("idxPIchk", "File"); } static void eliminate_orphaned_fileset_records() { const char *query; printf_tstamp(_("Checking for orphaned FileSet entries. This takes some time!\n")); query = "SELECT FileSet.FileSetId,Job.FileSetId FROM FileSet " "LEFT OUTER JOIN Job ON (FileSet.FileSetId=Job.FileSetId) " "WHERE Job.FileSetId IS NULL"; if (verbose > 1) { printf_tstamp("%s\n", query); } if (!make_id_list(query, &id_list)) { exit(1); } printf_tstamp(_("Found %d orphaned FileSet records.\n"), id_list.num_ids); if (id_list.num_ids && verbose && yes_no(_("Print them? (yes/no): "))) { for (int i=0; i < id_list.num_ids; i++) { char ed1[50]; bsnprintf(buf, sizeof(buf), "SELECT FileSetId,FileSet,MD5 FROM FileSet " "WHERE FileSetId=%s", edit_int64(id_list.Id[i], ed1)); if (!db_sql_query(db, buf, print_fileset_handler, NULL)) { printf_tstamp("%s\n", db_strerror(db)); } } } if (quit) { return; } if (fix && id_list.num_ids > 0) { printf_tstamp(_("Deleting %d orphaned FileSet records.\n"), id_list.num_ids); delete_id_list("DELETE FROM FileSet WHERE FileSetId=%s", &id_list); } } static void eliminate_orphaned_client_records() { const char *query; printf_tstamp(_("Checking for orphaned Client entries.\n")); /* In English: * Wiffle through Client for every Client * joining with the Job table including every Client even if * there is not a match in Job (left outer join), then * filter out only those where no Job points to a Client * i.e. Job.Client is NULL */ query = "SELECT Client.ClientId,Client.Name FROM Client " "LEFT OUTER JOIN Job ON (Client.ClientId=Job.ClientId) " "WHERE Job.ClientId IS NULL"; if (verbose > 1) { printf_tstamp("%s\n", query); } if (!make_id_list(query, &id_list)) { exit(1); } printf_tstamp(_("Found %d orphaned Client records.\n"), id_list.num_ids); if (id_list.num_ids && verbose && yes_no(_("Print them? 
(yes/no): "))) { for (int i=0; i < id_list.num_ids; i++) { char ed1[50]; bsnprintf(buf, sizeof(buf), "SELECT ClientId,Name FROM Client " "WHERE ClientId=%s", edit_int64(id_list.Id[i], ed1)); if (!db_sql_query(db, buf, print_client_handler, NULL)) { printf_tstamp("%s\n", db_strerror(db)); } } } if (quit) { return; } if (fix && id_list.num_ids > 0) { printf_tstamp(_("Deleting %d orphaned Client records.\n"), id_list.num_ids); delete_id_list("DELETE FROM Client WHERE ClientId=%s", &id_list); } } static void eliminate_orphaned_job_records() { const char *query; printf_tstamp(_("Checking for orphaned Job entries.\n")); /* In English: * Wiffle through Job for every Job * joining with the Client table including every Job even if * there is not a match in Client (left outer join), then * filter out only those where no Client exists * i.e. Client.Name is NULL */ query = "SELECT Job.JobId,Job.Name FROM Job " "LEFT OUTER JOIN Client ON (Job.ClientId=Client.ClientId) " "WHERE Client.Name IS NULL " "UNION " "SELECT Job.JobId, Job.Name FROM Job LEFT JOIN JobMedia USING (JobId) WHERE JobMediaId IS NULL and Type = 'B' and JobStatus = 'T' AND JobFiles > 0 AND JobBytes > 0" ; if (verbose > 1) { printf_tstamp("%s\n", query); } if (!make_id_list(query, &id_list)) { exit(1); } printf_tstamp(_("Found %d orphaned Job records.\n"), id_list.num_ids); if (id_list.num_ids && verbose && yes_no(_("Print them? (yes/no): "))) { for (int i=0; i < id_list.num_ids; i++) { char ed1[50]; bsnprintf(buf, sizeof(buf), "SELECT JobId,Name,StartTime FROM Job " "WHERE JobId=%s", edit_int64(id_list.Id[i], ed1)); if (!db_sql_query(db, buf, print_job_handler, NULL)) { printf_tstamp("%s\n", db_strerror(db)); } } } if (quit) { return; } if (fix && id_list.num_ids > 0) { db_sql_query(db, "BEGIN", NULL, NULL); printf_tstamp(_("Deleting %d orphaned TagJob records.\n"), id_list.num_ids); delete_id_list("DELETE FROM TagJob WHERE JobId=%s", &id_list); printf_tstamp(_("Deleting %d orphaned File records.\n"), id_list.num_ids); delete_id_list("DELETE FROM File WHERE JobId=%s", &id_list); printf_tstamp(_("Deleting %d orphaned BaseFile records.\n"), id_list.num_ids); delete_id_list("DELETE FROM BaseFiles WHERE JobId=%s", &id_list); printf_tstamp(_("Deleting %d orphaned PathVisibility records.\n"), id_list.num_ids); delete_id_list("DELETE FROM PathVisibility WHERE JobId=%s", &id_list); printf_tstamp(_("Deleting %d orphaned Job records.\n"), id_list.num_ids); delete_id_list("DELETE FROM Job WHERE JobId=%s", &id_list); printf_tstamp(_("Deleting JobMedia records of orphaned Job records.\n")); delete_id_list("DELETE FROM JobMedia WHERE JobId=%s", &id_list); printf_tstamp(_("Deleting Object records of orphaned Job records.\n")); delete_id_list("DELETE FROM Object WHERE JobId=%s", &id_list); printf_tstamp(_("Deleting Meta records of orphaned Job records.\n")); delete_id_list("DELETE FROM MetaEmail WHERE JobId=%s", &id_list); delete_id_list("DELETE FROM MetaAttachment WHERE JobId=%s", &id_list); printf_tstamp(_("Deleting Object records of orphaned Job records.\n")); delete_id_list("DELETE FROM RestoreObject WHERE JobId=%s", &id_list); printf_tstamp(_("Deleting Log records of orphaned Job records.\n")); delete_id_list("DELETE FROM Log WHERE JobId=%s", &id_list); db_sql_query(db, "COMMIT", NULL, NULL); } } static void eliminate_admin_records() { const char *query; printf_tstamp(_("Checking for Admin Job entries.\n")); query = "SELECT Job.JobId FROM Job " "WHERE Job.Type='D'"; if (verbose > 1) { printf_tstamp("%s\n", query); } if (!make_id_list(query, 
&id_list)) { exit(1); } printf_tstamp(_("Found %d Admin Job records.\n"), id_list.num_ids); if (id_list.num_ids && verbose && yes_no(_("Print them? (yes/no): "))) { for (int i=0; i < id_list.num_ids; i++) { char ed1[50]; bsnprintf(buf, sizeof(buf), "SELECT JobId,Name,StartTime FROM Job " "WHERE JobId=%s", edit_int64(id_list.Id[i], ed1)); if (!db_sql_query(db, buf, print_job_handler, NULL)) { printf_tstamp("%s\n", db_strerror(db)); } } } if (quit) { return; } if (fix && id_list.num_ids > 0) { printf_tstamp(_("Deleting %d Admin Job records.\n"), id_list.num_ids); delete_id_list("DELETE FROM Job WHERE JobId=%s", &id_list); } } static void eliminate_restore_records() { const char *query; printf_tstamp(_("Checking for Restore Job entries.\n")); query = "SELECT Job.JobId FROM Job " "WHERE Job.Type='R'"; if (verbose > 1) { printf_tstamp("%s\n", query); } if (!make_id_list(query, &id_list)) { exit(1); } printf_tstamp(_("Found %d Restore Job records.\n"), id_list.num_ids); if (id_list.num_ids && verbose && yes_no(_("Print them? (yes/no): "))) { for (int i=0; i < id_list.num_ids; i++) { char ed1[50]; bsnprintf(buf, sizeof(buf), "SELECT JobId,Name,StartTime FROM Job " "WHERE JobId=%s", edit_int64(id_list.Id[i], ed1)); if (!db_sql_query(db, buf, print_job_handler, NULL)) { printf_tstamp("%s\n", db_strerror(db)); } } } if (quit) { return; } if (fix && id_list.num_ids > 0) { printf_tstamp(_("Deleting %d Restore Job records.\n"), id_list.num_ids); delete_id_list("DELETE FROM Job WHERE JobId=%s", &id_list); } } static void eliminate_verify_records() { const char *query; printf_tstamp(_("Checking for Verify Job entries.\n")); query = "SELECT Job.JobId FROM Job " "WHERE Job.Type='V'"; if (verbose > 1) { printf_tstamp("%s\n", query); } if (!make_id_list(query, &id_list)) { exit(1); } printf_tstamp(_("Found %d Verify Job records.\n"), id_list.num_ids); if (id_list.num_ids && verbose && yes_no(_("Print them? (yes/no): "))) { for (int i=0; i < id_list.num_ids; i++) { char ed1[50]; bsnprintf(buf, sizeof(buf), "SELECT JobId,Name,StartTime FROM Job " "WHERE JobId=%s", edit_int64(id_list.Id[i], ed1)); if (!db_sql_query(db, buf, print_job_handler, NULL)) { printf_tstamp("%s\n", db_strerror(db)); } } } if (quit) { return; } if (fix && id_list.num_ids > 0) { printf_tstamp(_("Deleting %d Verify Job records.\n"), id_list.num_ids); delete_id_list("DELETE FROM Job WHERE JobId=%s", &id_list); } } static void repair_bad_paths() { const char *query; int i; printf_tstamp(_("Checking for Paths without a trailing slash\n")); query = "SELECT PathId,Path from Path " "WHERE Path NOT LIKE '%/' and Path != ''"; if (verbose > 1) { printf_tstamp("%s\n", query); } if (!make_id_list(query, &id_list)) { exit(1); } printf_tstamp(_("Found %d bad Path records.\n"), id_list.num_ids); if (id_list.num_ids && verbose && yes_no(_("Print them? 
(yes/no): "))) { for (i=0; i < id_list.num_ids; i++) { char ed1[50]; bsnprintf(buf, sizeof(buf), "SELECT Path FROM Path WHERE PathId=%s", edit_int64(id_list.Id[i], ed1)); if (!db_sql_query(db, buf, print_name_handler, NULL)) { printf_tstamp("%s\n", db_strerror(db)); } } } if (quit) { return; } if (fix && id_list.num_ids > 0) { POOLMEM *name = get_pool_memory(PM_FNAME); char esc_name[5000]; printf_tstamp(_("Reparing %d bad Filename records.\n"), id_list.num_ids); for (i=0; i < id_list.num_ids; i++) { int len; char ed1[50]; bsnprintf(buf, sizeof(buf), "SELECT Path FROM Path WHERE PathId=%s", edit_int64(id_list.Id[i], ed1)); if (!db_sql_query(db, buf, get_name_handler, name)) { printf_tstamp("%s\n", db_strerror(db)); } /* Strip trailing blanks */ for (len=strlen(name); len > 0 && name[len-1]==' '; len--) { name[len-1] = 0; } /* Add trailing slash */ len = pm_strcat(&name, "/"); db_escape_string(NULL, db, esc_name, name, len); bsnprintf(buf, sizeof(buf), "UPDATE Path SET Path='%s' WHERE PathId=%s", esc_name, edit_int64(id_list.Id[i], ed1)); if (verbose > 1) { printf_tstamp("%s\n", buf); } db_sql_query(db, buf, NULL, NULL); } free_pool_memory(name); } } /* * Gen next input command from the terminal */ static char *get_cmd(const char *prompt) { static char cmd[1000]; printf("%s", prompt); if (fgets(cmd, sizeof(cmd), stdin) == NULL) { printf("\n"); quit = true; return NULL; } strip_trailing_junk(cmd); return cmd; } static bool yes_no(const char *prompt) { char *cmd; cmd = get_cmd(prompt); if (!cmd) { quit = true; return false; } return (strcasecmp(cmd, "yes") == 0) || (strcasecmp(cmd, _("yes")) == 0); } bool python_set_prog(JCR*, char const*) { return false; } /* * The code below to add indexes is needed only for MySQL, and * that to improve the performance. */ #define MAXIDX 100 typedef struct s_idx_list { char *key_name; int count_key; /* how many times the index meets *key_name */ int count_col; /* how many times meets the desired column name */ } IDX_LIST; static IDX_LIST idx_list[MAXIDX]; /* * Called here with each table index to be added to the list */ static int check_idx_handler(void *ctx, int num_fields, char **row) { /* * Table | Non_unique | Key_name | Seq_in_index | Column_name |... * File | 0 | PRIMARY | 1 | FileId |... 
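 *
 * MySQL emits one such row for every (index, column) pair.  The handler
 * below tallies, for each Key_name, how many rows it produced (count_key)
 * and how many of those rows mention the column being probed (count_col);
 * check_idx() then accepts the column as indexed only when some index has
 * count_key == 1 and count_col == 1, i.e. a single-column index over
 * exactly that column.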
*/ char *name, *key_name, *col_name; int i, len; int found = false; name = (char *)ctx; key_name = row[2]; col_name = row[4]; for(i = 0; (idx_list[i].key_name != NULL) && (i < MAXIDX); i++) { if (strcasecmp(idx_list[i].key_name, key_name) == 0 ) { idx_list[i].count_key++; found = true; if (strcasecmp(col_name, name) == 0) { idx_list[i].count_col++; } break; } } /* If the new Key_name, add it to the list */ if (!found) { len = strlen(key_name) + 1; idx_list[i].key_name = (char *)malloc(len); bstrncpy(idx_list[i].key_name, key_name, len); idx_list[i].count_key = 1; if (strcasecmp(col_name, name) == 0) { idx_list[i].count_col = 1; } else { idx_list[i].count_col = 0; } } return 0; } /* * Return TRUE if "one column" index over *col_name exists */ static bool check_idx(const char *col_name) { int i; int found = false; const char *query = "SHOW INDEX FROM File"; if (db_get_type_index(db) != SQL_TYPE_MYSQL) { return true; } /* Continue for MySQL */ memset(&idx_list, 0, sizeof(idx_list)); if (!db_sql_query(db, query, check_idx_handler, (void *)col_name)) { printf_tstamp("%s\n", db_strerror(db)); } for (i = 0; (idx_list[i].key_name != NULL) && (i < MAXIDX) ; i++) { /* * NOTE : if (idx_list[i].count_key > 1) then index idx_list[i].key_name is "multiple-column" index */ if ((idx_list[i].count_key == 1) && (idx_list[i].count_col == 1)) { /* "one column" index over *col_name found */ found = true; } } if (found) { if (verbose) { printf_tstamp(_("Ok. Index over the %s column already exists and dbcheck will work faster.\n"), col_name); } } else { printf_tstamp(_("Note. Index over the %s column not found, that can greatly slow down dbcheck.\n"), col_name); } return found; } /* * Create temporary one-column index */ static bool create_tmp_idx(const char *idx_name, const char *table_name, const char *col_name) { idx_tmp_name = NULL; printf_tstamp(_("Create temporary index... This may take some time!\n")); bsnprintf(buf, sizeof(buf), "CREATE INDEX %s ON %s (%s)", idx_name, table_name, col_name); if (verbose) { printf_tstamp("%s\n", buf); } if (db_sql_query(db, buf, NULL, NULL)) { idx_tmp_name = idx_name; if (verbose) { printf_tstamp(_("Temporary index created.\n")); } } else { printf_tstamp("%s\n", db_strerror(db)); return false; } return true; } /* * Drop temporary index */ static bool drop_tmp_idx(const char *idx_name, const char *table_name) { if (idx_tmp_name != NULL) { printf_tstamp(_("Drop temporary index.\n")); bsnprintf(buf, sizeof(buf), "DROP INDEX %s ON %s", idx_name, table_name); if (verbose) { printf_tstamp("%s\n", buf); } if (!db_sql_query(db, buf, NULL, NULL)) { printf_tstamp("%s\n", db_strerror(db)); return false; } else { if (verbose) { printf_tstamp(_("Temporary index %s deleted.\n"), idx_tmp_name); } } } idx_tmp_name = NULL; return true; } bacula-15.0.3/src/tools/cdp-client/0000755000175000017500000000000014771010173016633 5ustar bsbuildbsbuildbacula-15.0.3/src/tools/cdp-client/TODO0000644000175000017500000000030014771010173017314 0ustar bsbuildbsbuildPermission issues: 1-) Handle permission issues with file copy to Spool Dir 2-) Handle permission issues with file watch Watched Folders: 1-) Write list of watched folders into the Journal bacula-15.0.3/src/tools/cdp-client/cdp.h0000644000175000017500000000325714771010173017561 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. 
You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #ifndef CDP_CLI #define CDP_CLI #include "bacula.h" #ifdef HAVE_WIN32 #define HOME_VAR "APPDATA" #define SPOOL_DIR "cdp-sdir" #else #define HOME_VAR "HOME" #define SPOOL_DIR ".cdp-sdir" #endif class CDP { public: static void winToUnixPath(char *path) { #ifdef HAVE_WIN32 for (int i = 0; path[i] != '\0'; i++) { if (path[i] == '\\') { path[i] = '/'; } } #endif } static POOLMEM *spoolDir() { char *home = getenv(HOME_VAR); if (HOME_VAR == NULL) { return NULL; } CDP::winToUnixPath(home); POOLMEM *spath = get_pool_memory(PM_FNAME); Mmsg(spath, "%s/%s", home, SPOOL_DIR); return spath; } static POOLMEM *journalPath(const char *journalName) { char *home = getenv(HOME_VAR); if(home == NULL) { return NULL; } CDP::winToUnixPath(home); POOLMEM *jpath = get_pool_memory(PM_FNAME); Mmsg(jpath, "%s/%s", home, journalName); return jpath; } }; #endif bacula-15.0.3/src/tools/cdp-client/cdp-client.cpp0000644000175000017500000001405514771010173021366 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #include "backupservice.h" #include "cdp.h" static void strip_trailing_slash(char *arg) { #ifndef HAVE_WIN32 const char slash = '/'; #else const char slash = '\\'; #endif int len = strlen(arg); if (len == 0) { return; } len--; if (arg[len] == slash) { /* strip any trailing slash */ arg[len] = 0; } } static void waitForChanges(BackupService *bservice) { if (bservice->_watchers.size() > 0) { FolderWatcher *watcher = bservice->_watchers.begin()->second; #ifndef HAVE_WIN32 pthread_join(watcher->_inotifyThread, NULL); #else pthread_join(watcher->_watcherThread, NULL); #endif } else { Dmsg0(10, "No folders te be watched. 
Terminating Client.\n"); } } static void writeFolders(Journal *journal, alist *newFolders) { char *folder; void *tmp1; void *tmp2; alist savedFolders(100, false); FolderRecord *rec; // Read all the saved Folders to make sure we are // not saving any repeated Folder journal->beginTransaction("r"); for(;;) { FolderRecord *rec = journal->readFolderRecord(); if(rec == NULL) { break; } savedFolders.append((void *) rec); } journal->endTransaction(); //We add the Folders specified by the user, making sure //that we don't add any repeated one bool found = false; foreach_alist(tmp1, newFolders) { folder = (char *) tmp1; foreach_alist(tmp2, &savedFolders) { rec = (FolderRecord *) tmp2; if (strcmp(rec->path, folder) == 0) { found = true; } } if (found) { found = false; continue; } FolderRecord rec; rec.path = bstrdup(folder); if(!journal->writeFolderRecord(rec)) { Pmsg1(0, "ERROR: Could not write folder %s", rec.path); exit(-1); } } } static void makeJournalFile(Journal *journal, const char *journal_path, const char *spool_dir) { if (!journal->setJournalPath(journal_path, spool_dir)) { Pmsg1(0, "Could not set journal file to: %s\n", journal_path); exit(1); } Dmsg1(10, "Set journal file to: %s\n", journal_path); } static void makeSpoolDir(const char *spool_dir) { int rc = mkdir(spool_dir, 0750); const char *error = NULL; switch (rc) { case EACCES: error = "Permission Denied"; break; case ELOOP: error = "Symbolic Link loop"; break; case EMLINK: error = "Link would exceed LINK_MAX"; break; case ENAMETOOLONG: error = "The path length is too long"; break; case ENOENT: case ENOTDIR: error = "A component on the path does not exist"; break; case ENOSPC: error = "No space available"; break; case EROFS: error = "Read-only file system"; break; default: break; } if (error != NULL) { Pmsg2(0, "(ERROR) - could not set spool directory to %s. %s\n", spool_dir, error); exit(-1); } Dmsg1(10, "Set spool directory to: %s\n", spool_dir); } static void printHelp() { fprintf(stderr, _( PROG_COPYRIGHT "\n%sVersion: %s (%s) %s %s %s\n\n" "Usage: cdp-client\n" " -j sets the location of the journal file to \n" " -s sets spool directory to \n" " -f watches the directory for changes.\n" " -d set debug level to \n" " -? 
print this message.\n" "\n"), 2004, BDEMO, VERSION, BDATE, HOST_OS, DISTNAME, DISTVER); } /** * Main Bacula CDP Client -- User Interface Program * */ int main(int argc, char *argv[]) { char *fpath; char ch; POOLMEM *spool_dir = NULL; POOLMEM *journal_path = NULL; alist folders(100, true); BackupService bservice; Journal journal; lmgr_init_thread(); while ((ch = getopt(argc, argv, "f:s:j:d:h?")) != -1) { switch (ch) { case 's': free_and_null_pool_memory(spool_dir); spool_dir = get_pool_memory(PM_FNAME); strip_trailing_slash(optarg); CDP::winToUnixPath(optarg); pm_strcpy(spool_dir, optarg); break; case 'j': free_and_null_pool_memory(journal_path); journal_path = get_pool_memory(PM_FNAME); strip_trailing_slash(optarg); CDP::winToUnixPath(optarg); Mmsg(journal_path, "%s/%s", optarg, JOURNAL_CLI_FNAME); break; case 'f': strip_trailing_slash(optarg); fpath = bstrdup(optarg); folders.append((void *) fpath); break; case 'd': debug_level = atoi(optarg); if (debug_level <= 0) { debug_level = 1; } break; case 'h': case '?': default: printHelp(); exit(1); } } argc -= optind; if (argc) { printHelp(); exit(1); } if (spool_dir == NULL) { spool_dir = CDP::spoolDir(); } if (journal_path == NULL) { journal_path = CDP::journalPath(JOURNAL_CLI_FNAME); } makeSpoolDir(spool_dir); makeJournalFile(&journal, journal_path, spool_dir); writeFolders(&journal, &folders); bservice.start(spool_dir, &journal); waitForChanges(&bservice); free_and_null_pool_memory(spool_dir); free_and_null_pool_memory(journal_path); lmgr_cleanup_main(); return 0; } bacula-15.0.3/src/tools/cdp-client/backupservice.h0000644000175000017500000000310214771010173021626 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #ifndef BACKUPSERVICE_H #define BACKUPSERVICE_H #ifdef HAVE_WIN32 #define _STAT_DEFINED #endif #include #include #include #include "folderwatcher.h" #include "cdp.h" #include "../../plugins/fd/journal.h" #include "../../plugins/fd/file-record.h" #include /** * @brief This class controls which files should be sent to the Spool * Directory (and also have their metadata saved on the Journal). */ class BackupService: public FileChangeHandler { private: std::map _last_mtime; public: char *_spoolPath; Journal *_journal; std::map _watchers; BackupService(): _spoolPath(NULL) {} virtual ~BackupService(); void start(const char *spoolPath, Journal *journal); POOLMEM *watch(const char *folder); void stopWatching(const char *folder); /** Called by one of the @param _watchers */ void onChange(const char* fpath); }; #endif bacula-15.0.3/src/tools/cdp-client/Makefile.in0000644000175000017500000000620314771010173020701 0ustar bsbuildbsbuild# # Copyright (C) 2000-2023 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # # Bacula Tools Makefile # @MCOMMON@ srcdir = . VPATH = . .PATH: . # one up basedir = ../.. # top dir topdir = ../../.. 
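# Note: cdp-client reuses the journal code shipped with the FD plugins;
# journal.lo is produced in ../../plugins/fd by the explicit rule further
# below (make -C ../../plugins/fd journal.lo) and is then linked into
# cdp-client together with backupservice.lo and folderwatcher.lo.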
# this dir relative to top dir thisdir = src/tools/cdp-client DEBUG=@DEBUG@ DB_LIBS=@DB_LIBS@ first_rule: all dummy: # GETTEXT_LIBS = @LIBINTL@ FINDOBJS = backupservice.cpp cdp-client.cpp folderwatcher.cpp backupservice.h cdp.h folderwatcher.h ../../plugins/fd/journal.c ../../plugins/fd/journal.h ../../plugins/fd/file-record.h ../../plugins/fd/settings-record.h ../../plugins/fd/folder-record.h .SUFFIXES: .c .lo .PHONY: .DONTCARE: # inference rules .c.lo: @echo "Compiling $<" $(NO_ECHO) $(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) -I$(basedir)/lib -I$(basedir)/plugins/fd $(DINCLUDE) $(CFLAGS) $< .cpp.lo: @echo "Compiling $<" $(NO_ECHO) $(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) -I$(basedir)/lib -I$(basedir)/plugins/fd $(DINCLUDE) $(CFLAGS) $< #------------------------------------------------------------------------- all: Makefile cdp-client @echo "==== Make of cdp-client is good ====" @echo " " cdp-client: Makefile cdp-client.lo $(FINDOBJS) ../../plugins/fd/journal.lo backupservice.lo folderwatcher.lo ../../lib/libbac$(DEFAULT_ARCHIVE_TYPE) $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L../../lib -L../../findlib -o $@ cdp-client.lo ../../plugins/fd/journal.lo backupservice.lo folderwatcher.lo -lbac -lbacfind -lm $(DLIB) $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) Makefile: $(srcdir)/Makefile.in $(topdir)/config.status cd $(topdir) \ && CONFIG_FILES=$(thisdir)/$@ CONFIG_HEADERS= $(SHELL) ./config.status ../../plugins/fd/journal.lo: ../../plugins/fd/journal.c make -C ../../plugins/fd journal.lo libtool-clean: @$(RMF) -r .libs _libs clean: libtool-clean @$(RMF) core core.* a.out *.o *.lo @$(RMF) $(DIRTOOLS) realclean: clean @$(RMF) tags distclean: realclean if test $(srcdir) = .; then $(MAKE) realclean; fi (cd $(srcdir); $(RMF) Makefile) devclean: realclean if test $(srcdir) = .; then $(MAKE) realclean; fi (cd $(srcdir); $(RMF) Makefile) # Allow non-root execution of bsmtp for non-root Directors install-cdp: install install: cdp-client $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM_ALL) cdp-client $(DESTDIR)$(sbindir)/ uninstall: $(RMF) $(DESTDIR)$(sbindir)/cdp-client # Semi-automatic generation of dependencies: # Use gcc -MM because X11 `makedepend' doesn't work on all systems # and it also includes system headers. # `semi'-automatic since dependencies are generated at distribution time. depend: @$(MV) Makefile Makefile.bak @$(SED) "/^# DO NOT DELETE:/,$$ d" Makefile.bak > Makefile @$(ECHO) "# DO NOT DELETE: nice dependency list follows" >> Makefile @$(CXX) -S -M $(CPPFLAGS) $(TOKYOCABINET_INC) -I$(srcdir) -I$(basedir) -I../stored *.c >> Makefile @if test -f Makefile ; then \ $(RMF) Makefile.bak; \ else \ $(MV) Makefile.bak Makefile; \ echo " ===== Something went wrong in make depend ====="; \ fi # ----------------------------------------------------------------------- # DO NOT DELETE: nice dependency list follows bacula-15.0.3/src/tools/cdp-client/folderwatcher.h0000644000175000017500000000450414771010173021640 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. 
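For orientation, a short summary of how the pieces in this directory fit together (descriptive only, based on the code itself): FolderWatcher provides the platform-specific change detection (inotify on Linux, ReadDirectoryChangesW on Windows) and reports every relevant file event through the FileChangeHandler::onChange() callback declared below; BackupService implements that callback by copying the changed file into the spool directory and appending a matching record to the Journal.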
This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #ifndef FOLDERWATCHER_H #define FOLDERWATCHER_H #ifndef HAVE_WIN32 #include #include #include #include #endif #include "bacula.h" #include "cdp.h" /** * @brief Monitors a Folder recursively for changes on files. * Changes are signalled through the fileChanged() signal. */ class FileChangeHandler { public: /** * @brief Emitted when a file is created or modified. * @param file - Absolute path of the file. */ virtual void onChange(const char *fpath) = 0; virtual ~FileChangeHandler() {} }; #ifndef HAVE_WIN32 class FolderWatcher { private: FileChangeHandler *_changeHandler; std::map _watchedDirs; std::set _openedFiles; POOLMEM *watchDirRecursive(const char *dir); void handleEvent(struct inotify_event *e); public: int _fd; bool _run_inotify_thread; pthread_t _inotifyThread; FolderWatcher(FileChangeHandler *handler); ~FolderWatcher(); /** * @brief watches recursively the Folder. * @param folder - Absolue path of the folder. */ POOLMEM *watch(const char *folder); /** * @brief those slots are called by our INotify Thread * when it detects a change on a directory or a file. */ void handleINotifyEvents(int fd); }; #else class FolderWatcher { public: pthread_t _watcherThread; FileChangeHandler *_changeHandler; char * _watchedDirPath; HANDLE _dirHandle; bool _run_watcher_thread; FolderWatcher(FileChangeHandler *handler); ~FolderWatcher(); /** * @brief watches recursively the Folder. * @param folder - Absolue path of the folder. */ POOLMEM *watch(const char *folder); }; #endif #endif bacula-15.0.3/src/tools/cdp-client/folderwatcher.cpp0000644000175000017500000003045414771010173022176 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #include "folderwatcher.h" #ifndef HAVE_WIN32 /** Linux Implementation */ void *thread_inotify(void *arg) { FolderWatcher *watcher = (FolderWatcher *) arg; fd_set rfds; int rc; while(watcher->_run_inotify_thread) { FD_ZERO(&rfds); FD_SET(watcher->_fd, &rfds); /* Wait until an event happens or we get interrupted by a signal that we catch */ rc = select(FD_SETSIZE, &rfds, NULL, NULL, NULL); if (!watcher->_run_inotify_thread) { break; } if (rc > 0) { watcher->handleINotifyEvents(watcher->_fd); } } return NULL; } FolderWatcher::FolderWatcher(FileChangeHandler *handler) { _run_inotify_thread = false; _changeHandler = handler; } POOLMEM *FolderWatcher::watch(const char *folder) { _fd = inotify_init1(IN_CLOEXEC); if (_fd == -1) { POOLMEM *err_msg = get_pool_memory(PM_EMSG); Mmsg(err_msg, "Error: could not initialize Watcher\n"); return err_msg; } _run_inotify_thread = true; int rc = pthread_create(&_inotifyThread, NULL, thread_inotify, (void *) this); if (rc) { POOLMEM *err_msg = get_pool_memory(PM_EMSG); Mmsg(err_msg, "Error: could not start INotify Thread." 
"Please, contact Bacula Support.\n"); return err_msg; } return this->watchDirRecursive(folder); } POOLMEM *FolderWatcher::watchDirRecursive(const char *dir) { DIR *dirReader = NULL; struct dirent *dirFile = NULL; const char *separator = NULL; POOLMEM *subdirPath = NULL; char *err_msg = NULL; uint32_t mask = IN_CLOSE | IN_ATTRIB | IN_MOVE | IN_CREATE | IN_DELETE | IN_DELETE_SELF | IN_OPEN | IN_MOVE_SELF | IN_UNMOUNT | IN_ONLYDIR; int wd = inotify_add_watch(_fd, dir, mask); if (wd < 0) { err_msg = get_pool_memory(PM_EMSG); switch (errno) { case EACCES: Mmsg(err_msg, "Could not watch Directory. Access Denied for: %s", dir); break; case ENOENT: Mmsg(err_msg, "(ENOENT) Directory Not found: %s", dir); break; case ENOTDIR: Mmsg(err_msg, "(ENOTDIR) Directory Not found: %s", dir); break; case ENOMEM: Mmsg(err_msg, "Insuficient kernel memory available."); break; case ENOSPC: Mmsg(err_msg, "Exceeded limit of watched directories. " "Please increase the number of available " "file descriptors in your system."); break; default: Mmsg(err_msg, "Unknown Error. Please contact Bacula Support."); break; } goto bail_out; } Dmsg1(10, "Started Watching: %s\n", dir); _watchedDirs[wd] = bstrdup(dir); dirReader = opendir(dir); if (strcmp(dir, "/") == 0) { separator = ""; } else { separator = "/"; } while ((dirFile = readdir(dirReader)) != NULL) { if (dirFile->d_type != DT_DIR || strcmp(dirFile->d_name, ".") == 0 || strcmp(dirFile->d_name, ".." ) == 0) { continue; } subdirPath = get_pool_memory(PM_FNAME); Mmsg(subdirPath, "%s%s%s", dir, separator, dirFile->d_name); err_msg = this->watchDirRecursive(subdirPath); free_and_null_pool_memory(subdirPath); if (err_msg != NULL) { goto bail_out; } } bail_out: if (dirReader != NULL) { closedir(dirReader); } return err_msg; } void FolderWatcher::handleEvent(struct inotify_event *event) { Dmsg1(50, "INotify Event: 0x%08x\n", event->mask); POOLMEM *fpath = get_pool_memory(PM_FNAME); Mmsg(fpath, "%s/%s", _watchedDirs[event->wd], event->name ); Dmsg1(50, "TAGH: %s\n", fpath); bool openFileEvent = ((event->mask & IN_OPEN) == IN_OPEN); bool closeWriteFileEvent = ((event->mask & IN_CLOSE_WRITE) == IN_CLOSE_WRITE); bool closeNoWriteFileEvent = ((event->mask & IN_CLOSE_NOWRITE) == IN_CLOSE_NOWRITE); bool modifyEvent = ((event->mask & IN_MODIFY) == IN_MODIFY); bool attribsChangeEvent = ((event->mask & IN_ATTRIB) == IN_ATTRIB); bool movedToEvent = ((event->mask & IN_MOVED_TO) == IN_MOVED_TO); bool createEvent = ((event->mask & IN_CREATE) == IN_CREATE); bool isDir = ((event->mask & IN_ISDIR) == IN_ISDIR); if (openFileEvent && !isDir) { _openedFiles.insert(event->wd); } else if (closeWriteFileEvent && !isDir) { _openedFiles.erase(event->wd); _changeHandler->onChange(fpath); } else if (closeNoWriteFileEvent) { _openedFiles.erase(event->wd); } else if (createEvent && isDir) { this->watchDirRecursive(fpath); } else if (movedToEvent) { _changeHandler->onChange(fpath); } else if ((modifyEvent || attribsChangeEvent) && (_openedFiles.find(event->wd) == _openedFiles.end()) && !isDir) { _changeHandler->onChange(fpath); } free_and_null_pool_memory(fpath); } void FolderWatcher::handleINotifyEvents(int fd) { int readlen; struct inotify_event *event; int event_struct_size = sizeof(struct inotify_event); int i = 0; int error; int bsize = 2048; char *buffer = (char *) malloc(bsize); try_again: readlen = read(fd, buffer, bsize); error = errno; if (readlen < 0 && error == EINVAL) { bsize = bsize * 2; realloc(buffer, bsize); goto try_again; } else if (readlen < 0) { return; } while (i + event_struct_size < 
readlen) { event = (struct inotify_event *) &buffer[i]; if (event == NULL) { i += event_struct_size; continue; } if (event->len > 0 && event->wd > -1) { this->handleEvent(event); } i += event_struct_size + event->len; } free(buffer); } FolderWatcher::~FolderWatcher() { _run_inotify_thread = false; pthread_kill(_inotifyThread, SIGUSR2); pthread_join(_inotifyThread, NULL); std::map::iterator it; for(it = _watchedDirs.begin(); it != _watchedDirs.end(); it++) { inotify_rm_watch(_fd, it->first); Dmsg1(10, "Stopped Watching: %s\n", it->second); free(it->second); } } #else // HAVE_WIN32 BOOL IsDirectory(LPCTSTR szPath) { DWORD dwAttrib = GetFileAttributes(szPath); return (dwAttrib & FILE_ATTRIBUTE_DIRECTORY); } /** Windows Implementation */ void *thread_watcher(void *arg) { FolderWatcher *watcher = (FolderWatcher *) arg; HANDLE _resultEvent = CreateEvent(NULL, true, false, NULL); HANDLE _stopEvent = CreateEvent(NULL, true, false, NULL); OVERLAPPED overlapped; overlapped.hEvent = _resultEvent; int bsize = 10 * 4096; char *buffer = (char *) malloc(bsize); POOLMEM *fpath; char *fsubpath; FILE * fp; while(watcher->_run_watcher_thread) { ResetEvent(_resultEvent); FILE_NOTIFY_INFORMATION *pBuffer = (FILE_NOTIFY_INFORMATION *) buffer; DWORD dwBytesReturned = 0; SecureZeroMemory(pBuffer, bsize); if (!ReadDirectoryChangesW(watcher->_dirHandle, (LPVOID) pBuffer, bsize, true, FILE_NOTIFY_CHANGE_FILE_NAME | FILE_NOTIFY_CHANGE_LAST_WRITE | FILE_NOTIFY_CHANGE_SIZE, &dwBytesReturned, &overlapped, NULL)) { DWORD errorCode = GetLastError(); if (errorCode == ERROR_NOTIFY_ENUM_DIR) { Dmsg0(10, "WinNotify buffer overflow\n"); } else { Dmsg0(10, "Generic ReadDirectoryChangesW error\n"); } continue; } HANDLE handles[] = { _resultEvent, _stopEvent }; DWORD result = WaitForMultipleObjects( 2, handles, false, // awake once one of them arrives INFINITE); if (result == 1) { Dmsg0(10, "Received stop event, aborting folder watcher thread\n"); continue; } if (result != 0) { Dmsg1(10, "WaitForMultipleObjects failed. 
Error: %ld\n", GetLastError()); continue; } bool ok = GetOverlappedResult(watcher->_dirHandle, &overlapped, &dwBytesReturned, false); if (!ok) { DWORD errorCode = GetLastError(); if (errorCode == ERROR_NOTIFY_ENUM_DIR) { Dmsg0(10, "WinNotify buffer overflow\n"); } else { Dmsg0(10, "Generic GetOverlappedResult error\n"); } continue; } FILE_NOTIFY_INFORMATION *curEntry = pBuffer; for (;;) { fpath = get_pool_memory(PM_FNAME); fsubpath = get_pool_memory(PM_FNAME); const size_t fnameLen = curEntry->FileNameLength * 2; check_pool_memory_size(fsubpath, fnameLen); wcstombs(fsubpath, curEntry->FileName, fnameLen); Mmsg(fpath, "%s/%s", watcher->_watchedDirPath, fsubpath); CDP::winToUnixPath(fpath); Dmsg1(10, "WinNotify Event: 0x%08x\n", curEntry->Action); Dmsg1(10, "Event for file: %s\n", fpath); #ifdef UNICODE const size_t fpathSize = strlen(fpath) + 1; wchar_t *wc_fpath = new wchar_t[fpathSize]; mbstowcs(wc_fpath, fpath, fpathSize); LPCTSTR lpct_fpath = wc_fpath; #else LPCTSTR lpct_fpath = fpath; #endif // Only notify changes if fpath is a file // and if fpath is NOT a new empty file if ((curEntry->Action == FILE_ACTION_MODIFIED || curEntry->Action == FILE_ACTION_RENAMED_NEW_NAME) && !IsDirectory(lpct_fpath)) { /* Windows may still be holding the file lock, so we only notify the change when we make sure we can access the file */ for (int i = 0; i < 120; i++) { // Run for 1 minute fp = fopen(fpath, "r"); if (fp) { Dmsg1(10, "Change detected in file: %s\n", fpath); watcher->_changeHandler->onChange(fpath); fclose(fp); break; } Sleep(500); } } free_and_null_pool_memory(fsubpath); free_and_null_pool_memory(fpath); if (curEntry->NextEntryOffset == 0) { break; } curEntry = (FILE_NOTIFY_INFORMATION *) ((char *) curEntry + curEntry->NextEntryOffset); } } return NULL; } FolderWatcher::FolderWatcher(FileChangeHandler *handler) { _run_watcher_thread = false; _changeHandler = handler; } POOLMEM *FolderWatcher::watch(const char *folder) { _watchedDirPath = bstrdup(folder); const size_t size = cstrlen(folder) + 1; wchar_t *wfolder = new wchar_t[size]; mbstowcs(wfolder, folder, size); _dirHandle = CreateFileW( wfolder, FILE_LIST_DIRECTORY, FILE_SHARE_WRITE | FILE_SHARE_READ | FILE_SHARE_DELETE, NULL, OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS | FILE_FLAG_OVERLAPPED, NULL); if (_dirHandle == INVALID_HANDLE_VALUE) { _dirHandle = NULL; POOLMEM *err_msg = get_pool_memory(PM_EMSG); Mmsg(err_msg, "Error: could not create handle for folder %s. " "Please, contact Bacula Support.\n", folder); return err_msg; } _run_watcher_thread = true; int rc = pthread_create(&_watcherThread, NULL, thread_watcher, (void *) this); if (rc) { POOLMEM *err_msg = get_pool_memory(PM_EMSG); Mmsg(err_msg, "Error: could not start Watcher Thread. " "Please, contact Bacula Support.\n"); return err_msg; } delete wfolder; Dmsg1(10, "Started watching: %s\n", folder); return NULL; } FolderWatcher::~FolderWatcher() { _run_watcher_thread = false; pthread_kill(_watcherThread, SIGUSR2); pthread_join(_watcherThread, NULL); CancelIo(_dirHandle); CloseHandle(_dirHandle); _dirHandle = NULL; Dmsg1(10, "Stopped watching: %s\n", _watchedDirPath); free(_watchedDirPath); } #endif bacula-15.0.3/src/tools/cdp-client/backupservice.cpp0000644000175000017500000000657114771010173022176 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. 
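The Linux and Windows implementations above sit behind the same small interface from folderwatcher.h: construct a FolderWatcher with a FileChangeHandler and call watch(). A minimal usage sketch follows; the PrintHandler class and the watched path are hypothetical, and error handling is reduced to printing the returned message.

#include "folderwatcher.h"

// Hypothetical handler that only logs the changes it is notified about.
class PrintHandler: public FileChangeHandler {
public:
   void onChange(const char *fpath) {
      Dmsg1(10, "Changed: %s\n", fpath);
   }
};

static void watch_example()
{
   PrintHandler handler;
   FolderWatcher watcher(&handler);
   // watch() returns NULL on success or an error message in pool memory.
   POOLMEM *err = watcher.watch("/srv/data");   // hypothetical folder
   if (err != NULL) {
      Pmsg1(0, "%s\n", err);
      free_and_null_pool_memory(err);
   }
   // The watcher thread keeps running until the FolderWatcher is destroyed.
}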
You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #include "backupservice.h" void BackupService::start(const char *spoolPath, Journal *j) { POOLMEM *err_msg; _journal = j; _spoolPath = bstrdup(spoolPath); FolderRecord *rec; _journal->beginTransaction("r"); for (;;) { rec = _journal->readFolderRecord(); if(rec == NULL) { break; } FolderWatcher *w = new FolderWatcher(this); err_msg = w->watch(rec->path); if (err_msg != NULL) { Dmsg2(10, "Error while trying to watch %s. %s", rec->path, err_msg); free_and_null_pool_memory(err_msg); } else { _watchers[rec->path] = w; } delete rec; } _journal->endTransaction(); } POOLMEM *BackupService::watch(const char *folder) { POOLMEM *err_msg; char *fpath = bstrdup(folder); CDP::winToUnixPath(fpath); FolderWatcher *w = new FolderWatcher(this); err_msg = w->watch(fpath); if (err_msg == NULL) { FolderRecord rec; rec.path = fpath; _journal->writeFolderRecord(rec); _watchers[rec.path] = w; } else { Dmsg2(10, "Error while trying to watch %s. %s", fpath, err_msg); delete w; } return err_msg; } void BackupService::stopWatching(const char *folder) { _journal->removeFolderRecord(folder); delete _watchers[folder]; _watchers[folder] = NULL; } void BackupService::onChange(const char *fpath) { char *tmp_fpath = bstrdup(fpath); char *fname = basename(tmp_fpath); FileRecord rec; rec.name = bstrdup(fpath); POOLMEM *spoolFilename = get_pool_memory(PM_FNAME); if (strcmp(fpath, _journal->_jPath) == 0) { Dmsg0(10, "Change on Journal File ignored.\n"); goto bail_out; } if (!rec.encode_attrs()) { goto bail_out; } if (rec.mtime - _last_mtime[fpath] < 5) { Dmsg3(50, "File %s: current mtime: %d. Previous mtime: %d. Ignoring\n", rec.name, rec.mtime, _last_mtime[fpath]); goto bail_out; } _last_mtime[fpath] = rec.mtime; char str_mtime[50]; edit_int64(rec.mtime, str_mtime); Mmsg(spoolFilename, "%s/%s_%s", _spoolPath, str_mtime, fname); if (copyfile(fpath, spoolFilename) == 0) { Dmsg2(10, "Copied file %s into %s\n", fpath, spoolFilename); rec.sname = bstrdup(spoolFilename); _journal->writeFileRecord(rec); } else { Dmsg2(10, "Could not copy file %s into %s\n", fpath, spoolFilename); } bail_out: free(tmp_fpath); free_and_null_pool_memory(spoolFilename); } BackupService::~BackupService() { //Delete all Watchers std::map::iterator it; for(it = _watchers.begin(); it != _watchers.end(); it++) { delete it->second; } } bacula-15.0.3/src/tools/bvfs_test.c0000644000175000017500000002077414771010173016766 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * * Program to test cache path * * Eric Bollengier, August 2009 * * */ #include "bacula.h" #include "cats/cats.h" #include "cats/bvfs.h" #include "findlib/find.h" /* Local variables */ static BDB *db; static const char *file = "COPYRIGHT"; static DBId_t fnid=0; static const char *db_name = "regress"; static const char *db_user = "regress"; static const char *db_password = ""; static const char *db_host = NULL; static const char *db_ssl_mode = NULL; static const char *db_ssl_key = NULL; static const char *db_ssl_cert = NULL; static const char *db_ssl_ca = NULL; static const char *db_ssl_capath = NULL; static const char *db_ssl_cipher = NULL; static void usage() { fprintf(stderr, _( PROG_COPYRIGHT "\n%sVersion: %s (%s)\n" " -d set debug level to \n" " -dt print timestamp in debug output\n" " -n specify the database name (default bacula)\n" " -u specify database user name (default bacula)\n" " -P specify database host (default NULL)\n" " -k path name to the key file (default NULL)\n" " -e path name to the certificate file (default NULL)\n" " -a path name to the CA certificate file (default NULL)\n" " -w specify working directory\n" " -j specify jobids\n" " -p specify path\n" " -f specify file\n" " -l maximum tuple to fetch\n" " -T truncate cache table before starting\n" " -v verbose\n" " -? print this message\n\n"), 2001, "", VERSION, BDATE); exit(1); } static int result_handler(void *ctx, int fields, char **row) { Bvfs *vfs = (Bvfs *)ctx; ATTR *attr = vfs->get_attr(); char empty[] = "A A A A A A A A A A A A A A"; memset(&attr->statp, 0, sizeof(struct stat)); decode_stat((row[BVFS_LStat] && row[BVFS_LStat][0])?row[BVFS_LStat]:empty, &attr->statp, sizeof(attr->statp), &attr->LinkFI); if (bvfs_is_dir(row) || bvfs_is_file(row)) { /* display clean stuffs */ if (bvfs_is_dir(row)) { pm_strcpy(attr->ofname, bvfs_basename_dir(row[BVFS_Name])); } else { /* if we see the requested file, note his filenameid */ if (bstrcmp(row[BVFS_Name], file)) { fnid = str_to_int64(row[BVFS_FilenameId]); } pm_strcpy(attr->ofname, row[BVFS_Name]); } print_ls_output(vfs->get_jcr(), attr); } else { Pmsg5(0, "JobId=%s FileId=%s\tMd5=%s\tVolName=%s\tVolInChanger=%s\n", row[BVFS_JobId], row[BVFS_FileId], row[BVFS_Md5], row[BVFS_VolName], row[BVFS_VolInchanger]); pm_strcpy(attr->ofname, file); print_ls_output(vfs->get_jcr(), attr); } return 0; } /* number of thread started */ int main (int argc, char *argv[]) { int ch; char *jobids = (char *)"1"; char *path=NULL, *client=NULL; uint64_t limit=0; bool clean=false; setlocale(LC_ALL, ""); bindtextdomain("bacula", LOCALEDIR); textdomain("bacula"); init_stack_dump(); Dmsg0(0, "Starting bvfs_test tool\n"); my_name_is(argc, argv, "bvfs_test"); init_msg(NULL, NULL); OSDependentInit(); while ((ch = getopt(argc, argv, "h:o:k:e:a:c:l:d:n:P:Su:vf:w:?j:p:f:T")) != -1) { switch (ch) { case 'd': /* debug level */ if (*optarg == 't') { dbg_timestamp = true; } else { debug_level = atoi(optarg); if (debug_level <= 0) { debug_level = 1; } } break; case 'l': limit = str_to_int64(optarg); break; case 'c': client = optarg; break; case 'h': db_host = optarg; break; case 'o': db_ssl_mode = optarg; break; case 'k': db_ssl_key= optarg; break; case 'e': db_ssl_cert= optarg; break; case 'a': db_ssl_ca= optarg; break; case 'n': db_name = optarg; break; case 'w': working_directory = optarg; break; case 'u': db_user = optarg; break; case 'P': db_password = optarg; break; case 'v': verbose++; break; case 'p': path = optarg; break; case 'f': file = optarg; break; case 'j': jobids = optarg; break; case 
'T': clean = true; break; case '?': default: usage(); } } argc -= optind; argv += optind; if (argc != 0) { Pmsg0(0, _("Wrong number of arguments: \n")); usage(); } JCR *bjcr = new_jcr(sizeof(JCR), NULL); bjcr->JobId = getpid(); bjcr->setJobType(JT_CONSOLE); bjcr->setJobLevel(L_FULL); bjcr->JobStatus = JS_Running; bjcr->client_name = get_pool_memory(PM_FNAME); pm_strcpy(bjcr->client_name, "Dummy.Client.Name"); bstrncpy(bjcr->Job, "bvfs_test", sizeof(bjcr->Job)); if ((db = db_init_database(NULL, NULL, db_name, db_user, db_password, db_host, 0, NULL, db_ssl_mode, db_ssl_key, db_ssl_cert, db_ssl_ca, db_ssl_capath, db_ssl_cipher, false, false)) == NULL) { Emsg0(M_ERROR_TERM, 0, _("Could not init Bacula database\n")); } Dmsg1(0, "db_type=%s\n", db_get_engine_name(db)); if (!db_open_database(NULL, db)) { Emsg0(M_ERROR_TERM, 0, db_strerror(db)); } Dmsg0(200, "Database opened\n"); if (verbose) { Pmsg2(000, _("Using Database: %s, User: %s\n"), db_name, db_user); } bjcr->db = db; if (clean) { Pmsg0(0, "Clean old table\n"); db_sql_query(db, "DELETE FROM PathHierarchy", NULL, NULL); db_sql_query(db, "UPDATE Job SET HasCache=0", NULL, NULL); db_sql_query(db, "DELETE FROM PathVisibility", NULL, NULL); bvfs_update_cache(bjcr, db); } Bvfs fs(bjcr, db); fs.set_handler(result_handler, &fs); fs.set_jobids(jobids); fs.update_cache(); if (limit) fs.set_limit(limit); if (path) { fs.ch_dir(path); fs.ls_special_dirs(); fs.ls_dirs(); while (fs.ls_files()) { fs.next_offset(); } if (fnid && client) { alist clients(1, not_owned_by_alist); clients.append(client); Pmsg0(0, "---------------------------------------------\n"); Pmsg1(0, "Getting file version for %s\n", file); fs.get_all_file_versions(fs.get_pwd(), fnid, &clients); } exit (0); } Pmsg0(0, "list /\n"); fs.ch_dir("/"); fs.ls_special_dirs(); fs.ls_dirs(); fs.ls_files(); Pmsg0(0, "list /tmp/\n"); fs.ch_dir("/tmp/"); fs.ls_special_dirs(); fs.ls_dirs(); fs.ls_files(); Pmsg0(0, "list /tmp/regress/\n"); fs.ch_dir("/tmp/regress/"); fs.ls_special_dirs(); fs.ls_files(); fs.ls_dirs(); Pmsg0(0, "list /tmp/regress/build/\n"); fs.ch_dir("/tmp/regress/build/"); fs.ls_special_dirs(); fs.ls_dirs(); fs.ls_files(); fs.get_all_file_versions(1, 347, (char*)"zog4-fd"); char p[200]; strcpy(p, "/tmp/toto/rep/"); bvfs_parent_dir(p); if(strcmp(p, "/tmp/toto/")) { Pmsg0(000, "Error in bvfs_parent_dir\n"); } bvfs_parent_dir(p); if(strcmp(p, "/tmp/")) { Pmsg0(000, "Error in bvfs_parent_dir\n"); } bvfs_parent_dir(p); if(strcmp(p, "/")) { Pmsg0(000, "Error in bvfs_parent_dir\n"); } bvfs_parent_dir(p); if(strcmp(p, "")) { Pmsg0(000, "Error in bvfs_parent_dir\n"); } bvfs_parent_dir(p); if(strcmp(p, "")) { Pmsg0(000, "Error in bvfs_parent_dir\n"); } return 0; } bacula-15.0.3/src/tools/crypto_test.c0000644000175000017500000001251714771010173017342 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* the goal is to show how to use the functions in lib/crypto.c * to sign a stream of data */ #include "bacula.h" #include "../lib/unittests.h" /* the goal is to show how to use the functions in lib/crypto.c * to sign a stream of data and verify the signature * the code come */ int test_signature() { JCR *jcr = NULL; X509_KEYPAIR *pki_keypair_pub; X509_KEYPAIR *pki_keypair_priv; // these two keypairs could only be one uint8_t data[65536]; uint8_t mysignature[65536]; uint8_t mysignature2[65536]; int ret; log("Test signatures"); int len=sizeof(data); // Find the keys and certificates const char *tmp = getenv("tmp"); if (!tmp) { tmp="/tmp"; } else { printf("Use tmp=%s\n", tmp); } POOL_MEM filename(PM_FNAME); struct stat statbuf; Mmsg(filename, "%s/cryptokeypair.pem", tmp); if (stat(filename.c_str(), &statbuf)!=0) { fprintf(stderr, "crypto pair not found: %s\n", filename.c_str()); ok(false, "crypto pair not found"); return 1; } // load the keypair pki_keypair_pub = crypto_keypair_new(); pki_keypair_priv = crypto_keypair_new(); // load public certificate ret = crypto_keypair_load_cert(pki_keypair_pub, filename.c_str()); ok(ret, "load public certificate"); // load public certificate and its private key into the other keypair ret = crypto_keypair_load_cert(pki_keypair_priv, filename.c_str()); // needed to initialize keyid ret = crypto_keypair_load_key(pki_keypair_priv, filename.c_str(), NULL, NULL); ok(ret, "load private key"); // in crypto_setup_digests(bctx_t &bctx) crypto_digest_t signing_algorithm = CRYPTO_DIGEST_SHA256; DIGEST *signing_digest = crypto_digest_new(jcr, signing_algorithm); ok(signing_digest != NULL, "signature digest initialization"); // call crypto_digest_update() int cont = 1; for (int i=0; cont != 0 && i<1000; i++) { cont = crypto_digest_update(signing_digest, data, len); } ok(cont!=0, "update"); // in crypto_terminate_digests(bctx_t &bctx) SIGNATURE *sig = crypto_sign_new(jcr); ok(sig != NULL, "crypto_sign_new"); ret = crypto_sign_add_signer(sig, signing_digest, pki_keypair_priv); ok(ret, "An error occurred while adding signer to the stream"); // Get signature size uint32_t size = 0; ret = crypto_sign_encode(sig, NULL, &size); // this is mandatory ok(ret, "retrieving the size of the signature"); ok(size<=sizeof(mysignature), "check signature size"); ret = crypto_sign_encode(sig, mysignature, &size); ok(ret, "sign the stream"); // decode the "encoded" signature SIGNATURE *sig2 = crypto_sign_decode(jcr, mysignature, size); ok(sig2 != NULL, "decode the signature"); // verify_signature(r_ctx &rctx) // reuse the same digest (the lazy way) crypto_error_t crypto_err = crypto_sign_verify(sig2, pki_keypair_pub, signing_digest); ok(crypto_err == CRYPTO_ERROR_NONE, "crypto_sign_verify"); { // Now do the same calculation on another piece of data and check // that the new digest does not match the previous signature DIGEST *signing_digest = crypto_digest_new(jcr, signing_algorithm); const char *lorem_ipsum = "Lorem Ipsum"; int upd = crypto_digest_update(signing_digest, (const uint8_t *)lorem_ipsum, strlen(lorem_ipsum)); SIGNATURE *sig = crypto_sign_new(jcr); int ret = crypto_sign_add_signer(sig, signing_digest, pki_keypair_priv); uint32_t size = 0; crypto_sign_encode(sig, NULL, &size); // this is required to initialize size=~122 int enc = crypto_sign_encode(sig, mysignature2, &size); SIGNATURE *sig3 = crypto_sign_decode(jcr, mysignature2, size); crypto_error_t c_err = crypto_sign_verify(sig3, pki_keypair_pub, signing_digest); ok(signing_digest!=NULL && upd!=0 && ret && enc && sig2 != NULL
&& c_err== CRYPTO_ERROR_NONE, "second digest"); // the line below throw a Qmsg() // openssl.c:79 OpenSSL digest Verify final failed: ERR=error:04091068:lib(4):func(145):reason(104) crypto_error_t c_err2 = crypto_sign_verify(sig2, pki_keypair_pub, signing_digest); ok(c_err2 == CRYPTO_ERROR_BAD_SIGNATURE, "second digest should not match first signature"); crypto_sign_free(sig); crypto_sign_free(sig3); crypto_digest_free(signing_digest); } // Release everything crypto_sign_free(sig); sig = NULL; crypto_sign_free(sig2); sig2 = NULL; crypto_digest_free(signing_digest); crypto_keypair_free(pki_keypair_pub); crypto_keypair_free(pki_keypair_priv); return 0; } int main() { setup_daemon_message_queue(); // a Qmsg() is thrown Unittests sign_test("crypto_test"); test_signature(); int ret = report(); free_daemon_message_queue(); // release the Qmsg() return ret; } bacula-15.0.3/src/tools/timelimit.c0000644000175000017500000003643614771010173016766 0ustar bsbuildbsbuild/*- * Copyright (c) 2001, 2007 - 2010 Peter Pentchev * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "../config.h" /* we hope all OS's have those..*/ #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_ERR #include #endif /* HAVE_ERR */ #ifdef HAVE_SYSEXITS_H #include #else #define EX_OK 0 /* successful termination */ #define EX__BASE 64 /* base value for error messages */ #define EX_USAGE 64 /* command line usage error */ #define EX_DATAERR 65 /* data format error */ #define EX_NOINPUT 66 /* cannot open input */ #define EX_NOUSER 67 /* addressee unknown */ #define EX_NOHOST 68 /* host name unknown */ #define EX_UNAVAILABLE 69 /* service unavailable */ #define EX_SOFTWARE 70 /* internal software error */ #define EX_OSERR 71 /* system error (e.g., can't fork) */ #define EX_OSFILE 72 /* critical OS file missing */ #define EX_CANTCREAT 73 /* can't create (user) output file */ #define EX_IOERR 74 /* input/output error */ #define EX_TEMPFAIL 75 /* temp failure; user is invited to retry */ #define EX_PROTOCOL 76 /* remote error in protocol */ #define EX_NOPERM 77 /* permission denied */ #define EX_CONFIG 78 /* configuration error */ #define EX__MAX 78 /* maximum listed value */ #endif /* HAVE_SYSEXITS_H */ #ifndef __unused # ifdef __GNUC__ # if GCC_VERSION >= 3004 # define __unused __attribute__((unused)) # else # define __unused # endif # else /* __GNUC__ */ # define __unused # endif /* __GNUC__ */ #endif /* __unused */ #ifndef __dead2 #ifdef __GNUC__ #define __dead2 __attribute__((noreturn)) #else /* __GNUC__ */ #define __dead2 #endif /* __GNUC__ */ #endif /* __dead2 */ #define PARSE_CMDLINE unsigned long warntime, warnmsec, killtime, killmsec; unsigned long warnsig, killsig; volatile int fdone, falarm, fsig, sigcaught; int propagate, quiet; static struct { const char *name, opt, issig; unsigned long *sec, *msec; } envopts[] = { {"KILLSIG", 'S', 1, &killsig, NULL}, {"KILLTIME", 'T', 0, &killtime, &killmsec}, {"WARNSIG", 's', 1, &warnsig, NULL}, {"WARNTIME", 't', 0, &warntime, &warnmsec}, {NULL, 0, 0, NULL, NULL} }; static struct { const char *name; int num; } signals[] = { /* We kind of assume that the POSIX-mandated signals are present */ {"ABRT", SIGABRT}, {"ALRM", SIGALRM}, {"BUS", SIGBUS}, {"CHLD", SIGCHLD}, {"CONT", SIGCONT}, {"FPE", SIGFPE}, {"HUP", SIGHUP}, {"ILL", SIGILL}, {"INT", SIGINT}, {"KILL", SIGKILL}, {"PIPE", SIGPIPE}, {"QUIT", SIGQUIT}, {"SEGV", SIGSEGV}, {"STOP", SIGSTOP}, {"TERM", SIGTERM}, {"TSTP", SIGTSTP}, {"TTIN", SIGTTIN}, {"TTOU", SIGTTOU}, {"USR1", SIGUSR1}, {"USR2", SIGUSR2}, {"PROF", SIGPROF}, {"SYS", SIGSYS}, {"TRAP", SIGTRAP}, {"URG", SIGURG}, {"VTALRM", SIGVTALRM}, {"XCPU", SIGXCPU}, {"XFSZ", SIGXFSZ}, /* Some more signals found on a Linux 2.6 system */ #ifdef SIGIO {"IO", SIGIO}, #endif #ifdef SIGIOT {"IOT", SIGIOT}, #endif #ifdef SIGLOST {"LOST", SIGLOST}, #endif #ifdef SIGPOLL {"POLL", SIGPOLL}, #endif #ifdef SIGPWR {"PWR", SIGPWR}, #endif #ifdef SIGSTKFLT {"STKFLT", SIGSTKFLT}, #endif #ifdef SIGWINCH {"WINCH", SIGWINCH}, #endif /* Some more signals found on a FreeBSD 8.x system */ #ifdef SIGEMT {"EMT", SIGEMT}, #endif #ifdef SIGINFO {"INFO", SIGINFO}, #endif #ifdef SIGLWP {"LWP", SIGLWP}, #endif #ifdef SIGTHR {"THR", SIGTHR}, #endif }; #define SIGNALS (sizeof(signals) / sizeof(signals[0])) #ifndef HAVE_ERR static void err(int, const char *, ...); static void errx(int, const char *, ...); #endif /* !HAVE_ERR */ static void usage(void); static void init(int, char *[]); static pid_t doit(char *[]); static void child(char *[]); static void raisesignal(int) __dead2; static void 
setsig_fatal(int, void (*)(int)); static void setsig_fatal_gen(int, void (*)(int), int, const char *); static void terminated(const char *); #ifndef HAVE_ERR static void err(int code, const char *fmt, ...) { va_list v; va_start(v, fmt); vfprintf(stderr, fmt, v); va_end(v); fprintf(stderr, ": %s\n", strerror(errno)); exit(code); } static void errx(int code, const char *fmt, ...) { va_list v; va_start(v, fmt); vfprintf(stderr, fmt, v); va_end(v); fprintf(stderr, "\n"); exit(code); } static void warnx(const char *fmt, ...) { va_list v; va_start(v, fmt); vfprintf(stderr, fmt, v); va_end(v); fprintf(stderr, "\n"); } #endif /* !HAVE_ERR */ static void usage(void) { errx(EX_USAGE, "usage: timelimit [-pq] [-S ksig] [-s wsig] " "[-T ktime] [-t wtime] command"); } static void atou_fatal(const char *s, unsigned long *sec, unsigned long *msec, int issig) { unsigned long v, vm, mul; const char *p; size_t i; if (s[0] < '0' || s[0] > '9') { if (s[0] == '\0' || !issig) usage(); for (i = 0; i < SIGNALS; i++) if (!strcmp(signals[i].name, s)) break; if (i == SIGNALS) usage(); *sec = (unsigned long)signals[i].num; if (msec != NULL) *msec = 0; return; } v = 0; for (p = s; (*p >= '0') && (*p <= '9'); p++) v = v * 10 + *p - '0'; if (*p == '\0') { *sec = v; if (msec != NULL) *msec = 0; return; } else if (*p != '.' || msec == NULL) { usage(); } p++; vm = 0; mul = 1000000; for (; (*p >= '0') && (*p <= '9'); p++) { vm = vm * 10 + *p - '0'; mul = mul / 10; } if (*p != '\0') usage(); else if (mul < 1) errx(EX_USAGE, "no more than microsecond precision"); #ifndef HAVE_SETITIMER if (msec != 0) errx(EX_UNAVAILABLE, "subsecond precision not supported on this platform"); #endif *sec = v; *msec = vm * mul; } static void init(int argc, char *argv[]) { #ifdef PARSE_CMDLINE int ch, listsigs; #endif int optset; unsigned i; char *s; /* defaults */ quiet = 0; warnsig = SIGTERM; killsig = SIGKILL; warntime = 900; warnmsec = 0; killtime = 5; killmsec = 0; optset = 0; /* process environment variables first */ for (i = 0; envopts[i].name != NULL; i++) if ((s = getenv(envopts[i].name)) != NULL) { atou_fatal(s, envopts[i].sec, envopts[i].msec, envopts[i].issig); optset = 1; } #ifdef PARSE_CMDLINE listsigs = 0; while ((ch = getopt(argc, argv, "+lqpS:s:T:t:")) != -1) { switch (ch) { case 'l': listsigs = 1; break; case 'p': propagate = 1; break; case 'q': quiet = 1; break; default: /* check if it's a recognized option */ for (i = 0; envopts[i].name != NULL; i++) if (ch == envopts[i].opt) { atou_fatal(optarg, envopts[i].sec, envopts[i].msec, envopts[i].issig); optset = 1; break; } if (envopts[i].name == NULL) usage(); } } if (listsigs) { for (i = 0; i < SIGNALS; i++) printf("%s%c", signals[i].name, i + 1 < SIGNALS? ' ': '\n'); exit(EX_OK); } #else optind = 1; #endif if (!optset) /* && !quiet? 
*/ warnx("using defaults: warntime=%lu, warnsig=%lu, " "killtime=%lu, killsig=%lu", warntime, warnsig, killtime, killsig); argc -= optind; argv += optind; if (argc == 0) usage(); /* sanity checks */ if ((warntime == 0 && warnmsec == 0) || (killtime == 0 && killmsec == 0)) usage(); } static void sigchld(int sig __unused) { fdone = 1; } static void sigalrm(int sig __unused) { falarm = 1; } static void sighandler(int sig) { sigcaught = sig; fsig = 1; } static void setsig_fatal(int sig, void (*handler)(int)) { setsig_fatal_gen(sig, handler, 1, "setting"); } static void setsig_fatal_gen(int sig, void (*handler)(int), int nocld, const char *what) { #ifdef HAVE_SIGACTION struct sigaction act; memset(&act, 0, sizeof(act)); act.sa_handler = handler; act.sa_flags = 0; #ifdef SA_NOCLDSTOP if (nocld) act.sa_flags |= SA_NOCLDSTOP; #endif /* SA_NOCLDSTOP */ if (sigaction(sig, &act, NULL) < 0) err(EX_OSERR, "%s signal handler for %d", what, sig); #else /* HAVE_SIGACTION */ if (signal(sig, handler) == SIG_ERR) err(EX_OSERR, "%s signal handler for %d", what, sig); #endif /* HAVE_SIGACTION */ } static void settimer(const char *name, unsigned long sec, unsigned long msec) { #ifdef HAVE_SETITIMER struct itimerval tval; tval.it_interval.tv_sec = tval.it_interval.tv_usec = 0; tval.it_value.tv_sec = sec; tval.it_value.tv_usec = msec; if (setitimer(ITIMER_REAL, &tval, NULL) == -1) err(EX_OSERR, "could not set the %s timer", name); #else alarm(sec); #endif } static pid_t doit(char *argv[]) { pid_t pid; /* install signal handlers */ fdone = falarm = fsig = sigcaught = 0; setsig_fatal(SIGALRM, sigalrm); setsig_fatal(SIGCHLD, sigchld); setsig_fatal(SIGTERM, sighandler); setsig_fatal(SIGHUP, sighandler); setsig_fatal(SIGINT, sighandler); setsig_fatal(SIGQUIT, sighandler); /* fork off the child process */ if ((pid = fork()) < 0) err(EX_OSERR, "fork"); if (pid == 0) child(argv); /* sleep for the allowed time */ settimer("warning", warntime, warnmsec); while (!(fdone || falarm || fsig)) pause(); alarm(0); /* send the warning signal */ if (fdone) return (pid); if (fsig) terminated("run"); falarm = 0; if (!quiet) warnx("sending warning signal %lu", warnsig); kill(pid, (int) warnsig); #ifndef HAVE_SIGACTION /* reset our signal handlers, just in case */ setsig_fatal(SIGALRM, sigalrm); setsig_fatal(SIGCHLD, sigchld); setsig_fatal(SIGTERM, sighandler); setsig_fatal(SIGHUP, sighandler); setsig_fatal(SIGINT, sighandler); setsig_fatal(SIGQUIT, sighandler); #endif /* HAVE_SIGACTION */ /* sleep for the grace time */ settimer("kill", killtime, killmsec); while (!(fdone || falarm || fsig)) pause(); alarm(0); /* send the kill signal */ if (fdone) return (pid); if (fsig) terminated("grace"); if (!quiet) warnx("sending kill signal %lu", killsig); kill(pid, (int) killsig); setsig_fatal_gen(SIGCHLD, SIG_DFL, 0, "restoring"); return (pid); } static void terminated(const char *period) { errx(EX_SOFTWARE, "terminated by signal %d during the %s period", sigcaught, period); } static void child(char *argv[]) { execvp(argv[0], argv); err(EX_OSERR, "executing %s", argv[0]); } static __dead2 void raisesignal (int sig) { setsig_fatal_gen(sig, SIG_DFL, 0, "restoring"); raise(sig); while (1) pause(); /* NOTREACHED */ } int main(int argc, char *argv[]) { pid_t pid; int status; init(argc, argv); argc -= optind; argv += optind; pid = doit(argv); if (waitpid(pid, &status, 0) == -1) err(EX_OSERR, "could not get the exit status for process %ld", (long)pid); if (WIFEXITED(status)) return (WEXITSTATUS(status)); else if (!WIFSIGNALED(status)) return 
(EX_OSERR); if (propagate) raisesignal(WTERMSIG(status)); else return (WTERMSIG(status) + 128); } bacula-15.0.3/src/tools/bregtest.c0000644000175000017500000000651014771010173016576 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Test program for testing regular expressions. * * Kern Sibbald, MMVI * */ /* * If you define BACULA_REGEX, bregex will be built with the * Bacula bregex library, which is the same code that we * use on Win32, thus using Linux, you can test your Win32 * expressions. Otherwise, this program will link with the * system library routines. */ //#define BACULA_REGEX #include "bacula.h" #include #include "lib/breg.h" static void usage() { fprintf(stderr, "\n" "Usage: bregtest [-d debug_level] [-s] -f -e /test/test2/\n" " -f specify file of data to be matched\n" " -e specify expression\n" " -s sed output\n" " -d set debug level to \n" " -dt print timestamp in debug output\n" " -? print this message.\n" "\n"); exit(1); } int main(int argc, char *const *argv) { char *fname = NULL; char *expr = NULL; int ch; bool sed=false; char data[1000]; FILE *fd; setlocale(LC_ALL, ""); bindtextdomain("bacula", LOCALEDIR); textdomain("bacula"); while ((ch = getopt(argc, argv, "sd:f:e:")) != -1) { switch (ch) { case 'd': /* set debug level */ if (*optarg == 't') { dbg_timestamp = true; } else { debug_level = atoi(optarg); if (debug_level <= 0) { debug_level = 1; } } break; case 'f': /* data */ fname = optarg; break; case 'e': expr = optarg; break; case 's': sed=true; break; case '?': default: usage(); } } argc -= optind; argv += optind; if (!fname) { printf("A data file must be specified.\n"); usage(); } if (!expr) { printf("An expression must be specified.\n"); usage(); } OSDependentInit(); struct stat sp; stat("/etc/passwd", &sp); alist *list; char *p; list = get_bregexps(expr); if (!list) { printf("Can't use %s as 'sed' expression\n", expr); exit (1); } fd = bfopen(fname, "r"); if (!fd) { printf(_("Could not open data file: %s\n"), fname); exit(1); } while (fgets(data, sizeof(data)-1, fd)) { strip_trailing_newline(data); apply_bregexps(data, NULL, list, &p); if (sed) { printf("%s\n", p); } else { printf("%s => %s\n", data, p); } } fclose(fd); free_bregexps(list); delete list; exit(0); } /* TODO: - ajout /g - tests * test avec /i * test avec un sed et faire un diff * test avec une boucle pour voir les fuites */ bacula-15.0.3/src/tools/bjoblist.c0000644000175000017500000001402514771010173016567 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. 
This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #include "bacula.h" #include "filed/fd_plugins.h" #include "findlib/bfile.h" static const char *working = "./"; #define USE_JOB_LIST #define COMMAND_LINE_TOOL // See in src/plugin/fd/fd_common.h #define MAX_RETURN_PRUNE 10 #define RETRY_LOCK 10 #define ALWAYS_LEVEL 0 #define CRITICAL_LEVEL 100 #define STANDARD_LEVEL 300 #define PROG_NAME "bjoblist" #include "plugins/fd/fd_common.h" void *start_heap; int main(int argc, char *argv[]) { int opt; int lock_return = 1; // Good by default int lock_fd; char lock_name[20] = "/tmp/bjoblist.lck"; POOL_MEM pool_mem; POOLMEM *lock_poolmem = get_pool_memory(PM_NAME); alist tmp_list(MAX_RETURN_PRUNE, owned_by_alist); bool store_return = true; bool search_return = true; char *to_delete_job; // Args to be received by the command line char *dat_file = NULL; char *key = NULL; char *prev_job = NULL; char *curr_job = NULL; char level = 0; char *data = NULL; bool is_store = false; bool is_search = false; bool use_lock = false; debug_level = 0; // Parsing of command line arguments while ((opt = getopt(argc, argv, "w:f:k:p:j:l:D:sSiLd:h")) != -1) { switch (opt) { case 'w': // Working dir working = working_directory = optarg; // working is for fd_common.h , working directory global variable in bacula break; case 'f': // datfile dat_file = optarg; break; case 'k': // key (hostname + volume name) key = optarg; break; case 'p': // previous job prev_job = optarg; break; case 'j': // current job curr_job = optarg; break; case 'l': // level level = optarg[0]; break; case 'D': // Data data = optarg; break; case 's': // Store is_store = true; break; case 'S': // Search is_search = true; break; case 'L': // Use file lock use_lock = true; break; case 'd': // Debug-level debug_level = atoi(optarg); // global var break; case 'h': // help default is help as well default: Dmsg(NULL, ALWAYS_LEVEL, "bjoblist: joblist command line tool : " "store/search jobs and find deletable ones\n" "USAGE: bjoblist [OPTIONS]\n" "Options-styles: -option\n\n" "\t-w WORK_DIR\t\tWorking directory\n" "\t-f DATA_FILE\t\tDat file with job hierarchy info\n" "\t-k KEY\t\t\tKey host_name:volume_name\n" "\t-p PREVIOUS_JOB\t\tPrevious job\n" "\t-j CURRENT_JOB\t\tCurrent job\n" "\t-l {F,I,D}\t\tLevel {(F)ull, (D)ifferential, (I)ncremental}\n" "\t-D DATA\t\t\tData\n" "\t-d DEBUG_LEVEL\t\tDebug-level\n" "\t-s\t\t\tStore-mode\n" "\t-S\t\t\tSearch-mode\n" "\t-L\t\t\tUse file lock\n" "\t-h\t\t\tDisplay this text\n\n"); exit(EXIT_SUCCESS); } } // Check for correctly initialized variables // Prev job can eventually be NULL if (working_directory == NULL) { Dmsg(NULL, CRITICAL_LEVEL, "Working directory not set EXIT\n"); exit(EXIT_FAILURE); } // Check if data_file has been initilaized if (dat_file == NULL) { Dmsg(NULL, CRITICAL_LEVEL, "Data file not set EXIT\n"); exit(EXIT_FAILURE); } // Check that current job is not null if (curr_job == NULL) { Dmsg(NULL, CRITICAL_LEVEL, "Current job not set EXIT\n"); exit(EXIT_FAILURE); } // Check if key = either F/I/D if (level != 'F' && level != 'I' && level != 'D') { Dmsg(NULL, CRITICAL_LEVEL, "Bad key has to be either F/I/D is %d\n", level); exit(EXIT_FAILURE); } // If data is null set it to current job if (data == NULL) { data = curr_job; } // Check if there is a search and or store to do if (!is_store && !is_search) { Dmsg(NULL, CRITICAL_LEVEL, "Store or search not called. 
Nothing to do EXIT\n"); exit(EXIT_SUCCESS); } // If -L is passed try to acquiere lock if (use_lock) { lock_return = create_lock_file(lock_name, PROG_NAME, lock_name, &lock_poolmem, &lock_fd); } // If fail to acquiere lock wait and retry if (lock_return != 1) { Dmsg(NULL, STANDARD_LEVEL, "Fail to acquire lock retry\n"); bmicrosleep(RETRY_LOCK, 0); lock_return = create_lock_file(lock_name, PROG_NAME, lock_name, &lock_poolmem, &lock_fd); // IF lock fail once again just abort if (lock_return != 1) { Dmsg(NULL, CRITICAL_LEVEL, "Fail to acquire lock twice EXIT\n"); exit(EXIT_FAILURE); } } joblist job_history(NULL, key, curr_job, prev_job, level); // context can be null job_history.set_base(dat_file); job_history.set_return_jobname_on_prune(true); if (is_store) { store_return = job_history.store_job(data); // return boolean job_history.prune_jobs(NULL, NULL, &tmp_list); // retrun void // print all deletable jobs foreach_alist(to_delete_job, &tmp_list) { Dmsg(NULL, ALWAYS_LEVEL, "%s\n", to_delete_job); } } if (is_search) { search_return = job_history.find_job(curr_job, pool_mem.handle()); // Return bool Dmsg(NULL, ALWAYS_LEVEL, "%s\n", pool_mem.c_str()); } // Check that search/store op returned correctly (true if not called) if (store_return && search_return && lock_return == 1) { exit(EXIT_SUCCESS); } else { Dmsg(NULL, CRITICAL_LEVEL, "Store return : %d, Search return %d Lock return : %d\n", store_return, search_return, lock_return); exit(EXIT_FAILURE); } } bacula-15.0.3/src/tools/drivetype.c0000644000175000017500000000552214771010173016774 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Program for determining drive type * * Written by Robert Nelson, June 2006 */ #include "bacula.h" #include "findlib/find.h" static void usage() { fprintf(stderr, _( "\n" "Usage: drivetype [-v] path ...\n" "\n" " Print the drive type a given file/directory is on.\n" " The following options are supported:\n" "\n" " -l print local fixed hard drive\n" " -a display information on all drives\n" " -v print both path and file system type.\n" " -? 
print this message.\n" "\n")); exit(1); } int display_drive(char *drive, bool display_local, int verbose) { char dt[100]; int status = 0; if (drivetype(drive, dt, sizeof(dt))) { if (display_local) { /* in local mode, display only harddrive */ if (strcmp(dt, "fixed") == 0) { printf("%s\n", drive); } } else if (verbose) { printf("%s: %s\n", drive, dt); } else { puts(dt); } } else if (!display_local) { /* local mode is used by FileSet scripts */ fprintf(stderr, _("%s: unknown\n"), drive); status = 1; } return status; } int main (int argc, char *const *argv) { int verbose = 0; int status = 0; int ch, i; bool display_local = false; bool display_all = false; char drive='A'; char buf[16]; setlocale(LC_ALL, ""); bindtextdomain("bacula", LOCALEDIR); textdomain("bacula"); while ((ch = getopt(argc, argv, "alv?")) != -1) { switch (ch) { case 'v': verbose = 1; break; case 'l': display_local = true; break; case 'a': display_all = true; break; case '?': default: usage(); } } argc -= optind; argv += optind; OSDependentInit(); if (argc < 1 && display_all) { /* Try all letters */ for (drive = 'A'; drive <= 'Z'; drive++) { bsnprintf(buf, sizeof(buf), "%c:/", drive); display_drive(buf, display_local, verbose); } exit(status); } if (argc < 1) { usage(); } for (i = 0; i < argc; --argc, ++argv) { status += display_drive(*argv, display_local, verbose); } exit(status); } bacula-15.0.3/src/qt-console/0000755000175000017500000000000014771010173015535 5ustar bsbuildbsbuildbacula-15.0.3/src/qt-console/build-depkgs-qt-console0000755000175000017500000001057614771010173022130 0ustar bsbuildbsbuild#!/bin/sh # # This file is driven by the parameters that are defined in # the file External-qt-console # usage() { echo "usage: $0 [-h] [-C] [] [] ..." echo " -h Displays this usage" echo " -C Clobbers (overwrites) the source code by " echo " reextracting the archive and reapplying the" echo " patches." echo "" echo " Optional dependency, If none are given then all" echo " of them will be built." echo "" echo "Valid dependencies are:" grep -v '^#' < External-qt-console | cut -d'|' -f1 | cut -d'_' -f1 | tr A-Z a-z | sort -u | awk '{ print " " $1 }' } CLOBBER_SOURCE= while getopts "hHC" opt; do case ${opt} in H|h|\?) usage;exit 1;; C) CLOBBER_SOURCE=true;; esac done [ ${OPTIND} -gt 1 ] && shift `expr ${OPTIND} - 1` cwd=`pwd` cd `dirname $0` SCRIPT_DIR=`pwd` TOP_DIR=`pwd` [ ! -e ${TOP_DIR}/depkgs ] && mkdir ${TOP_DIR}/depkgs cd ${TOP_DIR}/depkgs DEPPKG_DIR=`pwd` OLD_IFS=${IFS};IFS="|"; while read package url dir mkd; do case ${package} in \#*) ;; *) eval "URL_${package}=${url};DIR_${package}=${dir};MKD_${package}=${mkd}";; esac done < ${SCRIPT_DIR}/External-qt-console IFS=${OLD_IFS};unset OLD_IFS get_source() { URL=$1 SRC_DIR=$2 MAKE_DIR=$3 ARCHIVE=`basename ${URL}` echo "in get_source URL is $URL SRC_DIR is $SRC_DIR MAKE_DIR is $MAKE_DIR ARCHIVE is $ARCHIVE" case ${ARCHIVE} in *.tar.gz) ARCHIVER="tar xzf"; [ -z "${SRC_DIR}" ] && SRC_DIR=`expr "${ARCHIVE}" : '\(.*\)\.tar\.gz'`;; *.tar.bz2) ARCHIVER="tar xjf"; [ -z "${SRC_DIR}" ] && SRC_DIR=`expr "${ARCHIVE}" : '\(.*\)\.tar\.bz2'`;; *.zip) ARCHIVER="unzip -q"; [ -z "${SRC_DIR}" ] && SRC_DIR=`expr "${ARCHIVE}" : '\(.*\)\.zip'`;; *.exe) ARCHIVER=""; [ -z "${SRC_DIR}" ] && SRC_DIR=`expr "${ARCHIVE}" : '\(.*\)\.zip'`;; *) echo "Unsupported archive type - $ARCHIVE"; exit 1;; esac # cd ${DEPPKG_DIR}/src if [ ! -e "${ARCHIVE}" ] then echo "Downloading ${URL}" if wget --passive-ftp "${URL}" then : else echo "Unable to download ${ARCHIVE}" exit 1 fi fi [ -z "${ARCHIVER}" ] && return 0 if [ ! 
-e "${SRC_DIR}" -o "${CLOBBER_SOURCE}" = "true" ] then rm -rf ${SRC_DIR} echo "Extracting ${ARCHIVE}" if [ "${MAKE_DIR}" = "true" ] then mkdir ${SRC_DIR} cd ${SRC_DIR} ${ARCHIVER} ../${ARCHIVE} > ../${ARCHIVE}.log 2>&1 else ${ARCHIVER} ${ARCHIVE} > ${ARCHIVE}.log 2>&1 cd ${SRC_DIR} fi return 0 fi cd ${SRC_DIR} return 1 } parse_output() { sed -ne '/\\$/N' -e 's/\\\n//' -e 's/\t\+/ /g' -e 's/ \+/ /g' \ -e '/ error: /p' \ -e "s%.*Entering directory[ ]\\+.${DEPPKG_DIR}/\\([^ ]\+\).%Entering \\1%p" \ -e "s%.*Leaving directory[ ]\\+.${DEPPKG_DIR}/\\([^ ]\+.\).%Leaving \\1%p" \ -e '/gcc \|g\+\+ \|ar /!d' \ -e 's/ \(\.\.\/\)\+/ /g' \ -e 's/.* \([^ ]\+\(\.c\|\.cpp\|\.cc\|\.cxx\)\)\( .*\|\)$/Compiling \1/p' \ -e 's/.* \([^ ]\+\.s\)\( .*\|\)$/Assembling \1/p' \ -e 's/.*ar [^ ]\+ \([^ ]\+\)\(\( [^ ]\+\.o\)\+\)/Updating \1 -\2/p' \ -e 's/.* -o \([^ ]\+\)\( .*\|\)$/Linking \1/p' } do_patch() { PATCH_FILE=${SCRIPT_DIR}/patches/$1; shift if patch -f -p0 "$@" >>patch.log < ${PATCH_FILE} then : else echo "Patch failed - Check `pwd`/patch.log" > /dev/tty exit 1 fi } do_make() { if make -f "$@" 2>&1 then : else echo "Make failed - Check `pwd`/make.log" > /dev/tty exit 1 fi | tee -a make.log #| parse_output } process_qwt() { get_source "${URL_QWT}" "${DIR_QWT}" "${MKD_QWT}" echo "Building qwt graphics library. This takes some time ..." echo "unix {" >${TOP_DIR}/depkgs/qwt-5.0.2/qwtconfig.pri echo " INSTALLBASE = ${TOP_DIR}/qwt" >>${TOP_DIR}/depkgs/qwt-5.0.2/qwtconfig.pri echo "}" >>${TOP_DIR}/depkgs/qwt-5.0.2/qwtconfig.pri cat ${TOP_DIR}/qwtconfig.pri >>${TOP_DIR}/depkgs/qwt-5.0.2/qwtconfig.pri qmake >make.log do_make Makefile >>make.log do_make Makefile install >>make.log echo "In case of problems see: `pwd`/make.log" } if [ "$#" -eq 0 ] then process_qwt else for dependency in "$@" do eval "process_${dependency}" done fi bacula-15.0.3/src/qt-console/TODO0000644000175000017500000002721514771010173016234 0ustar bsbuildbsbuilddhb ==================================================== can "schedule" be a member of job_defs and populated?? ========LOW priority items: Human readable in joblist for purged, joblevel and job type. Possibly a stack of past screens so that when you open a window from another, closing would bring the previous one back. ======================================================== This release or next: A page showing a list of schedule resources. A page list of message resources?? Kern discussed windows showing statistics like web based interfaces. I think the above is very important. ======================================================== Future Releases : The ablility to modify configuration files to create jobs, schedules, filesets and any other director resources. The ablility to modify configuration files to create storage resources. Add a status dir graphical interface. It would auto update every ?? seconds and have a list of scheduled jobs, and in the que to run jobs that could be cancelled graphically. Add a status client window. Keep updating showing what file is being processed. Documentation, Documentation, Documentaion. Help. Add help documentation. Have context sensitve help. bRestore add code to get working. May be in brestore, find a file by name, find a directory by name Interfaces to commands like bextract, bscan, bcopy, btape????? Is there a way to query the director/database for whether a storage is currently mounted so I am not presenting both mount and unmount to the user?? Yes, but it requires being able to directly connect to the SD (at least for the moment). 
Is there a way to identify a slot as having a cleaning tape??? (Kern says more work needs to be done in bacula with autochangers) Yes, there is a cleaning prefix for Volume labels defined in the DIR (not currently available to bat). Typically it is CLNxxx and by looking at the database, you can see these cleaning volumes. Migration Jobs?? =========================================================== NOT SURE =========================================================== I'm not sure about this one?? Things seem to work and I did not do a thing to make it happen: the "dir" is a member of Console - We also must somehow make the low level I/O routines know which director/console to use. Currently they always use the single global one defined in the mainWin class (if I remember right). I'm working on this (kes). It is not so simple, but moving forward gradually ... Create edit pool interface. This is done from config file ============================================================ CALLING GOOD: ============================================================ See if there is a solution to images fun with designer other than: %s/[\.\/]*:images/images/g %s/images/..\/images/g Images that are in the binary are referenced with :/images/... This is a Qt convention. If the image is in a file, it can be referenced directly, but for the most part, I prefer images in the binary (not lost, not accidently deleted, no installation problems, ... Utilizing designer to select the main.qrc resource file seems to do the job. Designer then puts the : in front of images and work. Think about good ways to clean up the Console creation part of the loop creating pages. I don't think it is bad as it is. There is, however, a construct called a Factory that could more or less automate this in one big for loop. Probably not neccesary for the time being. ============================================================ DONE: ============================================================ Use settings object to size the restore window. Similar to the saving of the state of the main window. Add context sensitive options for most commands see COMMANDS file A window showing a list of jobs and defaults. Defaults can be gotten in manner similar to what the first restore window does. status dir on page select director item All items with jobid= that I thought could work from joblist are done. As well as many more update slots scan Preferences for the messages timer. Get the 5 second bring to bottom of console to stop joblist cancel a running job. Fixes to final restore widgets. Set default for replace in run restore job to "always"?????? Option in joblist like with restore from jobid but restore populating timestamp of the selected job. User preferences. With log to stdout options. Have settings for defaults of limits on joblist Resolve issue of connection during restore selection. Could go with preempt of connections. Temporary resolution is in. (Kern is to work on) Further testing of restore with .mod Tested a few things, not all. Add fileset to joblist. Test left pane of restore with 2 windows drives in one backup job. Yup, id didn't work, now it does. Purging not working from console or from context sensitive. This was a confusion with the restore command. Now resolved. Can produce a segfault by attempting to restore from a restore job. In pre-restore, prevent a job in the list from being a restore job. Need to figure out the functionality and inteligence that the last restore window should have and give it to it. 
Right now it shows drop downs with no options. Allow for selecting multiple jobs to restore from in joblist. Right click restore from job works, but not with multiple selected jobs. See if it would be possible to have user provided console text show up in a color Get status codes in dropdown for joblist select. Create class to display messages from a specific job. Want the ability to create an instance of that class from joblist. Color code termination code in joblist. I also want a table to convert termination code into human readable text. show purged flag in joblist. Don't have purge option show if already purged. move behavior of: MainWin::setContextMenuDockText MainWin::setTreeWidgetItemDockColor to the pages class preempt all connections to console with if (!is_connectedGui()) or some other mechanism. May find more as users start finding them. Create documentation for any other developers interested in creating new classes to add more pages. Explain how to use the pages class and about not populating until the tree widget is clicked etc... Add numerous are you sure dialog boxes. Like are you sure you want to delete/purge that volume. Show a little of the documentation about what the consequences of delete or purging are. A Tree widget context sensitive menu option and class to jump from known job to surf the filestructure on the job. This was future, but it is kind of done with restore from jobid Get rid of "Warning: name layoutWidget is already used" when make uic's restore.ui Create the ability to start a restore from joblist. Right click, select "restore from Jobid=xx" create an instance of restore defaulting in the jobid or a list of selected jobs. Update README describe bat.conf.example to bat.conf Test restore and get anything not working, working. Add inteligence to prerestore. Color code Media Red->Error Append->green Full/Used->Yellow Get restore into stack. Should the jobs dialog be turned into a page as well?? Possilbe: Turn run and label into docked pages. (remove button bar buttons??) Where and bootstrap are confused in runjobs of restore. This was just the labels. Create list of what does not work. From what I can tell, just the restore window on the left. Add option to LIMIT the number of jobs shown in all jobs page for users with multiple hundreds to thousands of jobs. Play with includes to Make these compiles shorter. moved includes of of includes and into files only console.h should be long relabel storage=DDS3 oldvolume=ddsvol003 volume=dds3vol003 slot=3 pool=dds3_hope in label slot spinner, limit the upper to the value of slots for that storage. Fix bug in myth box not working with .sql query="" command. This was a fix in mysql Figure out how to get tables like Joblist to do the equivalent of double clicking on the separating lines between each of the headings. Tried the hard way first. Oops. If the console command line entry docked widget gets the focus, make m_currentConsole the top widget by setting the treewidgetitem selected. Did this in MainWin::input_line almost better to let the person see whatever they want until they hit enter. Set Window titles to reflect what director it is. Must:: get page selector to follow undocked windows. Otherwise current console won't be current. Re-add class for storage, I accidentally reverted because I left it on my laptop. This is why I like committing often. 
Add class for FileSets Another idea for what you have implemented: - I think that the dynamic pages that you create on the fly should be nested under the item that creates them more like a directory tree. For example: Jobs on Volume xxx, probably should be shown under "All Jobs" (or Media if that is what created it) and "Jobs of Client Rufus" probably should be shown under "Clients". I base this on looking at the Select page list after I have brought up 3 or 4 dynamic pages. Once there are a good number, I get a bit confused where they came from. This would also permit selecting multipe Volumes then displaying multiple pages, one for each Volume selected. If they are nested, then that nested level can be expanded or collapsed, which would be pretty cool at keeping information, but getting it out of the way, sort of like what happens for a directory tree. dhb: ref above My original concept was to put these in a tabbed widget. Your Idea may make for a cleaner user experience. I like it. It could save the effort of getting a tabbed widget to work. - I think we need to make the current Director more explicit, by perhaps highlighting it in the page selector when it is current and unhighlighting it when it is not (we could use color as we do for the console, though some color blind people may have problems. - When any director is clicked, we need to set it as the current director and set the current console as well. Remove DoubleClicking, From pages class not needed any more. Broken with multiple directors: - If you click on the second director, it will probably open, but none of the pages that are defined below it will be able to talk to it. They will most likely talk to the first director. - When any console is clicked we need to set it as the current console (m_console) and also set its director as the current director (m_topItem). These are in the mainwin class. - When any page is selected, we must set both the current director (m_topItem) and current console (m_console) that this page is connected to. dhb: m_topItem has been changed to Console::directorTreeItem() m_currentConsole->directorTreeItem(); returns disired treeWidgetItem - We also need a concept of a "local" director/console for each page, so the page knows who it is talking to -- this doesn't currently exist, so I think we must pass the director and console to each page widget constructor. dhb: m_currentConsole is saved in each page subclass's m_console. This value is set by all but the console class calling Pages::pgInitialie() in it's constructor In short, there is a lot of work to be done to make multiple simultaneous directors work. dhb: this may be moot: If the above prooves to be too much, we might consider to only have a single director at a time, and simply let the user select which director he wants to connect to (one at a time, but dynamically). In the end, this may be the best thing to do, so any user who wishes to connect to multiple directors would run two instances of bat. I am a bit unsure now, but the above list of things to do is much bigger than I thought it was going to be. bacula-15.0.3/src/qt-console/relabel/0000755000175000017500000000000014771010173017143 5ustar bsbuildbsbuildbacula-15.0.3/src/qt-console/relabel/relabel.h0000644000175000017500000000224314771010173020723 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. 
You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Kern Sibbald, February MMVII */ #ifndef _RELABEL_H_ #define _RELABEL_H_ #if QT_VERSION >= 0x050000 #include #else #include #endif #include "ui_relabel.h" #include "console.h" class relabelDialog : public QDialog, public Ui::relabelForm { Q_OBJECT public: relabelDialog(Console *console, QString &fromVolume); private: int getDefs(QStringList &fieldlist); private slots: void accept(); void reject(); private: Console *m_console; QString m_fromVolume; int m_conn; }; #endif /* _RELABEL_H_ */ bacula-15.0.3/src/qt-console/relabel/relabel.ui0000644000175000017500000001440214771010173021111 0ustar bsbuildbsbuild relabelForm Qt::WindowModal 0 0 400 212 Label 9 6 16777215 20 From Volume : Qt::AlignCenter Qt::Vertical QSizePolicy::Maximum 382 16 Qt::Vertical QSizePolicy::Maximum 21 16 Qt::Horizontal QDialogButtonBox::Cancel|QDialogButtonBox::NoButton|QDialogButtonBox::Ok 0 6 10000 Pool: poolCombo Storage: storageCombo 200 0 New Volume Name: volumeName Slot: slotSpin 0 6 Qt::Horizontal 71 21 16777215 30 <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:12pt; font-weight:600;">Relabel a Volume</span></p></body></html> Qt::Horizontal 81 20 buttonBox accepted() relabelForm accept() 248 254 157 274 buttonBox rejected() relabelForm reject() 316 260 286 274 bacula-15.0.3/src/qt-console/relabel/relabel.cpp0000644000175000017500000000707714771010173021270 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Label Dialog class * * Kern Sibbald, February MMVII * */ #include "bat.h" #include "relabel.h" #include /* * An overload of the constructor to have a default storage show in the * combobox on start. Used from context sensitive in storage class. 
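* (Descriptive note on the constructor below: it turns console notifications off, fills the storage and pool combo boxes from the console lists, presets the new volume name with the source volume, and uses getDefs() to preselect the pool and storage that the source volume currently uses.)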
*/ relabelDialog::relabelDialog(Console *console, QString &fromVolume) { m_console = console; m_fromVolume = fromVolume; m_conn = m_console->notifyOff(); setupUi(this); storageCombo->addItems(console->storage_list); poolCombo->addItems(console->pool_list); volumeName->setText(fromVolume); QString fromText(tr("From Volume : ")); fromText += fromVolume; fromLabel->setText(fromText); QStringList defFields; if (getDefs(defFields) >= 1) { poolCombo->setCurrentIndex(poolCombo->findText(defFields[1], Qt::MatchExactly)); storageCombo->setCurrentIndex(storageCombo->findText(defFields[0], Qt::MatchExactly)); } this->show(); } /* * Use an sql statment to get some defaults */ int relabelDialog::getDefs(QStringList &fieldlist) { QString job, client, fileset; QString query(""); query = "SELECT MediaType AS MediaType, Pool.Name AS PoolName" " FROM Media" " LEFT OUTER JOIN Pool ON Media.PoolId = Pool.PoolId" " WHERE VolumeName = \'" + m_fromVolume + "\'"; if (mainWin->m_sqlDebug) { Pmsg1(000, "query = %s\n", query.toUtf8().data()); } QStringList results; if (m_console->sql_cmd(query, results)) { QString field; /* Iterate through the lines of results, there should only be one. */ foreach (QString resultline, results) { fieldlist = resultline.split("\t"); } /* foreach resultline */ } /* if results from query */ return results.count(); } void relabelDialog::accept() { QString scmd; if (volumeName->text().toUtf8().data()[0] == 0) { QMessageBox::warning(this, tr("No Volume name"), tr("No Volume name given"), QMessageBox::Ok, QMessageBox::Ok); return; } if (m_fromVolume == volumeName->text().toUtf8()) { QMessageBox::warning(this, tr("New name must be different"), tr("New name must be different"), QMessageBox::Ok, QMessageBox::Ok); return; } this->hide(); scmd = QString("relabel storage=\"%1\" oldvolume=\"%2\" volume=\"%3\" pool=\"%4\" slot=%5") .arg(storageCombo->currentText()) .arg(m_fromVolume) .arg(volumeName->text()) .arg(poolCombo->currentText()) .arg(slotSpin->value()); if (mainWin->m_commandDebug) { Pmsg1(000, "sending command : %s\n",scmd.toUtf8().data()); } m_console->write_dir(scmd.toUtf8().data()); m_console->displayToPrompt(m_conn); m_console->notify(m_conn, true); delete this; mainWin->resetFocus(); } void relabelDialog::reject() { this->hide(); m_console->notify(m_conn, true); delete this; mainWin->resetFocus(); } bacula-15.0.3/src/qt-console/jobs/0000755000175000017500000000000014771010173016472 5ustar bsbuildbsbuildbacula-15.0.3/src/qt-console/jobs/jobs.h0000644000175000017500000000274514771010173017610 0ustar bsbuildbsbuild#ifndef _JOBS_H_ #define _JOBS_H_ /* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * Dirk Bartley, March 2007 */ #if QT_VERSION >= 0x050000 #include #else #include #endif #include "ui_jobs.h" #include "console.h" #include "pages.h" class Jobs : public Pages, public Ui::jobsForm { Q_OBJECT public: Jobs(); ~Jobs(); virtual void PgSeltreeWidgetClicked(); virtual void currentStackItem(); public slots: void tableItemChanged(QTableWidgetItem *, QTableWidgetItem *); private slots: void populateTable(); void consoleListFiles(); void consoleListVolume(); void consoleListNextVolume(); void consoleEnable(); void consoleDisable(); void consoleCancel(); void listJobs(); void runJob(); private: void createContextMenu(); QString m_currentlyselected; bool m_populated; bool m_checkcurwidget; int m_typeIndex; }; #endif /* _JOBS_H_ */ bacula-15.0.3/src/qt-console/jobs/jobs.cpp0000644000175000017500000002067214771010173020142 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Jobs Class * * Dirk Bartley, March 2007 */ #include "bat.h" #include "jobs/jobs.h" #include "run/run.h" #include "util/fmtwidgetitem.h" Jobs::Jobs() : Pages() { setupUi(this); m_name = tr("Jobs"); pgInitialize(); QTreeWidgetItem* thisitem = mainWin->getFromHash(this); thisitem->setIcon(0,QIcon(QString::fromUtf8(":images/run.png"))); /* tableWidget, Storage Tree Tree Widget inherited from ui_client.h */ m_populated = false; m_checkcurwidget = true; m_closeable = false; /* add context sensitive menu items specific to this classto the page * selector tree. m_contextActions is QList of QActions */ m_contextActions.append(actionRefreshJobs); createContextMenu(); connect(tableWidget, SIGNAL(itemDoubleClicked(QTableWidgetItem*)), this, SLOT(runJob())); } Jobs::~Jobs() { } /* * The main meat of the class!! The function that querries the director and * creates the widgets with appropriate values. 
*/ void Jobs::populateTable() { m_populated = true; mainWin->waitEnter(); Freeze frz(*tableWidget); /* disable updating*/ QBrush blackBrush(Qt::black); m_checkcurwidget = false; tableWidget->clear(); m_checkcurwidget = true; QStringList headerlist = (QStringList() << tr("Job Name") << tr("Pool") << tr("Messages") << tr("Client") << tr("Storage") << tr("Level") << tr("Type") << tr("FileSet") << tr("Catalog") << tr("Enabled") << tr("Where")); m_typeIndex = headerlist.indexOf(tr("Type")); tableWidget->setColumnCount(headerlist.count()); tableWidget->setHorizontalHeaderLabels(headerlist); tableWidget->horizontalHeader()->setHighlightSections(false); tableWidget->setRowCount(m_console->job_list.count()); tableWidget->verticalHeader()->hide(); tableWidget->setSelectionBehavior(QAbstractItemView::SelectRows); tableWidget->setSelectionMode(QAbstractItemView::SingleSelection); tableWidget->setSortingEnabled(false); /* rows move on insert if sorting enabled */ int row = 0; foreach (QString jobName, m_console->job_list){ job_defaults job_defs; job_defs.job_name = jobName; if (m_console->get_job_defaults(job_defs)) { int col = 0; TableItemFormatter jobsItem(*tableWidget, row); jobsItem.setTextFld(col++, jobName); jobsItem.setTextFld(col++, job_defs.pool_name); jobsItem.setTextFld(col++, job_defs.messages_name); jobsItem.setTextFld(col++, job_defs.client_name); jobsItem.setTextFld(col++, job_defs.store_name); jobsItem.setTextFld(col++, job_defs.level); jobsItem.setTextFld(col++, job_defs.type); jobsItem.setTextFld(col++, job_defs.fileset_name); jobsItem.setTextFld(col++, job_defs.catalog_name); jobsItem.setBoolFld(col++, job_defs.enabled); jobsItem.setTextFld(col++, job_defs.where); } row++; } /* set default sorting */ tableWidget->sortByColumn(headerlist.indexOf(tr("Job Name")), Qt::AscendingOrder); tableWidget->setSortingEnabled(true); /* Resize rows and columns */ tableWidget->resizeColumnsToContents(); tableWidget->resizeRowsToContents(); /* make read only */ tableWidget->setEditTriggers(QAbstractItemView::NoEditTriggers); mainWin->waitExit(); dockPage(); } /* * When the treeWidgetItem in the page selector tree is singleclicked, Make sure * The tree has been populated. 
*/ void Jobs::PgSeltreeWidgetClicked() { if(!m_populated) { populateTable(); } } /* * Added to set the context menu policy based on currently active tableWidgetItem * signaled by currentItemChanged */ void Jobs::tableItemChanged(QTableWidgetItem *currentwidgetitem, QTableWidgetItem *previouswidgetitem ) { /* m_checkcurwidget checks to see if this is during a refresh, which will segfault */ if (m_checkcurwidget && currentwidgetitem) { /* The Previous item */ if (previouswidgetitem) { /* avoid a segfault if first time */ foreach(QAction* jobAction, tableWidget->actions()) { tableWidget->removeAction(jobAction); } } int currentRow = currentwidgetitem->row(); QTableWidgetItem *currentrowzeroitem = tableWidget->item(currentRow, 0); m_currentlyselected = currentrowzeroitem->text(); QTableWidgetItem *currenttypeitem = tableWidget->item(currentRow, m_typeIndex); QString type = currenttypeitem->text(); if (m_currentlyselected.length() != 0) { /* set a hold variable to the client name in case the context sensitive * menu is used */ tableWidget->addAction(actionRefreshJobs); tableWidget->addAction(actionConsoleListFiles); tableWidget->addAction(actionConsoleListVolumes); tableWidget->addAction(actionConsoleListNextVolume); tableWidget->addAction(actionConsoleEnableJob); tableWidget->addAction(actionConsoleDisableJob); tableWidget->addAction(actionConsoleCancel); tableWidget->addAction(actionJobListQuery); tableWidget->addAction(actionRunJob); } } } /* * Setup a context menu * Made separate from populate so that it would not create context menu over and * over as the table is repopulated. */ void Jobs::createContextMenu() { tableWidget->setContextMenuPolicy(Qt::ActionsContextMenu); tableWidget->addAction(actionRefreshJobs); connect(tableWidget, SIGNAL( currentItemChanged(QTableWidgetItem *, QTableWidgetItem *)), this, SLOT(tableItemChanged(QTableWidgetItem *, QTableWidgetItem *))); /* connect to the action specific to this pages class */ connect(actionRefreshJobs, SIGNAL(triggered()), this, SLOT(populateTable())); connect(actionConsoleListFiles, SIGNAL(triggered()), this, SLOT(consoleListFiles())); connect(actionConsoleListVolumes, SIGNAL(triggered()), this, SLOT(consoleListVolume())); connect(actionConsoleListNextVolume, SIGNAL(triggered()), this, SLOT(consoleListNextVolume())); connect(actionConsoleEnableJob, SIGNAL(triggered()), this, SLOT(consoleEnable())); connect(actionConsoleDisableJob, SIGNAL(triggered()), this, SLOT(consoleDisable())); connect(actionConsoleCancel, SIGNAL(triggered()), this, SLOT(consoleCancel())); connect(actionJobListQuery, SIGNAL(triggered()), this, SLOT(listJobs())); connect(actionRunJob, SIGNAL(triggered()), this, SLOT(runJob())); } /* * Virtual function which is called when this page is visible on the stack */ void Jobs::currentStackItem() { if(!m_populated) { /* Create the context menu for the client table */ populateTable(); } } /* * The following functions are slots responding to users clicking on the context * sensitive menu */ void Jobs::consoleListFiles() { QString cmd = "list files job=\"" + m_currentlyselected + "\""; if (mainWin->m_longList) { cmd.prepend("l"); } consoleCommand(cmd); } void Jobs::consoleListVolume() { QString cmd = "list volumes job=\"" + m_currentlyselected + "\""; if (mainWin->m_longList) { cmd.prepend("l"); } consoleCommand(cmd); } void Jobs::consoleListNextVolume() { QString cmd = "list nextvolume job=\"" + m_currentlyselected + "\""; if (mainWin->m_longList) { cmd.prepend("l"); } consoleCommand(cmd); } void Jobs::consoleEnable() { QString 
cmd = "enable job=\"" + m_currentlyselected + "\""; consoleCommand(cmd); } void Jobs::consoleDisable() { QString cmd = "disable job=\"" + m_currentlyselected + "\""; consoleCommand(cmd); } void Jobs::consoleCancel() { QString cmd = "cancel job=\"" + m_currentlyselected + "\""; consoleCommand(cmd); } void Jobs::listJobs() { QTreeWidgetItem *parentItem = mainWin->getFromHash(this); mainWin->createPageJobList("", "", m_currentlyselected, "", parentItem); } /* * Open a new job run page with the currently selected job * defaulted In */ void Jobs::runJob() { new runPage(m_currentlyselected); } bacula-15.0.3/src/qt-console/jobs/jobs.ui0000644000175000017500000001076014771010173017772 0ustar bsbuildbsbuild jobsForm 0 0 449 307 Client Tree :/images/view-refresh.png Refresh Jobs List Requery the director for the list of clients. :/images/utilities-terminal.png List Files Command List Files Command List Files Command List Files Command :/images/utilities-terminal.png List Volumes Command List Volumes Command List Volumes Command List Volumes Command :/images/utilities-terminal.png List Next Volume Command List Next Volume Command List Next Volume Command List Next Volume Command :/images/utilities-terminal.png Enable Job Command Enable Job Command Enable Job Command Enable Job Command :/images/utilities-terminal.png Disable Job Command Disable Job Command Disable Job Command Disable Job Command :/images/emblem-system.png Open JobList on Job Open JobList on Job Open JobList on Job Open JobList on Job :/images/utilities-terminal.png Cancel Job Command Cancel Job Command :/images/run.png RunJob bacula-15.0.3/src/qt-console/bat_conf.h0000644000175000017500000000725314771010173017470 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * Bacula Adminstration Tool (bat) * * Kern Sibbald, March 2002 */ #ifndef _BAT_CONF_H_ #define _BAT_CONF_H_ /* * Resource codes -- they must be sequential for indexing */ enum { R_DIRECTOR = 1001, R_CONSOLE, R_CONSOLE_FONT, R_FIRST = R_DIRECTOR, R_LAST = R_CONSOLE_FONT /* Keep this updated */ }; /* * Some resource attributes */ enum { R_NAME = 1020, R_ADDRESS, R_PASSWORD, R_TYPE, R_BACKUP }; /* Definition of the contents of each Resource */ class DIRRES { public: RES hdr; uint32_t DIRport; /* UA server port */ char *address; /* UA server address */ char *password; /* UA server password */ bool tls_authenticate; /* Authenticate with tls */ bool tls_enable; /* Enable TLS */ bool tls_psk_enable; /* Enable TLS-PSK */ bool tls_require; /* Require TLS */ char *tls_ca_certfile; /* TLS CA Certificate File */ char *tls_ca_certdir; /* TLS CA Certificate Directory */ char *tls_certfile; /* TLS Client Certificate File */ char *tls_keyfile; /* TLS Client Key File */ utime_t heartbeat_interval; /* Dir heartbeat interval */ TLS_CONTEXT *tls_ctx; /* Shared TLS Context */ TLS_CONTEXT *psk_ctx; /* Shared TLS-PSK Context */ /* Methods */ char *name() const; }; inline char *DIRRES::name() const { return hdr.name; } struct CONFONTRES { RES hdr; char *fontface; /* Console Font specification */ }; class CONRES { public: RES hdr; char *password; /* UA server password */ bool comm_compression; /* Enable comm line compression */ bool tls_authenticate; /* Authenticate with tls */ bool tls_enable; /* Enable TLS on all connections */ bool tls_psk_enable; /* Enable TLS-PSK on all connections */ bool tls_require; /* Require TLS on all connections */ char *tls_ca_certfile; /* TLS CA Certificate File */ char *tls_ca_certdir; /* TLS CA Certificate Directory */ char *tls_certfile; /* TLS Client Certificate File */ char *tls_keyfile; /* TLS Client Key File */ char *director; /* bind to director */ utime_t heartbeat_interval; /* Cons heartbeat interval */ TLS_CONTEXT *tls_ctx; /* Shared TLS Context */ TLS_CONTEXT *psk_ctx; /* Shared TLS Context */ /* Methods */ char *name() const; }; inline char *CONRES::name() const { return hdr.name; } /* Define the Union of all the above * resource structure definitions. */ union u_res { DIRRES dir_res; CONRES con_res; CONFONTRES con_font; RES hdr; }; typedef union u_res URES; #define GetConsoleResWithName(x) ((CONRES *)GetResWithName(R_CONSOLE, (x))) #define GetDirResWithName(x) ((DIRRES *)GetResWithName(R_DIRECTOR, (x))) #endif /* _BAT_CONF_H_ */ bacula-15.0.3/src/qt-console/bat.h0000644000175000017500000000254314771010173016460 0ustar bsbuildbsbuild#ifndef _BAT_H_ #define _BAT_H_ /* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * Kern Sibbald, January 2007 */ #if defined(HAVE_WIN32) #if !defined(_STAT_H) #define _STAT_H /* don't pull in MinGW stat.h */ #define _STAT_DEFINED /* don't pull in MinGW stat.h */ #endif #endif #if defined(HAVE_WIN32) #if defined(HAVE_MINGW) #include "mingwconfig.h" #else #include "winconfig.h" #endif #else #include "config.h" #endif #define __CONFIG_H #if QT_VERSION >= 0x050000 #include #else #include #endif #include #include "bacula.h" #ifndef TRAY_MONITOR #include "mainwin.h" #include "bat_conf.h" #include "jcr.h" #include "console.h" extern MainWin *mainWin; extern QApplication *app; #endif bool isWin32Path(QString &fullPath); #endif /* _BAT_H_ */ bacula-15.0.3/src/qt-console/help/0000755000175000017500000000000014771010173016465 5ustar bsbuildbsbuildbacula-15.0.3/src/qt-console/help/help.ui0000644000175000017500000000322414771010173017755 0ustar bsbuildbsbuild helpForm 0 0 762 497 Form 9 6 0 6 &Home &Back Qt::Horizontal 40 20 Close bacula-15.0.3/src/qt-console/help/joblist.html0000644000175000017500000001006214771010173021020 0ustar bsbuildbsbuild Bat User's Guide

Bat User's Guide


The JobList Interface

The JobList interface displays a table of jobids, which are instances of a job having been run. The table displays data for each job stored in the daemon's SQL backend. The table currently includes the job name, the client name, the job type, the job level, the number of files, the number of bytes, the status, a flag indicating whether the job's files have been purged, and the job's fileset.

The job type is either "B" for backup or "R" for restore.

The job level is one of "I" for incremental, "F" for full, or "D" for differential.

A purged value of yes means that the files for that job have been purged from the catalog's file table. Purged jobs cannot have their files browsed when restoring from a job with that jobid.

This interface uses a splitter to divide the display of the table and the controls used to modify the selection criteria. There are 7 drop-down boxes that can be used to filter the records in the table and view only the items you are interested in. There are two limiting spin boxes and related check boxes that can be used to limit the number and the age of the records to be displayed. These limiting controls begin with default values that can be configured in the Settings->Preferences dialog box, in the JobList tab.

Pushing the Refresh button causes the interface to re-query the database. The database is also re-queried every time the interface comes back to the top of the stack of interfaces in the main window. Another refresh option exists in the popup that appears when right-clicking on the JobList item in the Page Selector.

Pushing the Graph button opens up a new JobPlot interface. All of the drop-downs and limiting controls in the JobPlot window default to the current settings of the controls in the JobList interface. If the Graph button is not there, bat was not compiled with the qwt libraries.

There are many options in the context sensitive popup that appears when the user right-clicks on an item in the job table.

Listing console commands can be run to list the jobid, list the files on that jobid, list the jobmedia for that jobid, and list the volumes used for that jobid. There is a preferences item to determine whether a long list or a short list command is run. It can be found from the menu by following Settings -> Preferences, then the Miscellaneous tab.
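For example, assuming a hypothetical jobid of 1234, the console commands behind these menu entries take roughly the following form (the long-list preference simply prefixes the command with an "l", as in the last line):
    list jobid=1234
    list files jobid=1234
    list jobmedia jobid=1234
    list volumes jobid=1234
    llist files jobid=1234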

The dangerous delete and purge commands are preceded by an "Are You Sure?" dialog box.

The logs that have been stored in the database for the jobid can be viewed by using the show log for job option. If the database does not have a log, a popup appears explaining the modification that may be made to the bacula-dir.conf file. The change is to add "catalog = all" to the messages stanza of the Messages resource that this job uses.
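As a minimal sketch, assuming the job references a Messages resource named Standard (the name and the other destinations are only illustrative and should match your existing configuration), the addition looks like this in bacula-dir.conf:
    Messages {
      Name = Standard
      # ... existing destinations such as mail or console stay as they are ...
      catalog = all        # store the job log in the catalog so bat can display it
    }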

Restore from job opens up the Select Jobs interface. It opens prepopulated with the correct data to open up a standard restore interface displaying the file structure backed up when the job ran. It displays only the files from this job if you don't change the controls in the Select Jobs interface.

Restore from time opens the Select Jobs interface with the end time of the job filled into the Before Time entry box. This causes the daemon to use the Before Time to select the appropriate jobids for the restore. This set contains the most recent full backup, the most recent differential backup done since the most recent full, and all the incremental backups done since the most recent full or differential. This job set is considered the best possible to restore to what the filesystem looked like after that job ran. bacula-15.0.3/src/qt-console/help/index.html0000644000175000017500000000415414771010173020466 0ustar bsbuildbsbuild Bat User's Guide

Bat User's Guide


The User's Guide for the Bacula Administration Tool.

Welcome to the BAT

Contents.

The Console Interface

The Clients Interface

The FileSets Interface

The Jobs Interface

The JobList Interface

The JobPlot Interface

The Media Interface

The Storage Interface

The two Restore Interfaces

What is the BAT.

Welcome to the Bacula Administration Tool. The bat is the graphical interface to communicate with and administer the bacula director daemon. If you have been using bacula for a while, you're familiar with bconsole, the text based interface to administer bacula. With bat, the user can perform any command that can be run from bconsole, and more.

The bat is bconsole with extra communication capabilities and a set of graphical user interfaces that utilize those capabilities. A bat programmer is able to create interfaces that interact with the director in all ways that a user can, by executing commands. When a command is entered by an interface, the bat types in the command in the console. Often times the interface visible is changed to the console so that the user can read the command as well as the resulting output from the director. Bat can also perform requests of the daemon, that the user would not normally do. These requests would include the ability to retrieve lists of various resources and to instruct the director to query the sql server. The director then forwards the query results to bat. bacula-15.0.3/src/qt-console/help/help.cpp0000644000175000017500000000370114771010173020122 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Help Window class * * Kern Sibbald, May MMVII * */ #include "bat.h" #include "help.h" /* * Note: HELPDIR is defined in src/host.h */ Help::Help(const QString &path, const QString &file, QWidget *parent) : QWidget(parent) { setAttribute(Qt::WA_DeleteOnClose); /* Make sure we go away */ setAttribute(Qt::WA_GroupLeader); /* allow calling from modal dialog */ setupUi(this); /* create window */ textBrowser->setSearchPaths(QStringList() << HELPDIR << path << ":/images"); textBrowser->setSource(file); //textBrowser->setCurrentFont(mainWin->m_consoleHash.values()[0]->get_font()); connect(textBrowser, SIGNAL(sourceChanged(const QUrl &)), this, SLOT(updateTitle())); connect(closeButton, SIGNAL(clicked()), this, SLOT(close())); connect(homeButton, SIGNAL(clicked()), textBrowser, SLOT(home())); connect(backButton, SIGNAL(clicked()), textBrowser, SLOT(backward())); this->show(); } void Help::updateTitle() { setWindowTitle(tr("Help: %1").arg(textBrowser->documentTitle())); } void Help::displayFile(const QString &file) { QRegExp rx; rx.setPattern("/\\.libs"); QString path = QApplication::applicationDirPath(); int pos = rx.indexIn(path); if (pos) path = path.remove(pos, 6); path += "/help"; new Help(path, file); } bacula-15.0.3/src/qt-console/help/help.h0000644000175000017500000000217714771010173017575 0ustar bsbuildbsbuild#ifndef _HELP_H_ #define _HELP_H_ /* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Help Window class * * It reads an html file and displays it in a "browser" window. 
* * Kern Sibbald, May MMVII * * $Id$ */ #include "bat.h" #include "ui_help.h" class Help : public QWidget, public Ui::helpForm { Q_OBJECT public: Help(const QString &path, const QString &file, QWidget *parent = NULL); virtual ~Help() { }; static void displayFile(const QString &file); public slots: void updateTitle(); private: }; #endif /* _HELP_H_ */ bacula-15.0.3/src/qt-console/help/restore.html0000644000175000017500000001617014771010173021043 0ustar bsbuildbsbuild Bat User's Guide

Bat User's Guide


The Two Restore Interfaces

Both interfaces accomplish the same three steps. The steps are to select jobs to restore from, give the user the opportunity to select files/directories, then indicate details such as the host and path to restore to and trigger the job to run.

The Standard Restore Interface

Start the standard restore procedure by pressing the restore button in the task bar. There are also two options in the joblist context sensitive menu to start a restore: Restore From Time and Restore From Job.

This restore method is intended as a high performance option; it is a server side process. These interfaces assist the user in utilizing the text based restore capabilities of the standard console. They interpret the text to display the information in a way that simplifies the restore procedure.

The opening interface allows the user to choose selection criteria to inform the server how to determine the set of backup jobids to use in the restore. This best possible set is the most recent full backup, the most recent differential backup done since the most recent full, and all the incremental backups done since the most recent full or differential. The server then uses this set of jobs to create a file structure that is the most recent version of each file found in this job list.

The second interface allows the user to browse this file structure and choose the files and directories to restore. This is done in an explorer-type interface with a directory tree on the left. In the right pane of a splitter is a table showing a list of files and directories that are the contents of the directory selected in the left pane. The user can mark and unmark items either with the buttons at the top or by double-clicking on the check mark icon to toggle whether an item is selected. Double-clicking a directory item anywhere other than on the check icon opens that directory. Clicking 'OK' completes the selection process.

The third step is the Restore Run interface. Its purpose is to allow the user to give the bacula server the details of the host and path to restore to, the replacement rules, when to restore, and what priority to give the restore job.

The Version Browser Restore Interface

This restore interface is NOT intended to perform major restores of directory structures with large numbers of directories and files. It should work; however, it is a chatty interface. This is due to the number of SQL queries made of the server, which is proportional to the number of files and directories selected plus the number of exceptions to defaults selected. It IS intended to allow the user to browse for specific files and choose between the different versions of those files that are available in the catalog to restore.

The interface contains a horizontal splitter. The bottom pane contains some controls for the interface. The top portion contains a vertical splitter with 4 panes for viewing the cataloged information. The left pane is for viewing and further sub-selecting jobs. The second pane is for viewing the directory tree. The third is for viewing a list of files in a directory that has been selected. Lastly, the fourth pane is for viewing a table of versions of a single file that has been selected from the file table.

The version browser accomplishes the three restore steps differently.

To select jobs and populate the directory tree, press the refresh button. The job table contains the selected jobs. The selection criteria of the three dropdowns and the two limits are used as the filtering criteria for populating the job table the first time the refresh button is pushed. After the refresh button has been pushed, the job table has check marks that can be used to select and unselect jobs. Re-pressing the refresh button does one of two things, depending on whether the controls in the bottom pane display the same data as the previous time the refresh button was pressed. If they have changed, the job table is repopulated from the selection criteria. If unchanged, any jobs that have been unchecked are excluded from the process of selecting directories, files and versions. The directory tree is repopulated whenever the refresh button is pushed. There is a text label underneath the refresh button to inform the user which of the two will occur when refresh is pressed.

The user can browse the directory tree and click on a directory folder, which then populates the file table with the files that are contained in the selected directory path. Selecting or unselecting a directory also selects or unselects all files and all directories in the tree hierarchy beneath it. If there are any exceptions already selected beneath that directory, those exceptions are deleted.

With the file table populated, the user can unselect a file in a selected directory and also select a file in an unselected directory.

With a file selected, the version table populates with all the instances of that file that have been written to tape. The user can choose a specific version of a file for restore and override the default, which is to restore the most recent version.

Pressing the restore button initiates a procedure preparing to perform the restore of the requested files. The same Restore Run interface that was the third step in the standard restore is then displayed. It allows the user to give the bacula server the details of what host and what path to restore the files to. This part of the restore takes control of the connection to the server and does not allow any other communication to the server by the other interfaces.

There are two progress bars that appear when refreshing or after pressing Restore. They give the user an indication of progress on any tasks that could take a long time to complete.

A Version Browser Limitation

There is an important limitation of the version browser. If a fileset specifically points to a file instead of a directory that contains files, it will not be seen in the version browser. This is due to the way that the version browser searches for directories first, then for files contained in those directories. A common example is the catalog job, which by default points directly to one file created in a database dump.
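As an illustration (the fileset name and the dump file path are only examples and depend on your configuration), such a fileset looks roughly like this in bacula-dir.conf:
    FileSet {
      Name = "Catalog"
      Include {
        Options {
          signature = MD5
        }
        # a single file produced by the catalog dump script --
        # an entry like this is not visible in the version browser
        File = /opt/bacula/working/bacula.sql
      }
    }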

Version Browser Performance

If you have used the version browser with a large database, you may have noticed that the performance can begin to be quite slow. A way to improve the response time of the database server is to add indexes that will assist a couple of the specific queries that are made.

If you have sqlite and would be willing to test out the creation of these indexes to see if they work, please let me know the commands. bacula-15.0.3/src/qt-console/help/clients.html0000644000175000017500000000235614771010173021022 0ustar bsbuildbsbuild Bat User's Guide

Bat User's Guide


The Clients Interface

The Clients interface shows a list of configured client resources. This list of clients is a tree structure and shows a number of attributes from the database for each client.

The user may right-click on a client item to display a context sensitive menu of the actions that can be performed on a client. This menu has options to allow the user to perform commands in the console. These commands include requesting the status of the client, and pruning or purging jobs and files from the catalog. There is also an option to open up a JobList interface with the control for clients already prepopulated with the selected client item.
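For example, for a hypothetical client resource named client1-fd, the corresponding console commands take roughly the following form:
    status client=client1-fd
    prune files client=client1-fd
    prune jobs client=client1-fd
    purge files client=client1-fd
    purge jobs client=client1-fd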

To understand pruning and purging, please read the Bacula documentation at the following URL: http://www.bacula.org/rel-manual/Bacula_Console.html bacula-15.0.3/src/qt-console/help/console.html0000644000175000017500000000226314771010173021020 0ustar bsbuildbsbuild Bat User's Guide

Bat User's Guide


The Console

The Console is the interface that displays all of the text that would normally be displayed in bconsole. Use the command window at the bottom of the interface to manually enter any command as with bconsole.

There are 2 icons in the taskbar which are designed to run console commands.

Use the status icon in the taskbar to run the status dir command.

Use the messages icon in the taskbar to run the messages command.
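These two icons are equivalent to typing the following commands into the command window at the bottom of the Console:
    status dir
    messages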

There will be no attempt to document console commands here. An alphabetic list of the commands can be found on the following web URL http://www.bacula.org/rel-manual/Bacula_Console.html bacula-15.0.3/src/qt-console/help/storage.html0000644000175000017500000000221014771010173021012 0ustar bsbuildbsbuild Bat User's Guide

Bat User's Guide


The Storage Interface

The Storage interface shows a list of configured storage device resources. The list is a tree structure and shows the storage id and the auto changer flag.

The interface allows the user to right-click on an item to pop up a context sensitive menu of the actions that can be performed with that storage device. The actions allow performing commands in the console. The commands include requesting the status of the storage, and mounting, unmounting or releasing the media in the storage device.
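For a hypothetical storage resource named Storage1, these actions correspond to console commands of roughly the following form (the last two apply only when the autochanger flag is set, as described below):
    status storage=Storage1
    mount storage=Storage1
    unmount storage=Storage1
    release storage=Storage1
    update slots storage=Storage1
    update slots scan storage=Storage1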

If the autochanger flag is true, two additional options in the context sensitive menu are found. These are options to run the command to update slots and the command to update slots scan. bacula-15.0.3/src/qt-console/help/jobs.html0000644000175000017500000000437014771010173020314 0ustar bsbuildbsbuild Bat User's Guide

Bat User's Guide


The Jobs Interface

The Jobs interface shows a tree of configured job resources. The data displayed for each job in this list are the defaults for the job resource as defined in the bacula-dir configuration file. When a job is run due to being scheduled, these defaults are the values that are used. They are also the values that are populated by default if the job is run manually.
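These defaults come from the directives of the Job resource in bacula-dir.conf; a minimal sketch, with purely illustrative names, might look like:
    Job {
      Name = "BackupClient1"
      Type = Backup
      Level = Incremental
      Client = client1-fd
      FileSet = "Full Set"
      Storage = File1
      Pool = Default
      Messages = Standard
    }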

The context sensitive popup menu includes a number of options. There are options to run commands and there is an option to open up a different interface.

The user can run a list files command in the console that displays a list of jobids that have been run and some associated data. This output is similar to what can be seen in the JobList interface.

The list volumes console command can be run to output a list of the volumes that have this job's files stored on them.

The list nextvolume console command tells the user the next volume that will be asked for when the job is run. It also shows a list of past jobs.
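For a hypothetical job named BackupClient1, these menu entries run commands of the following form (with the long-list preference enabled, the commands are prefixed with an "l", e.g. llist files ...):
    list files job="BackupClient1"
    list volumes job="BackupClient1"
    list nextvolume job="BackupClient1"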

There is a preferences item to determine whether a long list or a short list command is run. It can be found from the menu by following Settings -> Preferences, then the Miscellaneous tab.

There are options to run commands to either enable or disable the job as scheduled. If disabled, the job is re-enabled when the director daemon is restarted.

The cancel job command cancels any running jobids that use this job definition.
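For a job named BackupClient1 (again purely illustrative), the enable, disable and cancel menu entries run commands of the form:
    enable job="BackupClient1"
    disable job="BackupClient1"
    cancel job="BackupClient1"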

The open joblist option opens a new JobList interface and pre-populates the job dropdown list with the job's resource name.

The run job option is to open the Run a Job interface prepopulating the job dropdown box with the selected job. With the job name in the job dropdown box of the Run a Job interface, the remainder of the controls are populated with the jobs defaults. bacula-15.0.3/src/qt-console/help/filesets.html0000644000175000017500000000166714771010173021203 0ustar bsbuildbsbuild Bat User's Guide

Bat User's Guide


The Filesets Interface

The Filesets interface shows a list of filesets and allows the user to right-click on a specific fileset to pop up its context sensitive menu. This interface is a tree structure, and each item displays a number of attributes from the database for each fileset.

The context sensitive menu allows the user to perform the command status fileset in the console. The user can also open up a new JobList window with the Fileset dropdown pre-populated with the selected fileset. bacula-15.0.3/src/qt-console/help/jobplot.html0000644000175000017500000000164614771010173021033 0ustar bsbuildbsbuild Bat User's Guide

Bat User's Guide


The JobPlot Interface

The JobPlot interface has all of the same selection controls as the JobList interface. It also has controls that affect the plot's display. These settings are saved and used the next time the interface is opened. The user can change the Graph type to one of sticks, lines, steps or none. The user can also turn one or both of the plots off with the File Data and the Byte Data check boxes. The symbol type for each plot's points can be changed as well. bacula-15.0.3/src/qt-console/help/media.html0000644000175000017500000000337314771010173020440 0ustar bsbuildbsbuild Bat User's Guide

Bat User's Guide


The Media Interface

The Media interface displays a tree structure of pools, and under each pool branch are items representing the volumes in the pool. The display shows the values stored in the database for each volume. With this interface, the user can manage tape volumes and file volumes.

When the user right-clicks on a volume item, a context sensitive menu appears that shows the actions that can be performed on this volume. This menu allows the user to open up other interfaces and to perform commands in the console. There are command options that modify the catalog. These commands include deleting the volume, purging the jobs/files which have been written to the volume, and pruning the jobs/files that meet the pruning criteria configured in bacula-dir.conf. There is also a command to reset the parameters on the volume based on the parameters of the pool resource.
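For a hypothetical volume named Vol0001, the catalog-modifying menu entries correspond to console commands such as:
    prune volume=Vol0001
    purge volume=Vol0001
    delete volume=Vol0001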

Other interfaces can be opened up from the context sensitive menu. The user can open up the Relabel a Volume dialog box. The user can also open up the Edit a Volume interface which allows the user to modify all user editable data items for the volume. Another interface that may be opened is the JobList. When created the media drop down control is preselected to the volume. It lists jobs which have been written to the volume. bacula-15.0.3/src/qt-console/images/0000755000175000017500000000000014771010173017002 5ustar bsbuildbsbuildbacula-15.0.3/src/qt-console/images/edit-cut.png0000644000175000017500000000144714771010173021234 0ustar bsbuildbsbuildPNG  IHDRabKGDC pHYs  tIME 1hAd^IDAT8˅]HQǟsw}lhxUtaIWEd"hiXi}J$ 0m36c6uu_vMaZ? <}rG$d F`Jiym7TDb$[ccp@DG#mo@ +Vh(s6Ȉ%ȑm`}΁ВK$^iG7J 0_e\է|W3:TfxC\Fd)=!Ɉ7O Cw}ڇ &}r#psxϷwߡρׯv ER4e_F!-52,cv^6AՏc:K=5&(yG0cQ[\ȿ Y|Y( "aJT%,G Fn^_/ '`K,-0|ɽ[3?yx6gNIENDB`bacula-15.0.3/src/qt-console/images/edit-delete.svg0000644000175000017500000015251414771010173021720 0ustar bsbuildbsbuild image/svg+xml Delete Jakub Steiner edit delete shredder Novell, Inc. bacula-15.0.3/src/qt-console/images/weather-severe-alert.svg0000644000175000017500000052314014771010173023563 0ustar bsbuildbsbuild image/svg+xml weather-severe-alert January 2006 Ryan Collier (pseudo) http://www.tango-project.org http://www.pseudocode.org weather applet notify bacula-15.0.3/src/qt-console/images/96p.png0000644000175000017500000000036014771010173020125 0ustar bsbuildbsbuildPNG  IHDR#LsRGBbKGD pHYs  tIME 5 tEXtCommentCreated with GIMPWKIDATHc`4##=`l&t9H28H] c:f1aAw9@Q0{-TIENDB`bacula-15.0.3/src/qt-console/images/run.png0000644000175000017500000000437014771010173020320 0ustar bsbuildbsbuildPNG  IHDR szzbKGDIDATxڵypU?wykޒ&$$a1MZZKihu:PYR;.ԙi8UiuԂU bRH򶻜. (#̝=|sIA\'{zot&]8t[w } V'* WBq=g7`׭jMb5A#B,Eڊ&ńOŜŤPO|VZc\H5u'48#kРQ+tlJ5ǷSQ]i_UZRTrcX_ˀ @Ҁ ܐMcwُ܏Z,u3IrH98QO6@jˌ%&LnPd>S{=a9s* = ]C}l3B&GgT)Z:_5 rܶOmfu+4y(-ͷP6>%iEuU]HYP讇]E(6LG[+DԨqڑ/z`bi\< 12j$ ZO[*oEH0LGAd. DZ@ vkiIA vT*@,q|.X@]2zSs8+wh$çb8smD#n3Ig90Ӧa|i( (5wu+ ȒIK0mȒ) CJ3ysKv{1x;{O$~yة "H~>7 46?Or'S[gwy X:1sGSV/!~z1i^kUHDgL^mԿ5&prAbSW7_).j,u73;l$&딺ylb QA[3/|JqR&pl*+u]}-u3/>H_Øs3q? TI{\~HqH&[6$ 4 N*H.xᮽQ2[:z\WRz>T plsJ)ĝNXU p`5}ChIwm'q3JB q!_ӲCN-uWVeEb-EIw['BX`O4FZK<|0 _$Or0M2Gj}*%^f֏m=;˦"X: 68ڗ!  ۑEK<5&X!!{<=Z/2]MWTlHۨ*gv ,'|텣^τ~Oؐ<ޯϣ5h qi^r{cc=|fu^;hBH9և fh&F"sڐ谒w2'kfF=N綆k1%[B'M2q}?k~1+}=Ǜ^)GCpG(y^Uੵ<*ڜ F\S–L+9#[ԐBdQ`,7 l ɇw7}) D&Xn!2vSṼϵsW"]?)Գ!`QnpA'r:!<ŗWH)!nvTgo("-"vYٻxFfLE\X TJ0=u|YIENDB`bacula-15.0.3/src/qt-console/images/inflag0.png0000644000175000017500000000151714771010173021034 0ustar bsbuildbsbuildPNG  IHDR gAMA1_IDATx͔oUc;v$YkoVf0:%PiJ8  m6>{koSZY)L*qr(@a+⥙20G?>^&]߸B 7=C$[.c{h҄XJ2'J>V)NU=a.ZJ$ "I!dUYs>*mF6ɠO#xx0DFF"1OU&5HOk.n'.aMH)Z#SM(MWO7eqBu)5(I: H!~%G$IB4y&?6{R02F!? #8IڀQZ^%;Uޝc"o^͠e?Q+E qqWC>s_;'Ҽ0ƅX-qLtMQ;U%S> Zd-KDJ)惶).k`/l|BmbS&n~m޹pN6k /7(x^WMq E~|ˍ;|3ۿK1E1G s98nNQ24]_PdYJ]wvp\Qk58SҬ/;nH%Jܶ];.j䷊oZFy"~e6 M?!AQ1+3ubਫ,$ \dC4UM!XiՖ0mj_ǺE׵4'jФ ۤ!mOkKhh;Ҝ0P!1kz+?^W۰hiUY4?ϸҖk2> Ȟb Kid[ ȫZrʆslTdjӉXf!Qy{$*р.Y+, +f!(>"MV$r1E#j:|δѪ-)sE=|ۺf/EfMH%!tf+s'E|WO@@Ƭ\[R}" 09Q2FlQl#Ii];1kFfE{Kno3ao޺ejәsRLxo=^II2i|u$eyh]^Ymd"I ,SH1,c25`'+7,v%-M7qf2vCk_)ҳV4N~vOfu934U*=q%+bП4D|E~nQ2ZVd6NO3"uxGZ. ??q<&ǿ[< BP1#R%VUHx0B_f-<:9ܣ봥*wNz[sݷݲd3}H;Ƥ.NwIX#lDMjbxjލ̅ZE}|nj>J;s,޻Uk23RD}tT{AQy@pJaLFdo[@C3iJQW* Ƒ3Mj8aM#\^ ?Di/l^&Fx1PmKp#86(*A4K##^!ƛ ṣ;i ʫdspBO?9wx~~X>1B|>. 
)J:e )ZTN9۔xiny7ZxC~8ھԴV骼6vm|*ތPϋM!K5"rȨy!Y JpXLdvxnxnl/,J9<Ol1DʅuJg cy` >a _06I"/[C PZj BdT*ۭ\^$䣣'~r 7*yQY92簬#& D_7ݖm}UUUh@6 QdZ[eD 3cAp.FK6.^X_3Soy٬bNq"D@NJ#I5DȄ2A v*/ğ Kgj~Xc;8(:g|fA~6Prz\m(Fl,#^zq4ebHs%-U̙)1(gĸ,B4hHM{aws[oOmEQ 54u04lkOW hF\Њjۇj$c,}m똚wwwUժ܎^upܙjb2[t-ZzCOwӪiQQDcrAQTwy~Uzp ,[ J*YaN&Ǫ*yaǮ]oP<H޵0Š{ߴukU" lgYǽrn )-_ ' gj !FEJ`5knem xcfe7o^PɧvDFbA2czA(m|t=eg̨C?2[FcRSV1#ڍ)=OUŤ'ׅl;gf͕jidI V mv؉/+WߐvS}It:/nBȗ5ギIJz_^zG|06No+=Xi۲I( 6*SD)@,K?~Z9c5_G OyH2I{<3ӢE!!t;*:N}橧0MSG)-7mOqA&#‹*+ٛo^^Ǭ$vGũO=|H)(`|\?DRHBw:=(rwO//+qӣ2!ZC6kEfouwYs*RRFR5VM,8]~H.! ))PH=|-1BH&A y=g@7֍N=ChJ51 2 D6X̺hT`i uu~0v~̸ 0|(ŗD>cqji3534`P=eSHE1u2 |.DB\. :F"|[Lw"Q)YňzjD.x'g葎xשw{nfєBJʵA\&ᙟ?BwnXj~a{ mp ys3\KXz=!lC˟=:'e푆jW( JmCoxew{ RZj)"U戧@4ѫ4eض8gӓ@JI 43At u_`c})y5Vpv *@ @0 ٻˏ HL-b)u`4WDI$X,f7eB͌ù&ÓLBPnftMXK)y黝M< m=!VWWqV+ /07rv042MLmxĿ=Nt^v#e[qm_||W>vaڱsOoo$Ǧ(R 9B5[{_Nudpq ?8~H(~WNeڟ d\u$Pl<@%`@fG Bg{TIENDB`bacula-15.0.3/src/qt-console/images/80p.png0000644000175000017500000000040614771010173020117 0ustar bsbuildbsbuildPNG  IHDR#LsRGBbKGD pHYs  tIME %ftEXtCommentCreated with GIMPWaIDATHc`4##Μ9"fbbwT$T$i*&%% %%2L)F3Q Kǰ bϞ=F#, IENDB`bacula-15.0.3/src/qt-console/images/server.png0000644000175000017500000000507214771010173021022 0ustar bsbuildbsbuildPNG  IHDR00W pHYs  gAMA|Q cHRMz%u0`:o_F IDATxb?P0@ y 9S@Z00 --hb$6tupR@Cia I AVׯ?3 ?}c;|Z.@C sRATR&" %- )t$f4 ?{ 3{ۏ ?{{ X;aʔ x zmm {f9P3^>o>2| yqB yh~~n.REY iIY }3|(3CQl-6fe`x/tOp (Pcdy GY `bG8y!@<J2耋p`@X2 009x )FKzA Ё|@xD<Q?`hKx1pq1p f"Q @? = 'O>1{X"8/  %%{@DUd/^|ƛJwody`p0112> &"2\r42"@ s(C~go&`e+9Oь   _2|)%ėBDT) d$//q?(qs/''6Xl'F_  1P[OB`B? E$y"# 12| ,03DT&{ËLp(A0c6Eoiz DIV;!&p q> <t܁x2*(&@ Y=1xSk9 +GdD0dA?QCϟDbdt:/o#? "b| F 5 (H|ܐ25@Õ7n\@gٯY99q1Xh2i>>pMoCӧgOaq @ԜXG.5TePP``ee{4GOp׷gϞ_ƶ><|4<@Ԟ5dz&.!L,L :j zj b p o_ |Cv``eO@< {\@|bÆCv6A@'(31iH3ܺ?޾}׮YPG~EAr8x@:}_Nxp??>?`bbf{@CAI1 ;ba><ꠧ.5@w@ S>CChP#7I QѮ'fff@~_6 XH"#5G!sAڟB5B Xp0F2 fb@g`gkjdo +݄v~$OfSoi@pZ[A^7`%^LnN}U^.saY@ $I )6x{l$ X5B^zbJ}钞1L5o^BLWK0u6<( dq ~ |w%A=+!?`,k"%=9A&6x`+ =l[S kn kRٙwz@qۓ(FjdӼtSWhUMV'ޑc\\4O~!XVU<`P*+ d2w^_W{*I=6@/]&W_e0К'T Sfi1{U#@P`~v8 7yQ= A:MBrDCEWsu;:u>"7EH .& a}fK|e2Y5 &P $nc9X&BS\t<Jg;[Ijb߉;I..0N9+0##Ƀ TV8q͑P~R٤ӾN3^:Ie !Ѐ4?F_K_έ-g\)"FIENDB`bacula-15.0.3/src/qt-console/images/ss_resconf.jpg0000644000175000017500000044224214771010173021660 0ustar bsbuildbsbuildJFIFCC"   w     XWh!'VYfg(89x"#$17:GewAQuv2Haqy%&3456DTbstdrR Z !1AQU"&'SVWaegq#6t$245v37Bur ?F׶Cw@;O!.lC|RMx9Jw%ٸqm呸VRD2r㎒*Nk4Y\n$89g U}Þ}C=eq\wXZڐ*CsF"7D1"wA`D|mJ'u2^Qoό))Nr8efF`N99grYXraYsYpQFZT rPّK-H+qJ,y+q| bBȌ]S!]e.L&_Z18Df̻obJUcE@RmpҢ]phkQ.8X)^"4s3VWMHnv6A,I.Rӓ=v|;5j͇*&ؼI)tgZjV Tż\$l fAaa񱽷P-xo9Hu{g1[_k?hn%t gltq'IB(~%1XlG}oWxmmh ucpPek)ئsi\y<` )M(@;1TVRcs]quڱ~q{Xeɤ\2H1;4+s)CCuX9q9?BI9I'NQ< Kӎ8+2L,p,1,ˌqyu]4'ײmb#|uET&gű76cs$yoR̲ ǂh#P~-U;v'%Llls)v,dkm>#>Ft1jxv%9QD`z|Wjx}kgK**[sY(pP갃ϋǥ-q\,ڸ]89Ac\}92ѯ`_ .=l]KY,\rog\9Σ<ҝUU.YOsZClkf؞9Y\/<;2s#\RmMnhdntGU} &\H7f8әprey(yf=;bզ$:Ctkּ]]",v-W$eŒ&Bkcr\B"vݛw#g,`|SL2+1dbwf[! 
%QI(mrnVI\fխ9.FZg`D[5~ۛ(_glTHW N ,m 涖V-iVڅSA{t) GҬ˲ƿPbFՙ+i/ҷa67MGzFj iAU~ -|fMjP)it~8]B' gnph-:ܥS'=BS'p[+P"(H7WRxIJ.ű"AKN<bnIVRZrDA+T d lé/3-clb"O5NO/hbTdԭ'5pj_eDc+yjNu*h*-iLVdsFfI  ҥoi9!ŕ̞CUwåtɭ[W+TN -V\M_^NɑcwHkJZ98.9Lw KJW::J(Tqu<a$dvda?w'5Qyeya9\3-nն=-+[u|L?FG8Ѯj^sAf+DX IO^iEh9]pc,n)b2iӚ򑹙k34Oi, 3,3`Z8ll]Pϙ"xrjoˣl=$dr^ e )%bS+̓JyXdV^n:]x_XÊnM*\!$x4J)R+RdFSZ׳fޏѲy]5wNڸkuխkIYkƧFT)$a+rDbs2Bgy\^Mmq"y5]_o<q^yŞ!95#q߼s1Uujtd)Daג1%#6]bGxmhAHGo%N=IGwYh];uuئKZl =c2mm3)2-j+Ir$|+ |#Gӈ,^۽V&혽BM|Rh΋?d+Miy=)'y0K7sPg 4&M+d522RtwLS>MRf2җ7MHlY?'ȣ Bȸ&vŤe\'ZQ9ju$iyce5gPjև˭u}\,e'`Gq o~`6R+qT1.dd-nTesQ6cPP^mzfe-XU>S> #eA[JȲ3?D"S5ntwewS^dIN %rvQR,rERBW ݑ0W&e Z_Du`CD/Sp`ħqT^K#.)#`W/)<* )549N.Mq -PkHr 9S#e6 ,$و%mN8Nã phBث,@bݎ)z2BЪ>\1带Rɪ Dp1MZN 6%r%[`UWV{iѽחI2UU|M)"xHܽ Dt\~MY}i)RV|xwJbkvx ;1\3qyIp5I qK6[PspŎ|@HwB}ϳs*檍̴}*/cɤha|Ddq1b7.1.]kvjI,sN s[ע #qH#rې]HpӉ[YdSP,8E_kb6h\[-+XSGI!?(~$6l]ank-DR$mG 9ޡ)!`ʡ˯#)$q`hdRJZU;TB.XͪDmfC@\RH84'ƨBi 41sjsFSkIԷ]C2D<;IɊ/y(N qFpg8CR]VUW?̝ STbԆ6 Jpcwfgӻwa4$ⓧ(idA%in|`QEfag8^y8<KfS;{_u[V\`)YGqxDa ffR㈽9&k.޲ʴ6u LPAhLږ*礰+"V董723lHJfZjk쳍NgZeiL37w!"}7\V)/;݃sz 3^jFيOMb7e=,*TRvp昊=֝d0kZ'(y;!ӚN]Ӥyx,378nQК:hDSZx)mUBx{Jإ +n2Z%vm!{çMRBV>na(鍐S lTMJb6"2ԕasɨ| JjoX#v>W!NgQ*9(1ƪW| XLUW[L%,cM 917)K",x-2q}n[aMZXc dBTd g1.Ze,y.Dys>.p\aҙ]]MۊSlVjFM}7La"@eSܱn[W\x> W2;\a6- 8HM\~*d,^rOqїUjS  'jWF4֫L+U=VIHTgݶe1RPa{lA|`ж&sdNmLW}2ֆpqzݪ/g.燶۲DcU=O Áɽ"R2mzO5wRӾ?:hOUS #,5r}tzp0-\gd!HčYqk,3[6SNI250 -S: VI#P~_ף4KH޳]0^oZn1`M-R=~WddF 뼤:N>krdJq8xe ڡk'i]..4cїyv.CqT^[ {w$srl9!+#g,o%8M]wl,m{ [.5-t% C!+[ b)>ĒFkqL%9p'lA}Ba=/oQ?R2QP-e~3}_))\B\WYƃX⟞RƷ8'Njr,$ޯVge*`B2Ȅ!%18/ɂHݗSV%H[7O\F^매?ވio)z-\F^ fkn?`u {ğ`oDս R fkn?`y357Qװ:?OC7[jބ o357Qװ>fz3]='׼Iv@;KyM[Е m >>fz3ns3X=su{k$;z |Jns3X=su{7OOI5>SV%H[7O\F^매?ވio)z-\F^ fkn?`u {ğ`oDս R fkn?`y357Qװ:?OC7[jބ o357Qװ>fz3]='׼Iv@;KyM[Е m >>fz3ns3X=su{k$;z |Jns3X=su{7OOI5>SV%H[7O\F^매?ވio)z-\F^ fkn?`u {ğ`oDս R fkn?`y357Qװ:?OC7[jބ o357Qװ>fz3]='׼Iv@;KyM[Е m >>fz3ns3X=su{k$;z |Jns3X=su{7OOI5>SV%H[7O\F^매?ވio)z-\F^ fkn?`u {ğ`oDս R fkn?`y357Qװ:?OC7[jބP2 Mf>gB|"1GpJ.K/3$ q +.j>A|}`g >>fz3]='׼Iv@;KyM[Е m >>fz3ns3X=su{k$;z |Jns3X=su{7OOI5>SV%H[7O\F^매?ވio)z-\F^ fkn?`u {ğ`oDս R fkn?`y357Qװ:?OC7[jބ o357Qװ>fz3]='׼Iv@;KyM[Е m >>fz3ns3X=su{k$;z |Jns3X=su{7OOI5>SV%H[7O\F^매?ވio)z-\F^ fkn?`u {ğ`oDս R fkn?`y357Qװ:?OC7[jބ o357QװG5m ƕi-mnri\W (2]A:]Of[d|lW,N)ډY^*INƭeD yM4MN|(- tƴj)Z-98kS$Qh6{%y xP*XZڵ{4bxs\C 9p5dNNleqpAe!cu)Q&)sU_%wxw oIi#[]8nʮFV&ӦG1U=GTk5-Kf/a隼H^ؑ$H*etj"NӍ"Ml}wa`r B2IJz]d38w9%c&s< q`ZD+eӫV3 `t)J-RCS:U!!rp# +s3,q-dr^ZbN/'JɤF#^fZeM=Ir8|~{ {S鼝<8Q'}h9*>^VVmTsNs uVr*rƏռ/!M6r+3o )S\ 1 ܮ,}0Ⱦ=W{+x4 J'9BS$Qcی{JPNN^GcPŘwerfe-yҲӴ餍&יe'otkv]Uix(ͩ00i~:B\>:5rUl}#,72oZìH ,jR'8tYe̎ <4/,rcs#\DZױVEZZ^eEDT^e3AnmU5Fɫد#&xdj>9a7:9cds9'0X y\qkgYm.;Phhe,W&-P%b؁-٣Wbn r(x2uq6mrX V-kːAȵ#s_哎[^cG&ֺAއ"q0SV 1Hcj2RKQO]dn&J$*G1/ߡ~b0 ?_CQ?kD/Sb3V?̝a jd`bm k?u>pY y΍Mi84N֦"ien[ ]X9 cV MM۴2vyIiC+QHxF7D]~}D6Js99Ү/v*t%2Bؘb9UDW5;p]uO4Z..:o,xԩ&m? Ğss?_;dƟpMG,dPk8lc/GkUX7>6ֶ &ۖ6^*CeT ܕG%)|;?myH'r>TB=gbR#!Mcۖ{dтk@]5Wv:+Hߒ܉3F!JK|˚B"XY*ZmHlz̓[<&BK\* ]NBk<'BcQ'Z\ճ!2^zkqpM}/OLY:<:|ZBع3f\ꭤ4m]ez1M? 
W)>E58l2rl^ i%6TBO1W5u֙ neUeF,7.d͆^ګ1'&[Z!mMOcgfL4kɖdLyrۂQ=3}9붂nP cfRFQo.QeS>=̵x*vqqK|tĵxFW9#qt%gӘ:=alj\ í7t`ȳlc:D0nêsJG +.RT sii4{VNK͋{"`ζ@n8c3q䄒Xܝ{2#voBrĎ T]4ﵧn>¯/ܱa^-Dd6+g)&dρ^fk4\3}]C'#2׸~oJݺѧUr~>+]N+X2pkcnB6iqKN߰:휲U5+wGnEJpB%Pꑘwdn9ΝN 06&^Qv`p8G cMsE3c |#sJLS2\N8V'OHvE@EU=L>S$$lw3޽.JJe/nE-3%rl'f~}m1Z x9E 5 jspR]+̣ʧ.=bEs=$,ӊ-M#|VkdVW4j9^M(pQV!Z /isiƴKϧ*dE~j5*uR x.skB9]q94T옧gYr n$2͘%N^y\w~7Y~l#Q{'l+Z9~jxˈզK)yr2'trڵ#\ܔVVrvvnƔ+17 -3.$ˎ̎3!a!)fw >1ˎKsAoe0;ߦ=]2•řVHӋ7nU\2PcNz S)ŨL(br'8K[#g,+URJF+S_Z5[{=ڍ㷰8MzJSHW |$y8} S?SWVJa槚Tf{(ܲMŮ;xZԝ]4#w 'NIɭ[F2"&M#%b/@kھ @WTJ/jFL.8m=ęunDr"g@ߓ%  W| N$FQGp`uS%ߓM2WܴޭMXz,Ns+;kY%M3[IꞥJ{E<;g5il7%o;_S7%.[Mu+Lbna棕2)nuxcH'7G*]a 1lCM׌8Wg-8dNDq` Q]-i/'ũQ$Jq"dnWՕ۽}ݶɲ'&GRfuu]5j@:bQѭRSv/KVWǑ'j'񟠿hOO?AZ1V/Sb3\2u-?mYe.|kl$~ D3I"g/Z&/,wd[Pt# ?E? L<ȷCgAZS2^=s~8io(1>F̋t=6yn&d{,z1DpPb}l$z"l+[0"DMk{Lx:XcΉ᥼I2-DPVaE? L:udzKyA0d[艳ot̋t=6u3%)cяg:'㆖d`'ȷCgAZ釙z"l+[0fK׼RǣtO -'On&2-DPVa̗xF=蟎[ O"DMk{d[艳oté/^K{9?4[# ?E? L<ȷCgAZS2^=s~8io(1>F̋t=6yn&d{,z1DpPb}l$z"l+[0"DMk{Lx:XcΉ᥼I2-DPVaE? L:udzKyA0d[艳ot̋t=6u3%)cяg:'㆖d`'ȷCgAZ釙z"l+[0fK׼RǣtO -'On&2-DPVa̗xF=蟎[ O"DMk{d[艳oté/^K{9?4[# ?E? L<ȷCgAZS2^=s~8io(1>F̋t=6yn&d{,z1DpPb}l$z"l+[0"DMk{Lx:XcΉ᥼I2-DPVaE? L:udzKyA0d[艳ot̋t=6u3%)cяg:'㆖d`'ȷCgAZ釙z"l+[0fK׼RǣtO -'On&2-DPVa̗xF=蟎[ O"DMk{d[艳oté/^K{9?4[# ?E? L<ȷCgAZS2^=s~8io(1>F̋t=6yn&d{,z1DpPb}l$z"l+[0"DMk{Lx:XcΉ᥼I2-DPVaE? L:udzKyA0d[艳ot̋t=6u3%)cяg:'㆖d`'ȷCgAZ釙z"l+[0fK׼RǣtO -'On&2-DPVa̗xF=蟎[ O"DMk{d[艳oté/^K{9?4[# ?E? L<ȷCgAZS2^=s~8io(1>F̋t=6yn&d{,z1DpPb}l$z"l+[0"DMk{Lx:XcΉ᥼I2-DPVaE? L:udzKyA0d[艳ot̋t=6u3%)cяg:'㆖d`'ȷCgAZ釙z"l+[0fK׼RǣtO -'On&2-DPVa̗xF=蟎[ O"DMk{d[艳oté/^K{9?4[# ?E? L<ȷCgAZS2^=s~8io(1>F̋t=6yn&d{,z1DpPb}l$z"l+[0"DMk{Lx:XcΉ᥼I2-DPVaE? L:udzKyA0 ?_CQ?kD̋t=6z}mm[I5obOeW;/Bڅ9\BDi 9J$ȷ#s]FڪgDDIUUUdDNuUD5ktl7V2K&GqO|~*X1{ֵ*""S|q^a҇f;ZgwDuDFjL~))5vհ1͘Fd/ ̯,RN 숯we;[Y_U`z0xs5x,S}ceMϔG!ؒ$ f0Ec9kܧ7YAQ WK}u=./0f Xa^ɭ~2S%(o6HIݺ^;gRlúm9G \Ɠ{~M_' ?ÖTFRȴk:r&=lX%WiȬ]&D?~\F.9;[:<ˎq++ ̸8˞9`FȐ]"0dN/){jhɒr>.68g<,ȞMÌ{U쏨 &[M:?\B?S]s e@PON2pN|aE#Zq4TUkhV}*Z=h'f.B[>jШdtUbwC0zs}1BS`Uwc;]9!&u<2o6L1w l;.%nh| 2O1.xĠ[VQUGEÇ. &W ׍|e Id|Yg m+'w<}]z);n8L=թ,apŪd^br#&`q ]5Zex^\/oqGTE.C51whY3r29ڇy\*ī$D)fiB$l<+Hk 2FJ/ z[^qt ;?de(][mfu2^f88a,Ss%q JNQidIyqeQx󙆚f|ePC(w&8w9vٰ4Fpv"( (c-Z s)Qd>˝[Ox\)[ISҠ\r! 
1n$+Pu\Ww9b"S [ʞ:0GZq4Ʌ|$-E(>NEܩK;$.?d5gMd+DdJߟ%ї,Tw8aNT3mCֵYٕH"6?=1>J蔥͎.hM=c#Xz3J)(Ni|1 Nte=5WueEЮnFd"^crl 7@fKy-jaGЍYٱAG5`+J׊8&ID1I;34wT8ZlMkVRxrK$YR+XYؖQE)R8sq,ˎ8瑢k`^RMVFEIݞ(?= xWnn!mգl˩K٫wei"٣X&οmEkz~^.Nb)'5*"tnIS yj~mPMyl}hUm[0Mȓ6Nع-ңJ,{~8:?{65@}[CN7Yz>TE5#bٙ/r̓6E:,k% e/&vUH-{2[$/c,69 @$\JMQZQzPaI2ƥh)6?^J^ SjB4C6s]:=a9{\dɒ/p8̸2idFqOm8+wrUWRT9li6X M](HN=lU|OH؍(GdNk+K6T&K=jl۳_̗.)6d-̲|MP8pdκꢃZg|) ͇ 2'138j<lr \xH_}8}N'S7e4+@Ūs`a3ت\^&o"u)ݒ ’+4xJ~_u[M{s}{"QdSvgL _ _3i0_~ 4ú ⤛vUV}5XIi^6mYɞ1˄|Ƹ;2`Z_GQWKxÃ)Q>>ez:+Wdv*YDjS&gpR UNĉթ䮧&FV.)/'xqϭ)蛄ܰd$:,ik"&ٚN JOSa|vX_gS[!׻ڛ򚳡6{CR9B_TqeIQ`Yc4e#3=]TX, Nj_Twϊ[U1K$=%&ښ ="v5=]w `ie{& o|)5KZ+4q#/S cdf8s <>;ɫ83:dZkO*/z!)R?T;;J_ #,YWH\NJc*B&TػuROvng5=WV^U{qT/G:<%jD h2L0L܊Mh3^u2% -8*{݊QnrEϹgc*zVK~-G0,{+PGYu},fڰXy]QI-ģ8{e+-_rg]:tLZH9KNk+Lڙ7,(Rc+ӎ}lUuħ'Ƈ 4[s~2]9iTO_ȮE,)֘4gN)dsY$edN.rMg9bW5`vNed|i㸫DN'#ͤl]NaW(+=4kŀe3Oi9?no<'az/Y{?{U+e>ÌAm MK Dș3 WBN[׹(VZVG9O4eݔm/@Oޞ2\5/"&Ѩ.ʡ9|rR,HVJ7E }MSf$$$IP>ĬUe$~5{T;V eOUud}9=NNd0cđP sb|)\zdqX/_aKMLZ ļ0=D0zH3YFk,n]}9OPJF/2\&\B yLBĦroxE ;.z=p濶_|9Cwg5liWR4/* 24?x2 qb0Da Q}88㏭qp@m45qlh݈5Sa(j5?rhJRL0U"HQpt<ͷeh39HqViu^1)-qZRDZd(ԟar4{ԓLJJ1nqj'Ni A*xn\NY 9|)pIl# 3<r 1V rӠ=#Ok}cxo?Pt 2(3o@gJre7+rX:J+2 16 R9[ow&r-eVr#JhfؽqNzEj:LwO4=-NE&mGSFuʒDgZÈ[ݏu/n6c>.GaiS}@%ʄ NDQ.LN3ЪLxmZPlkkFr!ooDF D !*D,$ / p8H%s*1#kc#vֹs[+ܩ:ݧP4]bwRYEZ;V!3d 0֒cɹ׼Űg$XQez e4=`"uRd!QY'mp=P4T]@u֎{&D1ZfFzVhQ pb=$e!᥸3HSv&' TN;zQaȶGax MDh9K%{J'YU6GGn)9ąKBf!ԏdcE3e嚀)e?s 9!7{R(wEQb|EftI=Y2 6=~Rd }xVT/qLJ]eKǢVX.P2Wa@i fZV%^Rzy|+EhvCp-qW'; VEU.G_ZBM;=P黰L4ݤCAtr։ͮ§JWm#R{ #Q&13:;qWG vJmzS<[)*~Ѹ\a=L)ɾJ(;4ZN2j=r"\d.iDjĥVVgVKbfR=j4꒔ D]Xe4ɿ(Pu;4yR3BwvlxF[%so/#PȌdV3*#jhZ̊3CSi Fܝc:ot u]D{IӂU5ΈV;B˶>yjZ""b6bOWWhaVKXݱu6bc5P{c"Zn)4U9.27sn NMĭ@$`YxvD9կM7NkzmYUM½leq5}\4ȜX6}5jۖJiME]m.*s ܽճ!&{:5m̜2hdOʕbd徰4 3-+ Rv:bzZ!aæWsM},qb2M;XĞFtE^ri/TdYQZ>F%utk$TS'!XDVKF準BEy[CUNT2y],A$Y2'"#b[#2Xe?vuOtMTY->M9$"&:֩>Q"%-ar5]p 6OvjԾʳmʻTҺ(4hV*ϰx" Vc2REWe9:VO [C4%3_>tvnٻZʂGmQGZkg.51,3IUkt>#`F֪S\OjE ~t.EZ -t2:닃T5 Q,)ԶM^\kFG6%x[;A^r?±S ыJ#4rw #*ZշĢIX_-`+̟L #5^cGS--&J{Zt@e-DL$1صDZHJ憧ekњdޫ ӽE1EkC/y,zϠYu3KUvI=Е{l cn1dq9{]$;{>Q]9{I'Y? y"puloT5QΦDҳ8a64H-PؾCu4꺒jڪ|&M+\L"xULo82[{ktފꯄl{]H9 8ĺa&u5#n/.Q`^yb_g )Y'M}鹼{4K%ZOKsV.{vNYJ]߰M[j ? 
e A[Q76֊(ۨFhG JN(Ii[Ԣ,E,LƱյ$}Bb~gRMNyxAIiF< ˜rǞ9y~"Z%gc) vvRӍZ˂yf`]cc'6>'ˌA vȲH*&;d5ݱKqjaxt%vsBe?`D-$ht=ؿ;<ĦDݵVv]Oh:u4b IUÝw#{kB4nkퟕR%;UHc(bB|u)F[V)w00cl54thtJ޳M9͔@'n #FcʢK|db{Z[ 3Q;/R1Q෣'S1%gپͺ6֍U;a[jbDyW'tYGV ^XqoNbERƵr\k:Hk-*'&Ĕs{FV)(#Ӯ/ <7&~$859 q8ĥ$YcxcKL͔s#5NMzH&kg1]ݰ%ѵǢW n +!EfZبO+&*6G\̭IMT6/`Aޑȡ&6m|ɡ)j)ÌҳȣqRbe% *3Bơq0mZY8P$41 3.9< >SV,F9#Y8V#\WoG*y[[Alδ[q>ift d.wDDQZD@ `JHo0xECc2C'mcǛ8J&j793>xO7?cmU@_jͥ4&Y5LI"mX^s߇ɴW4Y!2UZ#Kn'{O 9,7S;N?ؚ tr&exq\c\z`j ;oznúɄʸ` тoPv}ۆh(c6o6~XUO7i:f#PS/0Frpq.Oӻ쾛[XyTaץ¬Yen#\ܔc/sdc(qZ!aҳY{)2-*Na`'RY 7+}bQopTbl:Zדj3L}S0;*[͠^Fu`+^1h#P'c̙E$jfۡrr?J|=m,ׂ]w_b܍ʩ.ִ.LEїd{2{@;YhinX3GH٦uBks>TcGT^Dn'َhG/L_$`}j|;+iE5ʐ,i5@ T90 h;O¶Ll+ir$)8-B5l^6+v鵿[8n yNWCխˏ d[m6SdJ%W#31.r8c Rsy%h餓\XSu dgƱ^WūDyϲs I2ܻu{/)ɶV&tbOLUN]'5#0iIl5DĚ1)1&cq(I5VWTO;z.osi*#Ğ:F^ \9bynr4Q ./@Eza$-VZ|Ǚ>PsgG1"-̕-#R#5c|ąddFtƴVlQ`i>"аhͻpkJT[$[ɭ[4mj$IgūoHY};Njޑ)[ }j#uFRcK4dtn2iC';(e|$ܭRW[ ,c;ə9gtѩ=Ƭ:ql6F_"~).Xrdr$ R2*{ڸV}}tVk^ jo\!a[%"riXL*MOyd_s 웣dVDV.U|sTd^eəgegJŇB躞gd^nJ#E̥p,\T'=ya'+0xǎ6k{]8o\ j_\ NtۚH*{Ff+KӦʢ,m4VřHspǸܐ1ȩ =\jN&o|2>+^=)w}|v\~X՝%&Jӌ0˞*y릟+%_]lj~^{kղ)9LV=e|D,ɭ;S#2 v,593>(uVj`Azct%-wX ,@4rWsH/FkVTZ'JFaqw*'LRn=Ԣ-SL: S[KU]\B!65;Bƶcҡ=ŕ͒C3 9czsU AtU^jMI.RZVÿ sZݺ8_D4Q66\9?1zgG>5U=-k|mq^vJ&(84yoW| UL93t'f²,(I]mi1e SgãҫY;=hhw%֤;.q'/F{ )l[$~2jfe[*gꖖ)uKB ӱǿHLzm'&<(ָ|x*C|&O辰{_PEZߪݛ:͸6),{lX![#dk7gקFJ4[ړ;Q~MCՊ;n(#e\r*`g%gxYH2Jkk*uM:#_RH6gg0mCuP,W ^y{ųo(P@VV y98=; d8PItuVN'Kޣ%K\ _5ܱ8Uї>0mR Cf1Xrrzl*%ںhn{msK7U)ya\TMTM&2sT!I R2"!nq,8VRNHÃnV}p6-T3Ql]<"\3AISW7+j;|]B285#[Udd ~s=*մm$fHkxp-<9 $d\Mp|Im99v\v_qAñh]V*_5}Uznp8U`G+X^)]Ok㓋EJVv6ݑFV[=yAql1~iMeOgq6~u7ҟmY5Ϣ)׫H+/+Y䶔Inc%qB1>I)>>=4MRFy5< 3,31 $̾{2',,2yǟn3l~E\L"WUAI )Z*̴$Yd$_&TH77T⓸9Vyfexǹǹ˞9<xso?@K7ԫDy #lt۝XRSyI[KIR,cdFbkmEHo)SQv+g(.rݼ՛Qa7tˋsˊ@k>NaKӓsDm@K+c׹z\αJsryf6=Ye$8oyiލ诹mΝl jmta^6by[16F-Uު+)gF\yM'^εQdn]tt=Y獛'[=\YV3I<}hK3=R;RIM3-q2ލ}&ٚ W{lי8 6uLf~zً{|˞8㜹={_.׭q+hlq7TNCj){+[ٲD+ѫoɩi8ÕR zU=#CuG m2k "a/\S1sçKS&`|vz1DZ9ngs8<8c.2Ǟ ž#Fzm x0wҋ3= sv]u=kk?$֧Z- :z;kz}UD#5_ dt޴S [+{|*):4 JGqRʕǨQ9+l-3b[44 ڜm}nLv)U´ )Z5AE^xq$K8Ϗwf}Wg޲],;94|Ì]9aժ]9tj!MI5pCT;TŇ"uX#Ϗ ˤU).HykvRwpD8\+ K+ /8nj0?8qqk.\\LK׶Vykzz39i/. 9k%LUNؽ$J5+tF9ٳ"ШG·HlzDHdH2gVsE&3,h.$R3$L.. 7{XƩ kJsIJ2Xꅱ{zy? ,yļ0/s0qpǓ3#MϞ19s2.y.y a%gHWrR)Sa\2[6 Ey6p×V_ :+N֚4~'fQ4 S}-b{zCZ O'm%ܵ"kL^x$VBeMt|^9A!=pV%rZBrˬKK+Q l,.Mm/`9"0mjS~箧JUFk!u ]nr8>LTI*&x=*4]fnLNp-2${tRWvOvj-m!jzWA\07,o\I Vђ#pB&H䐥DCPnm솺l n4Mu7Msh,YZne2v{nNGg%aƢk/ o/Rn]];e#v+|jdoYt+ƫGyb fP'ۇ'HZZaz7cri}2JSK&QaM ,ټ͂L*GsIZU*0t2Ğq:ށh$9K#25S=qIBpmɦl8;e|hjpFӃ0f!ez~uQ!q3_p=g+BRdvƎWU6Hb&C5BccTf4pi֪k25-̿kq|'RjS{D K* J<ǜq?q]fnn<_M!{VA/o|v%ƚ0o#epsQ|0tjO,T'̌G箽 ޖW ԧoڇ4R&ֳmH̜9,iW)M Bf.#f:wI+B@[q4k"oM5*W#eHKa&sgFZt.poEʅhweXXOGQ|+*٧;!frV1KPzz*wEd8,x1=PJxѕ, ^˳T6ޢVq*|%&ǫ=.TQhvTpTM%ΩE㮙x3ymJ S4[]Xl-qĎ6E(RJi#UNs\,AwL^lyw\n8tDnO;™; ݃g!$L[vI^ՖAx'q,1YjO㌸4ʛDuծKwg!ܷ**]NȻeIfNHSN:esxT3 (oSM=6lsQJrXSak̔-kM##+r$r k4ֵSgmbUt [Kێ] ËT\\8hoA 2B #as=î?c Sg_M(,$5QWꮻ+H ZI9nE^C'^"҆F7>[[ݛZQ!H?{RpwH^_҃^*ٷ$rQ7TrB?"m?fM{ɊLyݔnPz.Zf`+9(S AXԺ((<.C@SҲ#+R诂KJyx59Qаfƍnc9K-VC#SB.ǶELNstXJbJ0̶@mуl_FjgQD7LmܙXJ.5ԖHkʸ gCڴ`DGv졓eV/u{(h5~7-7Weޣ8ol=hH(瓙ZGƠ *J433g,҄wm0m%z2:RŶa;GσyiZB8D-HZ33,qƎ-H-s&zܜE:-^t̬1ź*֝J,`y&όx(YmOۿhe=Dƥ+Je &=bX#Qy!9fg&4k$ hn^ӥ7ʇSGɯ{n&mҥ֕Mr.g&i3cf Fcmnbto`vhu}mj$Zp{d}c:fxo7omu{CmBposH_ kVnL\Ks35 /WON6qXBάjFӷd ^c_СN^ !zPBqI,yǙgG!XA+"eFmHX,`mZdH!*R ,1ǀ+ToivU{j֨*{ WՕpֲیtz{ {1#IiKs#m7 #qFb~KPAFc5gefoF֑:Ʀomn@@!$H1EL$ x`?g?ֳ@>~/=g>!^{ZC9 syk? 3絬3kY π`?g?ֳ@>~/=g>!^{ZC9 syk? 3絬3kY π`?g?ֳ@>~/=g>!^{ZC9 syk? 3絬3kY π`?g?ֳ@>~/=g>!^{ZC9 syk? 3絬3kY π`?g?ֳ@>~/=g>!^{ZC9 syk? 3絬3kY π`?g?ֳ@>~/=g>!^{ZC9 syk? 3絬3kY π`?g?ֳ@>~/=g>!^{ZC9 syk? 3絬3kY π`?g?ֳ@>~/=g>!^{ZC9 syk? 3絬3kY π`?g?ֳ@>~/=g>!^{ZC9 syk? 
[binary image data omitted]
[SVG icon metadata — title: Package; author: Jakub Steiner, http://jimmac.musichall.cz/; keywords: package archive tarball tar bzip gzip zip arj tar jar]
bacula-15.0.3/src/qt-console/images/system-file-manager.svg
[SVG icon metadata — title: File Manager; author: Jakub Steiner, http://jimmac.musichall.cz; keywords: file manager copy move filesystem]
bacula-15.0.3/src/qt-console/images/mail-message-pending.png
[binary PNG image data omitted]
bacula-15.0.3/src/qt-console/images/ss_qrcode.jpg
[binary JPEG image data omitted]
v5o2A5ٚz3ݜ~f<ތgz^'C[jߒdk 5g8y`yn=?O7ڷտ$Ϗfkvqٚz3ݜ{{ПxooY~I97џ 5g8S:?7j޳V$s[o>?7?ϏfkvquB=>nսf&H8`|{3X?oF۳7џOSބh{| |ݫz[Lqof<ތgo>?7?Ǻ׽ @V y`yn?`|{3X?oF۳u=Oz v5o2A5ٚz3ݜ~f<ތgz^'C[jߒdk 5g8y`yn=?O7ڷտ$Ϗfkvqٚz3ݜ{{ПxooY~I97џ 5g8S:?7j޳V$s[o>?7?ϏfkvquB=>nսf&H8`|{3X?oF۳]us`I涺2c^z=f`9䘷(݉%$Tr8悙!v%>Ije== 'NlPe6`V vX5>W \gU~6h6O7kLL}tFĶ'6jr66ʨ{yo.nDNEuv͎ =sڬS!IJ55#{W,}ȣ]THrbqqpqqqpqqqpqqqP57ѭ _iIN1`$xbqh&Z$ɋ s'یgXv T,/&$%:`iEKnrd>0:lL5eW>ob*/O%,lm^["ҳu<[FK0ؼ6 {9c#ڎ=s`I$S,0>>뾰}u帄 1pL꾙8mR/K1^9bg "9Ň$>>]05qS?]wHs]; v<5c..ON(ُYz, r1JEN٨LvpC]ٳEthu)6,:Ս+ڬ;'sE_0*%f*;P(CRRÀGD>tX >qId8kxl+}cw˝"߭{ 1K'RI B<0߮-e~E/|*Xgxޚ:fYTC&=ǾIX^unU]j)Kl$[+,y̒#3n1ѕndp3/fwI"SfZ1crUrj:&]bZNݜ2^19R& kesY"FoUw3Cssw=`sX}q%}%~(r$d䠣4a7Z*㝮9cYmE-bb3 uXBF[5S+"\Q$^֣7|)(Hg0ۏ$ǎTV9; m476=;)O/Ifmʼno;m(=iגWL 1ǃ?l_>\*oW>M<9T 7N ȧnUƿjHdte' $ȩI9c]}ֱ s'T2K VEֶD䨼ڪgVX'%s!RF4$Vw`4S\މS4T>Cj ̐Xkм}8UEH:vńWɤy44Fxf|;OϧmzPQvֲ lq?S$z~;[Ek`==ciy=}ȣtԳMfdVF>-s?W1Tٵ#Q0rVKЩfRua䬨;X*"YF7c888888888888888/j'_ҴOH㡨?AJ>NcTȌo?NKld`9h_05qou8+Z_#"rS _>Ad[m#1Y,)l d陔Ig9U/+4li]s$ObU"Iɱ83 z5b@jujc2-7)W|i J1[iAe5jW NnTV5NnlMwDͭEL|0e 2EzYk&cV,|v&jݞ{xS>iaEf%0Fv(}&E5apS,}U맍d9)jF65b[Jo'%)i fYgvi9~5ϥ3MֿE{ X^VwdmUCnXVdZ4-ccY,[(]@MNg*Cz;`*4n*8E鄙i-+%6rf ~qX}IZ{V\~Y)}n,ͻ{&Nr0,QHn*h[nFcNjvnU^^|v.Np&T\~nm]mo]7n"܃9Q|1|+9E=T8888888888888,Յ|q4bvn(5%y h7(=_K%;믔csUwٽHlUY/v*|;}z=emrh,J@yy; V 9/ر˸M]"G#Uq3KXXrHڰ[\櫹W%DO'iy13أQy4-əeenuwK c{w">Ӑ\T8ZL+߃­G$ẕ*5{DV^}"!߼u)k6lvyRJu5c }hY\&70{0.GJbZh ۞%hkcVb9y3qe}֚lXQlX)i9!w:p ZgnJ-Zmfjkc*|7jas ɺ,#s#GdlIӯƑVW*MJݱ]_ᄬ.f5^Ye/-¿{i*ү? <, 7A(xn-9SO܌8x,ǧh7㞓dž%c-`~Q$3ef؁BPhC'Oƺ:n'\jOc{s#G_^Di'Tb?#"'3CU͌+]VXlߚ[ .IMVYØ@HUcjѿ/Kiω^~7yH[F#u1 9ŗlXF:;M]u8#\Gؽӊg#L1C̣qWަ}7\$xr٧7[[_*G]FzYMDrj,q9EM\11|M#d;?5j3T,s1thh_jy%NԶVAZ*^VHUʼԴ'bmPcxt}G ̝ڵk^/DКSrqDȻIy\e?Z ]1ثg.%~ Tag A}qB>~p%L6B-GsepS(|^ΌMOՌG7olAe@D` 7_%yӖ)hP45@/!jN }ESD[^hrI[_'c)UqזU'DϦ3%eY+vn?9KRXGnN ENK6A<¥h!.]sݕxڈJ#hĞHdDgX r UAikKֱn]'c\;Y?hLf#4($)"Ĺ:L3|69yD)i6?n(׬="{V[jklu[41轡)6,X)'c$9Lz$ >_7kEOaN6oe*8T}';)v(O88ASLvYU@r/Wr6Lzu^5jackB2P#;֖kV)UWFԑ[-ϭmg7sI_#浍ZKzPc*n c/Z.Z i7ZljMdlcq:FO"azGQ=m<`k53%\~MٝKzfS2IsXoݱdU$|8E>:dܒq̊cN;odˬbߴ?c>x_rh[ay8G`hf U- KU$Q&e`x}Elş쬩 NY{XfWdz5zHUF-a45Qktl ō|~YߛZեˏ$=ՖK2UZ1kNg%uF.g4E?*ɽ`EzuQTx10\+ݠ1&{3p;uT֪3lRߔF>} 6b7gdnW@sMU뾜wt˨ a Af,5*׋՛6ml];riݳtpeW<I,3QL+k#ܷak-1]/z$!M~dDmH2gr`2,9VEq4Ygu]aYZJqU$WfN֧LEE^EӬa[e2~ \F]!)lZmr6>uLTjT9%=#SR͝deиDEE  ^h'>\;p˼1Q_zä1`쫎4F܊n6ҙDA9DM<nJ90lv/ ځe?RnLV]m띝'+>o%=etjLCJ-i]xՓus6-@$Su\s[ڛRzUviWm$HT.D ? 
fEQ,QR[ܹh!rnZ?j~aW-es d-;GaMLiWWCcȷ{~|U{OvzC_2v`?(o?+JN8"8۫ $H#.}$;^yII8 Z`vGrPVo2˒ 6]3;%b63Ū*ַ9={ i=7/$dv$ ;Q6"lrYhآvG)}̓F#FHX׮A`]B(N`a&)8AUZ }bܼ9<)6fqފ1{F\D2bǵ3ߦ,q.vJ洷$`$0>w>:cebkSجj9统7V8|O T5XR֥c"<{el[zJ,UGbWBGZ%jGvKq:8888888888888889':3`/j'_ҴOX?#*8A-#8888888888888888888888888888888888888888888/j'_ҴOH㡨?AJ>NcTȌo?NKld`)ml)ml{Vo`F}i wॴɲglOsॴɲglOs{Vo`yQ|}CZ})ml)ml^FTk_8_{PG֟i} [G/vG8 [G/vG8EoѦ,?Qp.9&??&?;[iF}i wॴɲglOsॴɲglOs{Vo`yQ|}CZ})ml)ml^FTk_8_{PG֟i} [G/vG8 [G/vG8EoѦ,?Qp.9&??&?;[iF}i wॴɲglOsॴɲglOs{Vo`yQ|}CZ})ml)ml^FTk_8_{PG֟i} [G/vG8 [G/vG8EoѦ,?Qp.9&??&?;[iF}i wॴɲglOsॴɲglOs{Vo`yQ|}CZ})ml)ml^FTk_8_{PG֟i} [G/vG8 [G/vG8EoѦ,?Qp.9&??&?;[iF}i wॴɲglOsॴɲglOs{Vo`yQ|}CZ})ml)ml^FTk_8_{PG֟i} [G/vG8 [G/vG8EoѦ,?Qp.9&??&?;[iF}i wॴɲglOsॴɲglOs{Vo`yQ|}CZ})ml)ml^FTk_8_{PG֟i} [G/vG8 [G/vG8EoѦ,?Qp.9&??&?;[iF}i wॴɲglOsॴɲglOs{Vo`yQ|}CZ})ml)ml^FTk_8_{PG֟i} [G/vG8 [G/vG8EoѦ,?Qp.9&??&?;[iF}i wॴɲglOsॴɲglOs{Vo`yQ|}CZ})ml)ml^FTk_8_{PG֟i} [G/vG8 [G/vG8EoѦ,?Qp.9&??&?;[iF}i wॴɲglOsॴɲglOs{Vo`yQ|}CZ})ml)ml^FTk_8_{PG֟i} [G/vG8 [G/vG8EoѦ,?Qp.9&??&?;[iF}i wॴɲglOsॴɲglOs{Vo`yQ|}CZ})ml)ml^FTk_8_{PG֟i} [G/vG8 [G/vG8EoѦ,?Qp.9&??&?;[iF}i wॴɲglOsॴɲglOs{Vo`yQ|}CZ})ml)ml^FTk_8_{PG֟i} [G/vG8 [G/vG8EoѦ,?Qp.I"+D&?H8}zp]B8bE$AZ0+:x6j%]L,y2XZvEUZ""$񪪪"'Wݶ]rM3nc6 #c>1ZsUkZԝUr#QUUQ99qpqqqpqqqpqqqpqqqpqqq]P{}+buⳍsaAѴrXF5;9.k^?EE2VeqE'sǜ5QZ'"9`GW*&O^Eh'G#UsE8q38888888888888888888889O3k;E׶/8^syyxp&88888888888888888888}Cʃiw4̽k>(oO*4Ceu lAW@\PMWjkd*X}>i-4:rh6l8]\$PE'y(8&xy8}$YEiY֜ԃ,I| e7iqn]:%ֲژ2{= AmEQkZVЂ?9zC$J&*77foSwW}4ozqYl-qxs_AYեl:ƃl2ĉG2DU`g|2F[EyO20q<o4GDr֝k/YsLv(z}]yk˿u`,g0P1=gpKASX5ˉ2VQ0NMɋ4ۻW1̾=HBBGK;Nógeu)篵yur9s-^/\ߌn&U3jN1^-UKg׼t 7}Zyf-wFuk4NBlCX]R$Y!S>yS_@o- /[Ҋ+m۪[b",n ;%5/z籹B\\ gqmWr^ʹ_j1ݭSLqäbQv0Őg2e*-Gvn>/nndMc~TfaWAY:=jRk&EziNS6ckRH"HsH+W,j;R#]=k&jx xkGZ;9!{UwxY5S$F|kqqpqqqpqqqpqqqpqqqp=C^ؿ myyO3kq!#_{<qpqqqpqqqpqqqpqqqpqqqKŏ4lSrw}9T v~kZ2 Yq\؈mYKUeuORRVmqVZjū1Cݱ: ,jl^ĞJ%l9qƺ?W2#zRiQ(ԑ(\r˗/r [xW/beҬ]3W3X*EwRUʼTx[MG UVbм;Uss89OUglj?|,=S'9gJ)GNMhĚm;:%u3vwDŽg훐TnoM3q$Gu;/?q }>ϧ읓*zӝ9f4}* ܾm2/Y=̧)dPCMs7!}2IMUq xpo]ɤtj9ypW!#VpVkfvBJeXc k1e{Nb:שe_TI a,ư*2`0mracӞG:HV%$d1ؒ` *i-/rl "Hmz#aO׽ލ+aSN zn?0p\j,eѡ,ڈ~uHdҒm{ii;{!yz6NzFvRHTĵڤNEҎ[jd}eFds*$&É|n?`xeRpuTA &*fx<~g,aZW?Q#EXDU5Hs9~h-l⃌sE\rOϮ'q8bueHJA:YìNH3]PKfK%xژuN8x`SC"ƧL"H/nͪy*ib'j)wY`}^9e}O/:v`KTo7Xbgq>e4-xyc]00D<3 ˮ,rǾˮ뾻뾻w?.9_T`mBv&qmzb=f!nڌW﬒]w|/ˎڴF(%Km R9<]i_Sg').A:)41n/C2n?<9 ){mF5w& ě(9YӒۄ .OHttPS r\N "-VEM* 6p}1uj~9~@{n9%g0+$G%q(IK#G/,́zczA}܄~+Q4=y@j:xr7᛬GԳVˬVRQLvgP\0,x ث)X-*VFst$V4;q$^LH`)9d~vxx]wΞ͗!sS_|6:A0W{_0B>rC7Pc#](A<8Vnͪ=qo-w~s./ 2vr&#ڵ]uT^tP)@ _W?k2*nY0XE *-`.4GġaŞj&b$)k;EMGDo&-*)vBWEqK܆$ADWt%zt=i,}'+ͫY_0 غ,e龬&=,bYƥ̖Y&ے)W5$bq-D.1p)>.e*ݣk.-+bGV5"{}TXlσš>m|(eFREDr͢@+'HZlU4ݗ_ձ 2L3U6I7j$` 8ww|8Ԟ73.S5YhL2SM:AycX8hdr>뿖h|ۙAɾґZʼ.~?_X3(b,WjG(g8ALY,Q<.㜊Di  +ssA R uv2dz^uU`ȗ@ry lwцYQ;{lf̂6رvMQX#HVC8̉~a6e9n5o{ۆ5/Zmihth{',ɧj%op%lty:s:5Fv,vMٱ2hr)aIj5!IÒMddMNr.;uo^Z@zy.i=k@a+<.23$S8TqvA@!ŴF|J[Q49ݾŒTz0m-]aL. 
bc n|bJo>jVT`8<cͭ07*[1gW#MC.g+͇VrjXt\F U^nVVL̞k5g5H,8Olz`Yhܤ`8D ٖGyn;S-~#bZDRPFzrl{9S <L1 q3/6"˙]]Jsf[6#09qly`=ٰ5#tɳ8Y42-허MMaVy8`_xTC>Ŕ ~՘sH#"l@qc>S6_KԝFF|F@/NN[!#v]Ydf4檈ɘx㙑 Y{7z{' 6p]3`j =94`MLVAcӥP3\~߳iɝR߫@,];JuӦq@ąz,1ݔw(0!{ *@42g߈j.!L} xŢ ͍rcՄ쁤䌜CtLAa `âqpq O},>f |?+ʜZD*^93ƧF,J5_a08 s($]9IMrc dGq7JdGlfgC $z.,Td޼]͑Qep(f:CҪmmdxVR m#Z0,1¬6v/Z m 8eOf5e`7@upAr}7UʝC;:Fmzuȳj PG8/ nգs3,{.ΝHبIFDR8ܑGF2>9KMݐɄ$mHDG$LE& %2&\ &%gF!?Ր0Aw'*X̬ȿCk6v'Ys1whaBƩ(4v$Ǥ|pM%uYҝ 鶶}[ r]3#ҪYK)6tueby%cAМ~5tn{5wt%QPz%0hB*cW'1^]8 7<#.x6GmUloTGvZSerXKnLbfH#D1J ]w YÂ*ٺsم}G-B+lHF]lNP+<9+Drzƣ'U(y(|@U4n1C35fnŃh&ٛ&MV[#lEK SqǮp5+^'PxӨFd*ȅϤB tOqs :f,mzC%*LjTYOd;)@l?a6oI୅9,(3R ~N*DYRޖv6ߖMB])-?pPxw}C.EJ9|$DpXoLöpwTp[myxҎнx{\I*gxȭ9H3B ԋ@!9U܅"}RJ\-<.ľ= Q04aԧk$9ʌ6 Ā5L9@0c(S5M%Oߜ[)6Y{E[ʶ mO$Ց`8C]AFG!ZZ,H {p}4J_!D#}yۻԥy ڐΜϣk&ҰoUvX,2Uxȑv-6jԎqI\cM51$:hB7;g,ˬr'(d;9uoK{!_k;⊂ްoP*T=+rRrF E=diC%S qw^ooe岑[kY9%s-!330.*<ȨƅM' g#&(WsH5+]c=`a#?C, d cV\Y-´LAYrpD>2J1}T" `שG)t lJ[ွ+<$xi't<^PԡAY-$*yƭk?Ns 8kkQHt Ŀ)QT'0mw4dS,m~yKXv3SMRߠdҲY I2iZc2v::P3d3gLQFc<$ϭqk2!5Qh6R}@]%[$|3~l.L &K"9dF[r~ӝ[݊҉EV|3,Is(OC ŚDͱPe(W#x.phv𭋃CgwXn/*<Ͱ|f&UQ祀r)1~i&H-(Zְ*Bz+ꄐw`ʫV ;Dz7(zAś~][%'@{If-Wz";^$t`Jᓙ/;LhҭvsXui!zU?b>JNin!!;54&g3Y΄EP, X Uh+ F/s+Q[ ׭ɲRuʴ&M'~{ˣ d#8]"Hgu!<yU}۬ݡQ.AO`=\f7)Z-'@eb.2R<ID };! AkW&?Fp\XV AcxL֑vǃpp\Ļ8r6KxmFjޟsw]#ռvߡ 4((ƥnAD㲿|z~i޺mtPy(̬428kp۪Б1/*#| `HbÙ ۵qv$њtW&覟k8YU?eTS,k<ϛ]AؽɥLETq0^~p1gIJ=P0P0l ̈oY۔ɸɁ&3NۼE'  rIZupg{U"_D랤x+M_aCN#'Mh?oC3ƪ6KM3)n̒U[3RMDad%KQyGd |-%+-I& NM]oח7j%k—B0WJdWĻ&[.YSD[H&0Ug^l7zHԿ27͎{EYӫܤ%XՃª Rtbf 1iтZFߍ,!d7+[о-_?隂y~ sb\PzKT|d_ɕTU-}&Cr$S>2(p!rM[p k_=4F+k3 ^CZJ=)f%kgXDUL0ӝfm *8;A/58Da0(,X x Kx4e9ihɿqb5*XïXgL {"XNkCQ6)%M\vcqQ>Y3}Bk2f#xd8L8I/IW_i >w&ӽݩ&$%}7jR2Q;Yr2L\"ݤ+hic-Vp['m,"m]PRXY4'ڗFZ|:pa(I rZz|, ʸ#:}[ZسlgKP&FZ3|"5s-*N:FCR^8LfajF'`}{]!M3PfLs8qgd?ZCјy>9@-q|eH!ADȠ]7IWtS]<꘾}a#x ҥ%m%RDfsD~JQftAUi:kALˬMXo; [V{Pv+j:h7TD$eWGZ姸*ݾv?!"`=5Mo75-WqYe5(3s#aBQ$ua&QdHcvǾ(Lcٟ~t^ݟu@ψ2Qڱz/0lwɥ}^6FJ(ZCW;Ԫ?=;cqYpƜة+ttDY܂MnQ2OSuMi|9:[7|ؽcmh:Wqm0UD:62\__r^q ]2n#/!:쐫t@zSb53,k+obFYx`pQ[dznn8 b!51&FԐɳ31Əlrq+f.鮝b$ZD\0ϞpoEe@ylkEaBS/m[JTd cL.o%ɤ9*h7z-vT|@Yrv6g֔-1]$fU#Ldp9 jiUEȚH7$ΦT~#QNd@R-x_$UnX,_7=Y'. 
aƱ}j*OFkVDhFz}'c(;|}j5dd`z4=>_2IrRd >EoQ1M7ꦶk1.Ҵ I:zl(*PVHdxH=*T\aޞ7}KﭽshӺ)^M$ait< PqEj<+:BëOXm5܊קY^Hs8bsj6HV/r/j4J:jv`X|T Оl*=Ş|B}&׉z7)Z-K׬ǸR]IYњ ˠ%{#2rsEfi{G*Tid$*17qբէ.Ʒȕ `^8Xbp驣nt7jƕR.k&J:[Bڐvuw)U^B[%`ljg =5$YeJF.Csƭoc݈E~‡^WTt1%'#1{; "L87!13{&mFn_Sd~]g6fgWR09pvAElMUUOX͹6G +۹k[{gk)QZL)wUƌQk>?CUHTd#3^*nUa*Ve?j|r.m͋̈́yk]OӜ1 `W6,ݏq]m,nBn9f 5Ok[Aa5lhX^ "&-ĕ^N/q2xBmd!>l|93d2OSBY.xcX} N뼱뼾]w]~Tpo5iIykEAvɃkSŒ7iIbIU@oRnjPB;n9̖=k3ԋ^kf!cw2eMvi@%ϼ6@NJz֗wųwbSyLZ仏cBMj# 5f$ .Bb0 Yii'K}) S*ZԒMuZݏ^AWء* t\")ϤR",׋HQve+B6nv]zn^n1saiu@ V&Q<_7naEƥ󾚵 JFz_^X$c0hH(7`\=ܼq-r,~YFk._u)= 3l]ZWGdK1dͲrNY3>s}Y~'SQV/CH6YVѦf4xj 馲 'JxeReB{4<{6O֍補 t`.MnZ'dI)Yu7 u}y]Ǥ~.*Wm] <1N`]| 8,˾%,Oًm=5x nS` -+#힋@6eN03*Y0)W1y'v[z.em]An)'B0vI ߬E'.9SI)Xx{)2ܿQĹ\Xv׋qs;2tl_LTry G0X>&nmYėNc_?X3;\iCV,=7԰>`FP (bɏ""hdUlpJ"x)Xm˹=&yI+Qʇ-OL ~^'CWu'ȣ<ݰ롪%f\·^Gm{Տ)v"!Dn.+6>aI4}B#Hyv t,HiKZPK!jy9A(s9\z)oB9HK\w}䱀4rUU7qbאCD Z T'>zDB;UfNoLZվ%^*:̰J>%"8'acO֨H,Ư#Ү\UdL 6yLCq~UwJ\סv"ε+VM2ޚin~VչWkE].#Lh;?mK`}íX&I&mBg5I͈DlbC1.׍a2jipw9ol|# WZ[rI{,vC5sBp,~p3Kƙ}cnk 2+mz5>--CM v}՜XQE^w9|LGL &*6@y+wd6Xd6#|G3˘jv|EwOz񯗎頵q4nZccC%tnb1tiBd䝵:XdL& [yDUK6Rff6vj1A&MO=PlQ$Ч؊HIL;EzMQlvQ;IzBJՉK-N짢 b>@Odox%Ӓ55ߠҩu*7#};m o׶^v8q=Ѓɢˮ"6ĭȎs*2L gޏX#حdE4=ԇp"bU0s0:OyVmO"1V]I$D" K5E.zAGv3/ގudd'x]}g*Bx۶Ul^df>HFN#.Ѵ}|/_6͛#_iQEm:˷;d>$$~<|FCoi;42U# Y+16gG Ȍ7~%.Rc]Z6!Yj?u_०^Kjaw "bicDЈC]EJGnZG^ F MO _49_ImEd|㳯1bGS%R8ELQ=*npv9~2`J~(&=9XB!AUPaʢP}=dG@85AO\0YQoxor=8Ӎ%u72 ǧUun^:ճsL@Q :؜2Ns햾Tڛ8)]%ڐqcs* }ۥ؇ f 1'7Ck]Olx-ߪ9ԐbHJXuJEçdO9&egN2dwFIy\D e aꄲ%Mf-ZJQ5/<ߛgz5\/ ܃m恱W5t]߲߬?7t 5]ҘoimM}wOce_VktAnK%SpDJz;FHimU~n@C:Um.4kVEb8jB$ I :S$|19^HH?7mʸisj3slEus~ڮvX484Q=9WǟXIѓzTw5az.vF"K#cdUxW)tNCY3}݋DAf["ĵ 5ݣKi$Y84< 4V4GpD;w`i nC>Un6ֹKhn脒yfܐѢf)4.Z{I?K:j%0K'NK4;ݨ=x=JRNFX6Inv^}R8aH|[j ]'>vIŤ]Iv|5v>ն5GHcs9Ib2T߉3~;M4>:El 'A:YB+) ^і*Gi&Qx||;o.C-Y>)\Nٲx4 -e)1S-SEe"6褃$M\qܼ^تr+GlHt2F ^.\{R:`JȑcJ3HrV>fe8ߘD#A2oh gyȠ5<0$OǾ._ʟBfqFa7hE,gɡb`iIgOroK㶂GX0i*Ţw묋IXR$TSx:Q|ڏmdzh]:Guy}58kR.=AGDNBJ/ )(cWNYwqg٪OetzdtcGv1&|0eLbDH:ȸ l۷6]XH%EGz*\kac61O,Њp=ep0E:Oì>e;vw(ڟD(<)Lj&-N3\R$~;]G ܦx888888888888888888888888888888yi4 WҦʼNlWd2q.l٦墫|$ܷS.K,ڂڕĭ.Tm dا@co.RlpE7M۪DkSvOe(°!GFlֶ5E_d`~Hd>qjh#;Pd\"$ؽq!zӭ[?RD #)8 Ȼ r18=:|G$ttOW=FRcZce@Mؑ䲸a>ؓAK,5L$\te+ސ%T sLkYRY*/*ݐ+<Гm$px$y0u][CЭQJH~mL6C^"§80cXb ,%Q&tY֯ 3Y`E><_jU ȳkWHWjn=E?W= 0~p/ЮWްq{{15F6FAI*k`"1ٸ1Fl7L'Z_xYϟjƴZ,[vgXpRh]1EG@h(, t xǐSAL81Eně~b8J 勡ϗI*j +kg %?J"0tb} gɎ-kIs*De?7m.L rF,V8^ϘL! 
c9% C&bN6'~drimg6]f.kQK{^w gjV WѢƝ;I!`lJݟ0E}89+kғ^_je`vc }='Y0XRk%,l'0*Dfd}b>H@TDŽ;fcqoL@ĪRSz 0Ya,hFE$QӢ V5y3'|X\)RlÎ#O;߻pIy)8co1z62Svw%,ΌJ2NY7Pߓ!At3\&l[E??>]u}|}]]}uw(a~o*6&ޓ^)Q>> FV4zz]\q"%m\JGU6Aw$.ճNjѪtE ,nvW { 0n ,NuYLLFLHeKpwcrҒYFUpUѫ`PqRECٓ=xh.@yDH& ط`gJ$UL l>&VUp8fDx&'ud&t8%xP"Dž6 m}xg&9UNm#F}d~i 2Yl8D0 eլ)()B池-kk ˖#*JłDƈctLH%|h?/DťHRH&f@c H1,yQ9li&.f]6YT<qps,r]e]}]˼rxˮ]?8889*ғ/K\}ZF;VEg!O"fqbqʆbYf}*n'x|qqpqqqpqqһq7.oy.icܜ!6X>t>$]Uz=,[H1Y%:iщ0oGsGl rcVw_.4CvGB4H]J7L(I6ƚEcYnuI=]WGJV:$2e-ڲ,Y tQ6x"C@v>DU3W;YP+̳ WGջVJͫk3bsQE"s{^`]0Eh/ lɻܤ8Z{nEuc3n,'eՁ|h=4r9b_*RPMpRz;'^ qWT>CG4ۜ}@X`Ջ;ov&՛dųTC >3%g]\Q1RDǍƚy r,Es0}X\#U;FBǬ=X-}1htrg~1:Ħ.3n?5n*|>Q pC"NF# 6!㱈 0BY#)4 S&5KnպIaX\âG6\`5՟dg7; #IJ_,El*wjZI ^k|$r{ J9LG1$osM'.u#85;-kI܎2)1.K7E]vΪY \3(ئU^OV j"bx;HQڿtɉBb'ǂ|HP>*9 )/IEߣ-42pՉ^TY) V6)Gf0a|~_pw&{Fh{TnsVMFn`dZmyaӤet$4]S}?#QnXc:gwؿN w}sYUyMr?EPBօ}Үjl2:ӃEAFhgWWⅭSRR1QZka3Ħ,~ >yBLʼn!1d!W{j%?6و2^0-_[˷=۱Y<`*!AJc2ՙYWqZ@iYqTtn LٴA5|Aހfz\GW;?bu_56 9N} mcGpwEL,/^̳>$ʥ-S-l[Z¼N(Hߥ ',f2`ױ\F:z)@®j-Om6Z+Z+bߞ(}i4ILÀDMXڝZKT]]̲},RlؔuI ӆ:U!3h,{gEͼ |A^tV, YQ=$6+`CY9"}uQmL27 g-)=4k#N&Zv *&V2Zҭq.c߫PGr<8c9\9@ M:/ۯ5.ʈks4Vx_e6TĀR C+nRZCtWYYd ,j}UqS _/A?F}5}S혩/65.gM~wZCx#+%.C2%In &Dÿ?~M(,~ZɁ'"/ dQɓ}>h,8y3֟: 뮺ou_mh_]u']pI|G>bjj59ۓ^kmrͪZe6)4:m7IHñCSWM"1kjo~jyMϢLgu'5sYŪwm.qLH 8y!Z_zE^iHm]."O d bP NgrLIqc HʐE=&I×V_<]VvA8Y¢M[<7%~~+:a&˥'(qX|OLAgl~ŃC8fUQοDk<\ox2%js~Ec(ahT+"`nx́*CcdOHbԟA-ftR:[7,g޿G".\莯1]`&P'h9r,JY&EJu"v kOF`fC/P/"vzR]ڳ.Roj[4VpotQx5;/}~x+ g% &3aHL%*C,4<o,܋vg%e"aM>٪EcJ]LE녑JrALH 3Q#BF &GK >*c2i|dxv!כLķzzEEXScÞ,ɉ2$sdSE'Npb;?i%Xkv:ضkw$2w&4RE':!X97Y1:@r;ivhW@>ܛMANC5v n޴1(vǴ#<2@HCƆƇv|Hr>⾸T;uS)gB&쭐Kjm?C24#͏oc1hq5;Q3U88f6O_fe/Y~ʃ@Y5 :Xxɬ2@աŁ,a8ч"M3mc=]ӿ8@r`$36mJzvѶ jVFdx4 fL&fP\ fbnœAҕ􈿹֦mZ7cqewKcۋX:GH#!8*0&kqw "ꮥ~Z _턢zcxoG9"`/[EE1'98@sC%`$vnb6L{lꖗzo[!˶llk'kst$Q3uvNG|ଊY-^{[Zi Mel10$YPZH.8Dy=dZH<'MƺxϴpYj!`?V,Fmc]nӗ8:86JVF4>|*NiSM >uS\w{ų[ ۊg|5Ptxq~lԓeRxө̮VxSA+2PŁrzNLfV'eUg=_ڕn3kԒ&dlt^\$ߢɤY(C"eCZr 'aI\QZZm$$};`RN.-:~Txĺfi=hj&Qg) SQK[/ˬ*\\~lZi2\Ll#w f,wf6I βza\*/z?F!LmNR`}`2I!qxDLhhׂCے2'mlF_';-ԔnEBFء_# j ,, pf:£P-AcEZi͇Ŵ>eG}B~O#nY70<+[R#yakmm[ըYCeUuiJטHGTӔ#gvX; 棗5vvPG+f~َ\1dX/ad-1L0ᤌl(DWځM{Q}$Yf~m 0;cCN-I \.L lРϿEiٵnK$s&n>+BDl+"U7wǤ\Dbu;w~k; H!&"@ۂRj)Y|柘:a}=jXwܮ,{Oہ利mu <Ӻ~Oikȸl, }Y#N&>UomJYXeLBV/8ȳC8-qYA2j#ɵx|V,"|%Ph6DUF{kfYw4(P0ᇎd߸dX5ݵ@BUriڊ'EmzF#۪,cV %¨kGhDd,iƝ6TĻbfgMUm713 |#(o+Zr镳֪Cm^Ϥ2**2؀Eq]hRFȳ|@azǡZWiaSǔl4 TGGd`cˀ#41o\[Lc'pxu{gNNok6ꛯodikȭŹsğ&+ 9Q$XiYy:gs2%Lz4dS}+hXRIIHKd HR"H @7vV6G)HAqn+LbȔeTl] FR)Kc4鎿lVnzO-iM鎴DKPUDujVJ"M11@geڌێJX'v n5B5Cbq9Q(Ё3륟<ͨmڱo);F5,Mr5,n(xȤۤ3K._}]ẅ=`R{5i.mE-R2ЍDhN鱡Bk`0)aٟ<p㯪ASҧ* vt05ERp-Ԙ9$ JVԫI%3,R̴p|%d2"dՑWYYAU{ϼi_oa~]ƹ3"wziB"X*nI?MV,2>08s2NSM:߯V}3 ˵H}CEc!}Wɺͱ&:-Kd5%dPy`qCe +0o+NHH̊8>g,}>SHzѱY]OD0p%pBGUϬ|$㚪4_a]5J=Y.XuX`%0C}EǬ\1 :tNmVxu6h D8!Zӡ] 697(҉cC{Zm>۵a)%8lG]{9d]RvT0s=n_sֿ|~hItV5%>?U?/շ1azTUc՚CZ"\͝W흀^-RMJ& x% MFx? 
k"Ru'Ge}C͏^duk^8+(?dG !،:vi3#GH?k$n.JYg Φ0M*Æ9ZwcϺe,4oZF}fЃrcl 93g>'Lq{Ugţ k{Fb£VSXR;f 1I'zLMlj~1>2iYY25X"VW(K6d8`iMc$avHD)FB?Lҙ͍~%ʉ>ot֭?H,HV 6Ӕc7$1;6^62?6BFEn!b\2tgR]HTGrg @4l;`DZ66Ep9єPL8m\we;IH=CAPe/\ʏݱ]=VjeP<\ !EfLrĊI^fx5;;e;[Aş\e~(&AOx-5"p|m"=P=o"} d{yq[7pMai^f9d- ;{DG"B3 qq-crφvBdS7(ic<C>Wy}fk$_kWr{ȉjZ51` :!La}gpCWt8{Ktޜ7ˎk͛flMN<<;tBڝ&ZDmƉ,9E:֯<*ywZޛ\ygWY]֚Rsvʙ%0mm2OQAA I;*H0uYv׸Ty'򌆨l\QkB'@~gܚFt I»Ȥ]C ӹ1ghrULɤZV'u?E>)J0 Z59J[OQ]na)+szRN {y(дںWx?4۟auu2hhm[xnlHv9>VuY ΁LEɂM"P I %ޟA/L7:3zljV #fi lX!b~*$Y!P&/]N8Q]hd4Doϛ#6IUvL`S@ ˨dQ$z lrǝe9BHzműa|X{.wP.ތ&L~0{7+Ff!]Gt}q3ozEK]&784R533`.Z"1Q.MH~c(4S1U6Ҽuw!ZZ4-R49 / ճȬ$)HReYY; z̐7j$P 5Yr/G H,[MsH_ 9`XPdqv*MBxݥ)ٟ0KIU{(u!:kuF,s૱vzH!HXDp{ U>jđ&ZPCq,Z TB턑ǜ)Dv I ??$pZ (×*#q)ƿ_Ay3z٣2tnmOK49ndTz#gzirPSlƐ)IG|LųFWC:9 =F3WC1Ʒ|Of/;-2;&qQAccY *ET0f'0m^_Y}[@^m$K< s*$uFeȲy~MrJ>78YC7EZ~a./2m?>4\rfu^p6.J("Ja¥L-u܁B lsRcG~*]؇yYt_aKJ[B CEvJG=fjӬt?XΑ}r zһ(Y+~ T `.r'fJdGcb{{勍@h Sj MZՐHd$<7#ܸ,k%|coN[V\p i~T£>\êZq! e/%Y B pI (4jM(6ls)(kM]m?2ZSTZm`S*N5?#Hp&0e&\fMz2'0q:cg8D&~?3[~P*CY꽁hgn %;"^6"ٳHu ;E0qͿR<ڝ|}1W{f]+(wZ^,삞)ǿrU] (fa/ё;Iq*sm[S`%æ0PLXB)?;bM+81cфى68鋄< !р٦"==ۅS@HW{:tʖ`]4wk "B)tp'raUcu=T;/`O5m 7rDdOL`]qJnao-A&Jh HԢ]91 J5RzA3YwX?5{?p ~rz{[QMjj0uڕTȾ*U]J@.1J3 ]g}4^hX;ĵ8zkM w7X olэ)U*:Q$q'ØG7ˍI5\"#1~a/{8qqOm#Ԛ5h9To= 3v}Jrs\@q/b+N{WOi*?{,LxڍXEiTo-O[odPѿ$ '-sB$#q$3.1|1wc-ЩX@8=C]rg)VέV(i\b ^ dޏi̴n̓cT}DT?UnmG:(}sF@@lȃ%x]"1x0RA>lk"dQ]6ZU2QT_>VM\Uz;(=3q/ _n{"<:[(6mEdli4VlNzKwl iN &2jJTghhBɬۧab|1LHJ1''~pqqq_setY^jkx]8Bf@5VzޏE'ȴ ҭrW+ a_1Ս觜V^ka4/m:]uc_#M)( G Y7nUO,&ie[鍪Ǎs24-m> b& f7QUd8 'NK7ud;͍hINh5+=:E+:߹6H9۵:kJ@f(6YHH1ǫ W)~ytMsZJӱZ*dV,3PRf(HIuCIiJׄ}IDNUp SklSOu` 9uPᒶSH/7/<htH8*mY̢xo0Q75usW%Űz|սemy_al+J,i2A"I;4ܷTw3cO p>Qѩ/=]anD P^MU'w:ynʑ?IcVkev{ )LxFl1޷nHv2W x7VK, laKnĄuyC^] m3Tߟ.1/D+fk4uql(vԚl&lִQsL98 XѯܗQt8Jc]|-S֬U)` T)bF,O4]G&D$́ ƤM=tYz;Ͱ34q@6hE=JdȂV1ZHTCBJǞW K |. [G9FzK}gl >S/٨lTW.&Pen<Nˮ, W窻lmo=}PcLWPTןJ>%D"IEQ3Z쌧.`@l:n@[CKy0邀 1Vޮ;~y8SFc/ea }}O>aWM47]S,ma^ykOP;9IR* 6X_ɂgچS9ɻP.{]Mt׍F¹lB!R{Jɉ#٭E0CLW"J9QTխrM-oEA;xu;J.ˆ ,e}梹]~cCȴ~:. ֪~6brƚr˯˾jj7BIhegV3B~Xb`&&AF+Ɇ#GJM(?.K'iݪ#r9bLe{$R%&Ŭ[dצ*Xd v~݊Q\LKA)&-8݇ OcM#!TpYt8K5rC,.?aGbF鰰QA&hc}MM>| /(' 0܄zutT8RQ,+P SQfF4lF@[=G| XHhfM,-CUgƱb^k!.áYP)&R6$D&O_$)yKz$ Tx(gl\`.}`8,za&h F1LG!F3l5!W?w_e}@&k}텄')ʢ$t>k'5Cr>`uیG#FcaO[5[=V6߰mr腏O3lQז"28ѥJC HW.n5Ds/pzηcdWЋ]>o+j9Y%O<-ugӆ&Y"]gO_?7t!8If,~)G2g.%VʤIqX~ 2w/}]w_>[z]^}sUG.@ lj"r&ۥ$IHV&)Cq뤓2uڒ Z8<}sx0 jI(݂ ʉ YhYwxuȚ@E 8H~<[wCOq%Qʢ݈+/l"D" \W AEiqZ>]*oJe柈`:x9 r| B̯o\׶hY8e 'H4.X)7Xu89Lq/ {VǮ묺}1,Ah` =YeM؏b #YeI iYw?>?)5|fQkޯ%J?hWgeDvf(bg&*pKYC=/a3%~O$nɢ1ꚦY-EZbdFEv [^X4V;K!1cG 7xMz>$洮,DYU&O KG{]uƬ ٻz뮺pX+]uY P<(qf8PM  MY1f-Z7KSAKv8_ߎ473"vmf/_?^ْMPOd&P謟7H C:b9[q$QvbīWNVL`:M' 5b'.rVPs_ z! v(-L*xNHFd8aIGe#C)YY,6we__W>yQ\Q̋uiVM0sM?]npK鬒j r]熅]Z_wꏃ|;뙆 Ī<'s Q!&->^3 c1`.UsQ@ǟX8p <^aa!uL4h=V dœtZ2d4hѲxݫV-۷G E4q뮼ڽaXxtvm * ۔v,'{]Yu|!vz7gY]­"2ؔ(gphxU,C,?l9#8&1u#`c},ĉlу^S,ϤYY|x888888888888888888888888888888r[kul$0&IVOՄ<@<ȑr$D X['n,G)s $}5jz$9\\M5b A wѩU'zlHZVs/U X|O{[*u~H\Qqyx9E} j6ř6I!<(b&N.o(}5󎮳7*kʏVfA딖M&StҰW Jdϰ<` Q@*w%ȲEmU w^9{s)\oj]+!dZh9V(ѭtɦ$b ֒w^U 6\{-"*U~ՔOUxO@æ끔<-3pؚ d!@\'od]_>GDbYTyMi(ws)3tvXVio-Q=;VMU:#|Gd1gOع1?j mE-2'UfZ ./Iol"bЎӽm*!EAAԍ%*Kپ<6UTG%y,Ѿ ]mnhfmjٻL? `G»ǍSVTGAKǺsf.<1op6lJGE7 `=qXJ+Tknv 셓gX)(_ x>(.~E3b!]|P! 5 _cvy<[򪭶@)"Ɏ)[xȖZ:g"*4fv`nw迦z%U6[`ڿ*8goS%eA΋d*2}1'Z,M#U1"E.b7 pns/./6KzBW_. niL)v*Y;.RBMDB8(`L<$GǞN: xìyajܓt׆EZW? }Իxy% Iz4N~A_^mvj;Ŧͬ Ȭ:2ZQ:NSdeg-X?.lLqwjdTiXЬn{Sm_N[ilkzlT>.O}"nz34[2NNتR0"&ъIlHʯ uBE5VK @8\r&2Qɣ cnU2N7 6U&u/)m2\sm1VKRF`nTl_"79$rm>)_/ N'Wa"Q Jp̀MƭMmTa  Qy9;*1O>}of5ZȇH mlݭvR89;(Y,(| H)+l 2=:h}h`. 
|tꙫw<$˶g1Jڽ%O0dsi14cdرbL~|l[ ||?]GZ טGXS3vc9lXsB#VA|b@($+d<[ʹr}폨4 HobVbR-mijlͳB }HF͋L]> zj&]_76*KsNW#ǰuK&K0je dgɿ* jYGvbN r$_W9جxF'ʪMLqےXwOx u ̧\Y\0!_a Ŷ.d&?Or"8ff⠙PJ5hYuYjYeUUVQETS.QEOygywY}]}}*|}Z򒨷 Dѵ 0Zwa/!ȷ JdTbQG'ʼn0 rf*u6/;Gn6iS ^+rS3H6W& kÐ-c72jY"K5Өt3ѯz6/nLt Nלq#SIc6BMq& beE~+|`ͱe}!0\. aFFPJȉ ѡ:E ]g' -y諹=x'贆y` ӆ=̍åd@Y?~=H|7*2y*ـPkݣ!޿'d9խ~bೲb 02:Պb-5#Fn>h^͵L2~n饑Y@bQgfb`s72e&NF+闳fz:wWj;1ѯZbdpTtET5*+Q*ͪJ+=9~PW(#'dpvԞg'|eپtjȠy*:O  ނEUEA6G[7cb4bD$5A m(̅I $ vq);?Va:@Y3}&579aXVuT āV1(h.pɪC3~!N>QtG`]zߧ1k$줬b=owTQs#Tr"D z?ZEo͕ ZyRh2i%E)8H0AG]ΝȇBL9-Tlw燪z{[e:mVJV%u8xAjy5mtQ"IG"s]z^Fwkp$⵭,iLIwjtYt2,+Od 43n@?8=LҘ~֦lluLd. s.xp4B&$LAzE뽳OS٠o^%31oz,˒)dq\b_3P;,FO!pn~XsX[DݾR.@n( y *Tgy.<>RI4|B3$Sq?Oo~--Ij:kQw$xN28MCe bf!*L ;l;gӽz\[0vvڑYkjS ֑:uᕊ.5vq˥\&Υ Mώ9A{qӯR9|lECX;g쾄ى'uI >.=G:F4h(sD=m}Քy薨}aWet`+*ka7‰!S/Z1ȔY"ҀvuYwѾ>oc"y[Գ{9 '.YJ0)Y#C%Ih=J@[>z"EuHpZ[`EY,ݎJdųQ4î[뤳5AޒKu=ea7\ X1KVR7ͦc!H5W0ɗJ*/{yxzSܶRl6-B_GcŸb-lo/h̋#U{dvᴾ튌rfv悩w>>Nm+KVo%r'yxsnuUNL_z_+S̢؈qRʰ|S '(s1j7 "R f Zh2]'L7EG(g8l3'MC%OB%ll(6&b irv2 `CHd!X'4E36mtϓ[9Ζ6UѺ4 }q<s4H-Aq/7Ϸʲge81 ٿF"zrM b׭:F3Ru7x%2-:hE!59)G\cSkJ5+YB?Yk;6W*XbWlSQ5q6c؟g& ]?:67P @!BMz$T*=B u4Dڼ6㙒u뿪Mu`yf6yZ!al- LYW,jc:*ƜJ;e҂ŤM|*=!Q@b%+rJƬDȈC1- @" -СN>7"E֠XqqqWސ^2m#B%;Z 쎾ޚ*(Xb,^Fhm:: 7.P/daa8v;4(uitCOZ RDK >iOꂭO̤s516#i{DnK;ϾAd|vȈg =xI6C!6f\r`E()}/Gố@$>*V?FlGGZ׉A<ͫg,˳#n[Aۉao;5q;n6.gvӴ٥U]C#/t)n_$蓾{:Dܲ /Z5J+_th) UJb7 I<ߍ>4DzqL:e 9Pj'd|-qSX?I+[HE}tl%S4K?>mcq-&'hiF zw5 وd r iءlLemB`ZCǝqkҽxؤFr"0N bl|Ised=0wKtnVE2](܍ϼJjF,Ξ? %q"H2gBE/@`K ݭ/: ~7+iX5~ps$!3(e|ds{k]^x 65P>uImMPb傟VOYe&q{?$YT^sq f/a{gҍ3%jA+1%&Z T4T0i±YEjVbw+Z%/qb wE%,Soƛ6i%xjN0,ݑrnHx$w ._;(rx$'t}> iex!mѣDV @zJN\ff[d)ETDr} Y$".VyhGOc*2(kRg)u5Q9e'jp~ *H)^ZPpiASq#[_(gM5Kբjd&7<鑙A;cZ"Ytik߆zPފ+yi,IZG5\\F?$\6ǀBI n srMLS~[ZQ)+h7GĠQsBdfPleid8āKT4%(6$/_Ck943?z65;ä* % ˦(ע*X]/H9oWWXk1&QZ 2 &oƬvY!h!ME{lպ= ͯ.)/58,[qWu?e}[좤"D$qGc]q1DQɋTrު ?GRH _: VF: ::1?G풛 bVs'I\}ZҪlEEl*@H"r3&,L@i!QzY.Ydj7vuS](詂gyrl7]$ȖEαjPiH2#"2EYf EB3R翟>kU*Հm$ sEJ4:% "~1u).P `Bfs:NDgh#?E%yz1SC}XI9dcؐy#* f9xg}GjO5W3ϡPsc㤶]/9AQADGn˴!M0KUn[8)Y7: M0 v\eflх! z0PAOFɴJąheDp"ˮϜe&L$݇()8bx6a/0Uh݌3 2Kj|P Ml5q8.O3Y_ȱt9-8MS Rry_'Tmɋz_d6/DwX4)Vv\=vlY |,qcFlAX4P3)0z|J'o{߻CI:Jlaҕ %h8]z7 0d.fx`"6s2 :I#~y(;,ɜN2`.nT2˾E_h?mдW598k--۹|whdy,QcEN.ɘk4ĘA0x!J$*: RgTcӯFı4kr$&wcqѹ8xgeOv6r+޳cfi+TG$n6Lan^pFcjt(#j=MMs\zɳQeaFq^N"9 ۦ%jL~,b lQ01^L|41r/Um>}|< <϶w 80QBiHĢ.5e!uMlU>lApnX*򞿻<xO\f]|*q/ٵCj>^jXأ3I׹'Hf%.=A<_6x}{dhYKI" cl Sxn,nXro" ہݳXYET`j x jD. nx1FO_QJ$0&,afaXl|ۮib|O4dŸv^x$v=t:K0`5YBC KFIr)`#d?#rԍr4͆1ʟ%n,Roxb4ͷXI'ፂsۢuW:vSsUk_X2}{j~HQs~fJO'-N'մ.+P 9eF;O ?F7=Iy8MOGǜ޾/Dx }`AIëZ>b6],692H.0rޫXn43URy7f*scH!N7k:{4*^Zŝ1i`G3HN)"bK=%_ΚiǤ3 !ҟ?v& rجMUdr*8PY qGwt՟KԿ l{%0H(#b4 FAXD1]Μ>_9{T/B$ȅtUVNl~d\H"bH^DX30=;AvݹI.EDL h(l//nD?!t U$MLQ ay7me]7CǬE _$Rk+ڙ+JwA5[7h͐(vQLg6D!n #`6(,3џ7kb̯'ɉؙOF!-~"A":4LDI&D{X) x}uk@˵în!8ڽNz}IOq%8iU6M_DÌ ._S" #b[]M wg\֗҈dYN#D%GHaڊPgڨ}^_^=vݒm/g ma+f0Ű [QR[ Job9:8wC[ltWlTK7-1u^Mr9v52CqåO>.˾ͷe(]u@)4+_ɧ4kWBLTuVݲiRsK;~nT, 8N7ۺ4v[YxP6c`ߓv,?eWZy ,l># HL5v0p&BZCn=vCyuYv<}f%d(% 2 &휷D2t\Ʃya>!,df@`{A%= @7+ ,X)1p0%PCH7#Pު;Z? 
M?d*NX] ZQdwT,_@7o]`wܔluahLr $EI,L5hT$HfCUrD|g\m$F#[pD}3k` Z)Ɍ0]_4e}t>Sy:EYɿ a.,OzI1w 23ؼ4ke.z;A 0ޠ~ːZ ߩv3ɣ6N;V]+ov/vP1ʢ7m#~^)({&9ƿ0T4[N  ,8q 'ȏ0Mi ^ |1ѝ<t|>d֐l4S9 D`~+}.+BTK{* kk{1tHifX.P|f]a3f}B*%FY{_o3~kNaoI*uɈ4yGBk;͜汔 9`B#=<կae˹|dt.з*S*݅ijZTk-Yp{1aDTR'_&n?RDXŚABz:u x`VCz0zJYZ6ʹ݉l b6+ iV hWBf,咥7)Ѵ>'T!Ͻat27MSHK)nL+:!Q"}t tFj57-گB6>H]5'bКuC jP{_Af⎸Aӎp!Z06Ӯz/㖺7gY֓gja؏ }=dgO1*2,cY%~,q΃l& :{.cOVD$uvyZlܖoJp(k ɫw[rQ!iim )eQn}xq#ɠj\k6TSEXbML")0Z:e}:>rQ(a#*#dB?AbO^xqUif=uҙO,ee=}U>]_ QoOa34֔'I+<WPdg}0[R%s 5ngpseSru'+ĥX́4ˢ"!^A UW}=OzGޥR}Vۊ6k/>Eyj2#_8D4\# @G$\fMM1Hpsl4KOܡzؽ?{OO[\)l (8n, N`}R.E8Z<@Uɼ F|V:+9'Z3hHڢ1 F53Z*ɩx$aS$&dv8G'XgǼp]MnO],9Hglb?z˦_{?VP_MJmλDj`+65!ejq1<2 'rP.oAC`AHfX֝lE] y蕅}5#g5׳˱=)w#2"ׁY(aV XGQ8w/ 3;` r+^sO,.ϻ"y}wv'X rˮ=}/F'G>3{IgO;&ߕ r&z!( >=u~5ՙ`PIrZ]JZAr1F۷{3k`h :R<,,˾,R￟}}}}>}([QWt0entbfZ=G9tӹs*)%NܑŚ,jZBYXlm1\$Yt⪸f9X[\^Fc;]˖>>⦧lK Z"7]7iADAV"d#cāwj^JIƖ[[3ņg}@GY7OV$pQje $F'NtlU'B;\+2b(M'&h6x׺on-M_Y]%ix(k_Nݫ- xM$i0혟bFʇucdȖ VtyN8iv6\*He`Vǚmz`n{>ia:aHEOp)/nùdAKL:`{l\ aۄWO'(]ћF U͡ޝk-?1p~UW; TTDGAb􌌛Ro5G{[ݒ#,IiP {ְl84Qړ)݇D{ē4X(Kל$6,p6-[?wlI\|ʛuo\KigҲ0R1fGۺ(|è,唋!nF}B}'Y4T1X:<9?nK-/yjIK7EVSj%cx-rTTEY,rAQc 1JAEq( /pqs1 1lȌ &iY ;#w]``~.SW퇔5Ph)"YB= M̌X32<Ѳ/=Tᡊgc:ik Gj:l=$j[uWOBe`M)^.dW9t1,W$˷V.^խ WV&U."*R7*nя3hͶx+?uwϬ~}~.m+6gL챯D#i9̋ =m 3*;Yw"P:a1zNS:2/͋ZWU"*TλE;@$œlܸ~Ao[dHvp*Rxv,tB_5`Ԉ *FM1qe{ qȁ0MJ-~SEY`UУH+&H+anNY-7ЁmU[Y'nLFO'I`x0v%6s]ܵtߥ?==eyuϼ~}~.߿_o_ow5#v#E}>V>RVLF>MEX%(/XdI|tHŻ}o_UWzQNJ\k\IJAI~`!ӱNt8̖I2иT +Xr=W!s_/%M#2L2QdE7{->:LϪ桳(KY| .#]v]޻ol5^ۇ($ iJm8lCGEgwmV?m7{iDM}kPT-Zi8urse#F7CG،⸵q@O9*ӻVR)N+ٸisOf1exm_uMV)ӱ %XGRIq͏A+|s":qo̷wxN'QԪGY>L ey2S B%$JFc)w$s9$Kw-|}Xhޖv4<$1G+T'++ xN)#I,x57_̏gw"SV/ɵԆXR G,Jųc &R0kj]JITƅiʯ߭tn;G/hF͏0A9Hx*}ݥ^ЙgYa) Q,cI0Kr=:߸?`|wrɎuc_P{StҹD4cdD f-DCHdO`7ڼs청+Z53lNQr7*BrcC.ݜu4#B Ae㙁hP=Pi6%cy٪)|5gūr7N$OY<*XRoNJƚŎ̇F=%'hm~N[{wpoN޺.BcQ4:4T[~>Ut$ je{5j:);o|j2G"E 7Du!0MW'Av.tZoԬ-ڲʒ{蕫lXTa0J׈ᢲAI%H8Hn\͂$(,;nY~Cg^:>d:$ @70{B,7cG#meeI6LZu8Fg]`Pd87`z mJo("@Q&ih"ų! A&B݈~*d9I}n9_dk[^9 ZM1o ?J1Y*L4ɔBc:̀@$Eo&,I-zK;+owK\{qʞrIFݙUI*HƋ3}P VQ6RB3HSF^9[8AtY'-]"Q]TVIL;U<8쟏WMyw/N(NRr<8])pg/OӝBT-m'9db"Ո.S`rlJCa=HºO7O)oI,h{V`B}? CTgzHf e,C s gDGicnquIBvOy}WtJhl_m_ji ^u~,Ib]8N[H [yhp'VbժJ{xO}y",Ql?.MY<օ00-h>ꭼ+NśLɹfm]"dXAcD tevf9 HHx+G~`hV B("FVVB;&c$=[S0ɪ%IY'?UVGH-ݩ'wl}Q~/ĸf uOAWܞJ0)>7#t?6Km+W4̺U^}TQm.Y 19#RҎH,2ar!>=귫 7_zk??w VܻU{@ |]}-KqԄV*UFGr9k W}^Sm FXӻZ*e ,>7 J̢P<`bz-oʲćLfTrs2vw8A\;[̲54+ǘ$颯Avi;jI& sXx=_>~]~T< gvꜨbmؼ pĥ34N($Y}q|5t *%| Ǫ'k*[M`AlnOt:NᣕzI|\ϼd]|r m~iv Om6NRັ }Kdg޾DNbtkv7vܷM_Ju9" `k j.Jc5ə!Cˇ2Cq'WΌk" PqIF_T1ۢbvS,P1 &c#67[$-If=c_~[r\١\esc%1*n|{=a1MN&.ێ4{{ސq+BGˍvRw.PDAbKJ4$ey$4mü ]Ӧ~ˬT{WqqzW>V:|1Y=瘳:ُ-/Uno%Y-sjYet!l&@Fo Z\,b-8p"yK(˷R=o֑eӮ-0Ҧ؍z=^4P9cfE^0ےi}'/ sTפoN[If=\"\2q]DŽ0DVFH1"X]*$ @5̅hSYzgWA(9SjhjdBD茎<7S xXD;^emSB5CڽcO#`Hd=\gQË3@AAG%5x񪡈ze@zC2HBS5J<|+hIr]3~e ž>qAr54Zvo߯u,}b7:ک#0abSKrPNjK2a6I qv5x߰;>_4 "KeڻL=-AqdDwY֢8d|DBTbpkysElUY9lu TEi9~\rcsLO>O&/ vSOUlwZ#_?:ĥ:j+wSU8b7Ţ ;bqJ_E ۝ROrzv =DiFB&9i,^h T%L@Ȉ7IG?5%Dx}+ctef_ff2[X*M0+KDq ɱ\nL&rڳȎ86:O<},ՍlтkwMl~B*DIāx 24>J6I˩1] ujٌ5!׿1KH G\uLbm<.sk%lu@D5%J!K+/3ȖoZקp_Hj:"o Imj.elº#]ެacy+B"O%v 7aa\<Q@H ,)Zyq/G{NGUadIjDc̆7. 
?AqQgW8LG3@7yw8aWqx嚙 =t)Ye}]a8Y|Bx;A۱]|M$ 3 ,p97eN68 o fb,,tc(S=ļ>kb7EK?>e)r"v= 2n(^Hj8I|0[~>h-}ѓk IfGeJ\$!&h8 .+h烺 %"; `|~Biw!LGn]k$ buu)ZD^A 5lC~{oR[dnǛuiS $6@/\EUv^<%OXRgWٵ##v tPQ睷.m*N 5\#hldIĮ{kXqC[A$NTn):[Eii6]n&itm0h7d :@ؤٶ6]/PzprK%=W^ܭ`k_[]#b1z\!!k˛%] 9^_k]%Eql 3F?d@ADM3Ymo &9,EǡQpL0fgc]Y&j1z*^Ss0Y3&,XtuɄ,?΅ lWQNQ2:YzqݵTy'/Ͻ_:#%!4`8d,ȹ1 -X#Ѝ4vԆ*kM!׺_k1D&&KzMy\SLEM~ݘF$cM\`*}ӣޠy[f38m|֗nڗ\jE$24ITB>AٵVxdvB'~(n5W "j9$1N, ,*g=%S:g%!)Iƣ@zKn>ol;yJl P+ 0P-~{~VK Hd/XFyy@\`koK=i<f*\Dj%pB#b\dPKu:w4v-؅tzNw; B,S)hЏG",B!0!{-{sS_[#eu ^\,2,=fx+`a Hw0m:Fϑ-&)^e`3=[!ը-[ iNOגG3 !eXOMD+cj0dxm3DtONaViD"z3gK)Ir7E whR,l!7lK=Oֽï]U[AIWW fIqJdE9a1IYƬK`BnH+7 b*([G:O>3 i* DVZ9:5Y,ymb YӠsK!(Jݍ6;p-rf#IJ ѳ|_Gzݻ1"98Hچ.K٪?n/OAv{Sr7N]h&wei wnڤ,jad<@6nX*Odv Ζ4_ud- jh~.))lvnFe1qL#0xC7!DW`gLVG4+S~8źC}\v"̀%~Ӷ.>=w1Ǯ2?:IvLqYMZ䜿WVϼä(5\W.MHe]asZ+Q 6s_(>)+AM`"EHRhx5s*`? q䞌!1t89䶂y}Q)bUno5?[sDp8(Gρ7(:n% T U/M}pk) :Dk+f./}_6&;pRg&LN9VU`%3 $rGf5>``i&ylػzu`mJmd},{Gܩ!ѿl/&}-x*5o"KҞzuqlAbcjIҲbZDbpWC@Fޓ1&;ް:k%\Qso; oT/xt~ܕx9톹 hpUκ>dC_MQ;vn-XY|]#DUԧkB"[Ҥ\G{#Nהŷt|9;{]D.lN[mryE.=h.<2D1TDC" Vjn<}=8wABH֯aPsϢ7Sdf%^p㐑^ ̩ي (e.t^ol@C$>yywbn{3,n1a"Qh h!$U'<|qQs6̲|7X3ޣln]Horb=.j`BO+9t4oEͫք/"*Rˀr/hul*}eƐRCKxWܺ&Tʹّ~tU@Νp(X$YC}7d7KZ쭡Qm 2g9* |0*x̃#WIgi:@x4c8϶VJkfμ2sU&~}`1/,s cG#KKݬk45)z4M=gh z=3Ի8UUht+;l85r1ۥ \gG"JAFa'-){ퟠVCkBݽaL*7mJf< ȊtޏZSw/;uwEk-;٭r#H+H/ЙЉJL9 $a"ty f6Wi{hbQHHIEHaq6%h꫇H@ĄܒsYwJ)yR{Rv+-}p6K弙 FxHbʼTGD&}71/GpVV]16rʀ[0gΞe4QMѮ2jEYXPumEjqHjc=fk7=zNy*kƣd?~w&9G@Ɩ$HgSH xGkw>{ShN,AvLqjFb62"ޟr 2ԓYO,!?-ÌjGm!z&{{տ-۲;kc:ݫ l1d!Mpͼ< [tݸ/G_ibN:Ad?*_kgjU'n8LAd w8l10`f݈h'-qt=!ݍ^oFDO;hjb9rpRDǪ@ b٘&˅ĆU=l֘惌Fv/x,ñP \M`yGfX=݀+,)F p%XYd?i%nK#.fLUcetTPX+2-^p"lNKy9d$#Q$;oҬ]`rPRGWM QTglڄIwwVWʰ|+X% ^I,&ʢd=? 9TM2v<&@P:{"EbK,_Dq(EjPpg[ 3oMI]\׳Z{{#ZԍTf^01m+xRn(AuCz@21LKmy#ҷ;4APG䍆c5HOKJ$F!w$آ7BQ7Cߤ|U2  xgҊ6$%ʘk_b4r_8xsLJ `0Jk7{6r+i_>gn5_c6FENr~=ߏ9־o>״k6ƩFJ>JHLڸw=0yM=bQp8+.`[w&ߡk# sڄ r2Nv1]c13v;ow{/rֺ݀Aސb[2WL;'}1GXGs%.|.77<|ѝa[PG۔+^(l*6H< 8Erc*0Κ#Q_݃x7&&jIΒլ:tŹ"%0#?5܌L$D=yM;ٝ7ELFh}U?dbc>ADJs vj\{+S{1G+Q?c_6xq(̊*V_*B4(XGN@1ͳr=t~>JyciXګPe񾻯;gmVheU[" A -Kl5$M3,`P(fnG;nxk> RY=ףA.@8TfnQhku%_cV{uyS>(j(eza% vſ0#!ьN. ࡣ 7@7giĭ? /?ryzPWѭkȎSYQ=BRKȣX!'GWPI!D 9Nvr0v ޵U^N2њT]]F fZ2ł>V@[b6Ȕ8tԘɤߊe|iY@"~F}^cn !AIsɝ2b* Kd94Dx_k4{`t:L26/?UJk;v2$&תj֢H 's+ES(d 7Ez/o;w_)ΩIƒX뾫`A626zvTƕr`D!RG=zHp5 O>#60)p a1@В! NV\ X64BM@d 2Q,)ɓ"X ыH.ەeeYyۭI8024g jBQs̜ ՞H1P]ymn|Um7ɬ_>vF=K!"%X z=M=FqCr)B{Cl-16l \lFL{8H9f s$)]{v#xUZCa*[Rݼ훂'( EXh.8r >jm~|Ҧ4-c0* $u]Dv&f1\V?Ӳu6Gڹ*G"FQ]mͽ]AĴ@9iCro$Ns$ < $r:V=NۊXm) UYJE>JUck=xL x&5!v 5_;ЖIn[WDV]_ơQ,:>5SR4N 1_ԟb}<xx, Jg]6HhʢlE@IٰPS>*p¢;3V zUFg=u˙Yz7G/fj]2YۑDDu&'xwӆH&ت/%uV6׊Y=}Gխ0o UU8or]; ysϜb?O U&)U>\:ymۯv6;jpHL%. o[69j*Kbʇ4p4-Zc;"9em&EGf.8 { u_0_;[:|Q֩xsi&NgYĦ 8ئDɖL%RfA dM~cp|pWY)=qB=s5{׍CG)iY򿥇ZNH,^HR7RWnO@bKyͮԾ|WU_+|/lJ8Zy,ӉlD8GB 4 M@ݾQF̓o*ԟg[ toFtFк$wjf9yb#tըclx?陙pc&{bn=ڟC[Q$G&B6k0aĜquHL+b%xnvQb9)J7vzVng[͒seο.TOU1X,R:$m]~g9-snΖhḞULPKG.2 /",&>(0=%dt7P]O Aݻ-^(k|/$(GjAYqڅh4lb`ЖÊG^* =XkgmtWNXy37#i*$Lr l"ŤQyL'.d-_6'v[s WhdcTU~nmwt l|ɮRR-.Iܝ+}gjF^2=L圲F"&v&S75%1#3~W΄{֓XB\Gc 05Lں$Eۺ-}-< ߥ:AW}F -Yu_ )cX>r7dkoǎ["c\h>ŧB*KcaQf|̴TrŐj#3,%)74O> .uτ\Ӽ4rk&` UOlbOYMiu Jc*dK"8N:<91;_WÉ;UU]/LrqpX KAAۃ$d8YLæVD88889J>lzEv6㫶dNkj#P`0{{39 ڣZɣX?kW89KzD֍ghMKJO;$HU<1u'qX2{86,+՜T@SmF9xR>]eFk=cB؉Y)Df9;Fhte,ӔmJ9w%,cX!c]]wqzcϜ4}ATjo8mu'&ȋ_6=. 
8}@/ ?ěɭYR9Ze!xDB7x!A8]Ì|r/n'}1]feAMsJ 4RIXRyAˆ(#A[6**EhA&>/6X綰핲.Qi$-x<ب`&)$͵kUWG aK)سjfiCS2"ЉZ_3PxLq&YpŋL$oߎNa"Dg^-܍EGul8qyXt D1uIXUhR/uﬞf6-W8%JΑCJؠVc'c,.h45n"LOĤjFjw]q*_?Y yi+dmlb"h ZUlO,D6hb^O,h[,ɾ'rtf]' y3H{h*oN1ه[40]f ஧`P iCb%rg-#dFQ>%˽ qqqpqqqpqqqpqqqpqqqpq>Yad FEx-6EXkԖf@i&.fUlʷ]%S<;hyB-1$Q"Xɪr2"/kM\Քe,EK ɷ&Ń"l񲄟:o56V6>cW -)e(3/o]sqO"3v͸Qug}fÞ5888888888 eRhq[ ݘ`v7Qre 8X;E\݈65n|oK"}YխzuT#瓧cYJfN5 &iܪ\ =XFt>h_B!oI1,Q}ر=8.Sljlkvؑ1 F`CH2wgX2!iV?g+t[j'lR2HC 7ZXJM1*')+Ra$+Af$^HITy#|D=lzm憆l(-&GJmCav͘jڧH&@ɑ#C\_Ϧ = $ lB.sJ25٩p/ƍ<vcݶZ0h\~4F ӛjoV齽ǩG]֫2>YJ|T~D16n[fbcZZp-{(6/=jL7S64{ 1‰mfL*) ntfMԢ]pDg{[ A_5?nB,ybD#Ymm+{nraf ޴V  l>)J ߏIa}+4VQ$:aeKgF kkoF$3@YîșM˿m'syk1#M`{q/ҵpZrכ46#^I خ؜ Sһ^bU5bm_ w]wXtڍw] ^Pk%FM³Mzn2x)+&Kdҽј$2q E=&.cƯb!΂/1fPAFSQv|3ItSS+n׵Uvdڃ&tL%ΨfQdn$1G\&] p]AŶY,V>EUlдb/]˰]Xv^ͼxzmpY1]s ?dZu*MH+øg d~ˎGy34!ԙ(snא6|1y|qxmUZJ/#v9O iiAh[ 4VF9d`$@YG~ M%=Ŋݍۯz7gP}9}%| b1 h1cj-8ꃜ,/>pSYxzm߂SkG,C{-{Q`Q =yvH9LFL1ܠ݆ np9y4pۂ\WI|QL˶e$!Է5QiI42rrO:B!f z/Ӹĩjw( p,N)Oij;o_jiAY'wɌjG`RoRzd͆ a0x|QLh[0Ij7XIfõ"' I.V,ql!Y0I`?m~XyZ\)+ ;+%:3죁L,68&$G:wiݽa٫kdBmjj$L aD'+YrEZW QPyJI UieW#j,9ihZ vlNb^,Qdjy˵붫8Ϳy'ߘm'`f]GEc"Md qVUvm$u+2b/ZQUsf*yK>8;'!{C!v&.rlH,S㹨gbMU-j{$-"a&qZgڌ_d'$9na4HX郖p\baVLNhI724}5ܦE)Y fP7]]Q<aXw[Yjt{fbdv8ILDgr 8bJ `8. sY43XMܪ\̵ԐI]K iMg3.1#'hRXQ>7W4xwy8`۝ ^gpG)ɹvQ`2H;t9ˤ$Ke$aBHX'=}vr%Sl=G^]ug ޽Yq@d `iǛӅlږ=er>r $aF{}j\X! AGƂ0P^A@#P~%G/EnE Pj/?[]"zu/ O!N 1ÃHdzȉ&MDHg[4nʦՏ8SIH7Nr9UĢ:T2ٓfλ]0t?aMLf9Kj@Jʫ+n k>E c$Q͍ĠRLĸH>d #B9=jmnbWNהa}'2$%ilO"񞆦p^Y/17n隮;E~pqqqpqqqpqq=tl5*&$dH?s#C!G]vL۸Qx]]a^ yPZ{ӗdSSVd. (L 4{50fd\]}{;խ ٽڳ{hJvգ$uf<=nj>μ0;[^oQ>6<6yta v5(x-QL0ML?1F0`"tHRn`4`(((VlIW9U4, ]F:z[Q-7ڪTFbsn4k8sL.uKIj5;bRo`=-K,KJ%{EOygy:Jc}/888888888S J]ZgalA+r2d^-qIЫ/8LC秝`q28888@U X|˧uʭW5Ν$2Nе󜪺ɚQ t<d6L ؄x'f ^H-/_B!?V=G 1> mmӗ/QWo\ttJڎ\]O,sz;ޜ+ rd$&3@{`(ؤ&Ofᎂ>ȠEhk6yjc4eoa&8SM<* b{$.؃D `4E&*Ηf|T)׽+ގ@/yTm)sn_X",f=OgἏc*U {f 3H|Rמg<̂fn_Ly8S%4X6A K~@wuٞRAc6dƶqO>nC`db)M6eOsب;֭MkJd-v܄-̀驌M·]YAÝ pfz)TY`K.v(=l{Qܩ*)$[$Y㘣}t8rYe)Y@߇sl>C_iG<-i$c=բ 46&M3ٸ9o%-y3謻C ˽gl!ҨWw ^RngbN(ߎy(@Xyb}ԉ^O0UUSquڊge}h~$boceq^V֋1Ρ!*кv,YJbc!L!SEnsN7[wR#{OKI (/ {a&&bZ'Blf|.:#ǎJk鴄Yr &x$P%b4kiVp͓6Z@[0mM4x` 9 E뷮U˕>y~Xf"-3Cϼ@!q>̯~]a =<, Hrư~p2=j%ul>nwo%D{'I UQ.QᖺaSlLۉ=s)ahC (} 6f@[4[xG" Xq __:VŶ86@K핛`evs `ajZ}ډpE5lTBYA" bP͓&h$٣6Rlիdq훣 X`Iai8@H=g`{"ZY(6F yuhLz8tӜDKQ$/E0o̱(4Tkk_QAok6GdZۚH_dMdšɠȔF7cGH;b='F](eF7\$mCR MdSGϒ H:U0{T6'žd2S |ByɰuIĜCLg۶ b]PC|>A$ )g*Mý:}kQWJ9Mnو Mb di6`XtAv#cNǂ0/t߼Ҋ] ]z FӗYɦtX?c$A8ޟp+V:=@oci9Rī_}cDX1Y ͈sAHJy2Zc{oďvG^2ۡB *{.Z` Q wq;,fg91=!ۉ :Aa7g b ;bBp]H1:MhZ5Y6]! l瘷Ckћ>wXNVlұjUǥTA^(,4Tv4%05Y"#o+iC*k�0pʁBXb3-UDI$ `v= V?XzɯŅ,1^@J{!1H߽CIQ¼:,tB eq[qg-,Կ>w0?{3xl6f:*V!D\M!_3%2b˜goiwǗ[YWl[ե/р*>߸ՒSIҁ{[!V.,҈]ȃTqRn*IßTt܃rg*΃Lu(W.ECnbMzA+AK#u&\'J ) ዆lULrǥE-뮳EI,X9t0tDb%Z9꾠"fة.a:銮Z [fn.| /_G2ػo|b\H2"7cqv0,çs"ls,kҿAL&SU;OT":bW! 
EC*ʏw$Mڅ 1=}љ!4pxX4Vr rԎV]r'xޝ )c(ewCCEUqIZpEylAKL d'"8 7"ͣ4E8>:L[_ m`zY72n 3F+7M*7v4rrnFwݾ"6ϥT\Y*j$5lu_-"&)&%yTd rKx2Щ Ǫ,}TMX 7M!F˪_W2Y`e%ّ!')(0bWFs+8og;5G˯_>]/]kR$dM `/-Xl5qq(>xl 7H1/MGwD]|w_ᄏu5NM72?G :H3κ%AQyIfÿn2(lȔeH IbC--XkM+"cY\%RKaEb,He$Ӿ򋌓w]+Q>?$%UCcl:QGt޽G*÷'=4&Q/x*_,Ƴܺq'a[]p+ٽBJ!5\".Ct}܈39$l*Ց>?WS:^+M?e9lHe fF1E(<̔YV9I*, Y]S'23FOEFɥcLI3 F *{wY]| %YfH.m=-h[w ǍsMI"e_:, R"QPEwX:ˤf]:aeEG}B<؋չXk>/rz .1{J`";=f\Nwvě,l?~6+SΧ_E4g@DDI"Rpe P'4d+أ2Ť͊T}H;A KۯQeDBVV߬ѫ(9,C_4(@ṔeB2( KEJ7lqw1mJ/ f@c9f0cޓԑ;_% +:mwQyl}IDvg 5OX!f؂3'`RF-2$XCO{},uj  c,ƕz0|b1Y[ +`T\;(3_-[D v#NWwZ[~v^w$6$.ۅq`qAm5q6l `# 3ޚ}'`mkNSF+|Dеv_0AuDR)&BcoޫM N(d|Dg7ȏ%` vB9;8nЬToA ƶd1w1*E[#G>u 9QB:jRqvQU<ˮ++T/5V{B/ ,cki&'Lf sҢ ˑrvQ8(cA=U6nW^Ò.Ֆuسa g;f/c&)_1ld(\BlVr(JcZylֶSz&ȉG[p̊S'nش~}]o^hA+"VMVnwlty83 X'&/rqdc9tľ rv;ķY"W#"U|b}"~CbC0q@FQLQ) xҘŕn\[d#2x ~p%ո_dspC⵷Y;waO6f9^"ʲPr/XN;\\zORZ3!Kr|WZ)άK+vaёl) Z]"l0 ǻh͋2X3l /g MGucVKIV<'#,>U# 4f5']zi7M_#IIFQ^sUŢ>nM%:OѭpM̓݀DX?~ǧ B72IL8$W lSmMڣ ]9Y; OH}bM%8i֍{p|P%y ܒvbX3F$ڷ٪U2cj5-n4@f2?Izٓ6WHSg ?JHy]JXQ;`e0ML V ׀1I /&btMO)K6G?E'`n{& ^W4y)xڪe ,X1E%#BѐC%OdLΌJ;y9{ɕ tsV*:1|#TM`xAr}ḳ ;_ ]V}mZЋʋ͊=?5yuP+,̫iR#ч@%r3 JC#c h47͞keX _Q::&?^XfOR u&$|%TQgh¤Q7;]=nҭ'V֪1n:}_<]yepVlha|Y/SOzA+E?./$|g$F_@QXF$-Br9wLG%?Nw&ɪبߩ 66ENCzM^oא=f؛?}Ҭ[fhӶߦHx䁞12zt,df䃤"@?2 ޚ/JM`] F%i^{ax[aʌp9_%ݫpdW$Yt۶wOYXd1N%4٩Iy`OM쮖>Ih֊6ۓ [1BKaq\&(4@W6G$pXQ8h1!xTBhhvk Yj[<EQLy$,I?k۶ak6Y) uuC%pˮL2.ˮ|Kk!1 °@{wdt朌F*VNi͵Ӗ BE_.g% XbJ9m0ĒKﵺ!8-S }yu7VQNq8~gZAͳ!6 w ;c\9 70 hVe@^-P=ϰ'zoSʴ~Xzz KhQxI/ș#W27 hlv GL(h^| 4J+םhz޼'Jqk5y(k`Ŝp4*#F4/&^|Q9*du5gϛlݪYn&Ȧy:XG>s c98xÇnpu-jyR ɌVREA(4G?ءܮAod< USQts?'zTf* ʾ6F8v,,:zhMf 1`%cDQ](_gayFt?5lV-\|6L۬K&ݫVg\[,A a,xxyc=屾 6-=@z/:8ťF*dEp;InyՀh_ie X ;Lup/}crzMRen|l.ZB+8;XխQOasXB%[#3b<I3VMxS|uL nLKb]:K$x $@nxU8%x2A :NEg})Tvy<^4-6gҪꥻE%hP9لK؝*PAkgO&ihp ѲG b#LB@ M-bkyv LjL Z&8iAGg/ѝbS V-떕%Ա<-~.8Ϝ 4!eYI#@A/+c'iz#NH&ґ3M ]ZUK5p@kPQ}P ZnԦʃPt. =żo༸2kM{$0aeKeTMENKg}<'kJSʇ!hV(x!cF3+5Mv>g/zt`r|6 `4꣦.zcnSW[pu4,/BW!9ܼĠE9zś9w3͍UpqqkI:}Tٝqc o{I`VVs$>b.YEe`Ef ;jd(dJr~螾i,$RHG1tM UJ%PPTSOJWoz6ZuMZ#HC mɯQ{A(qֆ%g"E/ۆxNeJjkFDx!ޛ,ǚجGNɕ71  ua/%/ JS&sI 2CO2۟_N/ 5saXڙtbZv35'xVN&]XJ-Dž #ec 8byעޫ-ݫ[tGaHNm{H@Dh/MQI:KNR7#fكm(H]LOܿ kgP%)`3Ӆb;FrRLd #dg-si[ŧ~jըn ߖh hT- (RqVL$ yFG:tI[%}߭zUy6ן*V#aȶXkR&O6qE0Od2A :MAe^%\Gب|̀&%s0 bKFͳL9ĭoEfy.aQpFm._j˘ X;);&\;~ f+YMt[:!Z ^¯Jͻi'R rW'=SF`R-rQMor8YihЯkK=iXhbΡ@gof7.0L]L]bO\$}9o$a]y_Ϋ7yuڻ>Y6J% ޷FÙ B2WMݷ~ԌNDMBcz

g#;3e.m: V1qOrn8y-w>yiC4c-蘸^Bg5.@c!M9:,DL@\O)6PSiz ^4c+0u<> Z7.HULGb1SKpBxdX $w ioKo=ޟ3E3@6]wS{:Ի&񵃧 pJ;[E.g9+7O2kORuɍ9-Hj";۷2~d ڤD#^6vSP< g}8$@O?n355T}jTrՋ-J̨"HfR5^I XO7%8bh&ֲYoTi]Z^kuncu?VGXph=^ǰINWE8X9:Q-212Y${6 ջʾAi;%xўoGY5k-hIa]}8zyMI.q5"u+t0zMppiBfƳP0,xďGQA,/ych*P=SߚY[OΊ90 iesmWjFrb,/bCH|!s8"3O/j25v1 +lݏhC6ZMV&-T>P_] m\Ze_i<.ԜxVYGOÕ)k|S2EKT~yK4ǝ|@[LIa6+\"cH FYrb$lOY{`A3 a/uw1[];hw8?656K( jK=ON|p Ȏy(4J fn^Z򚔤xfY8|ts'֏vpqL8iW_*r]fck(~lg$< JX&[ 2)O3u4sڧΛr{Vvzǭ(>bYDwgu+Qet"Y}ԃ8( Xv z&9\[#C+6r0o/PRjzmkvVb[asb$(%"Up̘kg9"9iQQŇ#9?e/t?m _%}]W,4 uUAa}jCcUrYG9vq[ xCx[Yy띡w틫5R sZBQ*<֌gS+V" I#H.JFƘ9ѹkh=Emu25maD#%TU݅v@JrH˛oJ.jge@,k2`CUZL(L9 XWJ[[./臬zR‹٨Tb$K;J(HX$ڲr~SEQ ^4i#-**Gd-ާ k5:Ug,o'g/LL6kˬŮ Ҫ]=÷ib}r?l57(#A҄uc|~õ.\D!"X5F`WN>?=&S'9O(-׭?[㪮JĶ>76f\9Xq"{ Y:vis"Q@Y1OaS;}WZ?iӺGSj4Tv.5D8rE,$#%1.*S[`cfAnIxĩ)%`< K "if?^,T~pt1uiqZSM?G*?IkM[[6>F&z #QC :2>HkYDɓq#eWi7*]'1ΧDGH?yDs#4&-5 oKQ.,Ǯxeۻr?vЖo0Jѩx4٠6?eCL)HlhV Q<=#c{\p S7ͶT&zAI\K5Q%z4;Nbhi@ n>0fqdž^DU}(͖~7 ať5O;V`]dd6.tEK2T $ G:1YvIT̄yaUFy7:@bY%77COqR XcrXJk:.~8\+WRj̟VuV_v] ,x}k3(lq~F͉ ׅHԔŕhz:tjԇgmy9q_SlD9 6&|;&Z2;~L)9IR[/.5?h2Z燮~^Df*$$GCaRWI_`prȓ*!2fچ]6O ն=aa'sh3RQ% 3jD6ɕ,L'ZQMw6ʉuDYn։M 0>E7;YC#DU=v墩3Ǽ@eSBt Ǿki)]SvS5!=mJ K*vjTfq_/xfa`cr\l=qK喨jyfl;ZՑDlֵ$hz`@[wO G"MA1bې0x懊L *k1UUcZfCxe,3@O QW5Ų*xgk.j]n/4/26f Qn6Cdܴu|<;M* ̚=|ͬA'?lhS8N?G]2 .YRpJ ++$Y`JX(xxe9u]?o/O',K!tY3 3^v ogGdmkI4M od4QٷnI&2idEg˿+s5wjcr&Ѷv1?8 GD^_0]]=Q9\2X8a/+ .'G1: U-Ip[!"%EH:H%Fm)w07.49lKP&g.IȘf.t!k*d˓pY!F#_fsfkNl F5Z'Eլ7E:&ԧsvlt 98*kydd"C#1xܿKʂifq"܍z.6 L(و+8 F37ЋEDƇLRO"FZp!*uV LMbB$X%'UЅ\$P"Gꖰ?a=XF)mm݇^)/ڙ,eCL+;^:DV 2DD1r7I N88r- w)k<Ɨldo"d)$!;ʜ4npT$d,Zwp y("r Q|nYኋgY8=wc*kTьU_l * ACsƄ.|BU Tst"ǤtI [j+(4gө HqlNnhNq`+_S9& .Wrp$I*uRZU_GGwa2C `ʼnb'GAGO9W.&훤)}cxe}mm<Ҡ6<ºă\`{ sHceі"E2I5}aXX zQÆ'p44}mXJ\[QD;]|}o;zeGLvݣe ,Jl9"PHĉIJX1bт1\̺R!@M!z}0|&Ոɓ-܉ {JD'n94=N)->>d(weg>{muU禃#`6k6͢l1#vh@1cȪ f2btj"YM3`qٮRXYÓ?!Pڄ 눒1U]k Nܻv5õ7}͠:eߖm)JWNݟr)b XҊ߬]&0y[C`un]UH``CnZXƒlZH}Q9InL_G/ߵ[O^(Pomi0 5z<<nbyݟO4Z}E0Mkg3?S3m,+D5( E>wSf kTM HP٘A_t#%޲z$If~\{l%6qIF8TeD 6Ʊl::@IsO~ڽ=-OC~km?9 wAG5/|zU ZMKٟFv$**Fqw_Ȗ.+q2AP0@iiR 0=ۼ}_;d~l\˘Dx;FR-V>0ج`ت/co23R_?\OgBwl~(=i ֒65Te%00v}|==^A[ye޳@7w/r%JD4=w^z1]b,N(Ԉ G!Auȟ6u5uUD/h9Z`N`. 
-)[D5/EG"[s7;Ѥ{)Zz [i 0:23o/28Ӹe8[RgLW6a"ܗC=oa/ذ?dI2W]ͺzb&JG 04V.A|aglz~VV` ۜF-4qT5(aP`ACP]]Ht rm3j}0F{Zش/ -ji;(L8lIxS&bU+$W.T{ygoS\yQ%\Gj|NYʡ﮻S6H}u_IUz|]=WY,ڳЭ 3B ;^2ɱgYn0!C7gޝXbb9ߌ 7<] vg]cOZ5wڿ=~}u樂W/u}ik* 2;˵;5ϯ]4rSyϮ SvRԑ][{.|n7_a !u!-2 \=(4qaQ^a3H~5ćT.5}!Qw뾻ˮl_yIT0mx]a$)﮾$Y'=w}u9gB(D_(E龵V"@fEә@Е Eqf y#&_Ә_@*żK{R?@&{*41mZ@K9.Hq6D>/ec+l0DyvjSgGkt9zˤ`ڨƳE^b\(?>Ǯjnc8& _&}blT3]`]X}]u}}u?fj5 ^uؙN~+ GP$1n2N!g Ed *''7E'vP]՛ iܶ2'iUVY AY4334b"1ȹa^*UI~ye$/C zO{xˬ뮾]k,˹d Lwk͡@șlnH1SfZħX}ڽ.I'B (cT&8 G i!o"[Lw^q]"G%LEQ%4WO;:gf[x}٦̢l7X]ldaJCՑ2[p|HyBE ¿ UN}nf?r#po%g(m [$9DV*vcc D޴GdK9ѭ^UjZ 6kGjzzhig!3{xH$, JK&vN:V xU75ٝGGEjWv7l1rn 2r$3}Sp]z΄9A 1rY?*69Yo(˥?IoGǏ>m]ԜI?K>WއIMNzj}52_bIO ާe>KCϬThRXӐ|EFzoS =;VSb5l+HxkR힇cžFf0Ecq΅җ3gqo]>LSk3$ä\cr<[{\8jl!^D.oSA?e4 ld( p.32d#g.A t,l\a!_5K v/uj6(hs XZ7nͣ@hQ)(N=ŗ|@oW-r>_0ᲿoԹuS馻9f(]i'ϳe$.zҵ&(8Nq ;nUyaGRHtOeaZ[>zgfEaλ:Q\^]idvW'obE E_O=sv=_M5[ Z3^Wo^t}wcK\I"$uќ(^njݤne)BI2%\ hD;xm uD^ rVI82L)寮(>Y P&tsvu ɯ8`M 0` r'Mre]B`#YXoH 0<#&1Z]tM8MYz%rݒƆ뭣BYh aDӉ_-!Mq~o_!F?i# em*\l\$5xSf ^E0{j9Py`b j4DMw{ѿE_P]e"6wA~QΠB.{iO5$LD n7G;BZu 7|2s.AԶ7nʻlBFdPi1%s(wn>.o+a;g $]Rj4}?}kl3 ckpjGtrĶb$Z4-E/1Appֈ=c=u*~qgDUGFHk%R]$ELEdU%SU%14O<{ rǾٲsV,4\t] Ni=QD>}j9EU~97/r'*:wy+})aqewYY]nߨq:پh׽%$-@j-Q(AسtSoY/E]DT&lǏɓb{'O>Q~.FkU)UIQAv>Q[5MYCebX<FEz|Pw]駙br?KSZ;_mEڿoJꟍ~vA\DᑦVlĎe)("4E4|[w^X_vʹm ҩL?GWV*ץϲ8dO'kNI$L"GPs>.#R4l4:lz6 mL+Y+#EY"$K5,Zb%z[cea}ut۴d,ljj_} _be5Dǚh͊y65(od= E.ڶst]^ y0F>˛3:M nctdwX;mű7'GT] 5X;+o[n +3x\ZFT+ukh4Cd/c#cLR6ȚMx Y9Kz㷑Xmw##3A,(GbF瀤U$U"zA v`Ƙ֥W PkgufDtKK CUWuҢro T fqiO@v9gy\u^0.EeL>N"ɻ<3M L]b˷]F`y~7V"۩&1- LzNBln8ɿMX|_܅̀ 16T,X1tO>uJutqWc hCSB$c7rh<,h֎6UKЗkN2 LP<]kBdl/PEf?Vr#7n_A-9zWvمR=c:VqCfm`d"b ZDN~1™3lūv$ح2 n4>ū ^U^{*u7 # h=b p㽓^M]DyWOZ$qo=5A>QTؤ[=p&=G7EMh4[ ci}u@fB_ ٍmS[TnەT#fkfTq{lD=!YEkr3~.Met%UUU zQ5&MI;uj\Z*.$Kĝ>o(n٥{>{[j $I* zgs'-$z Zd7&ɿvmm~(*"庵5p&*0B%J0z@C2KdٸwyMToiq/ }AXU)dzYN@غcp.ioJDo }{qoZ+8][_ilG5eb=9Xm)tQ6 8BrP]ءLs7/D]xnm X,k4<^?mRg7x! 
(aLLCFc_=O3$dFzO4Ew&N~ 4Pܱ(AN^E(Lox.BzpukM=8 vgc "H| \zWR;5 HQ,|9v'I%՚6i\GIZ?9SO.eq -p4r wox-*v+q 7$ O}-ᄒ}|뺾}ux]kmm]<-.% ]XϨH^;02P+!@ 8 ~=|,B$eO/{zDz\No iV10AW5PTz!b2EUo(Ffw I)-d]yJ僉Dpag854P0"em*ztKD&hG% h6ehIM0&RsWM &xtIxFDn1tK'r[#.g+Xd1&<R&%iEDՙM<"`E`~*[g )1qxYMu3[ǥs?ŗ`lBmg'V>~h\r-whHvY;iqO9XGDDpC1v-dhܶ"o~G?4wAsQxwP.Ԫ$$Q0*I_nhAJ;?ypS|O{'QwWbJ,h]0￟>PGV{Uvj4 jqGR(rY "H%K }eG]fVL̎crؤɊhkwj0\lӬP|(8UŹt7ituf+`YeK9l}/B?OntmVU#i?EaΎ_47E0˾8/9w﮻wڱ3΍kdzy#aa2V)@z߮F]/|R`!ói~[柖l b= )tGnbC78z6-{[;!Em (xAGQbEd\"ꦺ .xȫYJaX(exg]==}wi:i veXWf td<}+j'` v]NI\:z.24Ǯu/LX-.FpZOB ]G8wo?k  0?3X99=exX]u9c]X}w}w]]˟2gS[6BZ{_td#-dfu!a%Wt:B01m7ڈ(({YLd<@c388"l"e[4A3?n"Ȃ=8Q0s "H*>Q4O^\G/jƑDp=Qb|>]#]$q&^2 ˔!O{9,K4c=]- ϷҬb5s<$ͷJ8;^D$Cܨ0Lly' dƉ;"3l,.Yi$ﯬik}&,hFKZT-Lˇ&{swRW .W޲5^avp HB Zan^0%⑖řUTtO4%lȌ&DwI;lfR"^7_&~hGR:tdM}g*kS$snNJ= o}=C9R>Eawj#c+fH5Io'avlt+/p2F~]//ʠ+1cM8%nqNnkx6{Mjסu- ]Qw* [ީ$abp8R+EV&*4o8#/|zږN\*?C m6U6ĉuWoE}M:fpq1QYv=ɾ؟2a:s햟[ok ҡD_#kMfU걅dzpġQfQG Z]D]ͬlmni$]~D!90nRvt :C' ~8 ؽ7A^k(Y;xnӗ +weKUrqYf$h\}OtjUKqdi_I4O}nM_;WJTW3fL2&m0l蔗Yڳrn-ڲ48@r[UCRnkhp{qWb~8* `[ quȆ9,>!f]4qD#KI$A׸kS]7 M鈲bt>F^&ɉREc kZ;6$Ȅ9?* *'߽My9A)f+"GE' M,>DD{al(+ǸUdsY]s/SNv64qtnmWdb,͕dN:nm1C dj #u0ԴPa mz˨r;4K= 9V]^TUalbIY ͬDM' ĸ5$t|MA8:2vCdOK[6gZm}jvdC:Wuv*Q6c ,*9k pݺB WiG> Ca9 .4I@6nˎ+d,#/P|V:"Ȭ]ܴdrHR.sbp WRRcYTεNeS,˦vUba %ܻL p>B@Vm.j\QmLumֶ ɬ 46D*l\0GeY,l4Ǯ~>ד0̭TvB8bwSyYTGgcM;+5>J,Ay ΂Bz6-Zi6P[,؅\32 I3G'r&i,efZ6YFtNl]u&BMc"D h<=~ۋ$Dşi|KwUNZzT/:ԧVD A37 tɢϯƳL1w8.CT"rߟ;[buimyNmcb˗g bL#;fbH1X6%ruԥ(pl[>Tja7YAe*g c\)82$QF } DCbE+>:b[DFԉ5yNI'4ƺ/e&r( voCbeDVڬ_`+m㹗bsuU4ӹ:UjPO2X"2,?KƬ/Ŧ]enp{gsG zIiQE#&qRo Tȵ=?hG}WWXVVET{6^7ױ3I"X&爃$QVHO[<1#q>a5GW U bSQ51S ˮ<2r.뾹PzR햔 [м, R-B"x,DJ.?eޞu~Q_ t5ܷRyo9Yd³jUkuNjȞUfDt!2qS&x˥݊^* -ߩUbӖUK X1bY$N|b,1,0g( uT0M&䮷p F\jQLBe2H( 5̥j2NŻlX.Ѿ)=t0K%R3׎wqb0zPMtG 9 6d&ɪ׎UIVQw I'uUWڷŝBY}u5 b21ƌe"%1Q,޲$˷Mm^!,^֛0ohx_m \zƑ]uD~ zڊEW I&|pI,gOН7gC[mU&j7؍#k?W ,Q w -GĎNhO쯎OH#R5ۍY;Ouoa4v2۹)fh YZ;~yh#ڔz$ȑC)(#Mp N!g9ѽDZ泙0 lvSi0*w /1Dޤ?`a"$aF$H>+7 8*}.cٷ>+t36vD\f5NphCpՁqވVT{Dv?^SgX'"gBNBԫ# ⲗ!\KDC0r:%R t&޽# }[(Ujשnm2/_ >w Ȍ=^gV;ͷmhm! KajiN*y]\kNcIGE#L}1'Ft{w;vtRo6gsڌ+`۰Lg!:_,3)W]=ȼU>JɫTG>݋Qhz>5[KhaSsKJ(d|,+# ǤkKFCZY*xo[ o ҟQQ,7/-Y5r Q$\iےh(Ƅ;펠kIMo*,QGaSGd*@٤qSh7Qګћl1a_ÃT;njok[E𸭻MBؐ/Jd˷ Tt , MǻNGXyεhܝ!L%6AMʃ(Yxȃ(K2P,cqm2m~Ӭ5"߹>֎wzo_Ϋ}'gر(59 5B_fEȞ6@d:qzzcTd2NqMVVK;2E'oS,hQ!1dńҿ&yz׺f)UaZ;3\&("N 1qrv9{00 g8;+dY>\l>= es`?TjTX5!7= !&Wݓŗ‘buKQb6)7,PfflPnܒhjguye<~:.٪jw_`k)*)KFׁlzyѸɳ 6M˰ifո=3c0A,Kڕ#/+h7zQnР$rAYwybiWĩu9^ԥ,7zsRF;ח1t(4zuxn8=[ՋS{i=ѱQw6|;|{hd{e'2O^4s8zYۥs]˅KUS5˿ߖywq!Nyͤ:8b`REd-bHMoi9[9y{rm-=4Yke^t6|80ki)n#`|y?<.j\p y9ZfT-tڒUWTⶡ*Ya("ĞL5K,s㦤a# DmE|D[NA.ZoD:٧PNM[Ikȏ: (v週WF8A3eMiٶU;4c݉3usFV%g*b3!r a'eY!1ŒQ:& @n ^z:8dg;^Q7f]7Dl"L'VR4FG7jtit@j͎شg}&fͩY|PDG_%~fa5}cDν`9g:+Mol㓳U0q#}1cU|릭*3,$lö"7rAf/o4iCպ;oZVc)F^lhD/[[\pκh9 cx ?sh>B_ue)=H`ൄ'߉,ޡ.HR!p\v׏ɲd@|vW1abEr&$pE5eM4K I=结M4O 00Ǯq뮺뮺띵~]zժ:)[WMңl]B{cB]Rx`r^h|d (ZO4$nuvOA //~FY4s5jԄ#"b^̹MxG1Vir6a<.KU[pFN"9z`:6]Bǭ =M2gmßjfz=%֍!se!C'-vpg5W8O_d7Ԉ[7JZf3lި4#|*DiBApxNz,h 8"O5c7h(sflotuuu6=APbhW(>q"^"[0@[X5>`ѹUMnmZ\:`ִ\Kmc$d;*FBN PFfz: o􅊮j1Ck0V241|Dd2m'fw,[}ZDz}Z gA :oh%hBn"~d)}b;$ĻчC''='QyD.Fx vtṥa+^&1? ;Gµ|T~#}|OGE%'U?VՁ1vֲӥޤ)ta^hJzHG<(Aw R_ &q:!qD6*OXƾҐzĐK)d@Xlkc8O4G.&6Uȩ4|N[`K@Q58nAui،KbO0Jn#常0UYe28tб]x^pI%6U\sq^6Af9v!Qȝ(>5o  ̮bd﯐fj'm3S m,KbYt;ZB,$߄h='C>A$C&Ea썪z:**CI m\0b'gHIg&%^*v9TZ+⷟zO 4]XpuDq"N?K3$uo&%E(dn}2SmqqqpqqqpqqqpqqqpqqqpqqqpqqqpqqqpqqqpqqryTû?\FKZdKa2A$c!_$&q^-ra 7e^UvS5e =<M 5 =Y*<$QV;M7-;eZqk:urCX=OlITp9MZDn)0l {(%ЖN1%G8| KE0`,c A4dY4S<@s<~(o$E߲JȺVbdF P tУ697RZOdQbcӑJlE\[YjA%b0k:퍫Ay29M|3?Fd]P U3fN~JkٶXp2X9K8I|l 6Q, Si2]9y8`fyLX Kj1WS";('H2h \8vI>N? m }"J %^zUv؂e1m? 
pk%a  *4ve[ju*ka`kKPTc^jy+{pΧs}"Yhb Ѵ '+M\ ۓ @@a|:dz`,.DZ~ҷ̅]w 2b:6h1Nee{&L3Vۣ~Ƶ"]6'dVLoޤ;2f. 3z|PBE8il8J 4ޙz*Hڲ)\LT&Ԩ*I7WBriYKW"89@]/9qƕYVSoѫ`LwRٓс[v`.DiLH$طgJ$UN gÓP;i1:^>w Z$> zGX NBhYz&$~lwW=|Mn|\ 5[t.pjA W6AbeXY nIbIbŻn|veXУvUbj4+ȧL  hSy3FC'Qg+$_U0/y|B~izF-S۬InZjФ+|&{Vkl/!9Ppd<|p ћBl)[#\j miΧg!h2vD10š!Y|ԃ9ۨji6b(2plmd҉QpAtsr1ϵVO j/$w2Zގy eW:-FEEɊNšM .W*hJ[ ڿs wj.Z4c٭_YOf~e c*64NrM&nɲcv>r%FjprddR@h9&uVn^*t-y,J,ێłTռcl.]B#( J$DaxP& QۤS+eugL鵹c;|6{LMHPCfMz"3)Ua]d*l:W{]t7:t mCհ+mfk0M  QGK}7̓]hٮ9dPy^3[l~ZLMzDX0Eǒhj a+ '<3(ހ:]߿˾}w}w\á6;-Y wjJ ӮaЗc[(:-U,-&Lo$T)oS iU^ ַ_1axFIXAԺ|aU--8n9C q֘ub$ ]G_ e sBfRa_]u"Ϥ "٠]Zƹ)}aI%fQ>¢â@Ȣ2A=*caB6jvNs?v7s,Z7_r i*i(q&j拼_O,7g$Mx:LU0[)kUFW[˝N{ZL2#S0mSeJxЧK q ,! >qn72Czssom9p̭J"bF$9`4tؔS)+3nUt -z=/>1tcJeaaFQ)fO#Ј.'N 8P[z#<`V7+v,Zqrc?ORMEa" T!/2Q>4OƍP=$d6&ZIK4b 3+rH\p#"847k21IKtsLގzr'c&g-͑ʮ~͵a6*y^<4.M7($a'm2ф[w9_Z&J`GU-~2fv]k* V|㇍Yʘ!.\0v#a. %dk@m=M/[I3KnY@8d'+ҊzA AG3 W-枧\},˂ٮ9ijFާ-l2l115"_B(IcD<!d0u=u{Cܽw's)xc`tq+ȠNIBMaEF E2@"n tLC#6#D#.o 67"M H:DGkۀ63M"Š{Qd1p }gu1sCĊg50a"bɬ7 }_HG+>=:HA YonxfOe<9 44 V#j=xl?Q|[O~9x嚙< q_<˾믟ˮﯟ]_|Q;J :}-Z;._r'}! /$M/AZJq@ȳ0דG5m4HȏuURjXӽmY#YYRW,`n{2ܭ^(EJo ͯjw;&2V;',nϯt.î9]*m-OVǻmyDZt{0V?^+;_>9ve؇6]yw|o:0}[}?_C}??85YZ KC=lHu\# I阽G4)ܜv,I*͟@*DJʛTMlJ`]fxYuᄒ}w}w~|fY;h|p| l5yb:#]9c1dr_Έl~> 3<3Lp[b #[n[>*8"Sc8g-MLbi쟱QdP${%tTС_׋ub˘%!Qi*hgrNk{|QxWm~v2.M@q9i[{h()"3A%t&Sp6d_2*ŐCC3ZJl.VWNJ ʪ%FE`>`L8Ҋ O_6K2TX͒pd@(I4'rӴ0O0ӮWGՍ?^ k9(&m#1F;".xx>tM5~k-W q_Tk} כݶŞ ^JcT4oq]"f zue犝kM~ *G4Ji)ֺZ1#&vjygѨK jxiQ](P#ؒB s:`5}(<,| MuCDl[Q` -Xۖ6f-mY4C<3ItO74~L-~IJn^v)#A!BE 4FI $?][nڔŕ39Dw+\%0Yc 4]F1A,FX*/a|nhGh}e!rL׹$xi( XۅG4#WC\?#y#;T,M}(CjdTSvnF̘tFAWNyrd0x,_c4y&|uhTarqvje?s;14b3/dǼ4]_7=MUIޱksRxN$ӱ%Z[$HźHp( ug'3#Z>/! B)fWdMn^Л֊)P.d_|%ˇ:֭jߢ^m33f)΄ٙ (\Fœ\Rd+ҲhYz7w6+!ju1aۮĈ=i"4~`C&g^"!chw!r '#HMroB}]b\QI(0XBdj")7z>6]4zwHB}ݻ?7un?o5AnNקMkј3YˊSR*W9ɫyIH^]_?,.T^;]b?N1h%/qhCO]-}wq|]c]u;=v$T4ok Y#QRMڂ/#4V;$&6i( I#Ro`$sVF\ГC, ,*f惲m7%˥<$@iJ+㾋AW1 CPyLD$jf\K'!y4$lG2)Ӭ]3_G(>l[Sڃ*FeXF6p'蝖']a.=V,IQ5%Y?pD=|fѢ}GahjyQ_)%Y qjhP,¡|-vϝk *ejJVg]Z`l|^B" CAYu1#P5596`XlYYdlrROۮd@ĝ|p-X -ѲTC?  ٲpd3|"0 hFɠ-"պIc#W5Bϋ] evM+zr1WLDDZ=i HGɢe%mm.SU8ڛ°pk&JIm9 ƈ¤priuI0k#Uq#j\YAKXfQ  f,|F0Ϸ[4MBn]cb/E}M6n:/$U3L*KD$zxr:2 uMݢQgpU:yÔ!s dم Gc9Xg!dYؠW5#bXdp &]!_3 -g$Gl.^FONAUJDlDq8g.:EQϤ"gwؿN 5~c]j z`Id2)-m 5"# 6>Bi]nsx~ذbݿZ/9'/Do?CS^HF3Ny5¡ǎ:ȔS9t,Iۏ*ͯ_n[7}c(w jMIPe㔤*KVϢ5#I9l Ǧ1b9Ͷ$&6¦4 m,)[:ӶX͋Vmцa2DsIK^f7,'aQ_qg-Nr}HڪǸGu~,NC#p&M:C%sLQrDWh'ox^_Xf7~xJ>׋q&ZSՍ+`xLQ'i?`M9#<ޥ}n~|GG;B^ĥTކ`P~}dW(<Ь+Їgch@w)/4PJR{{Tm(P( U½뮺뮛]u_.뭵z뮺u]~\>dzMbY"{fk_4W,\%0\cnG739$h(/٤sEܐeX=[]՝5rKBcY f) ;YדAd,,B0]qhM M ndhrS R !ZŠX4$sıieKF vLWQlyM5UR'.]8*=e\L܆qV*9 IK㮾q5;SѴʽњ{_ }}NIeB&̞I#Ӂ[A.kv:_<^i'+W2 Vqr y "B;'bؠ|Yc w/i;Mh-ulY1=o泴fP߬'X8T*JY#I맊8^@,)8[Zw#P~OAszuOAdlКR[ڳ0!Y=\YvOsyjn+gG3hW*W{iTaQ[?mt%]0vD$aQ$ &Ą%t44ՖufGVY ۷3jJ +TuKp8غnB!#2e@d 1$ߞ56;=YVU5p,& 7QE&HӢI:tEW.χ ? 
k8lv  PqV˾Iʙ&,0{pu;}U;᯲ʻ՚Orʟf's=CmfWf7ٵC5}5ny5W.pCHvXCk(.֮nFu䘻sŶH:K%kf0U/]|!{_>뮿]uɥ1mW$^At~}?(t$9 51rvϮc.Z7ꏝ hƵnE T9{V93va*m"٨HgEI~cdLShT6[;z8 }ME$OdR< s*I g۴Qj,$jc J73_ZL%:FYss#%d^Qciń4z)rGEzEZS%cIfLJ%K HrGJw=lu™-YW;mĩMݼl קŜD6i%'!dLFSc֭}':mOb2G'3CpZ;Ø(QzEff}$Q*Vs+|o?ދKx{e~I 61-iQ(aAXA`#)0t1L٬Ow7ucza.n MO%G`Y'.4 #T!:l vœ9e"`u|2qzv\i)Ź&rA Zɿv얠p(3A~S9: LE~xSތF;[̓ D잯?_KoşƋ,gV~ cvgl?W!a~k8g+Z1\RɗYS6Ne,Rz)c5v_I4%;pm4iMPDw8Y# Va$;f=Dɘ u@Gc~pl1C ןYdLFRm7 I6<~^yY:rS%r/mk*ѽxDwihql͗iK'1k^ ^Dg|C WHEI:AC'!ך~j{qd'ٕ-JRj֚VU f\ܭ,`ɰ6ȦN(777Wu/.>k[o/I[g%Y]Ii I$2GCn`ok4|B.#- JD`累`d_}+ح\id䵉RB*Et^o ɑq.&N1+|ܾS7Ykg>\zg>Π=%7aX- =xKG^;ZXIJrDŽɼj"NBL%qE(>/tOQjbsm-YreU9MI44 HIkX)RDI ɋjEgiΜۋ2 'mo-n9*%ޜ*s ~HA7E%5"h$< Q2'ucX/*ܚO)G9`m$ D9 (⯜s\/=Vۉݓ"vAyWŞ*ڂPȐh y4r3 ) KyMz&k{-8: Bm ⟷!vcΒHGYp$ 9)-tw~`lL>]ŋtO9cr#,{WW^mM壀DC.֐΋5~ "%,V$L#|we~+tŇ'ptږlac(1mlɰge[1Qrep>_8|>q-3YNݳ$wsi!2pIYaT ΤRKR~y[?wݺa#gq(v%AZF'!<- "pYyvJ'\uւzU1)j~iar5{> ĭ͍b@BvBn BDDdžu8|CέJm-~ TIRw#[i |[!0ae=Fǁx1LJ6j/ |*L#%o`՛ӫϻ'Nro1 zY5Xႛ"*"I/ p̵C y ]_O=ݹ,Jei_Dp#r,;\LY"-(촒@VL[$ղI~B~͑:;)e6 Imksx%"4E~ƒK$")2dϓ< F[e~Cnܢ(K ip8Ҵ`c"@Yh(Sǂ@l D"P@mcxvaǢGQ=Q ox4[ᓇK^U\0ǯa?}{Zm-Kht!/CWFĄ/Żİ h-^^˩E Ɉ=魶*e#tuNE&Q8 C̃6t ?G~.;ꛈUqළ[??(qرR]TMFr9M`߃.w yoMlKϪ{wkn{MA+R֥v}#o)eH #'n Y S\=w|04bՖcXQ(LjrZᜣϦ U &^y> `ZCxcl]ʲmgGf.Kԍ $Y1ߍ$32+ۥ:)1_yg|hu-AujabG39"8v$TKs養:Iۣ8,}}בۭ2=7bu.V"h;W:6;-[Xf$].wLY>7`K& <Sc#fZ[^Lr@ڍdfOF6OhLsŪ44W'^ȝOG̬R5-/D`iQuO (y.*}'|wLL{ G"ZhEܺRn4|SeZWIB<nԺX6^4ۨ[ළqaȻ1f Rx1~覮kkK|u ^iX Zꬖb UkGT 6#dXi}>sW?I?X+2~yczלmb/܈_p KQUDU9u_n@j7~c, ŀJ+0;c( XC<7FMd2fl3ֲ,!2÷G[A'sn]Ur3a2]d+":݉lɃ`/3)\_^)]oE >@T;?a@z dҷF $1n2dH{'2A ml[]k8"}`GGMV&RM1XI,X$\2 4ƴ,q$A:8DLͅ<}Z)}eN6)V1po%jiz(S@:57K';׼G k`{\BܲȻ-'xJ"ldQTh%xQb|Vks:IfŬTL+dJiE 3HAZMf;Q :< ⪴>#Ar )fRq?fGöĘY01 jdg?,~]6.A3WBԯhdzE(ٻbġpnwk0 ?˸B 5tGǍ,(՛WF\ݔBsFP %QDdc#%Px<ܦLDfHض|mt麠PtQK!5r肓\V)CoA0/dFfo<bG4F7muó3ߤp,_{"*~}RUTsT~5vzm5E,tdӬ?F F ^;UѠKV!>zoAWenU =jYwD~Jۺ;1.qrVs))fGM¢cOŁv( )K>8888Pw#X 5{nQ~Ϟ7qSMԛ35/<}>Pš`k,m#}>?{!ypQ_s>j)[0/=ڝ/A6ۤWӾK[Rѽ؊F#(N*s" H;fRŃ jj hݝzx^y+Kԍdq4zSJ"n=& ,[ґ҃ʱ|$xs'"vqqpqqqpqqqpqqqpqqqpqqqpqqqpqqq8/Qv\oOxU~Y^q*Rny5)u `l2ʐ&ld 9KlSk6]/`igC(O.7e'}>q/¿J-)6̇.-\&TK"6U3J1uq1\}CBk.j)S-vb-|Ii;c?4K~AINyg٧$vkt_esek#0d=!d\. +~@VzCh+k1קn^fi͛^MwV¡ֵr{`!ҐŮOG*MSWG߶mdm֔ # `,jMb!FtheMIc#)R$36bKdǛVyMڿֽIgaGѠb['hp5n1#ÏvoN!>{n K:srL5mRʊ2vZob$ʖ"1tw+&Q䎱NLO3}k3UgfvZ8%*%1daU"^f:c*90\OG2zVtmoވC[v[k@:ef%k}7ΦgMU_KtjUga dQt֠hVnyhj]FG!bMgIXXZ#'$ *f-[dQ<,$_c7ޮW7%?5I"[寠]f;eʣq&qSÇȸĝ;Ŧq;ԝ/95c< o -U 3hxO-($aM +48ERLN>d2~8m]u{y]|ͫ#]uos|Q_E[N,DSh^~Q~4gvd:=hhՠ)`4iXKPUNQ/ΌbūHq 9}-O_q#x=De\hٲCCK3 Ȣzn~V)D!Ihy ߤ Vn'o/Kށ/ *n~)6wOΆmcW LaPNVL D؏'Izn4X -d|9۴#}6|Iv<ͧnҒ{9i[6TfeMBlH7MN˲ͼk;UuxPC-xvT`LS`\' _cCns*SZ4mfYDv56[Y?g amX$cϐ Gžʉ9 4S)@{5zmu!a^4;f<-2 8~ئx^j]Q~AlY-~Ǯ$ M# u]V G\ J+$ Kmj9}^23Z\6M8k@aFǵu Z& Q0-]HJ%/dx`]xg1&+T+"'xݡbY %J)m0vYD$J'M|4AՅ嬣 6$k:93? 
lNt7(?.Nك2$6p-wJV!Dm8fm:*$ق $Dc%1݌UqʘZU^n@1~m޵~#E&JtRB T~9!;uaCI_ؖĄw)F7$F2GGs&|C:9 pgMupx@IgkDVg!@X"ZAp>ט _%?|7(ծĶί/Yf"CyFֱrsez<>(I& i#Pc}"K7J}u(Ǭ~9v/)t=]3q=  ~YVc`m4[0إ"j^g̸kϏU;|X;J=Trڂ&xfҺIx&n0X*N|ż|^zۭ;﫹.f i=g Ҩw*HDaa AbגGP̎.ZSK}9GNA ,tFqʸJ* jhHV)XF7)Ȗd8gMGrB{w{rwZzJPMD+Z$71Ĉ3>lM /:=`>Ob^֔wi_,gV=5Uر]X1(Cs" @F?4^FP͍zm\ɽJ`79+/ؑsk9*(%#dT%jvq.DI ڠ}|/O|b!)hx%@E 9 Ы@廥`Y(R\E08W&ۡߠہL7f B/شQ\Z%N<ЋBGd eC㡣߲RM}hYrhktSpMfalc+o#:jmǦh_Ϫ;*{]gU\_[,v( ֵ"AVY K"MÎjܫSf/@~:қ:bB0kl!f%,kmͷָєݺmWP-a8YvP$WᄎD{yFbV|} SW_/Y=!ӏ=:4]mqmGMy C *<\].D#2&E_A5AR u,zC"a]wM^eKIn0KǬIg}'XiQ=kLO}Śy5]TGeHA;7h30f̙3l(iH^ xx888888888888888888888888888888~M[>l;fh6ty$w+i.gK"&yex]0I(Ɍ1骾_epdQl5s.S/9ruΊvtMSX$Ōe'Gjbm ibHN9"Z?,-EHfmkzRm>maG Ej[ ܎\ʓ>p"xfV3YH$"2/ī G>>m}}oOW}"ቔ6"6QCEƇę>k}BڶI yݪ_S<,¢HPL`kᄏp\8A lC1MZ<l0tK6q Y㊉]u8`, dhd{ȳbśdEFm 7K AS$q뮿RӻDeCWoF9Wܹ;[^!ju}gu8;T h#㚇(hA qB1j4s<W5I"پ9gIaJuy-$p q~ "I1 ޼L{v9wnV5q>뮺܋nY+p.{\ʱNsP~U&u⎒k9Ϛ6)NAzp;)ImIYX|!#;Eeς~,S/p7)Yj#Ͼ%QlMۤ I4PAIQKI$ìpM$O q qz뮺럳fwV|NEgD]f(uvU 8hmd[D 50e,7b6UM Z.*Ac5}x?!)+.Y~D~~?<4Yye>')8a8c8a=u8=u8]u=u]u]uˮ~\E=0O1jxեoE*fd>( pP%6l &K%B}D^*J%gI4 #EԐI$r)cXzQ%90g*PӁB؏X5EUA򹪦jwLN.z=4S[6Ue51 γj*sС@ #+aȱÌ7"AӲL zaҫzGdDsJ[Ib+K̞tlP6C ve,ok٭h*0THc; ;9GR RG$erYA[,GN%rL?}>^M3>: |5 a*f|ZȊk!`0 ,T5`\6qY&#|pqsޑ1,W$3SpY%cx) Xuyw_,{逸U몖]R ƠN=h(6 N 1rI$@#?1 i=pN$GK9@PwLQkzb+Q la͑&݁Rl{ SrH88㟊`x&xd)X`uYyw8=wYe]c]}u4x:bwX8hNPϼ2 dr<fA ,dIb8IdJce]cw.894CJGж[Zu`Z=9&K%/4`,{(])Z]8888888Q7Ou^UvՐ N٘`uXFL/XhqOJ &x H l_npr\x%wr%mV龏lBIi;VB M'"v?P`ȦAhI1t0w9.Y-x ﺓh)5DTPOn|[bo*8[#׈m2qXCZWY}hTV 䐨Bҷ De{dú~ k&kcn]awUbMہIrs-l~F,ڽbnP(A0vrM޵Uir7W_<<*;? %f nka 7nuMe\xqA"8pH?EߚW2~ HMeB%CMHˁiI²(ݻr ׎x6ȅSn&U\YdO$I8*RmA0C ſ!#ߒu~bӥAJdԖ@^Q?b.9I?Ԫ3 `Ԧc oSlKHHj Plȼl5.f9,ix؂[mNiԵ1MM1sh%ԣ.QΡ9W(Ir#.Ls%r niƂCE6` #5LV1 Pqܳ};gech<(mT *Qz,>mkg?a2HSJ5EGN[[̐pA&&ѣۥ.޺w7M|6Ǖ!q : C Tjr*b71!P'(]m&6͜K~5ᄆȄy^[_0ͪ]|Ul:r~^,&t4/$vI( IL ]⢉3Yl1rGUIuIL3)nq :"I6W$0y3멼3`BL>Y.ɧg46.n!75]!t̠O^ Z:qUU4j]a888}U+H?/oa->9oOο2_}Aڲ6d[#cdHD;X##Qb:W eFK#v8dՊҭAlX1uSYeN[ճZL'8sN6qLhEpaT77~WF.%#mݰ@)9a~,%PH*<K2f†1!$jb4Lkߊ=lā>IKRMp;Jd\rbUWͺ)c_^vz{]IF#XχZާA,ͳҥI.g- 4!nPz߆v%vz&}BzZ$3شV' A cѸ~|GzK |{ٽ{&s؆E -)o!MuͻMLQ< uˋΡ4.%(Z5G(-6O_&3w'z>چMu@{AclԤV?L;I%ɦ@dI·DfIdzmƣ%cҹ3w[5 eWo䳀S "f3hTӤ氈1"AW+flS] %VǨx&"TJMf4@f"2itW7fG7yۜO*ܟḺǝL.=l5ތb6N W,S5d}},t8N>1ڣ V)vp:r26ixWVA S2,7/7O$~7v]%i99,6A!?>Q@.dwmQG)V&d N' 5|sԢ[NWd2_>,rti'uG|*x-m՝;9b+A+TQIitdxL&bWfNd2F1A̞b;FO*8ZK-{x,d gR=e^X4HEakގ])Kǿ)cR?]Ro^k4.5d {FI+@D_NE~l=dbذy8҇}z{=v^K4֒k _I5UlBY{r2HRQZ"Ytۖ?,f쟰;Sp] +ҩAjYqmtuĭѨLĀNmG 9j ~y)8kX * GiԈS(-e 3xف (΂3=u^P=={b׿^cK@# 7c =]K:DwT1!{2,X!^Gc9#ژR>zA> MIB X'=091WX^5L61xN{0bDx/ra*{bHR=nAyX13M"I119uh뮾}]kpIq1 OYtb<~VenpD^I1v X=7H_44IMSQҽŖBt{ص:Фv.1uLuPHxjJȩ8ܙNtbϽߖhdіI\QƘӮ_~P˘ Vv;Ϡ]9﷼um-?рR J<x"D0ц%dM}ZK6Qu~˦q?O,;{=c?Dwݽ3f)o^X]؊z&.csi\TF|XelM ̆J.ěCɌ mKOM=dK = o#tĠ+ \Q@jPȗ rãޡF=Ns_-H/Qv%cţHŻTsQvSƙP 0|6uۖXN۴}Y_tndX{FNK,lXac;ob~y48֥v+aJu:=]ys:Q71Yʮ|h,9F*.پ2ְlpU]R!=h$?: iߨ/h(F^-{Wf Ep ӷ><ִn-0qV@4XnֹI$p-Al^,4I!2F'#$c9x%G(3J]uИncQj89thQ_5,|A?d6b-q1ą('䚏YA {ld3ܩ|*6 1Jj']t[9p|DڳcUxD4MW+&y}gٲkj98xt5GQÕAsUL1cJl?>oYewBRr=:"\5Qct592 (f95f<(a#{8bxg|b>z$[҃3 MNQg |Jydlf\&2K˴Ǿ@"͏. 
9G=y_*m^q:LT?I!C`zvG+db{/>[j OCO^\^d߅@,Ӈ$6<"\03Q1 oamydcws ܔ\QZ .`l1n$X%Xe%M_LyAE3jК]p=MR>0{]v@burWFG7yT*T2l2|řJkOcfwPY\?mB }G-۫Itڄ3^ KU)*ڑq94 #ɧO$@i/{XDtD9a5(Յ_w<`cY$Bj\ ӭEua<\RW IO>jMLz0\cxexX9c]]}w|}z\OMm/h1u0V2OgH?- EìqlgKޫ#㠹vdrWN@QMOs`C1u|ظK zV{K:?e>k 8(=uQAn}57ˇhc6bEOtЃF.bA) U:]WhP#ڒ:?l0V C"j;tw)6ی7 )g%폚弶a, l4{e:1- NƯV7`5J+tЃ4Y$Vb@_Ֆp;>j孂l|ZA1U iu5<2n9au9a=X}w}](S.Cᄆv1Ǯᄆ2|u/_e~'qh5YIKFyng3k-]*JM&#BZp ݷ_OK!szoF]]i]EF}ZR -8) $nxaY,9+%"W"h$zO>o;ԯDzyPOH??{.1ޖRk,TzGoN$AktpG{z-)Çۚa{co!5)݅uuR*iG\J5f@ W&߅9 b}Nn~%d¤zaa9{uڮd f C̝ 7ؘ16xܖ?ҏ|Öv wŝοUBV`G.e:f$Y7|_ق`8*d{EK+d~ޥl^i]mN&;"\nۛS*!% 0r6I79& ='ng>IBfi[kb6 E2Dxx'#> LFf]#ѿ~xA' St[e s y9M͈a*5GBq0lbjzQja- g٠j(/GINr:C(aϬ6N:4ɍBM mp%HJ"1lЇ֧ʸy "Qy*2Jc{fD?7&,MgyOPb{ *Q!&C =.n[&,<\A4[bg]l@woX],I}f:P\lbT%!8:sۻ;p"wKcU7՜3[e3;B#<'NXfXKBGo,߼H>|y 0͚.[|q_};%6)5籂Gm͠7Sx3+gM1|l,0vev5Dk?pM7}j6h/>+V,}r6t:ͭ,ă>歑MM!(#ho @d${lyane碾R\[jMUWf[Tմ\4W6%Ɩ7<1%Z1HX=nʃ╶݄Wj%gusPjЬ2RX. wor_1u-e(:ݭ}Gmfy4˨[6ƅ&⾖bB?-@A\3'ԝUqclh> cM+&<yZTdHc4٦9\K""q%` 9=k/ =Jcۃ%RM3~X99 [ˁrQyLO6k.Z~͟CC% oj@m؝fȷuRVNߌܴ \9oߕ "bq^VO߇\}MޣEtg݋ v2C6wݞ$utc 3ЇD9_J7gW 5'G}ǵvO6]ij kjK 716uk- Ymea-+fۖutɍ=761{|;%èCkgX&ƿVL]5dCrS8 ,ͱ[Ho#u:>6I [ex?ǖxu]]|9}޽/} lU7:V`OEV3at$qɜ.Xt 1q2IO[ٰsOwQg|R>b/zP2j9&O9q ݱX9p}AH`>ً?t].ڝaP)HY-oӾ'ҽ&MsM+ ع>O~ӺƠuc}!د`F;iD1 Tah@_G. Ul@>zSy^y#Xi9p9sK7\*4qBId >ɴX;k)zy^xms`0a!^N~"Els"F|-!0xI²v_=St5ri&€ڲQތ+v\zO5qV9-Arǯɿ-*tj/%ٙZlmti׍5vrCN$Pfh"=VL:#%T-JƮĘhj{7h_1BoGYM-0B@ƹD,GZ w ƵGX0֒"X"wϒŢ .6vuR_2!zkCWOrH8Hdn5)[Dx8%>W뮒hS@<έN ={*hRԒݍiC㐱Y3"; Ύ`>/&ř ̗{Yu nJu yr,peKf}8X]u J4#O+9gYW@-21l公`\K ֫<UZ&/W]m}JRɬΣpJ3V?,UImfb@(,XP";6dsq'ȟ!k6*l8Nvczs'ϒX8%ʞTqV 躢JHKi 9G7Řb K]c%4Dz1la0Ѹ"7iy[v:&5)}Fbя|wox夏{*{C{r%tOFy^x MqzHG#feV`G? ޿{PC n:†`݌ڸl,A$"+)ZMqtq:3)"7V޵5NK="j?a3#oGŝFމ7|<16);V"<(Y<nlTf Y"Ե7lJ11i 1Aj9dKOrS ,{!Cz p<*U1.DNjH79 g^B?t$x"y$ut˧!pUs_zI d ^LjtJ7nCCN#$MH(5zwB"ozY|[XoUi<56&f/49k(>Wi uϴܡ#`1Y=D孾vXkS>q vZ65p?Uoڵt,>B\!^H*(YdS, . 6EE $=k4|EjUn庪"yX_(8'(R6L8b>Ea1{. +2 qvˆnzA.hirޮGu\i)5KfXTCKR?P3 (.C5wqݭyu ^h?EK_Ɐ,GQxQ;`Ze\15y;.@ˇY=R=zx諂QQ  .nT?L/ۥ1>ѫ2I`%NB "b6:bge p87\\fQu`f"/5R"Q'NU]|-RY Z#nC_1edIAOm@*}ݡn3)t=8u^y'7S%[F?ֲ6qD^t0J=k)ȥ1,J&l){kj8#]S9lˈfqB?V"U݄A>eA6@-=Amj[uCYD Q}_JN"[LT|P o&D.7 ;{y5VSIKܚ:4V),O+e/JlK)CÎro2IN>i6R^!N>5[eiK4*?x.c8:ݣsv#/{% bl(GG򏎱O fFoEb( c3Xld ,[cPLG @6ˡSITArg/f6q[q>6,ĢCwhm*c>`%Ut?WVjWT`[OE ʳ~{`CI%JNL9+lvb\H pbb̤ 888kci`{%wI>\ iBNAʖ\2ȠWx?.׷hӄTK˓VR)B䓈},&B^dI&j(Y0Y g2Ea6Q)X)}r^+@ܭwN`ٍlddfvU9U†c:ɞ+F\b\:[>kd䴯}>Xͮ2`̖$ XˤUYbyQLj->#Wtht87gjN^ *">xFcM$# 2+/5!zS(Elu4@^ez7[_y)3?-ig?;d+G&P(h}R/Ix4!?<<חm؍&6^}oYn#M\6\Hb+3lpO .CvI膧A`mjCDtTTUXbp-Ze%@"'NيfCMw=uډ5#2mȔ(e./r(|1٦b)cJLˮ̝y@ MZ~z!xMV8G4[r8 T=%OBg rfC F1BN.ï>axR:rr 2B8H:ȑn pr,fcu+]5͡hX6dld]di18:Ez,8! $_7i|kug\p ΁luaT/Z}2OXtx6T"̱fN{Uh Vt)dci918&/uJkj a?w_m2ή`THؔP Q&`i/ȓ$41WTGs8]X^ue! (L|9$>l"s\΃ IY hL ,ʕ dYdV4JMOkK(=pUV+:# F; ra -=*ryD+c*Pq'iPu|E H9n}hѓV% ›lXME?" = D :W3ln{Xg98se]\p ɚ'1ٌաzP%DHU"ͻۭIPqB8t2`Nٻ !b)[FG(o_Q)9O;!}r&WY <,Z^ Ul XlD<̇8.VwA @ͧFvz#/Ml$le"̞Ǫ$~cW3 ߥN<#ӸXb".3Qp~wRiL.2냚c0K`Jqɉ1 /m`XiZTbbL6;yuy{ϴ;,d4V8.MP(5eL+RXd.wi" 6H I E`L`MMlߤbI& p8קY}b먓a7 ؙޏ!Qc#on Զ7n$] xW$`ްInҋᛕl zިĜbOz7Ώ>5ߪ? 
!𸐟;eמS?5Tr(w󺦉ɰҌ6y B+l;&bqdMK l%' hql-ˎĩZ<%%5.2Trtx \`lH T I4I|JO`tU1E,:myVie-$ V8A D6qCF,#LS&%v/4|a5)Nn~N5(YR6JȼJFT+@A\ K"ݪF.]ToZ kb2yRg\D~.x2@GDbUgnbtX7`+E+#2ꇛ#P鞐HnN>;jDC6YpXCQӝ8&`,+NLݸ_bl "a0x697ᑅ#8+D 0䖓"Gd;Ya ,A׏O~|3xZG^USW3GOfռ$/3#<*-7d̀C̘w©5w.HUJm#G4#ow޹6F5q=-$wT 2N<ȠOv%OȚDȲ;m%&yݴz#l6%o̕_r^d8bTfph3zG3'YůkT ]Ura6n-8Tevjt`y [T>CYFH`45.2m÷ ]]Zmc ͓["Ӓ]UhG,C/PBZ 2DCzAy,4Oowm hDȒ `W( "Q‡~(7槝FF[',=Qwsn6L J⢫>=':~dܕ; ,2xS놎nLOujbdkzvYVۊVQ}97:VI%@9ȕV1yTHNB!_k,PGg{vl%€"!d޼ ük=lR㿓3rL>/SyK-m#nڭEڻAfV9H 3ADp8j90q\b>PVwOReR@aE e|p |]l[;KJcsԚ*m9!,J"\RuN 3ćC7HGSE>RKoӲdf>]9>ij,g hS@38UM! ]#5#$9#4jʸ@tvk\o\ݗM= z/BrÕ%PRoj@d`9, >}L0{o3M5xK.(itTf1FFU0UN2=,ǾXAnߺ?; m3܊`q$YWnʅErVLA">dž~Ua ;vƆߙ &9–+Z꟝JIrZS 3f](lR9]P 7hR&)Ɯaו&QzqOlg|֒L:vd.kT[zez/aB ^ۙi\aS nXSC$$*M_ *q·-+ʹ!@4ٶnHPdą8PE e H\T *Al۬,Kev`;e۵)aiU(ӏE{|љL2\Θ(]K^9 ='ji͗1av6Ԕ ʿvHF}1=Eb}0t}7Idu jD^UVjir сqd_$۬[qirt +)uc)u|,m؞&N=-_ce$iU q6Je|

F+ < l_yƫлw`Ρ&4 k|נG43K05,B(m<Xf+.9lJ n@};x&X4yË.=9KϮxe>ˎwtD#ϢOF*LzK蚆j7ec"xReRwJUʟ$兔yyuP.cYN(pH܉!Y2Bd<`I>w)9)R)qqpKuwdAFB ዌ$OFI=H{vEt)8nj}QgwaPח2f2#t9Htp(den&FN;jK_?єi?MvNFgyͼb;EOA/j&sXlmd*:_\"$;Y//%;/Zʂ-$Do9K`ɏB`˼z+՟0%S"VNS[ owOx˰Z4_c9_hef]~X,pg |Z #ţu%rYھ)/dQ VF@ te&Q-ڡX4ox $2|EDj}ҽ!l}ٚs%AWSiKD%& `al:.2)Qf,ra^[6Xްݤ8ZX.ExRW\zHX ^sl;׃]Km鿩_CoDM15QVȃG Hh)bS2ќ1l_7Z[0rC捇pT]Lbء62W2cdFd&4'ѹ~uuC۷Dof3'$ftLljty͇),Q(T]7pAxa56Y7/A})ϑ;էOڦiɯRfPIC78/[".{i*'gXrl3j*O7z_R>{ӶARYMk3SNla(l΍?UJdbe,$ZabX3yoSĵ${@6QaCUd6;1|㣎e 9 $_DG: ;@+zZJΣa&҆Kdl-C.611ad4vUO mޯ$=V諸 xdipNSQf%8`(\AFyyzY+r}lRwa'׺M9= aQDZ@!/\f'[(3Yߜ56^u@/5QeR(᪸A@˸M1]Sw 6BYOOY\o fy&*Uܢ]}fY)XuYgye˾9m_ov՟[9\r^ÆV\MAʓCX#)44M)rk@0wUY5}e)gul-]h½YYk8+`q8pfl髕Zb)B֭;_=жAE$TRB"ـ#/ d Ɍ dܩ AڬF7oԖ.%֬qu]k65d6?'/ so9;nPh0Pǭ dX욁=GGtpLvhj<̙$#y33"r/BFXZdd*W8$.QI\5ݯԫ Ly@PL 3M_~[xnvL D٘ۄp~žja}ußNOV0ՎR ^ʆcu*Հݶ hdMWD'k۱z͍U~y&!ت^EB$Ţ\\p Q1`(@ӶzEFL(ښzH@4BU(EYN\B¦VTyGh8m}vΒY_#}R{+^uׯz7eAƨH+h4.5\@w(-} ѳrOy]y^ms;R*&ĉS3A:؃ 8VJ&0ծ6-"jU@i>jgV QI^U8Db/iML,"C)<hb b4 4쏩wj ¦M*K1LeQZN!TRp-H; $t%+L3kJC|+Cvs_M# %m5 D$#1֚nX&)j᎖hdj˿޾lL j=݋NkY 7) R$x)"&[1HOt7I])Soh8ڨg$@1ӫ)Yrq9eA~p.`4'1fC0-d홍2ׯ>u6" 'tKOo.\ގTQ9{C둒%#&JȊ!czȃκrB_kSF 8ЉLs]6-)57\29J$8ӦQgdK(/*,u3UL8B[w{o.讃ynq@z)Bx9ܰ÷ ԤQF߱ ̂06S kЯ/տ: =a_:w=֭yO>}:q]uxO8۞z묯ƽىk|\9&FfiNݻzFTljn$EJf,1bnOlۃpns_:#{\U1"#LT3H80V^K:ʧ!*O߇ heة/NgݤvNߏ.URS~w$/=?k߃?wˀ|&}?;/}K7⿝m0m F ؎V\s W=V,eFI5̬{mK{Iޢþ#.Bsj+/߶ucj888888888888888888888888888888 ٞl4>.5s2i]lƣN; 5kf].QUO+ ~yuV_] {3YmXYl%$HwG0{-޹O:p'wp PzO^8%Me~ZckVb?қ}5u2ߔ`@)>3!OR.S ~ S|q1WKοU}h؊;q˶$ piN0j^`&bpLؑf糗(Fxoa6~zms_Ue{hQ, I5U*n RC>` T!qfI*]2.s69wp E?C|<ęjɽu[ҹ`"g^R,t4Ge1=3\|Xn $xce]7Vg{; {TW2fja[ -11.>EnKE@\}n+|>:m2p1?K5˵'{5gMA@mh lv|mw*bKU'.^q`1.,RVY%oT;tS2_r̲)-0V FD#dbdb܋)r01.I})>koVp4D,ZE.L%Iy-iHǖHBlУ-\ ':O0M|ͮk98g5dtI?7oMiF'A=+ִprDPdZ':6Aj[KOHclQ:|7Njr\PdG6hQh&cf5  59Gw>٣/D<Q0gcw^ʥh b r*YbV AF͐FK[&J!]]>쭁qYƙ 8~=B'Y$@WԔO4uhx8ԄcF%i!jEONw^@fwR)-Ϋ*[z#lA]̽& Ab0ݐvaH^ tg  ]0P:Q~b˦ӝL.)FKb t`!BB]&:DK0\\ʢHuߝE쇡T`U5 [VE'XCIN̈́Ż1ݳq8A$b6.;^E#_=MHuykaC6{\0j# <(e pma:"*=P2٬z￾JG5L+ CUk79&%03h%($Nz,64:@P6'pqqqpqqqpqqqpqqqpqqqpqqqpqqqpq#FMoӭxhf%6޷ pl:(&۫'ىmckC"k|XΥ(qdfH щ?I$SJ#}HM>l4V7ҍj F=صh{*eg2ɻI;`xH -D(qlTxܪFW?I5m>չځ7Sұ"lcO%?.Mf!pfRD(}M6_S2<=}ۆ YUd9 neW~1 PB"D:)Kܐ(4H[bL>I3toRۉs&WBecCH18RK2BP x hQ~9'ueo<љJނFA-5GnW fNfTCʳ\l|L%29cGx߆ն*^Gh{ Չ GKD[lMKEQGOUCUr1+ڙdx+zEUy*so뫖[aʏ}#0س6CƠIq3AtE͵iNC}oKw΅iOW5]Aʭf Т1J(+1MI)]( 3L&lcM(%ڙ?T6>{l*IX#,Hq-U# ~Ȼ|j:3YDpQjkO43H4~J~!@U/j@̑DQMdaY՝~KGQ9X!@_կ.6z":t%U) vĺ4YM#8a!=!VA_,DFtjlVM 2ueak#b@+"&7"&4ptfrJS[{xCF6xF^YV5 9i4E3 ^Iqdo/SiV>>v7"H`a̛,$ƦP-M$*Œk\U S >DZ[P/DI\cl2 NS=a2bW•&.d|8$\ߪo u}mjC\u` wkp 8Tqh/(6N%IIy}̿?vK%Yvlƭ)ܻcUAڶg""͉L#-K݉&mz,rPs k_g_B7e[4@֗6X)2„LDB}2Rx}vZ'#y[;ꇨ+V:ERdN" % yk'&S3#l \8E؁9o ^mϛv|lJ) q-7 !s9dTT6N@/:`H#~^81B!Dd,n#&MϒjғfԍP[5ʼn&V27f*>f$'Xf7_dT81ا:cg5 R3v|&&$!h a61Qxh],x;w]zy"ߝ̮=j+] ёX ([h!Aq'DI 9 me-m|d<~ݝs#}pz'b`>@r3 ''b5"p2lC~΅;wэmEܩ7ͽ/4((yEy^JHAnGBI؄ z &niLh쀜1,}џ3@D*~뽛m&BQ61P)@T~ؼ(~cf,o>J|5{V)s y1 ;/!g2pJ H6hN0 ԻF|R_VW)x*ꗴfibRq;}Ԗ4|7\FS3g XtB { R-ha{Urd3hrw\;kff2^&B׻*k~S0͂%jQhY?bH[DIhXdZ\_NU S ۙ}ظ6S74"w [` R&I5"eR @މ&wcdҘ9TZ(BW9ۍۿQEm+Sѫ)oڎ"'||QlC.l,)4ny"'ʾ4B59^^^֪ms+ZZ& ;s JM2V`qq18{xzG #0&ʍ$cFRՑm^VػlԳd~ݻqpN0M|zWc42ǽ[^ދàZz)mB3iLĖɄK(UR tŃv_rݓMv㵓lգF䳇.\-pUW<I<2< ]uکƝJj&~qV=v t<" 3 4yWnVsx^viZޅn1 ꠬%uA ~ՃmkTi8LLȲj?Ќ#]!'3 |fI~]*u~U$‚wbfEgB;(fJIn8Kd,(=tMZoH\,7I2X+T&A: Mضtu϶-}aMfLc/ :q:oV!l *؍ ~_AH1F$ a-bqs'y|CZ״Ծ6[Q{c\'cZ!fe3&*'t>ʵ,wzVä"Rh0\~K%HA<˄8!%ſn7z 6rK%2zj^P[M[0l yժq4?XLKY4p.6ea i#k%@ɻvj2yZMqc'>Yc-B9=^ zƩ`r"eLG!Iuopb92%nbId\HIdv=+q=>}}Ķ%}*JT{^5MDc bg;`\Br5q[, aꝯ \iq Mgu^4^fa(fr٨jl9g(/8QܰDRBzq(/o)cufUc}cjlkhlb>6kULiAR\X~P& K~s-:W{Ŧ^Ć;lU7Ƶ=}Bj@,' fq+),@QŽ*QtX7z5 
jCW9bs5% =1-XI)jXȊMzI4п"Ly5kD"zI,J#2|KRb XUN:`tl8U\}]XLjؘ<+vv .AN˱PQabC~Mܣ)C-ڗy Rh[gVle`M@B[DP9f H@*rLCX6tX:n?^]aF=cOU؉a)D^;QB\q# 9mhé0[d+4Nyuŗ).|6h ڸşjBvfowNck(${)Bc3Ɍj9Ҫò g_Pgp#wtCE*&kBB+vhQQj .mϸxRZɓXOa>9#Gʣ$>cdaڽuba}Lq,qS8OYu_>﾿(;mȞiwyun 4-:X_H3"dSM/"019v-RI LGa"̓As&5דxջGTnzYfb/mxLņ^ "YV&p7Sl}e:[ Y!ӗ2#m៲ ^|l k yB@wŃ0U]@Υ8K`(/p[KAP537>SͰN=(+_aM^">Sǥ'DT|f2xufXpSyŤ\yֲ"SD-n,3zxsڲ9*B#n?Bå#\D1P6E2G-NL̥S/ݡ$@ .бҭɉ"!ɱ0`:L kx׮.^dz:'CEagYzڎBbZJ)ˢPMq11O](V$bI]`52OR>=kc붧԰y%_=A\  I-$$$SY2{;-YYX7[RїЩ,2)(s<<xnpq!B{b<9 ċ,?fkM`zz2> ofK- `X~7cR#"RY 리|ӖY<2|pq O},>C/4'bYӵ˛BbV\*a\ϣI^A Mi4JT%)|5?v0w?mrY~皦銫^X9H㕍[ nJ auB?l(z}FK 4S'x}c򖕕~N 4םܛ!jɌ* bҹf`hYU3&֙򲓦QS z.#Pb.I&CN<% aXK*EPc-C~a R@3.ի۳bM<^"ffR>,H沙,={*(Z|'anoˣH¶޽HɁ;?-$3 i4n٨Μ4EnUI2U e5Ԭc7=e:ԈZ[AdQ McLF;(ɊŽ7ع#f:X,S*f}+^UriդrdXcr8 hG$,Ïy7g('v>wRHkZL[3h_ӧG '0<,U趸f(3OU0+3ǭnwbevU Uws[uJgZKXrJPUzE&q&|oL9|4˯/}rU 5`2{ƕe!Sچc͠,%c U.lQK;U¹ <{__mҚƪ8d雂C!0 Nb?E~(NLqˬϾ;j6_nyWEE.V,%?4Ur8-Y[P !?4-][]&Yu Huc[FDB4 kQxtz?l ջ-S&ݲ ]a]uuXuSDl ԬĤܤRW,E +!rͷ5 ` CWNT/I8zE]ifyI\uiBx|>qhN!DbSK@dV-#rH6 50b} <7ıG0#_i-`SX)C Q rk(*H]}gK]~niIgm:Ըɽw\+%$汵 @:1~m6$CG-]/S'/WT4(JZW#_E؉')xY"RCtdEI 4RW<2kEy-XR[B d:=O\:jr>q ^1h[..Ze=ԭşs[O ]ցPxe-)$AHCbUE^XYH6['_#5܆],belm 6^nca4=䉨2f# .c-Kw}km[yŋ̀c6Y3K At\["v>hFʫP='*dPk(f@AFH$CJ>Jl k)|08d(l9̮˛h%egRQ@- Ɍ:>~@E~aҤJz싵r=w=;owB\8e d RLVw -dPB6"K.XJſ#1_ü;6.ee<^B%M6]&0[![D唷!;_3[5uI༭x$&Tؼ:/fuP`k~U6\JȘKZa(r XQQx9c2iDELӱB}_-Xdk r [)Ƀ<Ǭ] =6~GWC>.XU]x=Pt $!(W0#3_.?08,8Hlœ櫴zջ.q 41H$R*Fp) ΛsEOȁ7]8{H\74jϘ>lOY9K:jEg炨.yYye]wntTmz!^USy5҈H:#`x%"۬::-Fxy$իf-dl٠Vv[7On$A,0IROpqǮ'>)кuAzӫ.ɓT ;iy ͸aGs'XF1;/r4Puڮa ݋ëk\5Ճlk ۗiaԩ@4۱&EmNv8x󼐵Ro8+M[JK##Y7(`H oͫQxe7 񪈹A%p::}>Z:+<]&x.5y`4U"d΍7 eopQoI(nҎVU|Z;SSYGMp_86qE,=&N1E^OI.<:'݅)]oxdrԲ5[prrdEAw' <5Q>)y2O J&6krUQAH^JA  8&6eOY*3~c-˽INO^թ οU7-}IʠLTH)ظɳPxl_7l;nɁ?l{o9,ZjVL:~)XNa`9c&V8+tg%!F\iVH!0]>xD庘Wx*ke(w.99cw|ҭuW5DrÊ@O]Ä~;cI4k3R5KX֍ <GAvͷґS!{6aJQF* }J )DrwwXrA@,?,V:IÎ EopoQ?(Hءk:z]UZ zng 0tgX</NI]䒸ycZ*v*h-u>- Zb f8% [ 1avS$ 0IlMghgj$̌4?pVyiY}IWvb&aOMYmtڜKY/;RK Kc^AܸK-cX]q]l|?WmZG2Mfo?/2 V_n \\'nQ1g˦+/ bblݭ׭yBԺ}3Ix] 1s1`!*IH+.yHL)W4Ġ7Q{ N+ G]tjMs[]5q8I5:fIXJۇ9)WעT:GV($=QHP0Ґ1sFGKj 챜2:pZC(]zO<:jθ :f*-cDT\| G>C2Yvit[ =02Q;˱H[h~^sw)Kˤ1"FQ2_'uq F& RnDv&Rkem8yyK6l<&x@k3E=gkm`#eK͝ꎫT%Fc;9Tӓ똗WMZT͜%IL;YdשMI3]&Z _DL`R>N|0))"Y7k&5k F|b^YP7(MV}-zr^ocԀIxp_6ܻq2M[ZQ[3qԖKc}ָvFa*a$NJJkz1Z8ȇ Gzc$v-܌UV@vno8'N"=dI +*6}M6HcK$uI ,QGto#57Xjj@LdRhw&Ft7'64YsI RYD̙$㲸,ü)8a /@ư ,!BbȲz< dT4rKYGTKBtALJXyYr;w"53+ipg ٘nW . 
nx8P.-ѭ{\#a*inK+^~\b å5ЊpgS& $ :ri鲺; m^m-X"U V:MtzlhWf*]v}cUYQM<Ȥ 1pR<ߋ3puj$SX鄶 0sLKip]/\xrV_YD* I4{Z/4H﯁G5ukAiϪZJYRQ!<Sy.T<GoIq讑:*OTfϜЃȤ]&n'}82I{9EJ TCt.Tm覦=[c5S[6 kugzBu *aJfM1zdm7z0 U ba&ݗ%۠;08_TDu;hU3laE]O2'[?er{ֿ9Wenrg'[na#ʥQ0+Te fKTGp?wƦNjMz0th螲ܱy:a/ka"Sd\J ‘CNb|1VJ*AEz>u `m x5KKY JwjI&p8# X2C2 d*B=._4ɿZ2ۢՌҞkmOƿ)ЪXpG2 ~S$B,x{3U.⯜gfv1lɉqeQƯgL>XkMwl=널p x-j?޺\0^3)5f|l"G,bLf`]> +r_#UybӧI=Q'w3UuFqU B&!g!yl?v@.at|#$,p _殂҆* Iet~s&PE/+5id H ,&fMGf>Fc(G o' 6ȔV9 bDL$ɂG(Ցǎwggog_ l*lD1&2A22@̟NȈ6圏* -ߍ$Æo*g?7a^w.Ɣjth[SMk+>KpLCĥ]9S>608{<>vhVDﯔ b7^iEI| E9|-nA~c(A8cҨ }J`l43H}w{€u8Sc饋e(npUܼ@) ](aP9<Uǽ_8`M .&c6I&vYm"8wy8uYeϾePwYuWf7 bL"fE\7 o8Uvmp(5Yh4ŝt glQxit2YmY8!^xW]qmfun}E؟8Ŏ0ddX*FE`G#ffߤ߷ï7So4AdX*ڇaw.9cw|пX .K֒jUQ-դID SuթJ!@rwg8'@f,;5ʍ4=v{F.[S1x~g 5 (7}۱%e8<U;' $4 /_k@G@@ >PmVS%VŮ*Q琙er`Q?YdS5~%;{>,q}D*,]#pty,F#AZ@0Sٍ$j4O1Ǯkj@[=/ X'p Ȅbllpq%}94Hl|#7WDcLs㺽\I4MeL qy9ǃŒ>r͒"ѻjf}c6x88q^^)TfD ~xd] 78,(7ojǢpo\iӵG|A|J.*5Ũ\p֢:+hJk`UĦ𒴝O`^`.=aڰs!]/f8oֱ^Ռk1Kuy5 67W졫pcsD-Mh Gfi@,֚^k{I7*E٬;IR֝%JV$, jbS"옌@q$Jm܋m(~Wz+#vlJހ'Og1LsèR.CK2ntwZRIm +9XE@žm2%jP2Z6 1ĔllJUnDѫQxPC-ݤ&}\ZՍ~L8ʮ>B'9J#Ľvg?~)vO]u0 7+I)*ĖqtN [g$/lq1 fGBc9,t=cӮc;I|;t.Y-E;O^"88 !̝0` ؍=.V*ac}7_>bQZqwnN`4 Kʲ;Uά̵zTf75Z4C@;:ӏ իޡ0ku1קnzVB $$,.p.n8;o߲]PM(uw2{j3Z*O㒽* hD>All8dHɡąz.S;tNTjRSG# ':|b[7%:'ti4GY}}fZWښmΗܚ/kOP3@7.En#XvQGAx%RѼdc`T׳JvzZ,e1_S|a!3i3ɸr9Gy;؉+Nqlx)ya9;7[#I qD|eta퓂Ne5R\vz6cθ U$nZ받۹X2 YcͩXBAk&U1o6Qe\ a)9>r*wkX&mfbHӵZƪ;O/NR1f)8Ć& %e )b`h7ޓE7nUstg\Z(rC W4p)0֝D]3Q\ "Vfuo\5>z[u5am׺vJcpjqV7( Y*fM bG(Ϧ^B.gۇ(n֔NJ?]Ӟ]ٟw`5ڗځ^twcq$Wt|7)8j'm4#l)%{Z腚ٵC˒^Q=,_"Hm&(,rbHby;`i@̍z-TY7ϴwLb@y bd?Aɵ?~,nU3`7#1dDWd?1}e=>oAzBYS(e7oŕZ&ƃ2z.Pv=ĐJ$:G c!v+ߨ4i>,D,6E[ԥ]vcd$YG/?QĆJBeaт7 C^7pHiUJ]aoE-􅌮g"sY-?996D6B ]@_v^,?v=b:]v}w-q[EkWxUqGRۚ:f0ފ2.!dqF# {" c8My/4ݢV.WJҖlQ8} >W&)K*}r9$ (pۉF^㑷g W^ڋֹXbD'R]3GCc(}Ԧpw;lc*D9wAE#~Y,Bʦh{^De:(>#D}T%k'7:|II复*rBRw8501`1iIbH`XEU9mQyRRRJ6nhbف.~"v QGh0ʼndؾd1ΗI-5s=",om%=];'g*-Z۪HbPl$sdELDbRƀ! ~(W=: %R\bUNrAȬVcҨni i&}Sm~7.z!Zպ+F O6$H =IbI5͚n,|5鵽/_Tk|/k/3 Tj5*&d#D~Ul-1^>qڶY\2#8>־k^ 3#vَ+@ [u͙l/2%ם'dҷ4y Z`Bd!FH"L$Q>d?'8`hbOo:j`iaUe=yYNIW(&yFDr 1~j\[*229ƶ:;"M=Dh`@B :&`b.[,{gpV*hS%L4'^dRى2DG7$Ȥ$\6d%&kep~i{%Mi*-k}v+LZX7w LVQ\* o8d"mT< 7O5C*MN&\Rhb:lj""e5=AwmWO4WE50*d[65p©l"ˬ}&(,UL<ᄎn6ŭ,ȰdeSY,I epDGy=ǵ[.S.v8aY/6tKOQB9.hۺ]E3$_ C 6'9@Or^mI`/XGI9 $MoF Q]d]j%YY>zo ATuT$IF?a$$Mk=-rπ툅bo#SF~ `?]A+|f3K¿2P98_>P08I+%P|^Qۯ>N*V٣EJoR+aSJ"8[2 ᇊ6-ErA~haG?^Te[3z[a}S/GD Gě}z=;ha\D8 Ku//<@%q(׻7N6^6zbhȚNN3ƔϏD+.QĀccWǗhe~U`4Ye^t2(สʡ`%q26lPG 'P\hK<%SÂl vUy hYw']wz3en{*+),g Y15SQ<,3Ǽrw/D%eV%VES;ө3fEP ˘̲Cȏ|.y7H9G<|;_=IrD[k-`[_`B'c-9\V,6ȏmB Ɇ Mhϭw9j46WK>JZDdLYAǹ'`$&*/&Kڮ}KUOq0$Lw)ʁCcUSM'OƌAE1MLWX)=w9w֊K"Ӹ9KCJGHcrv% 4~ӆRW<2.I|N}9%1&AkjbR RCadLFPGr҅TţIkPӮVm?VJ⠣6.)Mv%)v<@tsut_l`wE)8&WWm4L%!8hH9IH-.8ؖAE"0(=D{0rUn*[ i]'tuPŷ fUuO^J76hM" =E(bNB*p@ yI׫^FH&U&] )+ 9x(Р[LY/)M ՗F0fU0l~4V˘k[`.$&8:m+as^jP5%vlPqij + fax[`68'h&Yْ xcxߧ. 
󇳰;F`$Wp|⭚Ƭ-?N3ɖ nO5L:W{@[h@/LqլOP %إ5P$Q4y-Vf2lehktruwϮs&JzFm*H?Qs @uH V qqŚ;"ԈĦ0`zK8xW0 rS6DE?ljID@}i$FTx(xSf8]# $|Ip!㘷]kٲ*e'^+Kf6LD5Y]~s[@yYRҊ*ȴ'11Jb?"'FxIo0edͫ\;q'=c]s4G7z5枤jM-YGQǠ3(aaVǜJM{|ڟ6hn 쪩X2 >gMȒX&BVD%y2kMgG2u)I݆#))Yw1N2f:@N.0dU:qH =ADtbZ=XW[C] 7 #yDBڵ@lz%&(b  0>Ǭc9o}YuJwx)N,_`BK=E?&½ȊX.9ϱ='GسtD(~!oi8n\#9vR˺*gBӶ`.&pS?`9oلVGOіn{" [=C{q_SĶ6gwYutqK`TJjlɳtv}6]-d' R6@ 0"ң SPq8J#fĩPsUF9mnn.xssOWh-{ mr+|J^e XFDX/` 21pAv=2ݲi6}'׿Q]%lZTj@ʼn$b 4Y<*(A`=zU6jrUƇ_P|bs@^L~61ih| DNIHwj7DΘ|FPɵf@ϝ!vuggDWJKq'oC_\9#xECVd!Lbb.^;+NɣfxjEQhNv 5@k3Otj=hO`E72G;ERY>p՜QƊ͵yh}jڽR km`vGO,fAK>?mUŻzuJX!HrTxtK}ƬwT)*4,8HH$ѳ vE@Iɓ-ͧ5YV$\sοH?J>`35m#K-.R=k|)6 -C$ƈ0|e6s}I "!`UDgga$b)Ş)caⅳtҽx}Y|8*|I$^Wz7oh_#nImz 8 H]ƲYNL+aG$$HfM4+K5IDӊg^4i[14b>f4q7h2D@4U[{RlQQ2K¬j݃k!@1}>@ fa MrhPkDKiui#%A&q.b-abȉ&,/lsO88s}#s7AZtC VE<56+K8]E]N5US5,.oi3=ϴ y)1җV&6Ԯ$fnWpu2Y Αo .jb| L!0~dalyy|}O\)A~zՏ\Z[Fȥ" DY:\r d5%{^?5W^0D?g˓C-zqlUw=țǂ>2b$Y#4[)2KQga24h۶nΝ8|(&r0,F-9tI㌄OKz|(M!n+VE5d|;jj8yIt۸ez&FD5E}Aٝ_kmXQ9NU#+*NbS@#$W,LFI͋){Ibү!w|a&obDv h}Bm偭%a@`NXSSJ 8_n3Lhm>3tt>kMln?[Ɣ Ƞmү Yl̞% X8XRW~;[NژOZ nz,PF*ɩoJçYi{].lM9.CDu/ej^fՓX2e1rTɪeɴ>,"\`*`Ui |Zf#wڂ RF' a&BvDXX4W 05cOKᬿkUZw_ۚ MՋxh_R "e08;E{|}Y+gE\siMyjDYpymu*d$kԧtY 3'Jdp |JnŶmmvFv9w)˿v=k9-,-=Y?f ؙ،/w.!e.u^Air©&(E|~]E<Ԕm0P?fqH$vllb`òN)Y!4z僎4ळm=v jGfeO.Sꦖ]/JkհnaGݮ`c#, na;7 WV^OOb"6\EW.K,qt(46 ,Z/bQJ'ʛ.pv)QhqOp<h1fݍߐI |}UBnåܢ˷hgn9pu{=R7+boF1@"+ ]jȔ32NFjX+#b5 b,d"TVUS*W>09X2Hl]3X}oJ:8pzg.T!"'adsEOlyԖ#&tO\TZQFiwMePUڸ]g._Q_?;}5WϽ g?p?VLWf<@X 5:ztubq `Sςl8{c8o1'VJD Vqvfͯ~٣f3 HT~(8 cdշYttp«1}ngҹԛ//t-xڑՎ7²~$s:$dwn3eFþ]U=/nfȷqWݕ`Ǣ/I|މw:GnEE|AWe1@zž j5&D6a7ђ[!RX+'_.,}LIWOHHVi>L`a"Njf#5!mT*WBᏃx$l=0D4ڟgMmngr!ܚ>K#2)7LV?~t[d{`7]W#Q,Үl:P=e܁D3 M7?@XՇJnAZ(Ԍ'7Bo_*LIbi~rn]+k#>9g:6y`X"cCF۵L8 ,XY%Jnl&fǫMm,%~1I7RImm_M6W8(S9SnZj0~&/>i,y7W-tG6 _+.pج 89t0`aFa5f(aH$ƬQ>U{f8ڻ&KbY)oP7 EdX,$_(tqO?UݱCt7_ Js:L˲gnޓ~4m$ţ)t0ԉ^E|iGغsWA^>Z%QUAL(FeҎz0o$cK57ϘܹAyPws)=lN?T&2 u,'fPI,8?}/3L-L-gQ5}^tF6 qDE^#))CP(VbR:뮺뮺뮺]~u_uMӳV߲RܖȢ`eKj:.㥇2Oq=gÉZo=,h}OKl̽9M@FÅ$WbٴM&tPɤPB |޾yezHUڼLQn.E-PԮ`gZ4&`\]S9Ge 1MTl؂KZtn8aEGl1abGQhhٳl8tSK z̲|7X*G_6Zez8Z1mg@1 " |1 A#/$y=e4(='Kد 魎Hl,_^zɖJzlq䅽fa) 1~Wit- |w^OdtX^ "鯠"n(o,pX˼r&jL:|+̪?Yɦj^٪avx:m =dD7düä4jr) > ¥kحu\71/NQTʾSf-p1~#JdG',C̀f 3 {Qmrژb1Э׽qoB=vdSyJT ) 1[RRtN0,PK DLZjykQ>Ǣ5RFjdZb,5%%9NgTF:qj.\Myp)Aųj8h h{H`ٛ&M Z4nh6nx$)xc8NzYѺֳmo&m|4?эrP9~zD|Ju|yri2;Ei'ƭ7TE& ">xy M,ܟhnsٹ/20Б=j|CR&1 t=<'tfDr2O!'hqr6:p;޽,}Ջ[RT 1C3Q3D$`K1QwӤ3楊oQ jz eͱZ޺_}Q ']G!&4/k88jx!{kJ-[qzA7H,kޞo% >phdz+*Xǎ/ݰ.yR폰a-]m#6,½%u2gL^.$PQC:Kήo QmO>dpaTF; >PNepyFdrE05K&JⷤZ?&aDzIםLegL2d,JrHX\$MډE T$$xJ_~aAƦ'64/[ų;5eUgQ«QdMrI9)@$ߗtB@P揹a7u];+G,[q܎.ua3|,0E@](z;&K>᫧ KStHm$n΁}f }_F*°'F `dkD?9)ܻaZ;5kҔ  aCtG'HuOok]'pugzOx+CvZՐ8"{&U)l X=@Xvrk6v{s I[nEXw1X\^y6P[ T_˷c+$&1G ۆ`LS8>~`AtDzy`{A["fȤݻtAt$PE,:M$QI>M$pM<1 0Ǭq뮺v񯵞+Z◯evLȆ='Aă0(%\FpTCZ㛗Ia 8Ň7?͋m&+n5lVVqBR`OfU:DtaTCu1^lzk*y)ml)|$rIe *䫸 ߎ1r2V&[g_;0b&\AAz OFǢA GAG&"D0f}x7n=84ikY?&>p\5AiNGEG6S>_Y]mCl%=F `G¼ӓ}厽ԇS&lsQM>evͻ{xD@~:ϥ{VaKRkhR} hҩ\7Wi$ fA"INdp!Qa6e*'/o:Mh˙jkxE>if<5NZ}gXGGgc#j)&dٻAzdۚJ* 5#'PݍTdnZ-Obcp];??cD+-zvx.XllRm01FVQ=b #͐bo^ )x![oH]Iw]~O0eWquڊg}acxQ# jŊ,{D3o 4AD[&y,a⤝k"J*ª4;7X|sگ-Ol6lbo~Z$r4<$= dmAgJߟ[KZP|VGC"pȣ1Or)xYیle%@~oHIbpRv-i|AT*PC}`i$Ǯj㧚0i]%~}wm Gccm#ìwF&dHAšU,74rG?n|pqqxVݪV,5w +5M~ED K6VNˬ;|s5~r폻@n+ͳc4ϣv  aOd5tUwe>Mtaš `c;C4Gh4QfRRoa9a9LM8.$syX`(~n嚩=vGf_5]g"0ô}#˵ t칗l\YFi4呙djtG<.H hktиgOG%:5:EO>/]wr =3}MD6DGu;YbU )bN[[[Kq5m2$";!4f,Kd! 
Ş\d4"=]WJ~&[|#.}^mjI/:ǾK~x<$3<Ҳ8tLԾ}63Z1x ɤhXVp𛤚~y¸bqqԺ5Ag.VIf( ( 8v,I$9fayeX|Ћ[U͂B<>7W?ao_ߗq"Dx ~XxH2`ţ|;QwO9&훢9()Iyg8|+ZyXY0]1,1 {ju2+LjII^n4ﴔY}d۟ܫZE{&ȡBbnS@xÎ-Fc=I4$8s{{F(o5?sZA(+I\=02qݾt6I&ZJ(lhҭ |I?:aH?:6Sa*ɲHI$nm* ܤo) bQ!Uk9K2Ɗhf9oGxނ^EyZ/i=>a+h3SUЗʅ%.|qݩܔ N A~Qr8)Y~yauߵ~jNU*ґ&ʩ IpL!y-5 z i0cuMqqpqqqpqqqpqqqpqqqpqqrAc-xϩIC61O?6S祕w ~ٜIjuۇDk>#8VxӦgSa8[.]˒mo5moi021eÅ0r=9cDJbvQbW-pqqqpqqqp1Μ/?g6gPQi͑z=b㾑|-Ȱ7d_z-qﬕ]Zj7ھmn%YܷKwoezd!BDޔ6"ejY1C ;7 g`#bR" \srx spotђ.;珲:Vü} ;F?t]H QtGfSJ{L[bWʻVt(c51XZ[&gfdǬz}uڿ.D+-5)Ki.6{KZ Yv BFfcP)R7:+}/o\|֜y g>[8~krTnbE ,%:i!mD`Qnpt6^ooM5|znaz^;@ޮM6,T{ꁎ;`KSlIӾ%gR1ْ桃~-}};Ћw V@׫|j0z#XlA!1PN;WU霋xH,Y2Y[p/˜X|M+ò ܛWib1Xe M2pS;xJ{e:e/O<t=l_]eF35| {;s_kYfY)1qȌ l =c87d,n:N3QXQ`q+Νs#7~q)id^!ry{0X A=]D#Waf"=MHukѓ ǚGu,hLN,HJ@'^Y 5Cbh}~ơf-u-; ^Bm֫/-:|R0O-,A`%5 .gQ}.aC0\׏-EmKedsҗ3`Scd>V}zg$TL٣#ȁ ?Oq"Joe+H_>S=X6qY cR'3;[bN{,dۊǙYv%>FN#՜'%GFAC>!=fϧ26y4Д;3f׮ŽGEE-YwrԔ=tտ.vjժ[BE `>%l$N!He:G/-G,0_5<ߠbVxƪwaZǠ> ˮ;=ocF<dg-"q"aWnl}t8'zA9R]&Xy~;ͻ<3!ɏfcD/Taxkfo?*-EN߻ȕYG$cd~mʫ-ӒUj$JeGDE5g_u/_#vr8cҸ޾&A{$ vʉnvԄ e]x)oP7,j)X3Uio[`kMQ*d٫Ӊ\Y}})۪k;WaL#VLUY ?Z6C?s@vQ!UNM+JJ Q7nNY#?HW9&E%3|:_,5҅ݔU]*h Ӳ6rYbRJv۔2U5$Ac^G}}Nu-jaTDyYU?aG&qU}XvȳmBcz`óqK(B#uᾱB\'%kXGg!Ϯ穞c^׸+<ѱhjb%kvL XsA,`W@y's$h|Ol888888888888888888888888888888888k=f=Y"ݻ_ JӪk 8grgb-O;{j{s0#ImNΥN%5wqcT'JC1Σ?Q6uM88T(nKRVnjؘLQ񖘶E"yPpluVQYtfGx~]گm[88888k4ވ4µ/+ڹA[ !hiSc&LCd; Kn<-?P-+88oqK}-/οϪc8j ħ]R.8O.;gP6l'rv$(oNt)ue nd"И wkI(IL=NL3v2]+GX@8SzK}$}38ٝ;'Zu¾ \.;'C$\JǤ]YՌwNɦjzOlڡ5YsH#aBDƸ %ކmީ_ej'FDY󯰕u O(8L$3\w)ҝhp7GIڍXF,ꛭ(bq+Jσ'P`m@veq +7#MӐ*q}.GR/ΘV|I66t7 V'&Cײ16He.=8ZWqpw-K~-\|8[23$aM0u"?)ef-u蘿<tƎ?j'f.:n*峄Y+}+% &xeX]88888889Rhfnfg#S:YNRr,8Lfcap݃7b»BwZq6[¯-wh͔֭{.`ٶh0;'W&,^&HI1$:'޶j؍Bßݖ]UkuZѾ'{܊"@8U$F2c>],ێSF{{~a.!+D@;Vi'a±b?+MFF%fH\DŽ2儐H:켼2(ڒcg7}Lpz2 fL\`2cPzh01f@ o~l /W, nn= #:C+q{Dp1frIWHK*yRǟ8F}xZ.W?bjȖM8(eN)DvfM(4J8nųw_v(SY.hlŬI}H5Q]{q)Uu쒉#0Sy~ ŔQt;NF`h˦] hk89Y-}n~#N!.3r (ZdUI5Kkɋl /͸8888r W`Yб220򒺪mGJ5iLа/,Z"A..qqp;o.ɶe( vSx{:? ; 8ǩM(33uIps*0G f$0a *E,nj9~i՛&ӕMET0˾s7EC0!gn*ۂ+)h94I: aH"*,`ڙ4 EMtOqqWސ^ZquUN4[J10 ^I6U[j졔lɩaGGJ#`N@<-斏<3Bc_U X|_ $hOIJ?kS7=ݽF([cJ;nqg 37ra3=!͹ 1R=V/hҶ+Zzm.G9\Ž=Ih}kR:U?,Q^jWS{@)jD%$CuɈxp,x}ȇl=wk=yr 5X[~4;(cRϏ3z2bk%t)LWUA Ӡ/h}P}F5~g0% FQ|ƚrtEO<"(GXM vqsb]eğ2j# qbk.Y672|qE]Tj=` NaQ39LU6e"ԑ9B/&tO>l9V>^Ç|AVD*s{%1IЙ^L.5:-?ֹ\Kք8՛8INv]p՘~BzOIkLNn&f;  bmd!gmjX`+e72iٴz =!;u7`<V@i"Elk 9 z*S{ B $D(>:,Կ'_ĝ<[M,Fv%K6<$Cex$ʝ*l;hӵ0hJwY|9CLQ\#ݘb>rULLPT8凗,);26%]mbl>"3l)Bw`:yӮ?&7a3Wj7-Mv)N;2zntg"pG)iؑW ]/S'ۓw5Ojbb>VxD n/8dq)X? FOZzğј AUío$շL+x9no($d1pBfnr8!z1&Y3ޖޯP`&NK&YS0+,Av)پd P IJIlGM+(/q&͔S1ˊmfc} YO.asڈק+{E&.JU}~#WtH&1 XJ))w3H'Ep~>EK$V.(FUc;@9J͈)/`6vA':blccAKl;]LBVn%*4]?[J:E'9,O{ lV䗛rhD:5,5'28,5PW-3dKtk$ TYL08<>"I^z_5:[Бce) g"I2ҙ,1\Ij_~?EN#[\4rÊN)Psc(dr!. %+}0¢C1ߪsH-Q ?^513Sd),gIhYg_ʾaxrݮ! DơϙH:PylbwYC܏: ΋[2('Uz'? n66ś7+>FACrB>zQ]L*'&&k=8CdB+$B]u8Y ߍ}w|?w38c;:K*,;ڜjޡY>ü"`Ë9"/؈E {wHtP{$\V|H>~ShF{PХ#R] d5ǫL/ ;nΉ";@Trs[7㤺D)Wy$ "$(ADZMd6M@y.A `Apqهr#)zyc M3>2wۏCc_HfI_C~M80ws' @ [&܋l% &a?뾐top;חuw6NuVɊY~ U@Y$jizU0A=hNTnNq=HZG&;WEZ s#js:6"x, 4GaQ (Uqnϭlӵz˦Չ3uf9}$Fo=19CU>TN.IL&=?"˒vj^3388QB{Vk$VqWC"!۾YߨPǤU\vޏz_8ӯ<*ZlOmEwS3= "+2}V\.u |~]뽩2nvGGCcS$̐dU6ȶDo!tsi I+,jgh( C"iEzj]nJA&u }${fOu&}}lp~چAPc7)U^5⮆bX9Xv]kz?^cb5K<Ma6+}[5e* μ֩N b#,Cj׻E} Q>)<@2>Bt.Q~%: J,$/tCzy/zn.jp,~8bK!A"fG03&XC^J>-ӔPc,xC9ɓ\p礑z5i[V^m |͇ĩ ,"" àźeUTKCV ]izS_MPq`Sm&k0S,@ 7 _#ʿ8sCF%d?MS6^ ^u=()aeҏ[!deho%v!f0_OY1A-pX{Ew} }Ƌ{셽zĚd9v٢"".D-!b0̚45x6YY_ES:aӼ;.=vnyLCC*&u,m".ܣZ4G'bKb.$&d:hjRwXIlf3v-]AY$[T'OJW moNtvmwMZ_&F*rwö7da rˆ .-)6v? 
LZs׶8hb` ֘nX?S3d *+¤ȍKR{U6$yT;!1v &(Ќ 1,N"Ѹ呐M~S4>ѻY+S^YYy@;#iޖ,_k_ l5#2HŒ ~HQPkwxV_%M^}=U^ PmS::k$tbs.06w^LY1m]NIϒ*VE$[%-*ަrUa^P Qԩ,S@h b4kP(_[* 3 !y>t}u_cHIW 3=^HBMڎC\Kl_wџLYtk\[}Z_+e0pS<% d߀^J*`` K} ֈ5ߔlHfҳtr_d՚GԮ9"ȁib2 D].9plZ_=|1%%WbmEM`L*4n< L2DK1qĿ8)\JJg1}*kδ/HFj[ jO]1[n$VSC ;uXbK-Ґv3cxww=; FgIVפ62VûM$ ǧl5$T.lKPnJ3EtF2B-ssL#V@1p8|FҊt9$ر"ɓ˵=t@U.af0Wۼ_ gJ:Vpg Zv%M2lY*ք<Ȟ8.a|vB"EUC,/V3)ʦNfeWkK~sCyʌ2_@~Kǂ8bAgy62kGgS[ZވŎDTx' $8Q%K 1S9~qYUWV2Twx`Ij,Fh1:q t/ GJ&:oZQzv~=;ؕC "s16x)~oM@HCPQ:^uG8ѤuLl6XT'jCmKGD=Vy0 PLh$L$+ ʄH@/ qCUx(.HS]2\!qtI,0ćHt}R "ڶ7 E4.z=>5KY0$dɝ<,ͫz\h6v;Xr$BAor4 6FU}5$#68Q]ܤB@ԃ U!Oś)>e 祿ͽةIg]2qeCe%??.jċˇ!}7O>z=8~odY~O ;]:+}ܞhIVu&tMxƦ6@ĵ c$aFvhdAW _JVҖ{{z[#Q]qM+RrY'BBk!٘ rOVvR&ɅPd5W_ZNmR}2Z:*|,\#e"] !ࢌۮ9n+>|^;A턨Dv<͙XH+ǃ..^x1n#cK"TZݯqw6;5ҁәl6}qI*#.tTb07c pH5y9PQmͳIwbQa쭘|ρ/bΰ7"8".Y'r㱎o]5㖲Cd-{;vnFK׽n峇`{xǠ޶zw/%ꆽ~Zei&= e>Rq$ #3FBj1V>"hpG%N&E{C懢ucTlgt:TnbMup *8$ %e$̰+6c >#Z'~i}mĹ"f K?!}QzGbS4E1VxeǼ/Y۾tF#!^z3͊yZ0.٤=i"[g:Ii+tEjKqL O-y\xFz>lu/H$1X.j8S:uFr]N-<٭ѯtn\aͥ XB 9FRD1$$c% ah$ϠPttg32DFGA/;qd/\nFø[P$%`J|:¹}Hp%WuwS^յOb5oLJvffnq#/te]5q1CdLc#-pa$`sZog>#_Cj6ǩt?X5a>ke&}+S׸+?apO1iq"CȳY\ۅ;c.+">ǪPH.?m:|֧.W{aIAqIfpwK[*J&X`43k箮]8aiMQQz`} 8Dsm0z - 6m܈.90IOQtlHoW82A~v3> PHI="nU5:Lp.hM"nS01Pζfv["&27yS2b=5w2j73lMaI.ŵz*3 ɺ3x)+ duǴOؑ ;6De܄0egrDQnVD . t!VLNI^,~C\Uj-nG%_)ڍO/Ǵi UV_VGdɕe*C1%Yj%v&)(&nխ컦WͮnVUMARO:K!˸Yu Yˇdf7ޯ}peiaÚg\c? d#(1GɄ@EElg '1s\vjuzeqIsϖb+"B\[(|J8{Yc]]c_.뮾}ˮ}ylp:щ6l>m 1H),h%Qw^3]Tw*K&n8}jφzd1IݙpN'4Ti.,=)yh)oػW kV~GiK?PVIIrJ^AA˟9TfHHM8Ǥ& QfpUE0Mt dmoKױ8'F0AG,,tPyt,yYwg@ΪzEEk lv;TD㉄4K}βh3qږ"ZNh wd3TٕW 5t)xMNKw=NUoBHDN&Ǫ@d]v֓G需{5;RRszFV)oɵ+ݧcŕxYe.)H|r7El񄍳"@cM#=f;W$ٚe: )Il֩cI;WDt@S5/w'ɋ}eﯟ}|˾ww}w=sC} Dm[,vAǠ=wSyL\>=f)SIȁ"1 :ǚڡn)PɣLEv\G"ȭ|Ge<8QsRotzD<{TV:(Z8k9 m WٻP \.&RT0&dh5:#AqA &DU ^Q|ds뾻ᄎǾˮᄎ﮻N뾻'}w6oW]ǥ:;VA Po[/y܄;xE&0ЧXt_ rA%ےl^,rҼfRt.ڙiWn񧮘e9VB-XAQz ?>]u}|?}˿~]{y_#uF[Pi!kFԢkK*XHNe2S4?rq9yiM$[bƅߦf,s^U^06*"EQ\(y]8n'Zg1ce]Je&|(bHcÜlw'Eky mpX:R _te֖}j,K9XFHU##N>dr*a6#. 
bپB}|Ϯᄒ}|뿟˾]w._W빲k^$m *-nɎ TAB*gضx~y%G**b}u6T{W69cmH#c^%ǧahtJa2( d\;]JS]NpCR5A"tk[UM?g33#$_~̀ 6HdžD,L=B" I@.|ں"T_g dȨR,ۯ;"mJA#ԑkq?6LP*ݓdR;﮾_>]uu^^JZèW RlۖCiRjdxxl""H$Ti}ۖGI ΖC]nנd*lbZ#RU85F˪)X twݑquQȇ_U4["x~e=wYu=u,뮺}뮿|?:CF&;'l%TӁϠDZ@, F́t4S^8.h&O7>E/tTZJ˵Ā1'whK2{xK u$}|;뾻Gޯ\4ÝF^L (a^(b$(srXMO.g >:c~ٻAp@ Uy`Ùr@ml">Q-Sb]A.QF8[8528{ OpYlZm)HJB(d,/&԰zufob98d%ZK],,;R&i*lNB3uC7y?鐉e7D GYGe ]:.Cb,rxYu˾k?QZEi-}UYz%ؒ z,UXkvuD@E<طF|@+ ~3zoH`{oOxN/PFƯIM ;ΣH1qXBJ iMF->Ljd7gVZR+wl2/"+`8GV-T.rZ=mTsd~#"IXAqNAq҂ΉsAu _lC-um b^ˮ*~`=HLBHkαv)KFnW;,իoǟǼǬ.뿟˾]w}|wy g6fS{9 'lFN*"űC@FŜY ҏ9:EA7]Y[+\]˽wOvJ+\NKJ Hyef}!f z5g򩹚p +OC̝&=JO ۫k(ֳnsx]&ϲQ8g.-TٗgۣJU%\۲ dJ%So I%҂(dRD`h$,`Mdnњi@-eݢQr"_fzWd@f0ĖYl 'pV]9rfPME[*MPph|u*r*U uIII[ $T̖F&I2Jɥ'm3gS_j1fhe7?yw]|}uϾ뮾}}_﮺/<7j!SUS٢:aok" ٠=4fM2qCڵ`$d4bāLAџ;:ȕWU84fdg.bčbB1HN< J/ݤB]q1C n9a-dҢ߫dc9Dx5Eǒ | x| bV4[\bJԶМSoLMQ}_Vϗq]e0jpb7N8_kʛ*˗Y@vrՏ_k0ddx0(WJB[u]2 j]yb;zTz~.À".vPDK$HL+XH8Iw!X-1kX@qוvIutl#fz k8) 8( @S&888r8:2mڴ{p ={F$2=yK%\ rVV O|1pdvKr'.;' ǬRS{T߳UFwy$bIYWZy]s yJꟛ$)/1MqlWt%dAembu ҕYtuÃx?vM7lQZd?LOkIFZC~iFO6+it:v;(ll6Ѥp%dN<3lU*ѣPs.X 4舔1YpqȪAWBț?K?fHEKO*#fj(8ɰeޜ*㪑bT:EeG孏<à^È}"yu)aX[ ^~ˢ{ :䂊$Ƣ%CaLW7VY{7J Đ'־[FWZ>4` ze8%/<̓"`!-L$P&@o+V^G5b>ڊeכ 5t/orA`\1(O*}OUj/[A awMkC02H\@̐$E:%14Gl.zviM'U~v5SHm9=u6W!y$H YYA`L̞EWG*I8yt=!싎ƨEzօVrc#%^GC;2kAd!tVhXCl8[/(doVM r[2Wa׳Ewy?R.Xf`*$(DPRasޟ^i ʵZ>u2kuk$W\A^k&8fyN epr"=ݬ'>=Nm+$f 3)wK'3Bͪ.liGJ{} %022줄2l/StcR4UMo$4H­@d4V[]T1G-2(QA2sR|H~}Tnvӝ|vߋ7Ai4TW,ze-#u/5ew%a Ef֗N &ȹs buT⯃`M×DGG]SpT40ߎXgYc]X}o_5m&Ke 74pIu zN |]c}},(: -MNY]>]>^[LjJΫGLsK΃dQ0- gKތ/0tOf+XZ7+1O}|߳[PnlS]6uÕ28"ywp%;殤\ &س+`-U95$m$B Hh "O zHw6$}*3x X)ŒLpRhy&*,X8I%&~oqh埆I2O{qϬ˶vQ Yu}b}}4<{ךּU=EЛ#ZHDm6F*7`*GV`Iˆb ٟl8 [~ڊm OijAu1|/ R]I>#'c>fPSbc$ػ.lo7VUV7I'`Ƅ`|ƫu]_d:.7yU @Ѵ\Py Zken2L-Zz{U5>V~a_B flvfBѐEœ%ePlGR A+1S;of٣|G)䚸v1c}ۗajcVD_zܢܲb , K?ܶN[.Hd4^~`.>8֧l-۸t<|E#O7it:]gC2?MN{>ӿ%}Ii?Mw )'?oπg)V?֞^[y e8Y MoBF\KXTQ=# ș.8DG@ƚ oeC(S3-ۛɛҬjfuwĆ3TWʬ" K (F ]p2h5|(~̗wbnQOY ,3Q,^ Aw=J5~UĚ}c9cVx/\˵,5\ K2fr̺(X]e}}L2ϬzS;:nqvݲ{ClkFGz)2I<5c-PͯJf=\sv6:;6=k4/1y0x40!QSTC,g_Ht]{ڑmZ+4xړ*߼d APZ"I H,lY5x)ml4/zmWoZ7ϾH4 K *#>fbhÒ-0@j)w#zj.%q{[0aܠ0yW2+Lv}g]'ژؑX/8,g,M0l EfC#S)K2j.wBa=D{}*܎PO%$=ئ;.xH#>K)x|Wxd='4aPa%l?v]~.l:NϬzI_˘?/3=8erةY-mauPlqv"d],UXrKY,X];U(+')Yj oi]wxg}9uYc}w}w̏]WV5SC㾐轉qH}f0ٺ懜Ml*RL1}0мl5Oc>&Wf #\u M[PH:)Xj(H_@I [-3,[]&/\_H3uս`3};Go<kF978HA8Mbј(ϸ$Oy>UhѧmReg*$0t6@r@`2?ח(ƿ?/Jk"y{RϤ#z}ln Wbֈţ(69W{;^맰WqWD4Dֿ(*F{IP/j'md#+jDzau~ሒ>&$\|ԦLp7"|p}ϊ&-]vnYN]/m^l u`ܑCskJsZWj):UbA_H؃,cݓ Ό"Q!<`I9 b`\ B -'3.٬,|`1^[kWW{ɽbv]v!4fM--@"cX:$l\ j˞wBjȹgX^5?RU9Q2f诘rAЃG-0qhwGX<8Qv|\~U;shWACSN 1/5,wa}X} LV4XJ*Ewq=[I[5<ˑNDQ&GC4m~U>mM "n8Ǽ{XyFhfxF˧_s: &,|Kض5XZ4fuw]%eDCz"K4 +WQ:IN5 Ϯ..^(bu[2Ǥez B(2e1a⧦9}ЯrG7T*O^vlmȖRA F޶ 烫0ȋm@1Q0T8{H'ڹl/br?KSZ?ZnZ;(>cj?WЗrUsnlA|D\}s3c!:bxcH1"EL|e''ЭCaac90]& ŽcqH CI5uHKkh5ka'若GKTsF#7t-:t4vJv)TpNf|6y}֭w[C6{'nK,0J Zp~"f7#aA "\_EfR$F>AdsxKLfu{ ?&44%8 UVT,|؟8 EoWLCmwXh`n*;m`Ϭ*$Jl񴩊F0m`4qts.حٷҽL^& ĮǦՂɥe2ǥY#M`BjN VRj $źMELFLT8Y/"n39n}ѴOKDU0uuX2fqE* 8s J3G"T>ڜ)?,9>r n;x٪+]\QUT,Ye3<,˼bۿ'(߇itn6n_dobY,lTacnːA`<2y){Ľms^B/kFG#-RL^Q,rBUb ʨeǍ>Wg[w'fM,!W 4Ju7"I;D3 (-G5vU\ʽ>سN?4D<% whZ**kp s03bA` *3 h6QT6V3o0Մupe+H{l (^͔6Ǫaژvi3"5ɍt_,e.,mٳX:g@v)gw.'?'݇U T\*6JְeVӖ5(nHъ*=4>QFI7Qu&-5.d,fF@xM&Ā qمe-ɥ`#Ip<ԒC|{lٺ5Nu%ʦy7o=PUX!lJ3eXŌA2RXfI'M!ԚwHu41ɜ: &]O&($*/ o ݫ~)'1lk iٛ&DžQA̦I qBlgw &8 d _H6oW$,a/84t(`lJHR'jž"-?fyi:[A0A8}/kl:P݌ttaвkOLjA,IfHUx$4@n /l6-wN#$漣ZTk3mht*qY4Gafuܙ̰cT>C8]L%MYwۄ+y/J[[ _ҼcE?XLu+ֈv,:1s%{ z˵3Og)q}I:csk HⰡ u~f^y,r|yGƱQ"Zwo.OKMۯ@nJBM;-bQ񝰭$5`H1fP-طξU:P&, i6j5wf#-(K# F8b0nO ջp=5r; 7ƨN^Yaix[?H9c,rǹ]]]]w}|_06 ݊*׍7\'+2_sׄ t^!"`ݔEY7i0x7vKu~\dzO 4]bNQ4?Ya]*mֻtVI8Fώfݛ 
!̸ۄA'IgxeYm>#3vwygkiloӭip`Ө̖x-˹a( ̙v2g!ZMO_7~ԩ8|-ŢbѩV`V4e8+&}t$$8Md2Q}9."|o1vhn J&)Kzbs#d'ZaW^u*'5IȴXxrD KB]_G)<5vо_8#<{2͚htx_,uv=w]__!oޅhŤ.գ{*xk$MbѸ B4vLv0 P(Q6ujv Zk737au@qKTwMnMGd,I4p$r(;1ݨ<ԛYDM-dgl urLP~w+72j6]% *:F[|?6Pg!Ykǰ7-V7IG$p6KDo^ 4g+!eő@B]uwjt٩fv@<3Q`ρ6d/#xJ=$(.nAeU=Kr[KI.)/_:}FU+ZR:l/ӜM19HuQGLBYjzV8W[{5>_+F 9F[XUe?eIaCYʰ2ӚUZ&&6d~'%G9@@el* Ö$Y:^ dL]))7 \6r l[ZneG튆xwQw'7q7*"4 :*&9O8պK04ΌdfkuQBe!Lz;Pp4wYYˀ888QG)zW0ZZ>ilw&*N#e+qOIvR' V5tG~8A}I=k2 v4}p$g4{eZsB6 '>m'ˬWn~h@y=mZsl<]W$<?.1˸->@2]FXBdrSB~ AZ^; AuӳkEI"PAn1IJG_fE""BBqw.'LM)Y,?$] }f#nʽё󫘽,6!I=h,7/U? tSO%KLzqDZwhBhN:neX9Nŗm3sT{Sqo=ON8dzY|iQ,9ua{h%W+Gp&G/0aO^շj.:3憯jqV b3;w5cԔ%d qO˾/F|$ro)Uۘu)zҷnE;P.G2dz>L&i&S+><|5sncEeO5RHjnآl[&Ra(`sa# #4GX@|ݐ>CkkL-][[Y%dWe):*&OIF"e3>Ƙ*/?}t~VӭVC^ (7f&3Xa$tpTcGr& Jg$jˎ6)DڽBSX:_{ܭG!AQkl>QH:kԏA\<ݍu?O?lm.u!Fj*e)=Lc2$(yB홉x oX G9)|

Q^otG5!ZnW.*3gD1qq2EX2"YdJt0#"@+኏]QFhMQֳlLɤS[ Ɇ;:h@i x@j(*`DqRPҺTLo=zњ kC)U@1wpQVB!Qi=!,gxI}T5B|Nke0μőe Tݨk" l>&װq >:맙eN8g;N`\'|Khg<`~Lm1ë836GhffQ #v3›0T-z[B1U%|hgVKlb9+wX`}/q2hG>5gI=ZNX7VKMIDHfVv'XAͺ8?|"Vf} $z[E?]*z8]VP  ـc &0hܮ0G.f^׎Dy:0 C~ry7`' uC븘@WElrpM]/nkxYؕ 3P;R .S$-! \z$~ֽ]J]|d_\/3d3J6WD.5軤EIO G:eH 9I,&XGK?HH,927~1e"T i9(w,RK/>t

3Xx0uN~a͚"a˯ŏ/×˯_._,=ʘ_<55bw39 &R! "dLwd\Dų.3Y]v^ƒߛ_\O{o-2:UbQE(DqN pكrH+A})u蓊y'*w`,'vH %;DUn3g)gޡ#,y-؜쫎ET+1)dƨG[GVQ]{ ~bg'RoPr<ں8k} k9޾guDCwiL[0*M]vYsf߱֟]zq8A|ֱ|Zg&F p^:j"i/{Ǡ>dk_Űw"|ag3Fszv?̨nn=+λd۽nHvMڹ~y>[HC^sjoeh׮-v 65ђYL(Ud5zIZ^-{=o}11 vDB($K8=@^Y<]Eq3/ky)OV̰*0gTUϩ *8ڣU$kѬ4&l,EӁ,Ib>$M*"Rho/ %6Ԯ,1+9۱C ds@K2Y4%xIWɅ^{2Ԉjv䳫J[ lEZ;b<N C^# /bGɜtxpP !g5sM#^ ڌ/4dZ3 h ȫ%2mlt1tcO^8u׵ޘotż; [j Wϥ&fžAT`cWE6A5kWW'+ U Hn/IX ֐jxw+CzoΧ-/U#,=z&8g*h(\"+\&ZPxk\_^XQwі+zK.Dv*A IBzA¨Yҧ`p@b 0։P?KLî4|GK$p s{HQtZ"Ѡc(Lo"!۫i`YAEl]}Vy[u\×%gDn<@g%M*²?RCI ϪMr;do:GF;]$ueŮ!gϐ4M M@qE,8LQܣ6 TM2b.|vWuÏ{yng;Xl KGM Qz?s3 چ`g'4FYyԭ–q@2fH(Y>Sg%Q%X&FFUrC{6m mk'lw|lE۹#ZO*橯c f?4YB fsUCFBa&i%q2ğޯg1>8V6ŀR軚A𐛮M0% , =Mڑ1Acj?^TOkyhP*kuj }(h;"(b%:' jDԊFy2$o6wSl6_ɡZ|{V6LVr ?.d~a8u]|LBk' 1:53FKywbPoVܽ!=هu6\GVAn~'i@RCD&egp2 ?r<ô郞᯻輪mfٯQnͣ7^Fp1Le%1Z`$uRTpHQR58;&ކ¡%; A\sS,`0ȓDF,:q,RQ }^p2@}qgF쾣%b5윭u#->k>Ra'}XlXvmoc藎@UyճC=קC3uj#"Wo^:GRSl;h-2cm^A ꫜ֏hnu^G٢Ԅj+m_:GQ)&X=h6 *Y0\Ta5:|91(T5 #֤1MUvp n/+*V–{GE̎ß_b$te\KXA8+p{k_8+/\FY#kbb)z :X 9#f0N=:Y[a[g׽5Pnu01AÍp.vPXSTTFfc0Y?6Y'9H>[Wg7AL֗߸jv#Qd̠UfN|?dGsx3ƖvNu?38dPa @e)\L*dA6 զ)I\WQkQ>ⷶǦ< mOu}+J'd [m_I5 gڙ)-pG(RI)L5uC 2y(,X81EܰUxYH&,G.>U>^-㮹kOP'ickԤJ5V$CD w"X'd09ԕ /P(nHl;.{p #7fQ1",bjQ]o~mi\(JZϺkMVهeF'p*&K .nlh O3˷`vb zjБ,eCd4T>M~8r${EmȤh)I}TS~<1q0jZ# YAnirID-T#cH`* xKwIj3Y_ WrXܧ`H`.8V9׶D|PUyׇ#$c?7F$S#jumCIimjD5lCX+F drf,$ې_"qFeLp#J_dH,"ȏXج*G!GJcKK3|5iy'xۭG( ]hbM%kLSX!s58.%1O*j`; I)[AQ燯֥R ՜Y5tnʽcҮ04v+tضL˾888888888888888888888888888888888888888rktl驷Њ*nI(Fo"@B>ĉޛp$#E4|Xc7/AD!nRZQujKypTM0 iF%k H,8 pe!#lh٠5!U+H?/oa-^8里</[leDFfׅxi)qfMe} >B>vZQEgْZ?y!o 9TV\z[Ry'ҳؚva USjD\nyJ6tyO#AI'NoL/!wΔa[b"dV2X}Z`1}5:z&2TFsZ :WGmX}:6vNJ1vDJD~׭yxPp+/˳UÔOc᜴]|@vZaRv ^䧪N+y ~ ,i 'SK0Q\KO"t+4Z4![ڻEnݑ!zCZXYRE*+ҏqsڋ@ژ^߯:J34}5UڮLEĞ`0وхP-]bZ8N?+_ 7qA@VmxM4_&c6p%l7'B̉sX膍.ͮ.~N9U>B%7 [[P^kI/ JJfȮ mBJ@x€YbG"g1̛m'][Y jNk0~d(~8&XaDN$`v|"ɲ[&ks_k엲R%}x{=BaFڈ R (w6&"#ntm()/8ؖ'c?]k2[(;ټHU>fWI-)0V1IK k Lki*Л@ޝ ~l>9Xom&6H6&e#DeH&0n.#qdߩ؞gB=Z߿CM1[DX RIZ%PtZhHئ-Ӑ A)fB=1.!ːBL. -+J=OJr=g#\,@Q̋"oʎ(zc+\c'Ё+&f!Sr- htTm H䕔2G>x-i|\w `Jū t}ظ9ׇ̞2*׷&-$I2;av^ &3vQi}) &&B5~XMUJ?RxS%"֦6*5gK >hۂT-fo{Cl!ҽVf kp7f}gg4.w L~`sltY} }i>e=׷1iԺ>ej4uzU :dml[XD 911͜.tۥW͵pm|LT!=M,+J.kg_=" Xċ#1}́z~sMꦶ(ou;)[vū,h 9(=!". nw菥5ID[x%[Vvݯ1]/P, d\sG*PpxYk)쯓C_ Tl } Ci<"~`c]%F== $)6rAF*0Y\&n M䌆\4Μ5eA[+-4tM a $5k]I@lְ0s-fʼngIE-Q.ݱn ճ$4NZ.b[ռЍ0dXfg Xsc l6 dMb:G2KD/A(KWZWf嬫F훢RUA*M>`5p74NbHGrC[_}M]{UPs[Jc.KWkV%,8ΆtG_ EˋX4UHݜf7,se|/޷Ua=2hG6F!!oQ,z-'ct4ՎJFj4/7-/lkL<_8AUQ"⥄54J̊a!@NQ|sgTY6`^'?̲ r< q,˾qYeˮ뮻﮺믟_O\-{ ~3=Q]`[ٶQދN%4Q 8=lW( wdʍ 6Adķ6?Wy+?=8meqJ 9E,2 i+B 9Dyؙ Zt+])er ~]ˮ\:a,Im=z^"v$P#% 7|q(C< `h/YLl%m[&{M{1吶ȕ{ yHX[K͙"xGC8O儅Y{Zm!V8=j?HZ} EIU7p+ hI"ng \)$@9h_ޘ"O-H!{Y-;G,$vk I&d%MDDXRۧ`ä;2SRv NusnWt43Xc Ǿ* #@*GR$<ns.u~m|Ck_8=8J{+^Z LDz8l >/$#ѩB'S2/<`憦mb6uU#;,@'U18,ٷy!͆v( 6C ;jf廤ϴQɯ99wyXX]>_? ~Orҡ$U_{M}l]uuǐìmzPҭ,|9%mǬd>+_m/gM$vh#gnQ\eY IFkL&-H* ގ9d f C7nՒ?R(>E]KwaZjb$3Yxu++  AY( oQ޻Oc/ ^}@FXO8GX0K"|TWrl+&SbrfddMDa+`z.Ij|%iz+9Q*O5:׃5ljOA :|ZSf…IUUR#ʼn4]@#K:  K2i(a9c0}}BJe& FLYyJҶQaMچbaT$tHMr:iI]K.ƅŁmWul&WcOd4"A. Rqf,! 
>~As~d˜G:pԒG^oU\BxX`z R\I&ʲ",,̀ YNU*(e~&lZ-h:V*G]q&UhԀ~/~&BdE/:rMܧӆnWlYdՊ|D^3N%1TSxᆥ3Ѡ{c|Uȷg]jK8=xu}XB{mc N5!tS ,KGG˵\"Evc8}a_PtPI>ZPTbDT4|P;drq̞wۜnϔ;\i [r#i϶Qn5&vVHdE(|4_̯^;m2R\sW2םvǭBٶ`1t r<]^Os$^:4 WK84pbq0mGbцSOo31nT< eOAWn҈2'MqT ;j9IG+%ڷ3* ^)&ҦY #IIhkn,ayzQY@RJU H3$f/G|OҨ6vҹU_8B"rY$6nG'K䋕Y& ]?b n erniE^a͈t0POIvlNh skK.i:茞?9y*E6!}7&=4O&eYsi21o K6dGxW&65dbqlɁ PgBz s lc6+ ƊN8Ujlc7mS'a&H6ꪾh"9e17&T_l-g?Q=}R,۾{,Tp+9jIp]/'f8XR ɖLEjnQ:Qf:;$ 8-ӷz_GEYdXtMx/&PVáLHR6==q9 .61V#R.A1(픐N;zFB` fdEl:Mm],%y撉`?õt-֚_06 gIjM(3j$Iar"ڀh6tPbxByY9ye:6JH_ZO YԈԟ;i3تAa;"Yle:MzK%NGW]%k8$ʔE,'cЊ}`~4Wkwjw 6~Sz SJf<Pf=:7M<ݫh tAE9pgg2?*ܿ;˶"wLK_F1a{X)ֲO5z,qff2˺67cvLձ׋r8Mw: Ib :p83δ7#0l1#i]\bUY@m6;t5Y>]& '<3'f<Ӎ#Խ4}bFIl]&m#F;XN2G _5:2,;Su7o@\1IE&Hc0~1S.8bnDTDcrA/UP֎^5uu8͢ yAO.A2l6a$9CJ6>7 6VdoBLƿIEy(֝R8A(G:Bh[bz@ɪ%WViyW{?_:u}G>x1ai+c_Bo dɼwJdtK\NG]vJ+7YKXOxLg궛yh*;?)tlc"BgEI x2o9 E1+o^<^P]u]]u]|b=}}/}cKP3? /c =] DwjbPm_=cw(exB)Tw{4{S T =WֺZ#'J A^dgRf븰ȨLS$I5ʊtIp;澭YHӴl;*YU W$N">)!Ɯ&͟5['|Cp o9VqCO36No\4+(LNeIGf%sʐ<z̓m6ӽb:flڻh Ag().yYy]w܏7 jcr.^i-G 3 #QL)4bI1K!uos^vB\ICͧd+Y+UJ) IF$} \~w*9뮼O뮻n﮺뼻qq]u'X@ $e/.L%M͚Nn%kp.Gq$TItR tL2Uo35"Yc'JMb7fPQg.ș E >(t~;ޢaTkZ:εi Ap1)=ſT ?S}NWR^Fasެ-I)R)ţX6mI#8=cwЧMs|?coE Y< e~"˵RnV/lxb3uYeus_ï{C.i]M b}e?b,@`=JBɓB2=~:wKInfԫn6a~Z{1oX+3[3Ug>!οj8]26[,v{kX6(?oK Y5X N#?:>IRj01J`-|s묺Eh?mдW59?7۝51Kt*[?‘u&QbfM,5邂6ϥKi V:mb[=xN6xZckW9ql}"*ϧv:vrvClōUӄ%L 8U4S>͛|sS,1wnAdrH%jLAoKA(1yLUQ3xBN*ɁɕEP1bʎx'WnlYZΫgm#"ۡ0~InHI`B0ʥ9o{mtzXr9*D`lA1 D!Q33_:L?s!g+?kOcfwPY\?mB}￝tǢc \P(sb \FR$Ԙw ~v.58hYCwq[mv7D'#_`ƓL`Qϐ5R<̭K,]r˥KkySI+ 5j8暉1s<2s<{ˮ뾻+j#I랛[n]Ȓc9"xOeҝ2kh=-EìqmJڎZDZ|D4QdZ7X cء-p| .qoE3?`(X&AʲîA,A2lW)j}*w_d?+l򡼞,?X i]&"YR޺]BߥǦ溎Gz^74&ى2i_iݒQ*±14?2zh2i:gQx,|e٢K8/yIK)介^vũUC哭ݎPP2܁[Qf%$b!ZP'ȹ$7öi5_yP\S+]i#RucAvj?hٻFQq"&-Vƽm &o)tNu.ӘD$ؙ̓c9U0F CTdK-!jə7d<ZnZ+-!_lP:AIQ$ҫtr˸N,ruQ8$Ч.$ڊTnrկ:{W9v#!SGoOyM[7tηwve'y>IYd<dXwH|2cQ}dHY}>MCho DM􋷥8CG]RU/JX3)gj^R P A=:qWq`[řP+Q]9zR } *+|o?ދK%~Z>A4{ ޛ9]Qo1'J !,_9[] sJW8٥鮛e%_C?jucǞQ/fiGbE{zy ,,Pmz.j._;IanݚkfAUٕwk hXXx,d'eNv'&BBv*jTuĝ,Eu/E\&Yl` Dyڿ#݈ͦ>d8Hr}zJcI!c 9cn[$1*<\Q$梌\╤huٖ֒6Mom,})r9#XHg_@c1LD2lv3%>E\PU\}k`Ul{w7W6V繬kFKrqؤR?+ 5D$)? tHHd[AVUT~~uf0#6sYv-4]vmV9K'gm1Opɧ=k=3uwҕҝ֭]k;CGr ~!ZuWM]c򦻏^>StJյpmNl& \G=`0]"qpx3!.Y`x?8hPhŲ>Fg[Okmo6i3H@u jE}&s@qIg~EуcS#$qr4:jVۀlv-o2(vwSW4O2!5ظ%G=te0.ێiKr~-RV>ޚ8"?v[f ccU#YJ Z2GfYT*iCk~Ej6"]fcNK*kP:;r ’9dMl{ yEq`0HWO{?\b iǝs&?곸,QLkGhXx7͟BM-MڢwGv#Y(awT ϵKRjdMv#f2BJt62L>꭫T W 3fYC>j}W|X.F۹Ij%x \x:HGR Y⤖XyyPMnlYk-D0Z{=%3!:i ٢ ݨ5+=6^@( ilutl7]ASEI5?.)3\fʈk:YRSq2Oh[Kz~oЭ"X&QضFhf % .IK5dvVFp_K%s`LۛwG1>&1*L>?믟UQW?a{ +o䏏{)SUdҮ_HuCa _5d1?P5IR킢;Næ:(vtl cT8:7_nۤp|:3jg9/CDv?v qDı5dJ)vbs]>bdLHYo׾a"4;vk)ֆtSS 1 `a&9PF3lT^}'tI V,SLgy4AM';O峐_z 'pD&k׏݂\̓ c0,X0(aBScO,/7Y rk>꽯ͥ[ԧ)ni 8{&Qfk\,HHJ. 
Q_IMŋF nDTz:jYgjw^vMN~KLovH4>A[UȶPJhfJ]3q%xSgM3,x3|Z)Uu1q[6΀zpfw-=EbR7-)]C_GIb%YG4h)^V믟4uSR \`19b{b%~3;- ǖ<`EBfGZHrFl_̲|HU/'@t-ָ޺#z;~A.wd2^YJ93+(QǷ5o7sa`e"[1edyXf1"Q'JMj ;y",`r8|xNR |1( R;#]/mK&9y!\2z|3EeELrXYXJJ,E,L$%[AšTJ !`HMvzy/塴S6i#Hq`{J%#q#\aMX?|pi‘cX.I# 3[kf&#ӝk yXIڵnaoMEEGHdѦbկvѼs޶UMoFʆGʼ1 G", tf,]u:tJ~g/߸rˇ Fnoa%F^Yt1r1yWLFaۺODw()Y;q?|S?kijvw4-UT%0Ҋ4@Ahb2ȝ OZ* M|\ y7%\cc"pr -S&-!dZQe3t 5:;S xo|ڥ+%̳]|лjhdllqN6]Cpt *hIx].햻_-[E)[擴RFb)Xkm'hh}9ev lߋDDDZe'/S^fccZ%ӯݧhKD+]T3ë]>zm 8ŧckVF- R+XP7nDTmnVfqCqҒ:5KnN˸6wlI]SLe6d{[8F2,SOX~r &`TȉEw/9_>hGѽzCXsIM{\ w$cĬDgb81;r_Ӟ${.ƾDgw_G)ٕ5OnkX-lRl(4\\a7$tip9dJ )u-{;׶szIeFtHA d 9*5CR}1$ :"݊_^: xM,Y^o~" fYK*c`R ]0Gn<7y`y ]]2/)6.GOt jqV~7C p_Y9=|tc69cJ>H 4>Vש$K9QE%L_K$M8˒O\zofUe;(x%`02XJ,7 ]T.G41$<"b|WajG}UvANWV?735SO]LqOOWwvꇪMs=5^T-f݉\(*UIsueb1ѡG[v>-^P 'Ƭ7$C#O IhQRpYMY;~^Y7Mk7lUY+&ػnvR:lIvG H0`1"HL0Wα!+/&,ա$@888pM4b7#"" 4 ؇`Uy"|0 I#ޠO*QItTL1˪8}U x7adGZClܓIݓXpWO,n'cE`o[>S}vjϝkw| xJc˼WYs)U5U4s[>z%Ս:S.(vN u5E&ȇ 8BV@fx7K[oX|>3/Dx u/H6^RTةH9!gl"xN9|Uo΄՛lh*p z/o:&ܲ"Ph~Rr #"vܸ`Ɖ(8f1in9I})ώW0^|3pB -_{kfg-mIUu Gɻ{HB?3 dWt˷߻|O xp\> AZ2bɢ9vVPME8]LE3QL㕣Wʉ]9Vk_D*8I6#ȞzD4Eg `^ҽ[ZȚ[U Z%: )lVX= ROC!fQG6hH)V`jI#T3{MoZ6*쳑 h{9f(UEL]6JorQ)D`"H&ddfaC f3cW(݅Zii%畔\ ۏ -"d4v A=ɣWl߃lʦqkčDPYTJJ%lM2 =Mj9,Ҹ`+!xb`}QUTj-Qo`)輤߶%xSwIGRB6pdh.8Leģ_Ae!gJbPpvA5[ޚu=O6^O؁::uJ & J 䛞f25pNh/:d&0@'c2܈6;d^ .(URrĈN;n 7YD.r&שDnh]H8KF:`i8F1n$]@Wiʎջ&K\I˼,ǿ: +] Ptݕp Li] (d/#Q50?yoy_ҍ%]XujM䓪չ ֽ0fCpFf-t`l`errbr ߭[d|&f=i1dd!;7Idc3.$ux{ Sp/ի:w_vL {l[%g3^MX&q#yR[Ba1u:&عKhK3eЭ)I|4lM{Ă0sqHZ7&ZYmU^>{q{,QlUgekdh۰3| :ģQ:I+3INl_Y]MSmP7uzMlLvBLT8G[]nHn*F[A0*]=}\zOc6J„Wigu^Qi[r6f:be@m"~J]ayfÿJJq_n6ltfϜ(dƖ02_:Tt0#2_[_X }MyBkH(R{|nٱzQ7S8Y_FAfTsw"crV=9 _w/^/82+i4%Ȍ[L霖:[qhnB\KVfؐ9箨GaY+v!:*D>\_p3:̦0qe j&A^n!Eb=hl<`=ŏTRZ걝 b3='f4G`[qE>YYcN1,AM 󭖾UrKMCq-}Xg,FA-̻Y_MDr_3[ pOxiTځulJs 8F;;U\š242ʜq% Sz|vj&Ez~U6b-L*MUQe%3N=I3⅝WB K- {[J{Rt,jؕ pP>tV@WUL6x6n盗H%H)JuolnxY:CyΖԋ:RF=/Z =_ĢB{UO w#;#MeԊhN'_X+8{R/D6kk3N IMȞ ,Q)v}~݂m$ wZ:PVV]& 4GIF r6Bo!7]@xV}VqrYxkT6lQWFM* ĖToN#&F s?fB>Ma~.|NJˁliX KaL%RnVY }Ī>ymtyu ؍F6bo%+fe ݐA՛|EdI>W;(ݶ>t~\mO~蚷WBfB^놼d@"mJauOĐpH6!`MQR6#?Bz_{jj:_cY}5 (7&̦?'k6iH3`'NeeO C|IcZZJ]v.@ H-y^ŭ)SH E!Ģ`AȲƈ@Z$IIBV䛥M 5̗:2)RDDɱ Tf &lnoe^SD&Җ_t -]ZhH$wu-!70rYG8ff劝Z>>k2`5q*}S-q'. o&ɤUM̉0Ag : A{c|4&=㿠{{ ؝9oc[ YvR!Xp d]n-&b>y^ L잿Q*۞vٚ[ ċD`!ųJG`2(DXx2nP㩍76TIk&j,$aHXɱ=#&d!žTz[[QZTkX曩 =Pu,4E[qٗ齱ݛv!]Ȥ=˧8`8$3`]o,=;jY˹(^mI[,;Yq)9{] ? =vjk՟$"/-H5;&W)m^xyqY!g]p/'[ʋGz(k%?ؿp<݃FP_C 001$ \)ޡ)&FVPH5qP.h_fNƔu#%Y+pG 2%Da^Uj;3Th+_:\hUɴS%ac,'FF,|9JQwq±;szi z@W'U<5'+Jl('˓"nӸ4sn=LXƸ:|vJ.}]f{%}4 44{ و=ʻdzĤZPQ(A39,GL_fru's5N;;[']Z<6"GXɽygc 1"ċS`Y! 
1=<^@z'S'_<ױeۏ~>wERK?.T?q;z|%S,>﹛lggu,bCb,MgX"7e&i!IȈQ>z ʮy-ݲTGXvf!QMdDI0UtpۂlxQbmBNJǂc4o/@i+Cn6z/_s_Q߅yZ9@ݿ)ZQwJ p ]ȓO>!&|ðL P$o888888888888888888888888888888Fк$ߧ{a ˨]jnx2/FS86@Q$6J8U|f$'}֍£ .kb6RY )f.8w_~|jUݲn)OOIn?[ˇ^^pZΧ9+(F:ub.~c$@VLseJU,88888䯣W.X-e;aUrx.T95"I,P3D6ۀM⾦kivXT,ѡ2+.LL,DDB aa*I GO/k)888NܽO?V7ت*(kڒ+2 D;'8ڂ(D6;vj߱{ey-AKZ}*8zd|(S˾t1!_f_0Q%d \ 5#Vm~+f|ޢ*BJ<@ O0mI,EvK9DcwIE&Qʝ߀8yl+&v:5Hb̘rXYE}`6fwKn8s7zlԱ)+HhĶ'1x|o 04ca= 9M (e&Ղ]x3|4b82xqD(ɻqʅjl)a8eN8qYcn%}s\qqpqqq>WE,J fW- #=!<` S7d| 1l+$YuG-%҅Rh%lxLq2'a:!`0'&|)'68nߺ>6蝇߭'RjTaUfhJR#+Л*tn%Q$+7^/:mZ[)#J>pKe1l@C1X㙳*8knb Nע׾ۗlF[lHalyjm#M= OoIJɻƹ>šu$d/%#@u6}orAe_Wn KF&i`c3m7eVG)Dhap.#se'+NWŭLUX ;fIHȒl Lʱi1j+)"*Kv;SST§'nE$gŁNȤС : Qe]>9Z_A-s> my rRF225\bڪMW'.炎Ǿ\zCʯ ֜6CٶBL?J1ϟ3CSiy,8.L6oNؠ1֊+[-$g >{ƌCCG'B9~@.f+ ԯ6/5#d~å، d/7XGae15 (prM@ǵZUs]ƞ+(BbNl h*4 WbGFOd$6]lmͩ=?[[1 ',q(srT"1yzĔUwڢjusznlΝ'6ʦI5A@X:#`dw-rh` 5`>lv>*mNjҦE$vZ:EPIWpb)xIrtW- 4(?3T=5ت1xRDa{@^v;*YR-QDо @) = Hl pE&qpq O},>e )lc H3VSfǐʑfMV7a^yhj'>*q piJzWސ^2m#B%;Zh6ECдÃ+BN ʕ10]ʻ=CGƗLr=hDz:% O}C1іƵUTІbqդb~x!v*m&w!)B/.*,3yOK67J'*'woqhE]p\IWfu34[qM:c5#Q\w57is*O sI3|Y(:聜dͤqK"T> 6|!4<ץf@tip*0q72 qLf6vX>jC|~ih|RזU)ظfNm[>F|"LZ:pz##. B-agt0IIR튵+lL ^[qf>EdIGnz ⫬gW:oX6K38T@v v$it]=k`6:TQlb4mzyy醔A=SD"ۑٜQ9]='û0bOS_nD+c&~9wkc%ok#*d.2B& OXiiͯTV4 :ڥ5;p$\26AG WKwjt;Ǭ.C&ߒpySEG5# >,RY㓰C$2LI=0rk8JI/Pt`Zb{nՙF D(T@gRg*F*B|5%hܯnLT j,_IbX>.svz9!,{[FI` 3p}HQq`Y xRukG(l ]ogm9٩ --^:}6'pf.& @oӇ(/\_" (a<_-.&Λ_l2MkwznLzj 0j„5H|y@]6Y`!SQJق9UOjVעp8q/RQ 퉍\!~FC i DcL?$0Paf^==Cg^g{SzGVߛeG"h䴤q5DCTzs~F˾)6z1*/,T QЛn릓XЩNdɤlDe:\8؝ɬv]lD."hlBZ9R=t82b cUAN]L+= ;m N\RY*0H(pSlY4Mq$aXWWÙe.zˆ5W |o^E'qܢ:ט&{3Yi墹|0U<;ʞzE[ޚMеb#vLT?s^4FI ?h;i 3o.ܼOIvGП`} Jѽ_B@d,{W'M~dLأ8"F#Qp^MrՈFR[ρg"ywVr6!Fo-,NG.5,&ݼ~c$Xhbǂ+Q-x0(d#0;5A句Ǡ;, :'VvK^HY F!+fH}WE$pYCYIb(Iwξ$8K+|it12 9V: ԧ l2;qތڕ~ډ1\蝩' &*;TQ2(3z _ݷT" 9kCFmȅ ""L%6P)W!#аA7ġ]:q [lRNTXf [2 N!v (+t6P˲zUE|] -9L7p>\=w!b!ހMJ- ԁ񲑣s `>hl9`H9AOe>P{d{v*]O<:ߴOT "b/}wGi6k;n[ؿH;u:l.Yҙ#~Lm8ъZI?ˏO%-dy?0w!H$~Td]fIǾ;owB\w接i&צMO+2DP WaQ? d YWھbV8N0)W` W?!뎯q6׭LWBlnxsc ԕ32gɖ]vIUHHP'DTϋB)s'BlQ^.Ksɻ1U&{Ig8S;1^$Mz8IzR%Z]QRYjBM|A~eY~䋄W! LQ^|>AfN-EUGpG(m3} {'/y˱eGUxuxqK|Ws/ЕVhڵ!"AtcKK Ë7F3`lۚ*}O2hi}qp5T?qUf <0uN_8l9qW>kåP FXD&t<-)w5#G`t9 Miuq"y#˧ |ņ0 .w`uiv{e]u}|IOox'u]\0r8+vu{I:xlP%H=uz"LC5u{dM 'ՌuUԨ,]!G*N։Ϧ1f՚ȍC%Q䴙ؤl!/Y6N<(]A&1]?֛bՆrwwg3fv1e-QbS5{qcWσ2Ю*SCԛL.ŗY*Stp s#nj`gΛ,O#Z~ MKOuQ̬?޷|â@|b|b l8=l>|_ %q FܺPF D ]^ގZNLIS#i5'bO.2^\+UR؊BaL퇔XT޳;b谫cUu04,c2\aqd.c|c߇tcYjJs\#,X,:-.ĴŦi- hu檕HB:NB:n! :lxAuޭ:vmiE*gZ+:"f rM mpYO]"NS>4󶭶$7e'W1HxF&0X%(l1VYzW'6vHMw`E;`jS+u-?Ti`#aGVƏFC : *{&Bٱ DPc9l|3 u>IE=_؍fi4Ә+̚, Ag]a =$*ŌZ`3,w5|_4ѯ~>} }+qh>V4;0IFGJ4 pAmkchw,Zn4kn)mҖUԐea-+i&KD29ɢ'qтBh$UM趿|;md"3Rxy9K;qL.V&.r M-Bm rYG6%5\_͍ޞ{x#NIמ~iմG_صk.-c?W<g s1mSX;urXrN4'{fiۮ*i7)RMVOjgtE)9IF#(܃RX/<=ޭB;3+%Z_c='n!8yH'ЛFb5Zv߬F~bF ۝զ؟DM]\ڴ+O ,5"Y=.ȍY9݇#F;5IdN"ڍFST:s'5 wq>NJzKeĪdrlG{(I64CӌF#JU) ) H{+׮؅#+rղ'a"~D~5ŭG1*+f] +`UV iaY? 
[binary payload omitted: tail of a binary archive member that cannot be rendered as text]
bacula-15.0.3/src/qt-console/images/folderbothchecked.png  [binary PNG data omitted]
bacula-15.0.3/src/qt-console/images/unchecked.png  [binary PNG data omitted]
bacula-15.0.3/src/qt-console/images/64p.png  [binary PNG data omitted]
bacula-15.0.3/src/qt-console/images/media-floppy.svg  [SVG data omitted; surviving metadata: "Media Floppy", Tuomas Kuosmanen, http://www.tango-project.org, Jakub Steiner]
bacula-15.0.3/src/qt-console/images/storage_icon_48dp.png  [binary PNG data omitted]
bacula-15.0.3/src/qt-console/images/graph1.png  [binary PNG data omitted]
bacula-15.0.3/src/qt-console/images/browse.svg  [SVG data omitted; surviving metadata: "Folder Icon", Jakub Steiner, http://jimmac.musichall.cz]
bacula-15.0.3/src/qt-console/images/mark.png  [binary PNG data omitted]
bacula-15.0.3/src/qt-console/images/bat_icon.icns  [binary Apple icon (icns) data omitted: is32/il32/it32 bitmaps, s8mk/l8mk/t8mk masks, embedded ic08/ic09 PNGs]
[further archive members follow in the original stream, including the beginning of src/qt-console/joblog/joblog.cpp; their readable text is interleaved with the binary in this rendering and resumes below]

" + curTime + "" + curTime + "
"; /* Iterate through the lines of results. */ QString field; QStringList fieldlist; QString lastTime; QString lastSvc; foreach (QString resultline, results) { fieldlist = resultline.split("\t"); if (fieldlist.size() < 2) continue; htmlbuf +=""; QString curTime = fieldlist[0].trimmed(); field = fieldlist[1].trimmed(); int colon = field.indexOf(":"); if (colon > 0) { /* string is like : ..." * we split at ':' then remove the jobId xxxx string (always the same) */ QString curSvc(field.left(colon).replace(jobstr,"").trimmed()); if (curSvc == lastSvc && curTime == lastTime) { curTime.clear(); curSvc.clear(); } else { lastTime = curTime; lastSvc = curSvc; } htmlbuf += ""; htmlbuf += ""; /* rest of string is marked as pre-formatted (here trimming should * be avoided, to preserve original formatting) */ QString msg(field.mid(colon+2)); if (msg.startsWith( tr("Error:")) ) { /* FIXME: should really be translated ? */ /* error msg, use a specific class */ htmlbuf += ""; } else { htmlbuf += ""; } } else { /* non standard string, place as-is */ if (curTime == lastTime) { curTime.clear(); } else { lastTime = curTime; } htmlbuf += ""; htmlbuf += ""; } htmlbuf += ""; } /* foreach resultline */ htmlbuf += "
" + curTime + "

" + curSvc + "

" + msg + "
" + msg + "
" + curTime + "
" + field + "
"; /* full text ready. Here a custom sheet is used to align columns */ QString logSheet("p,pre,.err {margin-left: 10px} .err {color:#FF0000;}"); textEdit->document()->setDefaultStyleSheet(logSheet); textEdit->document()->setHtml(htmlbuf); textEdit->moveCursor(QTextCursor::Start); } /* if results from query */ } bacula-15.0.3/src/qt-console/joblog/joblog.h0000644000175000017500000000215514771010173020441 0ustar bsbuildbsbuild#ifndef _JOBLOG_H_ #define _JOBLOG_H_ /* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Dirk Bartley, March 2007 */ #if QT_VERSION >= 0x050000 #include #else #include #endif #include "ui_joblog.h" #include "console.h" class JobLog : public Pages, public Ui::JobLogForm { Q_OBJECT public: JobLog(QString &jobId, QTreeWidgetItem *parentTreeWidgetItem); public slots: private slots: private: void populateText(); void getFont(); QTextCursor *m_cursor; QString m_jobId; }; #endif /* _JOBLOG_H_ */ bacula-15.0.3/src/qt-console/medialist/0000755000175000017500000000000014771010173017510 5ustar bsbuildbsbuildbacula-15.0.3/src/qt-console/medialist/medialist.h0000644000175000017500000000317214771010173021637 0ustar bsbuildbsbuild#ifndef _MEDIALIST_H_ #define _MEDIALIST_H_ /* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Dirk Bartley, March 2007 */ #if QT_VERSION >= 0x050000 #include #else #include #endif #include "ui_medialist.h" #include "console.h" #include class MediaList : public Pages, public Ui::MediaListForm { Q_OBJECT public: MediaList(); ~MediaList(); virtual void PgSeltreeWidgetClicked(); virtual void currentStackItem(); public slots: void treeItemChanged(QTreeWidgetItem *, QTreeWidgetItem *); private slots: void populateTree(); void showJobs(); void viewVolume(); void editVolume(); void deleteVolume(); void purgeVolume(); void pruneVolume(); void relabelVolume(); void allVolumesFromPool(); void allVolumes(); void volumeFromPool(); private: void createContextMenu(); void writeExpandedSettings(); QString m_currentVolumeName; QString m_currentVolumeId; bool m_populated; bool m_checkcurwidget; QTreeWidgetItem *m_topItem; }; #endif /* _MEDIALIST_H_ */ bacula-15.0.3/src/qt-console/medialist/medialist.ui0000644000175000017500000000744114771010173022030 0ustar bsbuildbsbuild MediaListForm 0 0 490 303 Media Tree 9 6 :/images/view-refresh.png Refresh Media List Requery the director for the list of media. 
:/images/cartridge-edit.png Edit Volume :/images/emblem-system.png List Jobs On Volume :/images/edit-delete.png Delete Volume :/images/edit-cut.png Prune Volume :/images/weather-severe-alert.png Purge Volume :/images/label.png Relabel Volume :/images/cartridge-edit.png Update all Volumes From Pool Update all Volumes From Pool Update all Volumes From Pool Update all Volumes From Pool :/images/cartridge-edit.png Update all Volumes from all Pools Update all Volumes from all Pools Update all Volumes from all Pools Update all Volumes from all Pools :/images/cartridge-edit.png Volume From Pool bacula-15.0.3/src/qt-console/medialist/mediaview.ui0000644000175000017500000001711014771010173022021 0ustar bsbuildbsbuild MediaViewForm 0 0 949 638 Form Edit :/images/edit.png:/images/edit.png true Purge :/images/purge.png:/images/purge.png true Delete :/images/purge.png:/images/purge.png true Prune :/images/edit-cut.png:/images/edit-cut.png true Qt::Horizontal 40 20 Filter Media Type: Status: Limit: 1 1000 100 Name: 0 0 Qt::Horizontal 40 20 Pool: Location: Expired Qt::Horizontal 40 20 Apply :/images/view-refresh.png:/images/view-refresh.png QAbstractItemView::ExtendedSelection QAbstractItemView::SelectRows true Volume Name Online Slot Vol Bytes Vol Usage Vol Status Pool Media Type Last Written When expire? bacula-15.0.3/src/qt-console/medialist/medialist.cpp0000644000175000017500000003704314771010173022176 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * MediaList Class * * Dirk Bartley, March 2007 * */ #include "bat.h" #include #include #include #include "medialist.h" #include "mediaedit/mediaedit.h" #include "mediainfo/mediainfo.h" #include "joblist/joblist.h" #include "relabel/relabel.h" #include "run/run.h" #include "util/fmtwidgetitem.h" MediaList::MediaList() : Pages() { setupUi(this); m_name = tr("Pools"); pgInitialize(); QTreeWidgetItem* thisitem = mainWin->getFromHash(this); thisitem->setIcon(0,QIcon(QString::fromUtf8(":images/cartridge.png"))); /* mp_treeWidget, Storage Tree Tree Widget inherited from ui_medialist.h */ m_populated = false; m_checkcurwidget = true; m_closeable = false; /* add context sensitive menu items specific to this classto the page * selector tree. m_contextActions is QList of QActions */ m_contextActions.append(actionRefreshMediaList); } MediaList::~MediaList() { if (m_populated) writeExpandedSettings(); } /* * The main meat of the class!! The function that querries the director and * creates the widgets with appropriate values. 
*/ void MediaList::populateTree() { QTreeWidgetItem *pooltreeitem = NULL; if (m_populated) { writeExpandedSettings(); } m_populated = true; Freeze frz(*mp_treeWidget); /* disable updating*/ QStringList headerlist = (QStringList() << tr("Volume Name") << tr("Id") << tr("Status") << tr("Enabled") << tr("Bytes") << tr("Files") << tr("Jobs") << tr("Retention") << tr("Media Type") << tr("Slot") << tr("Use Duration") << tr("Max Jobs") << tr("Max Files") << tr("Max Bytes") << tr("Recycle") << tr("Last Written") << tr("First Written") << tr("Read Time") << tr("Write Time") << tr("Recycle Count") << tr("Recycle Pool")); m_checkcurwidget = false; mp_treeWidget->clear(); m_checkcurwidget = true; mp_treeWidget->setColumnCount(headerlist.count()); m_topItem = new QTreeWidgetItem(mp_treeWidget); m_topItem->setText(0, tr("Pools")); m_topItem->setData(0, Qt::UserRole, 0); m_topItem->setExpanded(true); mp_treeWidget->setHeaderLabels(headerlist); QSettings settings(m_console->m_dir->name(), "bat"); settings.beginGroup("MediaListTreeExpanded"); QString query; /* Comma separated list of pools first */ bool first = true; QString pool_comsep(""); foreach (QString pool_listItem, m_console->pool_list) { if (first) { pool_comsep += "'" + pool_listItem + "'"; first = false; } else pool_comsep += ",'" + pool_listItem + "'"; } /* Now use pool_comsep list to perform just one query */ if (pool_comsep != "") { query = "SELECT Pool.Name AS pul," " Media.VolumeName AS Media, " " Media.MediaId AS Id, Media.VolStatus AS VolStatus," " Media.Enabled AS Enabled, Media.VolBytes AS Bytes," " Media.VolFiles AS FileCount, Media.VolJobs AS JobCount," " Media.VolRetention AS VolumeRetention, Media.MediaType AS MediaType," " Media.InChanger AS InChanger, Media.Slot AS Slot, " " Media.VolUseDuration AS UseDuration," " Media.MaxVolJobs AS MaxJobs, Media.MaxVolFiles AS MaxFiles," " Media.MaxVolBytes AS MaxBytes, Media.Recycle AS Recycle," " Media.LastWritten AS LastWritten," " Media.FirstWritten AS FirstWritten," " (VolReadTime/1000000) AS ReadTime, (VolWriteTime/1000000) AS WriteTime," " RecycleCount AS ReCyCount," " RecPool.Name AS RecyclePool" " FROM Media" " JOIN Pool ON (Media.PoolId=Pool.PoolId)" " LEFT OUTER JOIN Pool AS RecPool ON (Media.RecyclePoolId=RecPool.PoolId)" " WHERE "; query += " Pool.Name IN (" + pool_comsep + ")"; query += " ORDER BY Pool.Name, Media"; if (mainWin->m_sqlDebug) { Pmsg1(000, "MediaList query cmd : %s\n",query.toUtf8().data()); } QStringList results; int counter = 0; if (m_console->sql_cmd(query, results)) { QStringList fieldlist; QString prev_pool(""); QString this_pool(""); /* Iterate through the lines of results. 
*/ foreach (QString resultline, results) { fieldlist = resultline.split("\t"); this_pool = fieldlist.takeFirst(); if (prev_pool != this_pool) { prev_pool = this_pool; pooltreeitem = new QTreeWidgetItem(m_topItem); pooltreeitem->setText(0, this_pool); pooltreeitem->setData(0, Qt::UserRole, 1); } if(settings.contains(this_pool)) { pooltreeitem->setExpanded(settings.value(this_pool).toBool()); } else { pooltreeitem->setExpanded(true); } if (fieldlist.size() < 21) { // Handle recyclepool specifically, and pool is already removed Pmsg2(000, "Unexpected line %s %d", resultline.toUtf8().data(), fieldlist.size()); continue; // some fields missing, ignore row } int index = 0; TreeItemFormatter mediaitem(*pooltreeitem, 2); /* Iterate through fields in the record */ QStringListIterator fld(fieldlist); /* volname */ mediaitem.setTextFld(index++, fld.next()); /* id */ mediaitem.setNumericFld(index++, fld.next()); /* status */ mediaitem.setVolStatusFld(index++, fld.next()); /* enabled */ mediaitem.setBoolFld(index++, fld.next()); /* bytes */ mediaitem.setBytesFld(index++, fld.next()); /* files */ mediaitem.setNumericFld(index++, fld.next()); /* jobs */ mediaitem.setNumericFld(index++, fld.next()); /* retention */ mediaitem.setDurationFld(index++, fld.next()); /* media type */ mediaitem.setTextFld(index++, fld.next()); /* inchanger + slot */ int inchanger = fld.next().toInt(); if (inchanger) { mediaitem.setNumericFld(index++, fld.next()); } else { /* volume not in changer, show blank slot */ mediaitem.setNumericFld(index++, ""); fld.next(); } /* use duration */ mediaitem.setDurationFld(index++, fld.next()); /* max jobs */ mediaitem.setNumericFld(index++, fld.next()); /* max files */ mediaitem.setNumericFld(index++, fld.next()); /* max bytes */ mediaitem.setBytesFld(index++, fld.next()); /* recycle */ mediaitem.setBoolFld(index++, fld.next()); /* last written */ mediaitem.setTextFld(index++, fld.next()); /* first written */ mediaitem.setTextFld(index++, fld.next()); /* read time */ mediaitem.setDurationFld(index++, fld.next()); /* write time */ mediaitem.setDurationFld(index++, fld.next()); /* Recycle Count */ mediaitem.setNumericFld(index++, fld.next()); /* recycle pool */ if (fld.hasNext()) { mediaitem.setTextFld(index++, fld.next()); } else { mediaitem.setTextFld(index++, ""); } } /* foreach resultline */ counter += 1; } /* if results from query */ } /* foreach pool_listItem */ settings.endGroup(); /* Resize the columns */ for(int cnter=0; cnterresizeColumnToContents(cnter); } } /* * Called from the signal of the context sensitive menu! */ void MediaList::editVolume() { MediaEdit* edit = new MediaEdit(mainWin->getFromHash(this), m_currentVolumeId); connect(edit, SIGNAL(destroyed()), this, SLOT(populateTree())); } /* * Called from the signal of the context sensitive menu! */ void MediaList::showJobs() { QTreeWidgetItem *parentItem = mainWin->getFromHash(this); mainWin->createPageJobList(m_currentVolumeName, "", "", "", parentItem); } /* * Called from the signal of the context sensitive menu! */ void MediaList::viewVolume() { QTreeWidgetItem *parentItem = mainWin->getFromHash(this); MediaInfo* view = new MediaInfo(parentItem, m_currentVolumeName); connect(view, SIGNAL(destroyed()), this, SLOT(populateTree())); } /* * When the treeWidgetItem in the page selector tree is singleclicked, Make sure * The tree has been populated. 
*/ void MediaList::PgSeltreeWidgetClicked() { if (!m_populated) { populateTree(); createContextMenu(); } if (!isOnceDocked()) { dockPage(); } } /* * Added to set the context menu policy based on currently active treeWidgetItem * signaled by currentItemChanged */ void MediaList::treeItemChanged(QTreeWidgetItem *currentwidgetitem, QTreeWidgetItem *previouswidgetitem ) { /* m_checkcurwidget checks to see if this is during a refresh, which will segfault */ if (m_checkcurwidget) { /* The Previous item */ if (previouswidgetitem) { /* avoid a segfault if first time */ foreach(QAction* mediaAction, mp_treeWidget->actions()) { mp_treeWidget->removeAction(mediaAction); } } int treedepth = currentwidgetitem->data(0, Qt::UserRole).toInt(); m_currentVolumeName=currentwidgetitem->text(0); mp_treeWidget->addAction(actionRefreshMediaList); if (treedepth == 2){ m_currentVolumeId=currentwidgetitem->text(1); mp_treeWidget->addAction(actionEditVolume); mp_treeWidget->addAction(actionListJobsOnVolume); mp_treeWidget->addAction(actionDeleteVolume); mp_treeWidget->addAction(actionPruneVolume); mp_treeWidget->addAction(actionPurgeVolume); mp_treeWidget->addAction(actionRelabelVolume); mp_treeWidget->addAction(actionVolumeFromPool); } else if (treedepth == 1) { mp_treeWidget->addAction(actionAllVolumesFromPool); } } } /* * Setup a context menu * Made separate from populate so that it would not create context menu over and * over as the tree is repopulated. */ void MediaList::createContextMenu() { mp_treeWidget->setContextMenuPolicy(Qt::ActionsContextMenu); connect(mp_treeWidget, SIGNAL(itemDoubleClicked(QTreeWidgetItem *, int)), this, SLOT(viewVolume())); connect(actionEditVolume, SIGNAL(triggered()), this, SLOT(editVolume())); connect(actionListJobsOnVolume, SIGNAL(triggered()), this, SLOT(showJobs())); connect(actionDeleteVolume, SIGNAL(triggered()), this, SLOT(deleteVolume())); connect(actionPurgeVolume, SIGNAL(triggered()), this, SLOT(purgeVolume())); connect(actionPruneVolume, SIGNAL(triggered()), this, SLOT(pruneVolume())); connect(actionRelabelVolume, SIGNAL(triggered()), this, SLOT(relabelVolume())); connect(mp_treeWidget, SIGNAL( currentItemChanged(QTreeWidgetItem *, QTreeWidgetItem *)), this, SLOT(treeItemChanged(QTreeWidgetItem *, QTreeWidgetItem *))); /* connect to the action specific to this pages class */ connect(actionRefreshMediaList, SIGNAL(triggered()), this, SLOT(populateTree())); connect(actionAllVolumes, SIGNAL(triggered()), this, SLOT(allVolumes())); connect(actionAllVolumesFromPool, SIGNAL(triggered()), this, SLOT(allVolumesFromPool())); connect(actionVolumeFromPool, SIGNAL(triggered()), this, SLOT(volumeFromPool())); } /* * Virtual function which is called when this page is visible on the stack */ void MediaList::currentStackItem() { if(!m_populated) { populateTree(); /* Create the context menu for the medialist tree */ createContextMenu(); } } /* * Called from the signal of the context sensitive menu to delete a volume! */ void MediaList::deleteVolume() { if (QMessageBox::warning(this, "Bat", tr("Are you sure you want to delete?? !!!.\n" "This delete command is used to delete a Volume record and all associated catalog" " records that were created. This command operates only on the Catalog" " database and has no effect on the actual data written to a Volume. This" " command can be dangerous and we strongly recommend that you do not use" " it unless you know what you are doing. All Jobs and all associated" " records (File and JobMedia) will be deleted from the catalog." 
"Press OK to proceed with delete operation.?"), QMessageBox::Ok | QMessageBox::Cancel) == QMessageBox::Cancel) { return; } QString cmd("delete volume="); cmd += m_currentVolumeName; consoleCommand(cmd); } /* * Called from the signal of the context sensitive menu to purge! */ void MediaList::purgeVolume() { if (QMessageBox::warning(this, "Bat", tr("Are you sure you want to purge ?? !!!.\n" "The Purge command will delete associated Catalog database records from Jobs and" " Volumes without considering the retention period. Purge works only on the" " Catalog database and does not affect data written to Volumes. This command can" " be dangerous because you can delete catalog records associated with current" " backups of files, and we recommend that you do not use it unless you know what" " you are doing.\n" "Press OK to proceed with the purge operation?"), QMessageBox::Ok | QMessageBox::Cancel) == QMessageBox::Cancel) { return; } QString cmd("purge volume="); cmd += m_currentVolumeName; consoleCommand(cmd); populateTree(); } /* * Called from the signal of the context sensitive menu to prune! */ void MediaList::pruneVolume() { new prunePage(m_currentVolumeName, ""); } /* * Called from the signal of the context sensitive menu to relabel! */ void MediaList::relabelVolume() { setConsoleCurrent(); new relabelDialog(m_console, m_currentVolumeName); } /* * Called from the signal of the context sensitive menu to purge! */ void MediaList::allVolumesFromPool() { QString cmd = "update volume AllFromPool=" + m_currentVolumeName; consoleCommand(cmd); populateTree(); } void MediaList::allVolumes() { QString cmd = "update volume fromallpools"; consoleCommand(cmd); populateTree(); } /* * Called from the signal of the context sensitive menu to purge! */ void MediaList::volumeFromPool() { QTreeWidgetItem *currentItem = mp_treeWidget->currentItem(); QTreeWidgetItem *parent = currentItem->parent(); QString pool = parent->text(0); QString cmd; cmd = "update volume=" + m_currentVolumeName + " frompool=" + pool; consoleCommand(cmd); populateTree(); } /* * Write settings to save expanded states of the pools */ void MediaList::writeExpandedSettings() { if (m_topItem) { QSettings settings(m_console->m_dir->name(), "bat"); settings.beginGroup("MediaListTreeExpanded"); int childcount = m_topItem->childCount(); for (int cnt=0; cntchild(cnt); settings.setValue(poolitem->text(0), poolitem->isExpanded()); } settings.endGroup(); } } bacula-15.0.3/src/qt-console/medialist/mediaview.cpp0000644000175000017500000003152014771010173022167 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ #include "bat.h" #include #include #include #include "mediaview.h" #include "mediaedit/mediaedit.h" #include "mediainfo/mediainfo.h" #include "joblist/joblist.h" #include "relabel/relabel.h" #include "run/run.h" #include "util/fmtwidgetitem.h" MediaView::MediaView() : Pages() { setupUi(this); m_name = tr("Media"); pgInitialize(); QTreeWidgetItem* thisitem = mainWin->getFromHash(this); thisitem->setIcon(0,QIcon(QString::fromUtf8(":images/cartridge.png"))); connect(m_pbApply, SIGNAL(pressed()), this, SLOT(applyPushed())); connect(m_pbEdit, SIGNAL(pressed()), this, SLOT(editPushed())); connect(m_pbPurge, SIGNAL(pressed()), this, SLOT(purgePushed())); connect(m_pbDelete, SIGNAL(pressed()), this, SLOT(deletePushed())); connect(m_pbPrune, SIGNAL(pressed()), this, SLOT(prunePushed())); connect(m_tableMedia, SIGNAL(itemDoubleClicked(QTableWidgetItem*)), this, SLOT(showInfoForMedia(QTableWidgetItem *))); /* mp_treeWidget, Storage Tree Tree Widget inherited from ui_medialist.h */ m_populated = false; m_checkcurwidget = true; m_closeable = false; } void MediaView::showInfoForMedia(QTableWidgetItem * item) { QTreeWidgetItem* pageSelectorTreeWidgetItem = mainWin->getFromHash(this); int row = item->row(); QString vol = m_tableMedia->item(row, 0)->text(); new MediaInfo(pageSelectorTreeWidgetItem, vol); // connect(j, SIGNAL(destroyed()), this, SLOT(populateTree())); } MediaView::~MediaView() { } void MediaView::applyPushed() { populateTable(); } void MediaView::editPushed() { QStringList sel; QString cmd; getSelection(sel); for(int i=0; igetFromHash(this), cmd); } } void MediaView::purgePushed() { if (QMessageBox::warning(this, "Bat", tr("Are you sure you want to purge ?? !!!.\n" "The Purge command will delete associated Catalog database records from Jobs and" " Volumes without considering the retention period. Purge works only on the" " Catalog database and does not affect data written to Volumes. 
This command can" " be dangerous because you can delete catalog records associated with current" " backups of files, and we recommend that you do not use it unless you know what" " you are doing.\n" "Press OK to proceed with the purge operation?"), QMessageBox::Ok | QMessageBox::Cancel) == QMessageBox::Cancel) { return; } QStringList lst; QString cmd; getSelection(lst); for(int i=0; i items = m_tableMedia->selectedItems(); int row; int nrows; /* number of rows */ bool *tab; int nb = items.count(); if (!nb) { return false; } nrows = m_tableMedia->rowCount(); tab = (bool *) malloc (nrows * sizeof(bool)); memset(tab, 0, sizeof(bool)*nrows); for (int i = 0; i < nb; ++i) { row = items[i]->row(); if (!tab[row]) { tab[row] = true; it = m_tableMedia->item(row, 0); list.append(it->text()); } } free(tab); return list.count() > 0; } void MediaView::prunePushed() { QStringList sel; QString cmd; getSelection(sel); for(int i=0; iclear(); m_cbPool->addItem(""); m_cbPool->addItems(m_console->pool_list); m_cbStatus->clear(); m_cbStatus->addItem(""); m_cbStatus->addItems(m_console->volstatus_list); m_cbMediaType->clear(); m_cbMediaType->addItem(""); m_cbMediaType->addItems(m_console->mediatype_list); m_cbLocation->clear(); m_cbLocation->addItem(""); m_cbLocation->addItems(m_console->location_list); } /* * If chkExpired button is checked, we can remove all non Expired * entries */ void MediaView::filterExipired(QStringList &list) { utime_t t, now = time(NULL); QString resultline, stat, LastWritten; QStringList fieldlist; /* We should now in advance how many rows we will have */ if (m_chkExpired->isChecked()) { for (int i=list.size() -1; i >= 0; i--) { fieldlist = list.at(i).split("\t"); ASSERT(fieldlist.size() != 9); LastWritten = fieldlist.at(7); if (LastWritten == "") { list.removeAt(i); } else { stat = fieldlist.at(8); t = str_to_utime(LastWritten.toLatin1().data()); t = t + stat.toULongLong(); if (t > now) { list.removeAt(i); } } } } } /* * The main meat of the class!! The function that querries the director and * creates the widgets with appropriate values. 
*/ void MediaView::populateTable() { utime_t t; time_t ttime; QString stat, resultline, query; QString str_usage; QHash hash_size; QStringList fieldlist, results; char buf[256]; float usage; struct tm tm; m_populated = true; Freeze frz(*m_tableMedia); /* disable updating*/ QStringList where; QString cmd; if (m_cbPool->currentText() != "") { cmd = " Pool.Name = '" + m_cbPool->currentText() + "'"; where.append(cmd); } if (m_cbStatus->currentText() != "") { cmd = " Media.VolStatus = '" + m_cbStatus->currentText() + "'"; where.append(cmd); } if (m_cbStatus->currentText() != "") { cmd = " Media.VolStatus = '" + m_cbStatus->currentText() + "'"; where.append(cmd); } if (m_cbMediaType->currentText() != "") { cmd = " Media.MediaType = '" + m_cbMediaType->currentText() + "'"; where.append(cmd); } if (m_cbLocation->currentText() != "") { cmd = " Location.Location = '" + m_cbLocation->currentText() + "'"; where.append(cmd); } if (m_textName->text() != "") { cmd = " Media.VolumeName like '%" + m_textName->text() + "%'"; where.append(cmd); } if (where.size() > 0) { cmd = " WHERE " + where.join(" AND "); } else { cmd = ""; } query = "SELECT AVG(VolBytes) AS size, COUNT(1) as nb, " "MediaType FROM Media " "WHERE VolStatus IN ('Full', 'Used') " "GROUP BY MediaType"; if (mainWin->m_sqlDebug) { Pmsg1(000, "MediaView query cmd : %s\n",query.toUtf8().data()); } if (m_console->sql_cmd(query, results)) { foreach (resultline, results) { fieldlist = resultline.split("\t"); if (fieldlist.size() != 3) { Pmsg1(000, "Unexpected line %s", resultline.toUtf8().data()); continue; } if (fieldlist.at(1).toInt() >= 1) { // MediaType hash_size[fieldlist.at(2)] = fieldlist.at(0).toFloat(); } } } m_tableMedia->clearContents(); query = "SELECT VolumeName, InChanger, " "Slot, MediaType, VolStatus, VolBytes, Pool.Name, " "LastWritten, Media.VolRetention " "FROM Media JOIN Pool USING (PoolId) " "LEFT JOIN Location ON (Media.LocationId=Location.LocationId) " + cmd + " ORDER BY VolumeName LIMIT " + m_sbLimit->cleanText(); m_tableMedia->sortByColumn(0, Qt::AscendingOrder); m_tableMedia->setSortingEnabled(false); /* Don't sort during insert */ results.clear(); if (mainWin->m_sqlDebug) { Pmsg1(000, "MediaView query cmd : %s\n",query.toUtf8().data()); } if (m_console->sql_cmd(query, results)) { int row=0; filterExipired(results); m_tableMedia->setRowCount(results.size()); foreach (resultline, results) { // should have only one result int index = 0; QString Slot, VolBytes, MediaType, LastWritten, VolStatus; fieldlist = resultline.split("\t"); if (fieldlist.size() != 9) { Pmsg1(000, "Unexpected line %s", resultline.toUtf8().data()); continue; } QStringListIterator fld(fieldlist); TableItemFormatter mediaitem(*m_tableMedia, row); /* VolumeName */ mediaitem.setTextFld(index++, fld.next()); /* Online */ mediaitem.setInChanger(index++, fld.next()); Slot = fld.next(); // Slot mediaitem.setNumericFld(index++, Slot); MediaType = fld.next(); VolStatus = fld.next(); /* Volume bytes */ VolBytes = fld.next(); mediaitem.setBytesFld(index++, VolBytes); /* Usage */ usage = 0; if (hash_size.contains(MediaType) && hash_size[MediaType] != 0) { usage = VolBytes.toLongLong() * 100 / hash_size[MediaType]; } mediaitem.setPercent(index++, usage); /* Volstatus */ mediaitem.setVolStatusFld(index++, VolStatus); /* Pool */ mediaitem.setTextFld(index++, fld.next()); /* MediaType */ mediaitem.setTextFld(index++, MediaType); LastWritten = fld.next(); buf[0] = 0; if (LastWritten != "") { stat = fld.next(); // VolUseDuration t = 
str_to_utime(LastWritten.toLatin1().data()); t = t + stat.toULongLong(); ttime = t; localtime_r(&ttime, &tm); strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", &tm); } /* LastWritten */ mediaitem.setTextFld(index++, LastWritten); /* When expired */ mediaitem.setTextFld(index++, buf); row++; } } m_tableMedia->resizeColumnsToContents(); m_tableMedia->resizeRowsToContents(); m_tableMedia->verticalHeader()->hide(); m_tableMedia->setSortingEnabled(true); /* make read only */ m_tableMedia->setEditTriggers(QAbstractItemView::NoEditTriggers); } /* * When the treeWidgetItem in the page selector tree is singleclicked, Make sure * The tree has been populated. */ void MediaView::PgSeltreeWidgetClicked() { if (!m_populated) { populateForm(); populateTable(); } if (!isOnceDocked()) { dockPage(); } } /* * Virtual function which is called when this page is visible on the stack */ void MediaView::currentStackItem() { if(!m_populated) { populateForm(); populateTable(); } } // /* // * Called from the signal of the context sensitive menu to relabel! // */ // void MediaView::relabelVolume() // { // setConsoleCurrent(); // new relabelDialog(m_console, m_currentVolumeName); // } // // /* // * Called from the signal of the context sensitive menu to purge! // */ // void MediaView::allVolumesFromPool() // { // QString cmd = "update volume AllFromPool=" + m_currentVolumeName; // consoleCommand(cmd); // populateTable(); // } // // void MediaView::allVolumes() // { // QString cmd = "update volume allfrompools"; // consoleCommand(cmd); // populateTable(); // } // // /* // * Called from the signal of the context sensitive menu to purge! // */ // void MediaView::volumeFromPool() // { // QTreeWidgetItem *currentItem = mp_treeWidget->currentItem(); // QTreeWidgetItem *parent = currentItem->parent(); // QString pool = parent->text(0); // QString cmd; // cmd = "update volume=" + m_currentVolumeName + " frompool=" + pool; // consoleCommand(cmd); // populateTable(); // } // bacula-15.0.3/src/qt-console/medialist/mediaview.h0000644000175000017500000000301714771010173021634 0ustar bsbuildbsbuild#ifndef _MEDIAVIEW_H_ #define _MEDIAVIEW_H_ /* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ #if QT_VERSION >= 0x050000 #include #else #include #endif #include "ui_mediaview.h" #include "console.h" #include class MediaView : public Pages, public Ui::MediaViewForm { Q_OBJECT public: MediaView(); ~MediaView(); private slots: void populateTable(); void populateForm(); void PgSeltreeWidgetClicked(); void currentStackItem(); void applyPushed(); void editPushed(); void purgePushed(); void prunePushed(); void deletePushed(); bool getSelection(QStringList &ret); void showInfoForMedia(QTableWidgetItem * item); void filterExipired(QStringList &list); // void relabelVolume(); // void allVolumesFromPool(); // void allVolumes(); // void volumeFromPool(); private: bool m_populated; bool m_checkcurwidget; QTreeWidgetItem *m_topItem; }; #endif /* _MEDIAVIEW_H_ */ bacula-15.0.3/src/qt-console/bat.conf.example0000644000175000017500000000023114771010173020600 0ustar bsbuildbsbuild# # Bacula User Agent (or Console) Configuration File # Director { Name = rufus-dir DIRport = 8101 address = localhost Password = UA_password } bacula-15.0.3/src/qt-console/run/0000755000175000017500000000000014771010173016341 5ustar bsbuildbsbuildbacula-15.0.3/src/qt-console/run/estimate.ui0000644000175000017500000001604014771010173020514 0ustar bsbuildbsbuild estimateForm 0 0 562 308 Form 9 6 0 6 0 6 Qt::RightToLeft List Files Qt::Horizontal 40 20 0 6 Level: levelCombo Client: clientCombo 65 16777215 Job: jobCombo FileSet: filesetCombo 0 6 Qt::Horizontal 71 21 16777215 30 <h3>Estimate a backup Job</h3> Qt::Horizontal 81 20 0 6 Qt::Horizontal 40 20 OK Cancel Qt::Horizontal 40 20 Qt::Vertical QSizePolicy::Expanding 351 16 Qt::Vertical QSizePolicy::Expanding 351 16 Qt::Horizontal 40 20 bacula-15.0.3/src/qt-console/run/run.ui0000644000175000017500000002357514771010173017520 0ustar bsbuildbsbuild runForm 0 0 594 415 0 0 Run job 16777215 30 11 <h3>Run a Job</h3> 0 0 0 5 Qt::Horizontal :/images/runit.png Job properties Job: jobCombo 6 0 0 0 0 Type: <h3>Backup<h3/> Client: clientCombo Priority: prioritySpin 1 10000 10 Level: levelCombo Qt::Vertical 56 151 FileSet: filesetCombo Pool: poolCombo Storage: storageCombo Messages: messagesCombo When: dateTimeEdit 0 2 0 2000 1 1 yyyy-mm-dd hh:mm:ss true Bootstrap: true bootstrap true 200 0 false 0 0 0 5 Qt::Horizontal Qt::Horizontal 40 20 OK Cancel bacula-15.0.3/src/qt-console/run/prune.ui0000644000175000017500000001664414771010173020044 0ustar bsbuildbsbuild pruneForm 0 0 514 363 Form 9 6 0 6 0 6 Prune Files Qt::Horizontal 40 20 Volume: volumeCombo 0 6 Qt::Horizontal 71 21 16777215 30 <h3>Prune Files/Jobs/Volumes</h3> Qt::Horizontal 81 20 0 6 Prune Jobs Qt::Horizontal 40 20 0 6 Qt::Horizontal 40 20 OK Cancel 65 16777215 Client: clientCombo 0 6 Prune Volumes Qt::Horizontal 40 20 Qt::Vertical QSizePolicy::Expanding 351 16 Qt::Vertical QSizePolicy::Expanding 351 16 Qt::Horizontal 40 20 Qt::Horizontal 40 20 bacula-15.0.3/src/qt-console/run/runbackup.ui0000644000175000017500000002422514771010173020677 0ustar bsbuildbsbuild runBackupForm 0 0 704 466 Form 9 6 0 6 0 6 Priority: prioritySpin 10000 1 12 Qt::Vertical 20 171 Qt::Horizontal 40 20 Qt::Vertical QSizePolicy::MinimumExpanding 572 16 Qt::Horizontal 40 20 Qt::Vertical QSizePolicy::MinimumExpanding 572 16 0 6 0 2 0 2000 1 1 yyyy-mm-dd hh:mm:ss true When: dateTimeEdit Where: true where Bootstrap: bootstrap Job: jobCombo true 200 0 false Storage: storageCombo FileSet: filesetCombo true 200 0 false Replace: To client: clientCombo Catalog: catalogCombo 0 6 Qt::Horizontal 40 20 OK Cancel 0 6 Qt::Horizontal QSizePolicy::Fixed 131 25 16777215 
30 <h3>Run Restore Job</h3> Qt::AlignCenter Qt::Horizontal QSizePolicy::Fixed 131 25 bacula-15.0.3/src/qt-console/run/runadmin.ui0000644000175000017500000002422314771010173020520 0ustar bsbuildbsbuild runAdminForm 0 0 704 466 Form 9 6 0 6 0 6 Priority: prioritySpin 10000 1 12 Qt::Vertical 20 171 Qt::Horizontal 40 20 Qt::Vertical QSizePolicy::MinimumExpanding 572 16 Qt::Horizontal 40 20 Qt::Vertical QSizePolicy::MinimumExpanding 572 16 0 6 0 2 0 2000 1 1 yyyy-mm-dd hh:mm:ss true When: dateTimeEdit Where: true where Bootstrap: bootstrap Job: jobCombo true 200 0 false Storage: storageCombo FileSet: filesetCombo true 200 0 false Replace: To client: clientCombo Catalog: catalogCombo 0 6 Qt::Horizontal 40 20 OK Cancel 0 6 Qt::Horizontal QSizePolicy::Fixed 131 25 16777215 30 <h3>Run Restore Job</h3> Qt::AlignCenter Qt::Horizontal QSizePolicy::Fixed 131 25 bacula-15.0.3/src/qt-console/run/prune.cpp0000644000175000017500000000733314771010173020204 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Run Dialog class * * Kern Sibbald, February MMVII * * $Id$ */ #include "bat.h" #include "run.h" /* * Setup all the combo boxes and display the dialog */ prunePage::prunePage(const QString &volume, const QString &client) : Pages() { QDateTime dt; m_name = tr("Prune"); pgInitialize(); setupUi(this); m_conn = m_console->notifyOff(); QString query("SELECT VolumeName AS Media FROM Media ORDER BY Media"); if (mainWin->m_sqlDebug) { Pmsg1(000, "Query cmd : %s\n",query.toUtf8().data()); } QStringList results, volumeList; if (m_console->sql_cmd(query, results)) { QString field; QStringList fieldlist; /* Iterate through the lines of results. 
*/ foreach (QString resultline, results) { fieldlist = resultline.split("\t"); volumeList.append(fieldlist[0]); } /* foreach resultline */ } /* if results from query */ volumeCombo->addItem(tr("Any")); volumeCombo->addItems(volumeList); clientCombo->addItem(tr("Any")); clientCombo->addItems(m_console->client_list); connect(okButton, SIGNAL(pressed()), this, SLOT(okButtonPushed())); connect(cancelButton, SIGNAL(pressed()), this, SLOT(cancelButtonPushed())); filesRadioButton->setChecked(true); if (clientCombo->findText(client, Qt::MatchExactly) != -1) clientCombo->setCurrentIndex(clientCombo->findText(client, Qt::MatchExactly)); else clientCombo->setCurrentIndex(0); if (volumeCombo->findText(volume, Qt::MatchExactly) != -1) volumeCombo->setCurrentIndex(volumeCombo->findText(volume, Qt::MatchExactly)); else volumeCombo->setCurrentIndex(0); connect(volumeCombo, SIGNAL(currentIndexChanged(int)), this, SLOT(volumeChanged())); connect(clientCombo, SIGNAL(currentIndexChanged(int)), this, SLOT(clientChanged())); dockPage(); setCurrent(); this->show(); } void prunePage::okButtonPushed() { this->hide(); QString cmd("prune"); if (filesRadioButton->isChecked()) { cmd += " files"; } if (jobsRadioButton->isChecked()) { cmd += " jobs"; } if (filesRadioButton->isChecked()) { cmd += " volume"; } if (volumeCombo->currentText() != tr("Any")) { cmd += " volume=\"" + volumeCombo->currentText() + "\""; } if (clientCombo->currentText() != tr("Any")) { cmd += " client=\"" + clientCombo->currentText() + "\""; } cmd += " yes"; if (mainWin->m_commandDebug) { Pmsg1(000, "command : %s\n", cmd.toUtf8().data()); } consoleCommand(cmd); m_console->notify(m_conn, true); closeStackPage(); mainWin->resetFocus(); } void prunePage::cancelButtonPushed() { mainWin->set_status(tr(" Canceled")); this->hide(); m_console->notify(m_conn, true); closeStackPage(); mainWin->resetFocus(); } void prunePage::volumeChanged() { if ((volumeCombo->currentText() == tr("Any")) && (clientCombo->currentText() == tr("Any"))) { clientCombo->setCurrentIndex(1); } } void prunePage::clientChanged() { if ((volumeCombo->currentText() == tr("Any")) && (clientCombo->currentText() == tr("Any"))) { volumeCombo->setCurrentIndex(1); } } bacula-15.0.3/src/qt-console/run/run.cpp0000644000175000017500000001413014771010173017650 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * Run Dialog class * * Kern Sibbald, February MMVII * */ #include "bat.h" #include "run.h" runPage::runPage() : Pages() { init(); show(); } runPage::runPage(const QString &defJob) : Pages() { m_dockOnFirstUse = false; init(); if (defJob != "") jobCombo->setCurrentIndex(jobCombo->findText(defJob, Qt::MatchExactly)); show(); } runPage::runPage(const QString &defJob, const QString &level, const QString &pool, const QString &storage, const QString &client, const QString &fileset) : Pages() { m_dockOnFirstUse = false; init(); jobCombo->setCurrentIndex(jobCombo->findText(defJob, Qt::MatchExactly)); job_name_change(0); filesetCombo->setCurrentIndex(filesetCombo->findText(fileset, Qt::MatchExactly)); levelCombo->setCurrentIndex(levelCombo->findText(level, Qt::MatchExactly)); clientCombo->setCurrentIndex(clientCombo->findText(client,Qt::MatchExactly)); poolCombo->setCurrentIndex(poolCombo->findText(pool, Qt::MatchExactly)); if (storage != "") { // TODO: enable storage storageCombo->setCurrentIndex(storageCombo->findText(storage, Qt::MatchExactly)); } show(); } /* * Setup all the combo boxes and display the dialog */ void runPage::init() { QDateTime dt; QDesktopWidget *desk = QApplication::desktop(); QRect scrn; m_name = tr("Run"); pgInitialize(); setupUi(this); /* Get screen rectangle */ scrn = desk->screenGeometry(desk->primaryScreen()); /* Position this window in the middle of the screen */ this->move((scrn.width()-this->width())/2, (scrn.height()-this->height())/2); QTreeWidgetItem* thisitem = mainWin->getFromHash(this); thisitem->setIcon(0,QIcon(QString::fromUtf8(":images/run.png"))); m_conn = m_console->notifyOff(); m_console->beginNewCommand(m_conn); jobCombo->addItems(m_console->job_list); filesetCombo->addItems(m_console->fileset_list); levelCombo->addItems(m_console->level_list); clientCombo->addItems(m_console->client_list); poolCombo->addItems(m_console->pool_list); storageCombo->addItems(m_console->storage_list); dateTimeEdit->setDisplayFormat(mainWin->m_dtformat); dateTimeEdit->setDateTime(dt.currentDateTime()); /*printf("listing messages resources"); ***FIME *** foreach(QString mes, m_console->messages_list) { printf("%s\n", mes.toUtf8().data()); }*/ messagesCombo->addItems(m_console->messages_list); messagesCombo->setEnabled(false); job_name_change(0); connect(jobCombo, SIGNAL(currentIndexChanged(int)), this, SLOT(job_name_change(int))); connect(okButton, SIGNAL(pressed()), this, SLOT(okButtonPushed())); connect(cancelButton, SIGNAL(pressed()), this, SLOT(cancelButtonPushed())); // find a way to place the new window at the cursor position // or in the middle of the page // dockPage(); setCurrent(); } void runPage::okButtonPushed() { this->hide(); QString cmd; QTextStream(&cmd) << "run" << " job=\"" << jobCombo->currentText() << "\"" << " fileset=\"" << filesetCombo->currentText() << "\"" << " level=\"" << levelCombo->currentText() << "\"" << " client=\"" << clientCombo->currentText() << "\"" << " pool=\"" << poolCombo->currentText() << "\"" << " storage=\"" << storageCombo->currentText() << "\"" << " priority=\"" << prioritySpin->value() << "\"" " when=\"" << dateTimeEdit->dateTime().toString(mainWin->m_dtformat) << "\""; #ifdef xxx " messages=\"" << messagesCombo->currentText() << "\""; /* FIXME when there is an option to modify the messages resoruce associated * with a job */ #endif if (bootstrap->text() != "") { cmd += " bootstrap=\"" + bootstrap->text() + "\""; } cmd += " yes"; if (mainWin->m_commandDebug) { Pmsg1(000, "command : %s\n", cmd.toUtf8().data()); } 
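   /*
    * Illustration only (all values below are hypothetical, not taken from any
    * real configuration): the statements above assemble a single bconsole
    * "run" command of the form
    *
    *   run job="BackupCatalog" fileset="Full Set" level="Incremental"
    *       client="client1-fd" pool="Default" storage="File1"
    *       priority="10" when="2025-01-01 23:05:00" yes
    *
    * plus an optional bootstrap="..." clause when a bootstrap file is given.
    * The trailing "yes" lets the Director start the job without asking for
    * further confirmation.
    */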
consoleCommand(cmd); m_console->notify(m_conn, true); closeStackPage(); mainWin->resetFocus(); } void runPage::cancelButtonPushed() { mainWin->set_status(tr(" Canceled")); this->hide(); m_console->notify(m_conn, true); closeStackPage(); mainWin->resetFocus(); } /* * Called here when the jobname combo box is changed. * We load the default values for the new job in the * other combo boxes. */ void runPage::job_name_change(int index) { job_defaults job_defs; (void)index; job_defs.job_name = jobCombo->currentText(); if (m_console->get_job_defaults(job_defs)) { QString cmd; typeLabel->setText("

"+job_defs.type+"

"); filesetCombo->setCurrentIndex(filesetCombo->findText(job_defs.fileset_name, Qt::MatchExactly)); levelCombo->setCurrentIndex(levelCombo->findText(job_defs.level, Qt::MatchExactly)); clientCombo->setCurrentIndex(clientCombo->findText(job_defs.client_name, Qt::MatchExactly)); poolCombo->setCurrentIndex(poolCombo->findText(job_defs.pool_name, Qt::MatchExactly)); storageCombo->setCurrentIndex(storageCombo->findText(job_defs.store_name, Qt::MatchExactly)); messagesCombo->setCurrentIndex(messagesCombo->findText(job_defs.messages_name, Qt::MatchExactly)); m_console->level_list.clear(); cmd = ".levels " + job_defs.type; m_console->dir_cmd(cmd, m_console->level_list); levelCombo->clear(); levelCombo->addItems(m_console->level_list); levelCombo->setCurrentIndex(levelCombo->findText(job_defs.level, 0 /*Qt::MatchExactly*/)); } } bacula-15.0.3/src/qt-console/run/run.h0000644000175000017500000000412314771010173017316 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #ifndef _RUN_H_ #define _RUN_H_ #if QT_VERSION >= 0x050000 #include #else #include #endif #include "ui_run.h" #include "ui_runcmd.h" #include "ui_estimate.h" #include "ui_prune.h" #include "console.h" class runPage : public Pages, public Ui::runForm { Q_OBJECT public: runPage(); runPage(const QString &defJob); runPage(const QString &defJob, const QString &level, const QString &pool, const QString &storage, const QString &client, const QString &fileset); public slots: void okButtonPushed(); void cancelButtonPushed(); void job_name_change(int index); private: void init(); int m_conn; }; class runCmdPage : public Pages, public Ui::runCmdForm { Q_OBJECT public: runCmdPage(int conn); public slots: void okButtonPushed(); void cancelButtonPushed(); private: void fill(); int m_conn; }; class estimatePage : public Pages, public Ui::estimateForm { Q_OBJECT public: estimatePage(); public slots: void okButtonPushed(); void cancelButtonPushed(); void job_name_change(int index); private: int m_conn; bool m_aButtonPushed; }; class prunePage : public Pages, public Ui::pruneForm { Q_OBJECT public: prunePage(const QString &volume, const QString &client); public slots: void okButtonPushed(); void cancelButtonPushed(); void volumeChanged(); void clientChanged(); private: int m_conn; }; #endif /* _RUN_H_ */ bacula-15.0.3/src/qt-console/run/runrestore.ui0000644000175000017500000002422714771010173021117 0ustar bsbuildbsbuild runRestoreForm 0 0 704 466 Form 9 6 0 6 0 6 Priority: prioritySpin 10000 1 12 Qt::Vertical 20 171 Qt::Horizontal 40 20 Qt::Vertical QSizePolicy::MinimumExpanding 572 16 Qt::Horizontal 40 20 Qt::Vertical QSizePolicy::MinimumExpanding 572 16 0 6 0 2 0 2000 1 1 yyyy-mm-dd hh:mm:ss true When: dateTimeEdit Where: true where Bootstrap: bootstrap Job: jobCombo true 200 0 false Storage: storageCombo FileSet: filesetCombo true 200 0 false Replace: To client: clientCombo Catalog: catalogCombo 0 6 Qt::Horizontal 40 20 OK Cancel 0 6 Qt::Horizontal QSizePolicy::Fixed 131 25 16777215 30 <h3>Run 
Restore Job</h3> Qt::AlignCenter Qt::Horizontal QSizePolicy::Fixed 131 25 bacula-15.0.3/src/qt-console/run/runcmd.ui0000644000175000017500000002421714771010173020176 0ustar bsbuildbsbuild runCmdForm 0 0 704 466 Form 9 6 0 6 0 6 Priority: prioritySpin 10000 1 12 Qt::Vertical 20 171 Qt::Horizontal 40 20 Qt::Vertical QSizePolicy::MinimumExpanding 572 16 Qt::Horizontal 40 20 Qt::Vertical QSizePolicy::MinimumExpanding 572 16 0 6 0 2 0 2000 1 1 yyyy-mm-dd hh:mm:ss true When: dateTimeEdit Where: true where Bootstrap: bootstrap Job: jobCombo true 200 0 false Storage: storageCombo FileSet: filesetCombo true 200 0 false Replace: To client: clientCombo Catalog: catalogCombo 0 6 Qt::Horizontal 40 20 OK Cancel 0 6 Qt::Horizontal QSizePolicy::Fixed 131 25 16777215 30 <h3>Run Restore Job</h3> Qt::AlignCenter Qt::Horizontal QSizePolicy::Fixed 131 25 bacula-15.0.3/src/qt-console/run/runmigration.ui0000644000175000017500000002423314771010173021422 0ustar bsbuildbsbuild runMigrationForm 0 0 704 466 Form 9 6 0 6 0 6 Priority: prioritySpin 10000 1 12 Qt::Vertical 20 171 Qt::Horizontal 40 20 Qt::Vertical QSizePolicy::MinimumExpanding 572 16 Qt::Horizontal 40 20 Qt::Vertical QSizePolicy::MinimumExpanding 572 16 0 6 0 2 0 2000 1 1 yyyy-mm-dd hh:mm:ss true When: dateTimeEdit Where: true where Bootstrap: bootstrap Job: jobCombo true 200 0 false Storage: storageCombo FileSet: filesetCombo true 200 0 false Replace: To client: clientCombo Catalog: catalogCombo 0 6 Qt::Horizontal 40 20 OK Cancel 0 6 Qt::Horizontal QSizePolicy::Fixed 131 25 16777215 30 <h3>Run Restore Job</h3> Qt::AlignCenter Qt::Horizontal QSizePolicy::Fixed 131 25 bacula-15.0.3/src/qt-console/run/runcopy.ui0000644000175000017500000002422114771010173020400 0ustar bsbuildbsbuild runCopyForm 0 0 704 466 Form 9 6 0 6 0 6 Priority: prioritySpin 10000 1 12 Qt::Vertical 20 171 Qt::Horizontal 40 20 Qt::Vertical QSizePolicy::MinimumExpanding 572 16 Qt::Horizontal 40 20 Qt::Vertical QSizePolicy::MinimumExpanding 572 16 0 6 0 2 0 2000 1 1 yyyy-mm-dd hh:mm:ss true When: dateTimeEdit Where: true where Bootstrap: bootstrap Job: jobCombo true 200 0 false Storage: storageCombo FileSet: filesetCombo true 200 0 false Replace: To client: clientCombo Catalog: catalogCombo 0 6 Qt::Horizontal 40 20 OK Cancel 0 6 Qt::Horizontal QSizePolicy::Fixed 131 25 16777215 30 <h3>Run Restore Job</h3> Qt::AlignCenter Qt::Horizontal QSizePolicy::Fixed 131 25 bacula-15.0.3/src/qt-console/run/runcmd.cpp0000644000175000017500000001231614771010173020340 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Run Command Dialog class * * This is called when a Run Command signal is received from the * Director. We parse the Director's output and throw up a * dialog box. This happens, for example, after the user finishes * selecting files to be restored. The Director will then submit a * run command, that causes this page to be popped up. 
* * Kern Sibbald, March MMVII * */ #include "bat.h" #include "run.h" /* * Setup all the combo boxes and display the dialog */ runCmdPage::runCmdPage(int conn) : Pages() { m_name = tr("Restore Run"); pgInitialize(); setupUi(this); QTreeWidgetItem* thisitem = mainWin->getFromHash(this); thisitem->setIcon(0,QIcon(QString::fromUtf8(":images/restore.png"))); m_conn = conn; m_console->notify(conn, false); fill(); connect(okButton, SIGNAL(pressed()), this, SLOT(okButtonPushed())); connect(cancelButton, SIGNAL(pressed()), this, SLOT(cancelButtonPushed())); //dockPage(); setCurrent(); this->show(); } void runCmdPage::fill() { QString item, val; QStringList items; QRegExp rx("^.*:\\s*(\\S.*$)"); /* Regex to get value */ clientCombo->addItems(m_console->client_list); filesetCombo->addItems(m_console->fileset_list); replaceCombo->addItems(QStringList() << tr("never") << tr("always") << tr("ifnewer") << tr("ifolder")); replaceCombo->setCurrentIndex(replaceCombo->findText(tr("never"), Qt::MatchExactly)); storageCombo->addItems(m_console->storage_list); dateTimeEdit->setDisplayFormat(mainWin->m_dtformat); m_console->read(m_conn); item = m_console->msg(m_conn); items = item.split("\n"); foreach(item, items) { rx.indexIn(item); val = rx.cap(1); Dmsg1(100, "Item=%s\n", item.toUtf8().data()); Dmsg1(100, "Value=%s\n", val.toUtf8().data()); if (item.startsWith("Title:")) { run->setText(val); } if (item.startsWith("JobName:")) { jobCombo->addItem(val); continue; } if (item.startsWith("Bootstrap:")) { bootstrap->setText(val); continue; } if (item.startsWith("Backup Client:")) { clientCombo->setCurrentIndex(clientCombo->findText(val, Qt::MatchExactly)); continue; } if (item.startsWith("Storage:")) { storageCombo->setCurrentIndex(storageCombo->findText(val, Qt::MatchExactly)); continue; } if (item.startsWith("Where:")) { where->setText(val); continue; } if (item.startsWith("When:")) { dateTimeEdit->setDateTime(QDateTime::fromString(val,mainWin->m_dtformat)); continue; } if (item.startsWith("Catalog:")) { catalogCombo->addItem(val); continue; } if (item.startsWith("FileSet:")) { filesetCombo->setCurrentIndex(filesetCombo->findText(val, Qt::MatchExactly)); continue; } if (item.startsWith("Priority:")) { bool okay; int pri = val.toInt(&okay, 10); if (okay) prioritySpin->setValue(pri); continue; } if (item.startsWith("Replace:")) { int replaceIndex = replaceCombo->findText(val, Qt::MatchExactly); if (replaceIndex >= 0) replaceCombo->setCurrentIndex(replaceIndex); continue; } } } void runCmdPage::okButtonPushed() { QString cmd(".mod"); cmd += " restoreclient=\"" + clientCombo->currentText() + "\""; cmd += " fileset=\"" + filesetCombo->currentText() + "\""; cmd += " storage=\"" + storageCombo->currentText() + "\""; cmd += " replace=\"" + replaceCombo->currentText() + "\""; cmd += " when=\"" + dateTimeEdit->dateTime().toString(mainWin->m_dtformat) + "\""; cmd += " bootstrap=\"" + bootstrap->text() + "\""; cmd += " where=\"" + where->text() + "\""; QString pri; QTextStream(&pri) << " priority=\"" << prioritySpin->value() << "\""; cmd += pri; cmd += " yes\n"; setConsoleCurrent(); QString displayhtml(""); displayhtml += cmd + "\n"; m_console->display_html(displayhtml); m_console->display_text("\n"); m_console->write_dir(m_conn, cmd.toUtf8().data()); m_console->displayToPrompt(m_conn); // consoleCommand(cmd); ***FIXME set back to consoleCommand when connection issue is resolved m_console->notify(m_conn, true); closeStackPage(); } void runCmdPage::cancelButtonPushed() { this->hide(); m_console->write_dir(m_conn, "no"); 
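   /* The OK handler above completes the pending run command via ".mod" and
    * confirms it with a trailing "yes"; answering "no" here declines the run,
    * so the restore job the Director queued up is discarded. */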
m_console->displayToPrompt(m_conn); m_console->notify(m_conn, true); mainWin->set_status(tr(" Canceled")); closeStackPage(); mainWin->resetFocus(); } bacula-15.0.3/src/qt-console/run/estimate.cpp0000644000175000017500000000631114771010173020661 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Run Dialog class * * Kern Sibbald, February MMVII * * $Id$ */ #include "bat.h" #include "run.h" /* * Setup all the combo boxes and display the dialog */ estimatePage::estimatePage() : Pages() { QDateTime dt; m_name = tr("Estimate"); pgInitialize(); setupUi(this); m_conn = m_console->notifyOff(); m_console->beginNewCommand(m_conn); jobCombo->addItems(m_console->job_list); filesetCombo->addItems(m_console->fileset_list); levelCombo->addItems(m_console->level_list); clientCombo->addItems(m_console->client_list); job_name_change(0); connect(jobCombo, SIGNAL(currentIndexChanged(int)), this, SLOT(job_name_change(int))); connect(okButton, SIGNAL(pressed()), this, SLOT(okButtonPushed())); connect(cancelButton, SIGNAL(pressed()), this, SLOT(cancelButtonPushed())); QTreeWidgetItem* thisitem = mainWin->getFromHash(this); thisitem->setIcon(0,QIcon(QString::fromUtf8(":images/estimate-job.png"))); dockPage(); setCurrent(); this->show(); m_aButtonPushed = false; } void estimatePage::okButtonPushed() { if (m_aButtonPushed) return; m_aButtonPushed = true; this->hide(); QString cmd; QTextStream(&cmd) << "estimate" << " job=\"" << jobCombo->currentText() << "\"" << " fileset=\"" << filesetCombo->currentText() << "\"" << " level=\"" << levelCombo->currentText() << "\"" << " client=\"" << clientCombo->currentText() << "\""; if (listingCheckBox->checkState() == Qt::Checked) { cmd += " listing"; } if (mainWin->m_commandDebug) { Pmsg1(000, "command : %s\n", cmd.toUtf8().data()); } consoleCommand(cmd, m_conn, true, true); m_console->notify(m_conn, true); closeStackPage(); mainWin->resetFocus(); } void estimatePage::cancelButtonPushed() { if (m_aButtonPushed) return; m_aButtonPushed = true; mainWin->set_status(" Canceled"); this->hide(); m_console->notify(m_conn, true); closeStackPage(); mainWin->resetFocus(); } /* * Called here when the jobname combo box is changed. * We load the default values for the new job in the * other combo boxes. 
*/ void estimatePage::job_name_change(int index) { job_defaults job_defs; (void)index; job_defs.job_name = jobCombo->currentText(); if (m_console->get_job_defaults(m_conn, job_defs)) { filesetCombo->setCurrentIndex(filesetCombo->findText(job_defs.fileset_name, Qt::MatchExactly)); levelCombo->setCurrentIndex(levelCombo->findText(job_defs.level, Qt::MatchExactly)); clientCombo->setCurrentIndex(clientCombo->findText(job_defs.client_name, Qt::MatchExactly)); } } bacula-15.0.3/src/qt-console/util/0000755000175000017500000000000014771010173016512 5ustar bsbuildbsbuildbacula-15.0.3/src/qt-console/util/fmtwidgetitem.h0000644000175000017500000001442614771010173021543 0ustar bsbuildbsbuild#ifndef _FMTWIDGETITEM_H_ #define _FMTWIDGETITEM_H_ /* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * TreeView formatting helpers - Riccardo Ghetta, May 2008 */ class QWidget; class QTreeWidgetItem; class QTableWidget; class QTableWidgetItem; class QString; class QBrush; /* * common conversion routines * */ QString convertJobStatus(const QString &sts); /* bytes formatted as power-of-two with IEC suffixes (KiB, MiB, and so on) */ QString convertBytesIEC(qint64 fld); /* bytes formatted as power-of-ten with SI suffixes (kB, MB, and so on) */ QString convertBytesSI(qint64 fld); /* * disable widget updating */ class Freeze { private: QWidget *qw; public: Freeze(QWidget &q); ~Freeze(); }; /* * base class for formatters * */ class ItemFormatterBase { public: enum BYTES_CONVERSION { BYTES_CONVERSION_NONE, BYTES_CONVERSION_IEC, BYTES_CONVERSION_SI, }; public: virtual ~ItemFormatterBase(); /* Prints Yes if fld is != 0, No otherwise. Centers field if center true*/ void setBoolFld(int index, const QString &fld, bool center = true); void setBoolFld(int index, int fld, bool center = true); /* Print nice icon to represent percent */ void setPercent(int index, float number); /* Normal text field. Centers field if center true*/ void setTextFld(int index, const QString &fld, bool center = false); /* Normal date field. Centers field if center true*/ void setDateFld(int index, utime_t fld, bool center = false); /* Right-aligned text field. */ void setRightFld(int index, const QString &fld); /* Numeric field - sorted as numeric type */ void setNumericFld(int index, const QString &fld); void setNumericFld(int index, const QString &fld, const QVariant &sortVal); /* fld value interpreted as bytes and formatted with size suffixes */ void setBytesFld(int index, const QString &fld); /* fld value interpreted as seconds and formatted with y,m,w,h suffixes */ void setDurationFld(int index, const QString &fld); /* fld value interpreted as volume status. Colored accordingly */ void setVolStatusFld(int index, const QString &fld, bool center = true); /* fld value interpreted as job status. Colored accordingly */ void setJobStatusFld(int index, const QString &status, bool center = true); /* fld value interpreted as job type. 
*/ void setJobTypeFld(int index, const QString &fld, bool center = false); /* fld value interpreted as job level. */ void setJobLevelFld(int index, const QString &fld, bool center = false); /* fld value interpreted as Online/Offline */ void setInChanger(int index, const QString &InChanger); /* fld value interpreted as file or folder */ void setFileType(int index, const QString &type); static void setBytesConversion(BYTES_CONVERSION b) { cnvFlag = b; } static BYTES_CONVERSION getBytesConversion() { return cnvFlag; } protected: /* only derived classes can create one of these */ ItemFormatterBase(); virtual void setText(int index, const QString &fld) = 0; virtual void setTextAlignment(int index, int align) = 0; virtual void setBackground(int index, const QBrush &) = 0; virtual void setPixmap(int index, const QPixmap &pix) = 0; virtual void setPixmap(int index, const QPixmap &pix, const QString &tip); /* sets the *optional* value used for sorting */ virtual void setSortValue(int index, const QVariant &value) = 0; private: static BYTES_CONVERSION cnvFlag; }; /* * This class can be used instead of QTreeWidgetItem (it allocates one internally, * to format data fields. * All setXXXFld routines receive a column index and the unformatted string value. */ class TreeItemFormatter : public ItemFormatterBase { public: TreeItemFormatter(QTreeWidgetItem &parent, int indent_level); /* access internal widget */ QTreeWidgetItem *widget() { return wdg; } const QTreeWidgetItem *widget() const { return wdg; } protected: virtual void setText(int index, const QString &fld); virtual void setTextAlignment(int index, int align); virtual void setBackground(int index, const QBrush &); virtual void setSortValue(int index, const QVariant &value); virtual void setPixmap(int index, const QPixmap &pix); private: QTreeWidgetItem *wdg; int level; }; /* * This class can be used instead of QTableWidgetItem (it allocates one internally, * to format data fields. * All setXXXFld routines receive the column and the unformatted string value. */ class TableItemFormatter : public ItemFormatterBase { private: /* specialized widget item - allows an optional data property for sorting */ class BatSortingTableItem : public QTableWidgetItem { private: static const int SORTDATA_ROLE = Qt::UserRole + 100; public: BatSortingTableItem(); /* uses the sort data if available, reverts to default behavior othervise */ virtual bool operator< ( const QTableWidgetItem & o ) const; /* set the value used for sorting - MUST BE A NUMERIC TYPE */ void setSortData(const QVariant &d); }; public: TableItemFormatter(QTableWidget &parent, int row); /* access internal widget at column col*/ QTableWidgetItem *widget(int col); const QTableWidgetItem *widget(int col) const; protected: virtual void setText(int index, const QString &fld); virtual void setTextAlignment(int index, int align); virtual void setBackground(int index, const QBrush &); virtual void setSortValue(int index, const QVariant &value); virtual void setPixmap(int index, const QPixmap &pix); virtual void setPixmap(int index, const QPixmap &pix, const QString &tip); private: QTableWidget *parent; int row; BatSortingTableItem *last; }; #endif /* _FMTWIDGETITEM_H_ */ bacula-15.0.3/src/qt-console/util/fmtwidgetitem.cpp0000644000175000017500000003403514771010173022074 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. 
You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * * Helper functions for tree widget formatting * * Riccardo Ghetta, May 2008 * */ #include "../bat.h" #include #include #include #include #include #include #include #include #include "fmtwidgetitem.h" /*********************************************** * * common helpers * ***********************************************/ QString convertJobStatus(const QString &sts) { QString code( sts.trimmed() ); if ( code.size() != 1) { return QObject::tr("Invalid job status %1").arg(sts); } char buf[256]; jobstatus_to_ascii_gui( code[0].toLatin1(), buf, sizeof(buf)); return QString(buf); } /* * disable widget updating */ Freeze::Freeze(QWidget &q): qw(&q) { qw->setUpdatesEnabled(false); } Freeze::~Freeze() { if (qw) { qw->setUpdatesEnabled(true); qw->update(); } } /*********************************************** * * ItemFormatterBase static members * ***********************************************/ ItemFormatterBase::BYTES_CONVERSION ItemFormatterBase::cnvFlag(BYTES_CONVERSION_IEC); /* String to Electronic value based on K=1024 */ QString convertBytesIEC(qint64 qfld) { static const qint64 KB = Q_INT64_C(1024); static const qint64 MB = (KB * KB); static const qint64 GB = (MB * KB); static const qint64 TB = (GB * KB); static const qint64 PB = (TB * KB); static const qint64 EB = (PB * KB); /* note: division is integer, so to have some decimals we divide for a smaller unit (e.g. GB for a TB number and so on) */ char suffix; if (qfld >= EB) { qfld /= PB; suffix = 'E'; } else if (qfld >= PB) { qfld /= TB; suffix = 'P'; } else if (qfld >= TB) { qfld /= GB; suffix = 'T'; } else if (qfld >= GB) { qfld /= MB; suffix = 'G'; } else if (qfld >= MB) { qfld /= KB; suffix = 'M'; } else if (qfld >= KB) { suffix = 'K'; } else { /* plain bytes, no need to reformat */ return QString("%1 B").arg(qfld); } /* After dividing into a smaller value, we can safely convert from * to a double double and use the extra room for decimals */ return QString("%1 %2iB").arg(qfld / 1024.0, 0, 'f', 2).arg(suffix); } /* String to human value based on k=1000 */ QString convertBytesSI(qint64 qfld) { static const qint64 KB = Q_INT64_C(1000); static const qint64 MB = (KB * KB); static const qint64 GB = (MB * KB); static const qint64 TB = (GB * KB); static const qint64 PB = (TB * KB); static const qint64 EB = (PB * KB); /* Note: division is integer, so to have some decimals we divide for a smaller unit (e.g. 
GB for a TB number and so on) */ char suffix; if (qfld >= EB) { qfld /= PB; suffix = 'E'; } else if (qfld >= PB) { qfld /= TB; suffix = 'P'; } else if (qfld >= TB) { qfld /= GB; suffix = 'T'; } else if (qfld >= GB) { qfld /= MB; suffix = 'G'; } else if (qfld >= MB) { qfld /= KB; suffix = 'M'; } else if (qfld >= KB) { suffix = 'k'; /* SI uses lowercase k */ } else { /* plain bytes, no need to reformat */ return QString("%1 B").arg(qfld); } /* having divided for a smaller unit, now we can safely convert to double and use the extra room for decimals */ return QString("%1 %2B").arg(qfld / 1000.0, 0, 'f', 2).arg(suffix); } /*********************************************** * * base formatting routines * ***********************************************/ ItemFormatterBase::ItemFormatterBase() { } ItemFormatterBase::~ItemFormatterBase() { } void ItemFormatterBase::setPercent(int index, float value) { char buf[100]; bsnprintf(buf, sizeof(buf), "%.2f%%", value); QString val = buf; QString pix; if (value < 8) { pix = ":images/0p.png"; } else if (value < 24) { pix = ":images/16p.png"; } else if (value < 40) { pix = ":images/32p.png"; } else if (value < 56) { pix = ":images/48p.png"; } else if (value < 72) { pix = ":images/64p.png"; } else if (value < 88) { pix = ":images/80p.png"; } else { pix = ":images/96p.png"; } setPixmap(index, QPixmap(pix), val); //setSortValue(index, (int) value); //setBackground(index, Qt::green); } /* By default, the setPixmap implementation with tooltip don't implement * the tooltip stuff */ void ItemFormatterBase::setPixmap(int index, const QPixmap &pix, const QString & /* tip */) { setPixmap(index, pix); } void ItemFormatterBase::setInChanger(int index, const QString &InChanger) { setPixmap(index, QPixmap(":images/inflag"+InChanger+".png")); //setSortValue(index, InChanger.toInt() ); } void ItemFormatterBase::setFileType(int index, const QString &type) { setPixmap(index, QPixmap(":images/"+type+".png")); //setSortValue(index, InChanger.toInt() ); } void ItemFormatterBase::setTextFld(int index, const QString &fld, bool center) { setText(index, fld.trimmed()); if (center) { setTextAlignment(index, Qt::AlignCenter); } } void ItemFormatterBase::setDateFld(int index, utime_t fld, bool center) { char buf[200]; bstrutime(buf, sizeof(buf), fld); setText(index, QString(buf).trimmed()); if (center) { setTextAlignment(index, Qt::AlignCenter); } } void ItemFormatterBase::setRightFld(int index, const QString &fld) { setText(index, fld.trimmed()); setTextAlignment(index, Qt::AlignRight | Qt::AlignVCenter); } void ItemFormatterBase::setBoolFld(int index, const QString &fld, bool center) { if (fld.trimmed().toInt()) setTextFld(index, QObject::tr("Yes"), center); else setTextFld(index, QObject::tr("No"), center); } void ItemFormatterBase::setBoolFld(int index, int fld, bool center) { if (fld) setTextFld(index, QObject::tr("Yes"), center); else setTextFld(index, QObject::tr("No"), center); } void ItemFormatterBase::setNumericFld(int index, const QString &fld) { setRightFld(index, fld.trimmed()); setSortValue(index, fld.toDouble() ); } void ItemFormatterBase::setNumericFld(int index, const QString &fld, const QVariant &sortval) { setRightFld(index, fld.trimmed()); setSortValue(index, sortval ); } void ItemFormatterBase::setBytesFld(int index, const QString &fld) { qint64 qfld = fld.trimmed().toLongLong(); QString msg; switch (cnvFlag) { case BYTES_CONVERSION_NONE: msg = QString::number(qfld); break; case BYTES_CONVERSION_IEC: msg = convertBytesIEC(qfld); break; case BYTES_CONVERSION_SI: msg = 
convertBytesSI(qfld); break; default: msg = " "; break; } setNumericFld(index, msg, QVariant(qfld)); } void ItemFormatterBase::setDurationFld(int index, const QString &fld) { static const qint64 HOUR = Q_INT64_C(3600); static const qint64 DAY = HOUR * 24; static const qint64 WEEK = DAY * 7; static const qint64 MONTH = DAY * 30; static const qint64 YEAR = DAY * 365; static const qint64 divs[] = { YEAR, MONTH, WEEK, DAY, HOUR }; static const char sufs[] = { 'y', 'm', 'w', 'd', 'h', '\0' }; qint64 dfld = fld.trimmed().toLongLong(); char suffix = 's'; if (dfld) { for (int pos = 0 ; sufs[pos] ; ++pos) { if (dfld % divs[pos] == 0) { dfld /= divs[pos]; suffix = sufs[pos]; break; } } } QString msg; if (dfld < 100) { msg = QString("%1%2").arg(dfld).arg(suffix); } else { /* previous check returned a number too big. The original specification perhaps was mixed, like 1d 2h, so we try to match with this routine */ dfld = fld.trimmed().toLongLong(); msg = ""; for (int pos = 0 ; sufs[pos] ; ++pos) { if (dfld / divs[pos] != 0) { msg += QString(" %1%2").arg(dfld / divs[pos]).arg(sufs[pos]); dfld %= divs[pos]; } } if (dfld) msg += QString(" %1s").arg(dfld); } setNumericFld(index, msg, QVariant(fld.trimmed().toLongLong())); } void ItemFormatterBase::setVolStatusFld(int index, const QString &fld, bool center) { QString mp(fld.trimmed()); setTextFld(index, volume_status_to_str(mp.toUtf8()), center); if (mp == "Append" ) { setBackground(index, Qt::green); } else if (mp == "Error") { setBackground(index, Qt::red); } else if (mp == "Used" || mp == "Full"){ setBackground(index, Qt::yellow); } else if (mp == "Read-only" || mp == "Disabled"){ setBackground(index, Qt::lightGray); } } void ItemFormatterBase::setJobStatusFld(int index, const QString &status, bool center) { /* C (created, not yet running) uses the default background */ static QString greenchars("TR"); static QString redchars("BEf"); static QString yellowchars("eDAFSMmsjdctp"); setTextFld(index, convertJobStatus(status), center); QString st(status.trimmed()); if (greenchars.contains(st, Qt::CaseSensitive)) { setBackground(index, Qt::green); } else if (redchars.contains(st, Qt::CaseSensitive)) { setBackground(index, Qt::red); } else if (yellowchars.contains(st, Qt::CaseSensitive)){ setBackground(index, Qt::yellow); } } void ItemFormatterBase::setJobTypeFld(int index, const QString &fld, bool center) { QByteArray jtype(fld.trimmed().toLatin1()); if (jtype.size()) { setTextFld(index, job_type_to_str(jtype[0]), center); } else { setTextFld(index, "", center); } } void ItemFormatterBase::setJobLevelFld(int index, const QString &fld, bool center) { QByteArray lvl(fld.trimmed().toLatin1()); if (lvl.size()) { setTextFld(index, job_level_to_str(lvl[0]), center); } else { setTextFld(index, "", center); } } /*********************************************** * * treeitem formatting routines * ***********************************************/ TreeItemFormatter::TreeItemFormatter(QTreeWidgetItem &parent, int indent_level): ItemFormatterBase(), wdg(new QTreeWidgetItem(&parent)), level(indent_level) { } void TreeItemFormatter::setText(int index, const QString &fld) { wdg->setData(index, Qt::UserRole, level); wdg->setText(index, fld); } void TreeItemFormatter::setTextAlignment(int index, int align) { wdg->setTextAlignment(index, align); } void TreeItemFormatter::setBackground(int index, const QBrush &qb) { wdg->setBackground(index, qb); } /* at this time we don't sort trees, so this method does nothing */ void TreeItemFormatter::setSortValue(int /* index */, const QVariant 
& /* value */) { } void TreeItemFormatter::setPixmap(int index, const QPixmap &pix) { wdg->setIcon(index, QIcon(pix)); } /*********************************************** * * Specialized table widget used for sorting * ***********************************************/ TableItemFormatter::BatSortingTableItem::BatSortingTableItem(): QTableWidgetItem(1) { } void TableItemFormatter::BatSortingTableItem::setSortData(const QVariant &d) { setData(SORTDATA_ROLE, d); } bool TableItemFormatter::BatSortingTableItem::operator< ( const QTableWidgetItem & o ) const { QVariant my = data(SORTDATA_ROLE); QVariant other = o.data(SORTDATA_ROLE); if (!my.isValid() || !other.isValid() || my.type() != other.type()) return QTableWidgetItem::operator< (o); /* invalid combination, revert to default sorting */ /* 64bit integers must be handled separately, others can be converted to double */ if (QVariant::ULongLong == my.type()) { return my.toULongLong() < other.toULongLong(); } else if (QVariant::LongLong == my.type()) { return my.toLongLong() < other.toLongLong(); } else if (my.canConvert(QVariant::Double)) { return my.toDouble() < other.toDouble(); } else { return QTableWidgetItem::operator< (o); /* invalid combination, revert to default sorting */ } } /*********************************************** * * tableitem formatting routines * ***********************************************/ TableItemFormatter::TableItemFormatter(QTableWidget &tparent, int trow): ItemFormatterBase(), parent(&tparent), row(trow), last(NULL) { } void TableItemFormatter::setPixmap(int index, const QPixmap &pix) { // Centered, but not sortable ! QLabel *lbl = new QLabel(); lbl->setAlignment(Qt::AlignCenter); lbl->setPixmap(pix); parent->setCellWidget(row, index, lbl); } void TableItemFormatter::setPixmap(int index, const QPixmap &pix, const QString &tips) { // Centered, but not sortable ! QLabel *lbl = new QLabel(); lbl->setAlignment(Qt::AlignCenter); lbl->setPixmap(pix); if (!tips.isEmpty()) { lbl->setToolTip(tips); } parent->setCellWidget(row, index, lbl); // last = new BatSortingTableItem; // parent->setItem(row, index, last); // last->setIcon(pix); } void TableItemFormatter::setText(int col, const QString &fld) { last = new BatSortingTableItem; parent->setItem(row, col, last); last->setText(fld); } void TableItemFormatter::setTextAlignment(int /*index*/, int align) { last->setTextAlignment(align); } void TableItemFormatter::setBackground(int /*index*/, const QBrush &qb) { last->setBackground(qb); } void TableItemFormatter::setSortValue(int /* index */, const QVariant &value ) { last->setSortData(value); } QTableWidgetItem *TableItemFormatter::widget(int col) { return parent->item(row, col); } const QTableWidgetItem *TableItemFormatter::widget(int col) const { return parent->item(row, col); } bacula-15.0.3/src/qt-console/util/comboutil.h0000644000175000017500000000357714771010173020674 0ustar bsbuildbsbuild#ifndef _COMBOUTIL_H_ #define _COMBOUTIL_H_ /* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * Combobox helpers - Riccardo Ghetta, May 2008 */ class QComboBox; class QString; class QStringList; /* selects value val on combo, if exists */ void comboSel(QComboBox *combo, const QString &val); /* if the combo has selected something different from "Any" uses the selection * to build a condition on field fldname and adds it to the condition list */ void comboCond(QStringList &cndlist, const QComboBox *combo, const char *fldname); /* these helpers are used to give an uniform content to common combos. * There are two routines per combo type: * - XXXXComboFill fills the combo with values. * - XXXXComboCond checks the combo and, if selected adds a condition * on the field fldName to the list of conditions cndList */ /* boolean combo (yes/no) */ void boolComboFill(QComboBox *combo); void boolComboCond(QStringList &cndlist, const QComboBox *combo, const char *fldname); /* backup level combo */ void levelComboFill(QComboBox *combo); void levelComboCond(QStringList &cndlist, const QComboBox *combo, const char *fldname); /* job status combo */ void jobStatusComboFill(QComboBox *combo); void jobStatusComboCond(QStringList &cndlist, const QComboBox *combo, const char *fldname); #endif /* _COMBOUTIL_H_ */ bacula-15.0.3/src/qt-console/util/comboutil.cpp0000644000175000017500000001101714771010173021213 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * ComboBox helper functions * * Riccardo Ghetta, May 2008 * */ #include "bat.h" #include #include #include #include "fmtwidgetitem.h" #include "comboutil.h" static const QString QS_ANY(QObject::tr("Any")); /* selects value val on combo, if exists */ void comboSel(QComboBox *combo, const QString &val) { int index = combo->findText(val, Qt::MatchExactly); if (index != -1) { combo->setCurrentIndex(index); } } /* if the combo has selected something different from "Any" uses the selection * to build a condition on field fldname and adds it to the condition list */ void comboCond(QStringList &cndlist, const QComboBox *combo, const char *fldname) { int index = combo->currentIndex(); if (index != -1 && combo->itemText(index) != QS_ANY) { cndlist.append( QString("%1='%2'").arg(fldname).arg(combo->itemText(index)) ); } } /* boolean combo (yes/no) */ void boolComboFill(QComboBox *combo) { combo->addItem(QS_ANY, -1); combo->addItem(QObject::tr("No"), 0); combo->addItem(QObject::tr("Yes"), 1); } void boolComboCond(QStringList &cndlist, const QComboBox *combo, const char *fldname) { int index = combo->currentIndex(); if (index != -1 && combo->itemData(index).toInt() >= 0 ) { QString cnd = combo->itemData(index).toString(); cndlist.append( QString("%1='%2'").arg(fldname).arg(cnd) ); } } /* backup level combo */ void levelComboFill(QComboBox *combo) { combo->addItem(QS_ANY); combo->addItem(job_level_to_str(L_FULL), L_FULL); combo->addItem(job_level_to_str(L_INCREMENTAL), L_INCREMENTAL); combo->addItem(job_level_to_str(L_DIFFERENTIAL), L_DIFFERENTIAL); combo->addItem(job_level_to_str(L_SINCE), L_SINCE); combo->addItem(job_level_to_str(L_VERIFY_CATALOG), L_VERIFY_CATALOG); combo->addItem(job_level_to_str(L_VERIFY_INIT), L_VERIFY_INIT); combo->addItem(job_level_to_str(L_VERIFY_VOLUME_TO_CATALOG), L_VERIFY_VOLUME_TO_CATALOG); combo->addItem(job_level_to_str(L_VERIFY_DISK_TO_CATALOG), L_VERIFY_DISK_TO_CATALOG); combo->addItem(job_level_to_str(L_VERIFY_DATA), L_VERIFY_DATA); /* combo->addItem(job_level_to_str(L_BASE), L_BASE); base jobs ignored */ } void levelComboCond(QStringList &cndlist, const QComboBox *combo, const char *fldname) { int index = combo->currentIndex(); if (index != -1 && combo->itemText(index) != QS_ANY ) { QString cnd = combo->itemData(index).toChar(); cndlist.append( QString("%1='%2'").arg(fldname).arg(cnd) ); } } /* job status combo */ void jobStatusComboFill(QComboBox *combo) { static const char js[] = { JS_Terminated, JS_Created, JS_Running, JS_Blocked, JS_ErrorTerminated, JS_Error, JS_FatalError, JS_Differences, JS_Canceled, JS_WaitFD, JS_WaitSD, JS_WaitMedia, JS_WaitMount, JS_WaitStoreRes, JS_WaitJobRes, JS_WaitClientRes, JS_WaitMaxJobs, JS_WaitStartTime, JS_WaitPriority, JS_AttrDespooling, JS_AttrInserting, JS_DataDespooling, JS_DataCommitting, '\0'}; int pos; combo->addItem(QS_ANY); for (pos = 0 ; js[pos] != '\0' ; ++pos) { combo->addItem(convertJobStatus( QString(js[pos]) ), js[pos]); } } void jobStatusComboCond(QStringList &cndlist, const QComboBox *combo, const char *fldname) { int index = combo->currentIndex(); if (index != -1 && combo->itemText(index) != QS_ANY ) { QString cnd = combo->itemData(index).toChar(); cndlist.append( QString("%1='%2'").arg(fldname).arg(cnd) ); } } bacula-15.0.3/src/qt-console/mainwin.cpp0000644000175000017500000011005514771010173017705 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found 
in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * * Main Window control for bat (qt-console) * * Kern Sibbald, January MMVII * */ #include "bat.h" #include "version.h" #include "joblist/joblist.h" #include "storage/storage.h" #include "fileset/fileset.h" #include "label/label.h" #include "run/run.h" #include "pages.h" #include "restore/restore.h" #include "medialist/medialist.h" #include "joblist/joblist.h" #include "clients/clients.h" #include "restore/restoretree.h" #include "help/help.h" #include "jobs/jobs.h" #include "medialist/mediaview.h" #ifdef HAVE_QWT #include "jobgraphs/jobplot.h" #endif #include "status/dirstat.h" #include "util/fmtwidgetitem.h" /* * Daemon message callback */ void message_callback(int /* type */, char *msg) { QMessageBox::warning(mainWin, "Bat", msg, QMessageBox::Ok); } MainWin::MainWin(QWidget *parent) : QMainWindow(parent) { app->setOverrideCursor(QCursor(Qt::WaitCursor)); m_isClosing = false; m_waitState = false; m_doConnect = false; m_treeStackTrap = false; m_dtformat = "yyyy-MM-dd HH:mm:ss"; mainWin = this; setupUi(this); /* Setup UI defined by main.ui (designer) */ register_message_callback(message_callback); readPreferences(); treeWidget->clear(); treeWidget->setColumnCount(1); treeWidget->setHeaderLabel( tr("Select Page") ); treeWidget->setContextMenuPolicy(Qt::ActionsContextMenu); tabWidget->setTabsClosable(true); /* wait for QT 4.5 */ createPages(); resetFocus(); /* lineEdit->setFocus() */ #ifndef HAVE_QWT actionJobPlot->setEnabled(false); actionJobPlot->setVisible(false); #endif this->show(); readSettings(); foreach(Console *console, m_consoleHash) { console->connect_dir(); } /* * Note, the notifier is now a global flag, although each notifier * can be individually turned on and off at a socket level. Once * the notifier is turned off, we don't accept anything from anyone * this prevents unwanted messages from getting into the input * dialogs such as restore that read from the director and "know" * what to expect. 
*/ m_notify = true; m_currentConsole = (Console*)getFromHash(m_firstItem); QTimer::singleShot(2000, this, SLOT(popLists())); if (m_miscDebug) { QString directoryResourceName; m_currentConsole->getDirResName(directoryResourceName); Pmsg1(100, "Setting initial window to %s\n", directoryResourceName.toUtf8().data()); } app->restoreOverrideCursor(); } void MainWin::popLists() { foreach(Console *console, m_consoleHash) { console->populateLists(true); } m_doConnect = true; connectConsoleSignals(); connectSignals(); app->restoreOverrideCursor(); m_currentConsole->setCurrent(); } void MainWin::createPages() { DIRRES *dir; QTreeWidgetItem *item, *topItem; m_firstItem = NULL; LockRes(); foreach_res(dir, R_DIRECTOR) { /* Create console tree stacked widget item */ m_currentConsole = new Console(tabWidget); m_currentConsole->setDirRes(dir); m_currentConsole->readSettings(); /* The top tree item representing the director */ topItem = new QTreeWidgetItem(treeWidget); topItem->setText(0, dir->name()); topItem->setIcon(0, QIcon(":images/server.png")); /* Set background to grey for ease of identification of inactive Director */ QBrush greyBrush(Qt::lightGray); topItem->setBackground(0, greyBrush); m_currentConsole->setDirectorTreeItem(topItem); m_consoleHash.insert(topItem, m_currentConsole); /* Create Tree Widget Item */ item = new QTreeWidgetItem(topItem); item->setText(0, tr("Console")); if (!m_firstItem){ m_firstItem = item; } item->setIcon(0,QIcon(QString::fromUtf8(":images/utilities-terminal.png"))); /* insert the cosole and tree widget item into the hashes */ hashInsert(item, m_currentConsole); m_currentConsole->dockPage(); /* Set Color of treeWidgetItem for the console * It will be set to green in the console class if the connection is made. */ QBrush redBrush(Qt::red); item->setForeground(0, redBrush); /* * Create instances in alphabetic order of the rest * of the classes that will by default exist under each Director. */ new bRestore(); new Clients(); new FileSet(); new Jobs(); createPageJobList("", "", "", "", NULL); #ifdef HAVE_QWT JobPlotPass pass; pass.use = false; if (m_openPlot) new JobPlot(NULL, pass); #endif new MediaList(); new MediaView(); new Storage(); // if (m_openBrowser) { // new restoreTree(); // } if (m_openDirStat) { new DirStat(); } treeWidget->expandItem(topItem); tabWidget->setCurrentWidget(m_currentConsole); } UnlockRes(); } /* * create an instance of the the joblist class on the stack */ void MainWin::createPageJobList(const QString &media, const QString &client, const QString &job, const QString &fileset, QTreeWidgetItem *parentTreeWidgetItem) { QTreeWidgetItem *holdItem; /* save current tree widget item in case query produces no results */ holdItem = treeWidget->currentItem(); JobList* joblist = new JobList(media, client, job, fileset, parentTreeWidgetItem); /* If this is a query of jobs on a specific media */ if ((media != "") || (client != "") || (job != "") || (fileset != "")) { joblist->setCurrent(); /* did query produce results, if not close window and set back to hold */ if (joblist->m_resultCount == 0) { joblist->closeStackPage(); treeWidget->setCurrentItem(holdItem); } } } /* * Handle up and down arrow keys for the command line * history. 
*/ void MainWin::keyPressEvent(QKeyEvent *event) { if (m_cmd_history.size() == 0) { event->ignore(); return; } switch (event->key()) { case Qt::Key_Down: if (m_cmd_last < 0 || m_cmd_last >= (m_cmd_history.size()-1)) { event->ignore(); return; } m_cmd_last++; break; case Qt::Key_Up: if (m_cmd_last == 0) { event->ignore(); return; } if (m_cmd_last < 0 || m_cmd_last > (m_cmd_history.size()-1)) { m_cmd_last = m_cmd_history.size() - 1; } else { m_cmd_last--; } break; default: event->ignore(); return; } lineEdit->setText(m_cmd_history[m_cmd_last]); } void MainWin::connectSignals() { /* Connect signals to slots */ connect(lineEdit, SIGNAL(returnPressed()), this, SLOT(input_line())); connect(actionAbout_bat, SIGNAL(triggered()), this, SLOT(about())); connect(actionBat_Help, SIGNAL(triggered()), this, SLOT(help())); connect(treeWidget, SIGNAL(itemClicked(QTreeWidgetItem *, int)), this, SLOT(treeItemClicked(QTreeWidgetItem *, int))); connect(treeWidget, SIGNAL(currentItemChanged(QTreeWidgetItem *, QTreeWidgetItem *)), this, SLOT(treeItemChanged(QTreeWidgetItem *, QTreeWidgetItem *))); connect(tabWidget, SIGNAL(currentChanged(int)), this, SLOT(stackItemChanged(int))); connect(tabWidget, SIGNAL(tabCloseRequested(int)), this, SLOT(closePage(int))); connect(actionQuit, SIGNAL(triggered()), app, SLOT(closeAllWindows())); connect(actionLabel, SIGNAL(triggered()), this, SLOT(labelButtonClicked())); connect(actionRun, SIGNAL(triggered()), this, SLOT(runButtonClicked())); connect(actionEstimate, SIGNAL(triggered()), this, SLOT(estimateButtonClicked())); connect(actionBrowse, SIGNAL(triggered()), this, SLOT(browseButtonClicked())); connect(actionStatusDirPage, SIGNAL(triggered()), this, SLOT(statusPageButtonClicked())); #ifdef HAVE_QWT connect(actionJobPlot, SIGNAL(triggered()), this, SLOT(jobPlotButtonClicked())); #endif connect(actionRestore, SIGNAL(triggered()), this, SLOT(restoreButtonClicked())); connect(actionUndock, SIGNAL(triggered()), this, SLOT(undockWindowButton())); connect(actionToggleDock, SIGNAL(triggered()), this, SLOT(toggleDockContextWindow())); connect(actionClosePage, SIGNAL(triggered()), this, SLOT(closeCurrentPage())); connect(actionPreferences, SIGNAL(triggered()), this, SLOT(setPreferences())); connect(actionRepopLists, SIGNAL(triggered()), this, SLOT(repopLists())); connect(actionReloadRepop, SIGNAL(triggered()), this, SLOT(reloadRepopLists())); } void MainWin::disconnectSignals() { /* Connect signals to slots */ disconnect(lineEdit, SIGNAL(returnPressed()), this, SLOT(input_line())); disconnect(actionAbout_bat, SIGNAL(triggered()), this, SLOT(about())); disconnect(actionBat_Help, SIGNAL(triggered()), this, SLOT(help())); disconnect(treeWidget, SIGNAL(itemClicked(QTreeWidgetItem *, int)), this, SLOT(treeItemClicked(QTreeWidgetItem *, int))); disconnect(treeWidget, SIGNAL( currentItemChanged(QTreeWidgetItem *, QTreeWidgetItem *)), this, SLOT(treeItemChanged(QTreeWidgetItem *, QTreeWidgetItem *))); disconnect(tabWidget, SIGNAL(currentChanged(int)), this, SLOT(stackItemChanged(int))); disconnect(tabWidget, SIGNAL(tabCloseRequested(int)), this, SLOT(closePage(int))); disconnect(actionQuit, SIGNAL(triggered()), app, SLOT(closeAllWindows())); disconnect(actionLabel, SIGNAL(triggered()), this, SLOT(labelButtonClicked())); disconnect(actionRun, SIGNAL(triggered()), this, SLOT(runButtonClicked())); disconnect(actionEstimate, SIGNAL(triggered()), this, SLOT(estimateButtonClicked())); disconnect(actionBrowse, SIGNAL(triggered()), this, SLOT(browseButtonClicked())); 
disconnect(actionStatusDirPage, SIGNAL(triggered()), this, SLOT(statusPageButtonClicked())); #ifdef HAVE_QWT disconnect(actionJobPlot, SIGNAL(triggered()), this, SLOT(jobPlotButtonClicked())); #endif disconnect(actionRestore, SIGNAL(triggered()), this, SLOT(restoreButtonClicked())); disconnect(actionUndock, SIGNAL(triggered()), this, SLOT(undockWindowButton())); disconnect(actionToggleDock, SIGNAL(triggered()), this, SLOT(toggleDockContextWindow())); disconnect(actionClosePage, SIGNAL(triggered()), this, SLOT(closeCurrentPage())); disconnect(actionPreferences, SIGNAL(triggered()), this, SLOT(setPreferences())); disconnect(actionRepopLists, SIGNAL(triggered()), this, SLOT(repopLists())); disconnect(actionReloadRepop, SIGNAL(triggered()), this, SLOT(reloadRepopLists())); } /* * Enter wait state */ void MainWin::waitEnter() { if (m_waitState || m_isClosing) { return; } m_waitState = true; if (mainWin->m_connDebug) Pmsg0(000, "Entering Wait State\n"); app->setOverrideCursor(QCursor(Qt::WaitCursor)); disconnectSignals(); disconnectConsoleSignals(m_currentConsole); m_waitTreeItem = treeWidget->currentItem(); } /* * Leave wait state */ void MainWin::waitExit() { if (!m_waitState || m_isClosing) { return; } if (mainWin->m_connDebug) Pmsg0(000, "Exiting Wait State\n"); if (m_waitTreeItem && (m_waitTreeItem != treeWidget->currentItem())) { treeWidget->setCurrentItem(m_waitTreeItem); } if (m_doConnect) { connectSignals(); connectConsoleSignals(); } app->restoreOverrideCursor(); m_waitState = false; } void MainWin::connectConsoleSignals() { connect(actionConnect, SIGNAL(triggered()), m_currentConsole, SLOT(connect_dir())); connect(actionSelectFont, SIGNAL(triggered()), m_currentConsole, SLOT(set_font())); connect(actionMessages, SIGNAL(triggered()), m_currentConsole, SLOT(messages())); } void MainWin::disconnectConsoleSignals(Console *console) { disconnect(actionConnect, SIGNAL(triggered()), console, SLOT(connect_dir())); disconnect(actionMessages, SIGNAL(triggered()), console, SLOT(messages())); disconnect(actionSelectFont, SIGNAL(triggered()), console, SLOT(set_font())); } /* * Two functions to respond to menu items to repop lists and execute reload and repopulate * the lists for jobs, clients, filesets .. .. 
*/ void MainWin::repopLists() { m_currentConsole->populateLists(false); } void MainWin::reloadRepopLists() { QString cmd = "reload"; m_currentConsole->consoleCommand(cmd); m_currentConsole->populateLists(false); } /* * Reimplementation of QWidget closeEvent virtual function */ void MainWin::closeEvent(QCloseEvent *event) { m_isClosing = true; writeSettings(); /* Remove all groups from settings for OpenOnExit so that we can start some of the status windows */ foreach(Console *console, m_consoleHash){ QSettings settings(console->m_dir->name(), "bat"); settings.beginGroup("OpenOnExit"); settings.remove(""); settings.endGroup(); } /* close all non console pages, this will call settings in destructors */ while (m_consoleHash.count() < m_pagehash.count()) { foreach(Pages *page, m_pagehash) { if (page != page->console()) { QTreeWidgetItem* pageSelectorTreeWidgetItem = mainWin->getFromHash(page); if (pageSelectorTreeWidgetItem->childCount() == 0) { page->console()->setCurrent(); page->closeStackPage(); } } } } foreach(Console *console, m_consoleHash){ console->writeSettings(); console->terminate(); console->closeStackPage(); } event->accept(); } void MainWin::writeSettings() { QSettings settings("bacula.org", "bat"); settings.beginGroup("MainWin"); settings.setValue("winSize", size()); settings.setValue("winPos", pos()); settings.setValue("state", saveState()); settings.endGroup(); } void MainWin::readSettings() { QSettings settings("bacula.org", "bat"); settings.beginGroup("MainWin"); resize(settings.value("winSize", QSize(1041, 801)).toSize()); move(settings.value("winPos", QPoint(200, 150)).toPoint()); restoreState(settings.value("state").toByteArray()); settings.endGroup(); } /* * This subroutine is called with an item in the Page Selection window * is clicked */ void MainWin::treeItemClicked(QTreeWidgetItem *item, int /*column*/) { /* Is this a page that has been inserted into the hash */ Pages* page = getFromHash(item); if (page) { int stackindex = tabWidget->indexOf(page); if (stackindex >= 0) { tabWidget->setCurrentWidget(page); } page->dockPage(); /* run the virtual function in case this class overrides it */ page->PgSeltreeWidgetClicked(); } else { Dmsg0(000, "Page not in hash"); } } /* * Called with a change of the highlighed tree widget item in the page selector. */ void MainWin::treeItemChanged(QTreeWidgetItem *currentitem, QTreeWidgetItem *previousitem) { if (m_isClosing) return; /* if closing the application, do nothing here */ Pages *previousPage, *nextPage; Console *previousConsole = NULL; Console *nextConsole; /* remove all actions before adding actions appropriate for new page */ foreach(QAction* pageAction, treeWidget->actions()) { treeWidget->removeAction(pageAction); } /* first determine the next item */ /* knowing the treeWidgetItem, get the page from the hash */ nextPage = getFromHash(currentitem); nextConsole = m_consoleHash.value(currentitem); /* Is this a page that has been inserted into the hash */ if (nextPage) { nextConsole = nextPage->console(); /* then is it a treeWidgetItem representing a director */ } else if (nextConsole) { /* let the next page BE the console */ nextPage = nextConsole; } else { /* Should never get here */ nextPage = NULL; nextConsole = NULL; } /* The Previous item */ /* this condition prevents a segfault. 
The first time there is no previousitem*/ if (previousitem) { if (m_treeStackTrap == false) { /* keep track of previous items for going Back */ m_treeWidgetStack.append(previousitem); } /* knowing the treeWidgetItem, get the page from the hash */ previousPage = getFromHash(previousitem); previousConsole = m_consoleHash.value(previousitem); if (previousPage) { previousConsole = previousPage->console(); } else if (previousConsole) { previousPage = previousConsole; } if ((previousPage) || (previousConsole)) { if (nextConsole != previousConsole) { /* remove connections to the current console */ disconnectConsoleSignals(previousConsole); QTreeWidgetItem *dirItem = previousConsole->directorTreeItem(); QBrush greyBrush(Qt::lightGray); dirItem->setBackground(0, greyBrush); } } } /* process the current (next) item */ if ((nextPage) || (nextConsole)) { if (nextConsole != previousConsole) { /* make connections to the current console */ m_currentConsole = nextConsole; connectConsoleSignals(); setMessageIcon(); /* Set director's tree widget background to magenta for ease of identification */ QTreeWidgetItem *dirItem = m_currentConsole->directorTreeItem(); QBrush magentaBrush(Qt::magenta); dirItem->setBackground(0, magentaBrush); } /* set the value for the currently active console */ int stackindex = tabWidget->indexOf(nextPage); nextPage->firstUseDock(); /* Is this page currently on the stack or is it undocked */ if (stackindex >= 0) { /* put this page on the top of the stack */ tabWidget->setCurrentIndex(stackindex); } else { /* it is undocked, raise it to the front */ nextPage->raise(); } /* for the page selectors menu action to dock or undock, set the text */ nextPage->setContextMenuDockText(); treeWidget->addAction(actionToggleDock); /* if this page is closeable, and it has no childern, then add that action */ if ((nextPage->isCloseable()) && (currentitem->child(0) == NULL)) treeWidget->addAction(actionClosePage); /* Add the actions to the Page Selectors tree widget that are part of the * current items list of desired actions regardless of whether on top of stack*/ treeWidget->addActions(nextPage->m_contextActions); } } void MainWin::labelButtonClicked() { new labelPage(); } void MainWin::runButtonClicked() { new runPage(""); } void MainWin::estimateButtonClicked() { new estimatePage(); } void MainWin::browseButtonClicked() { // new restoreTree(); } void MainWin::statusPageButtonClicked() { /* if one exists, then just set it current */ bool found = false; foreach(Pages *page, m_pagehash) { if (m_currentConsole == page->console()) { if (page->name() == tr("Director Status")) { found = true; page->setCurrent(); } } } if (!found) { new DirStat(); } } void MainWin::restoreButtonClicked() { new prerestorePage(); if (mainWin->m_miscDebug) Pmsg0(000, "in restoreButtonClicked after prerestorePage\n"); } void MainWin::jobPlotButtonClicked() { #ifdef HAVE_QWT JobPlotPass pass; pass.use = false; new JobPlot(NULL, pass); #endif } /* * The user just finished typing a line in the command line edit box */ void MainWin::input_line() { int conn; QString cmdStr = lineEdit->text(); /* Get the text */ lineEdit->clear(); /* clear the lineEdit box */ if (m_currentConsole->is_connected()) { if (m_currentConsole->findDirComm(conn)) { m_currentConsole->consoleCommand(cmdStr, conn); } else { /* Use consoleCommand to allow typing anything */ m_currentConsole->consoleCommand(cmdStr); } } else { set_status(tr("Director not connected. 
Click on connect button.")); } m_cmd_history.append(cmdStr); m_cmd_last = -1; if (treeWidget->currentItem() != getFromHash(m_currentConsole)) m_currentConsole->setCurrent(); } void MainWin::about() { QMessageBox::about(this, tr("About bat"), tr("

<br><h2>Bacula Bat %1 (%2)</h2>" "<p>Copyright © 2007-%3 Kern Sibbald" "

The bat is an administrative console" " interface to the Director.").arg(VERSION).arg(BDATE).arg(BYEAR)); } void MainWin::help() { Help::displayFile("index.html"); } void MainWin::set_statusf(const char *fmt, ...) { va_list arg_ptr; char buf[1000]; va_start(arg_ptr, fmt); bvsnprintf(buf, sizeof(buf), fmt, arg_ptr); va_end(arg_ptr); set_status(buf); } void MainWin::set_status_ready() { set_status(tr(" Ready")); } void MainWin::set_status(const QString &str) { statusBar()->showMessage(str); } void MainWin::set_status(const char *buf) { statusBar()->showMessage(buf); } /* * Function to respond to the button bar button to undock */ void MainWin::undockWindowButton() { Pages* page = (Pages*)tabWidget->currentWidget(); if (page) { page->togglePageDocking(); } } /* * Function to respond to action on page selector context menu to toggle the * dock status of the window associated with the page selectors current * tree widget item. */ void MainWin::toggleDockContextWindow() { QTreeWidgetItem *currentitem = treeWidget->currentItem(); /* Is this a page that has been inserted into the hash */ if (getFromHash(currentitem)) { Pages* page = getFromHash(currentitem); if (page) { page->togglePageDocking(); } } } /* * This function is called when the stack item is changed. Call * the virtual function here. Avoids a window being undocked leaving * a window at the top of the stack unpopulated. */ void MainWin::stackItemChanged(int) { if (m_isClosing) return; /* if closing the application, do nothing here */ Pages* page = (Pages*)tabWidget->currentWidget(); /* run the virtual function in case this class overrides it */ if (page) { page->currentStackItem(); } if (!m_waitState) { disconnect(treeWidget, SIGNAL(itemClicked(QTreeWidgetItem *, int)), this, SLOT(treeItemClicked(QTreeWidgetItem *, int))); disconnect(treeWidget, SIGNAL( currentItemChanged(QTreeWidgetItem *, QTreeWidgetItem *)), this, SLOT(treeItemChanged(QTreeWidgetItem *, QTreeWidgetItem *))); treeWidget->setCurrentItem(getFromHash(page)); connect(treeWidget, SIGNAL(itemClicked(QTreeWidgetItem *, int)), this, SLOT(treeItemClicked(QTreeWidgetItem *, int))); connect(treeWidget, SIGNAL(currentItemChanged(QTreeWidgetItem *, QTreeWidgetItem *)), this, SLOT(treeItemChanged(QTreeWidgetItem *, QTreeWidgetItem *))); } } /* * Function to simplify insertion of QTreeWidgetItem <-> Page association * into a double direction hash. */ void MainWin::hashInsert(QTreeWidgetItem *item, Pages *page) { m_pagehash.insert(item, page); m_widgethash.insert(page, item); } /* * Function to simplify removal of QTreeWidgetItem <-> Page association * into a double direction hash. */ void MainWin::hashRemove(QTreeWidgetItem *item, Pages *page) { /* I had all sorts of return status checking code here. Do we have a log * level capability in bat. I would have left it in but it used printf's * and it should really be some kind of log level facility ??? * ******FIXME********/ m_pagehash.remove(item); m_widgethash.remove(page); } /* * Function to retrieve a Page* when the item in the page selector's tree is * known. */ Pages* MainWin::getFromHash(QTreeWidgetItem *item) { return m_pagehash.value(item); } /* * Function to retrieve the page selectors tree widget item when the page is * known. */ QTreeWidgetItem* MainWin::getFromHash(Pages *page) { return m_widgethash.value(page); } void MainWin::closeCurrentPage() { closePage(-1); } /* * Function to respond to action on page selector context menu to close the * current window. 
*/ void MainWin::closePage(int item) { QTreeWidgetItem *currentitem; Pages *page = NULL; if (item >= 0) { page = (Pages *)tabWidget->widget(item); } else { currentitem = treeWidget->currentItem(); /* Is this a page that has been inserted into the hash */ if (getFromHash(currentitem)) { page = getFromHash(currentitem); } } if (page) { if (page->isCloseable()) { page->closeStackPage(); } else { page->hidePage(); } } } /* Quick function to return the current console */ Console *MainWin::currentConsole() { return m_currentConsole; } /* Quick function to return the tree item for the director */ QTreeWidgetItem *MainWin::currentTopItem() { return m_currentConsole->directorTreeItem(); } /* Preferences menu item clicked */ void MainWin::setPreferences() { prefsDialog prefs; prefs.commDebug->setCheckState(m_commDebug ? Qt::Checked : Qt::Unchecked); prefs.connDebug->setCheckState(m_connDebug ? Qt::Checked : Qt::Unchecked); prefs.displayAll->setCheckState(m_displayAll ? Qt::Checked : Qt::Unchecked); prefs.sqlDebug->setCheckState(m_sqlDebug ? Qt::Checked : Qt::Unchecked); prefs.commandDebug->setCheckState(m_commandDebug ? Qt::Checked : Qt::Unchecked); prefs.miscDebug->setCheckState(m_miscDebug ? Qt::Checked : Qt::Unchecked); prefs.recordLimit->setCheckState(m_recordLimitCheck ? Qt::Checked : Qt::Unchecked); prefs.recordSpinBox->setValue(m_recordLimitVal); prefs.daysLimit->setCheckState(m_daysLimitCheck ? Qt::Checked : Qt::Unchecked); prefs.daysSpinBox->setValue(m_daysLimitVal); prefs.checkMessages->setCheckState(m_checkMessages ? Qt::Checked : Qt::Unchecked); prefs.checkMessagesSpin->setValue(m_checkMessagesInterval); prefs.executeLongCheckBox->setCheckState(m_longList ? Qt::Checked : Qt::Unchecked); prefs.rtPopDirCheckBox->setCheckState(m_rtPopDirDebug ? Qt::Checked : Qt::Unchecked); prefs.rtDirCurICCheckBox->setCheckState(m_rtDirCurICDebug ? Qt::Checked : Qt::Unchecked); prefs.rtDirICCheckBox->setCheckState(m_rtDirICDebug ? Qt::Checked : Qt::Unchecked); prefs.rtFileTabICCheckBox->setCheckState(m_rtFileTabICDebug ? Qt::Checked : Qt::Unchecked); prefs.rtVerTabICCheckBox->setCheckState(m_rtVerTabICDebug ? Qt::Checked : Qt::Unchecked); prefs.rtUpdateFTCheckBox->setCheckState(m_rtUpdateFTDebug ? Qt::Checked : Qt::Unchecked); prefs.rtUpdateVTCheckBox->setCheckState(m_rtUpdateVTDebug ? Qt::Checked : Qt::Unchecked); prefs.rtChecksCheckBox->setCheckState(m_rtChecksDebug ? Qt::Checked : Qt::Unchecked); prefs.rtIconStateCheckBox->setCheckState(m_rtIconStateDebug ? Qt::Checked : Qt::Unchecked); prefs.rtRestore1CheckBox->setCheckState(m_rtRestore1Debug ? Qt::Checked : Qt::Unchecked); prefs.rtRestore2CheckBox->setCheckState(m_rtRestore2Debug ? Qt::Checked : Qt::Unchecked); prefs.rtRestore3CheckBox->setCheckState(m_rtRestore3Debug ? Qt::Checked : Qt::Unchecked); switch (ItemFormatterBase::getBytesConversion()) { case ItemFormatterBase::BYTES_CONVERSION_NONE: prefs.radioConvertOff->setChecked(true); break; case ItemFormatterBase::BYTES_CONVERSION_IEC: prefs.radioConvertIEC->setChecked(true); break; default: prefs.radioConvertStandard->setChecked(true); break; } prefs.openPlotCheckBox->setCheckState(m_openPlot ? Qt::Checked : Qt::Unchecked); #ifndef HAVE_QWT prefs.openPlotCheckBox->setVisible(false); #endif prefs.openBrowserCheckBox->setCheckState(m_openBrowser ? Qt::Checked : Qt::Unchecked); prefs.openDirStatCheckBox->setCheckState(m_openDirStat ? 
Qt::Checked : Qt::Unchecked); prefs.exec(); } /* Preferences dialog */ prefsDialog::prefsDialog() : QDialog() { setupUi(this); } void prefsDialog::accept() { this->hide(); mainWin->m_commDebug = this->commDebug->checkState() == Qt::Checked; mainWin->m_connDebug = this->connDebug->checkState() == Qt::Checked; mainWin->m_displayAll = this->displayAll->checkState() == Qt::Checked; mainWin->m_sqlDebug = this->sqlDebug->checkState() == Qt::Checked; mainWin->m_commandDebug = this->commandDebug->checkState() == Qt::Checked; mainWin->m_miscDebug = this->miscDebug->checkState() == Qt::Checked; mainWin->m_recordLimitCheck = this->recordLimit->checkState() == Qt::Checked; mainWin->m_recordLimitVal = this->recordSpinBox->value(); mainWin->m_daysLimitCheck = this->daysLimit->checkState() == Qt::Checked; mainWin->m_daysLimitVal = this->daysSpinBox->value(); mainWin->m_checkMessages = this->checkMessages->checkState() == Qt::Checked; mainWin->m_checkMessagesInterval = this->checkMessagesSpin->value(); mainWin->m_longList = this->executeLongCheckBox->checkState() == Qt::Checked; mainWin->m_rtPopDirDebug = this->rtPopDirCheckBox->checkState() == Qt::Checked; mainWin->m_rtDirCurICDebug = this->rtDirCurICCheckBox->checkState() == Qt::Checked; mainWin->m_rtDirICDebug = this->rtDirICCheckBox->checkState() == Qt::Checked; mainWin->m_rtFileTabICDebug = this->rtFileTabICCheckBox->checkState() == Qt::Checked; mainWin->m_rtVerTabICDebug = this->rtVerTabICCheckBox->checkState() == Qt::Checked; mainWin->m_rtUpdateFTDebug = this->rtUpdateFTCheckBox->checkState() == Qt::Checked; mainWin->m_rtUpdateVTDebug = this->rtUpdateVTCheckBox->checkState() == Qt::Checked; mainWin->m_rtChecksDebug = this->rtChecksCheckBox->checkState() == Qt::Checked; mainWin->m_rtIconStateDebug = this->rtIconStateCheckBox->checkState() == Qt::Checked; mainWin->m_rtRestore1Debug = this->rtRestore1CheckBox->checkState() == Qt::Checked; mainWin->m_rtRestore2Debug = this->rtRestore2CheckBox->checkState() == Qt::Checked; mainWin->m_rtRestore3Debug = this->rtRestore3CheckBox->checkState() == Qt::Checked; if (this->radioConvertOff->isChecked()) { ItemFormatterBase::setBytesConversion(ItemFormatterBase::BYTES_CONVERSION_NONE); } else if (this->radioConvertIEC->isChecked()){ ItemFormatterBase::setBytesConversion(ItemFormatterBase::BYTES_CONVERSION_IEC); } else { ItemFormatterBase::setBytesConversion(ItemFormatterBase::BYTES_CONVERSION_SI); } mainWin->m_openPlot = this->openPlotCheckBox->checkState() == Qt::Checked; mainWin->m_openBrowser = this->openBrowserCheckBox->checkState() == Qt::Checked; mainWin->m_openDirStat = this->openDirStatCheckBox->checkState() == Qt::Checked; QSettings settings("www.bacula.org", "bat"); settings.beginGroup("Debug"); settings.setValue("commDebug", mainWin->m_commDebug); settings.setValue("connDebug", mainWin->m_connDebug); settings.setValue("displayAll", mainWin->m_displayAll); settings.setValue("sqlDebug", mainWin->m_sqlDebug); settings.setValue("commandDebug", mainWin->m_commandDebug); settings.setValue("miscDebug", mainWin->m_miscDebug); settings.endGroup(); settings.beginGroup("JobList"); settings.setValue("recordLimitCheck", mainWin->m_recordLimitCheck); settings.setValue("recordLimitVal", mainWin->m_recordLimitVal); settings.setValue("daysLimitCheck", mainWin->m_daysLimitCheck); settings.setValue("daysLimitVal", mainWin->m_daysLimitVal); settings.endGroup(); settings.beginGroup("Timers"); settings.setValue("checkMessages", mainWin->m_checkMessages); settings.setValue("checkMessagesInterval", 
mainWin->m_checkMessagesInterval); settings.endGroup(); settings.beginGroup("Misc"); settings.setValue("longList", mainWin->m_longList); settings.setValue("byteConvert", ItemFormatterBase::getBytesConversion()); settings.setValue("openplot", mainWin->m_openPlot); settings.setValue("openbrowser", mainWin->m_openBrowser); settings.setValue("opendirstat", mainWin->m_openDirStat); settings.endGroup(); settings.beginGroup("RestoreTree"); settings.setValue("rtPopDirDebug", mainWin->m_rtPopDirDebug); settings.setValue("rtDirCurICDebug", mainWin->m_rtDirCurICDebug); settings.setValue("rtDirCurICRetDebug", mainWin->m_rtDirICDebug); settings.setValue("rtFileTabICDebug", mainWin->m_rtFileTabICDebug); settings.setValue("rtVerTabICDebug", mainWin->m_rtVerTabICDebug); settings.setValue("rtUpdateFTDebug", mainWin->m_rtUpdateFTDebug); settings.setValue("rtUpdateVTDebug", mainWin->m_rtUpdateVTDebug); settings.setValue("rtChecksDebug", mainWin->m_rtChecksDebug); settings.setValue("rtIconStateDebug", mainWin->m_rtIconStateDebug); settings.setValue("rtRestore1Debug", mainWin->m_rtRestore1Debug); settings.setValue("rtRestore2Debug", mainWin->m_rtRestore2Debug); settings.setValue("rtRestore3Debug", mainWin->m_rtRestore3Debug); settings.endGroup(); } void prefsDialog::reject() { this->hide(); mainWin->set_status(tr("Canceled")); } /* read preferences for the prefences dialog box */ void MainWin::readPreferences() { QSettings settings("www.bacula.org", "bat"); settings.beginGroup("Debug"); m_commDebug = settings.value("commDebug", false).toBool(); m_connDebug = settings.value("connDebug", false).toBool(); m_displayAll = settings.value("displayAll", false).toBool(); m_sqlDebug = settings.value("sqlDebug", false).toBool(); m_commandDebug = settings.value("commandDebug", false).toBool(); m_miscDebug = settings.value("miscDebug", false).toBool(); settings.endGroup(); settings.beginGroup("JobList"); m_recordLimitCheck = settings.value("recordLimitCheck", true).toBool(); m_recordLimitVal = settings.value("recordLimitVal", 50).toInt(); m_daysLimitCheck = settings.value("daysLimitCheck", false).toBool(); m_daysLimitVal = settings.value("daysLimitVal", 28).toInt(); settings.endGroup(); settings.beginGroup("Timers"); m_checkMessages = settings.value("checkMessages", false).toBool(); m_checkMessagesInterval = settings.value("checkMessagesInterval", 28).toInt(); settings.endGroup(); settings.beginGroup("Misc"); m_longList = settings.value("longList", false).toBool(); ItemFormatterBase::setBytesConversion( (ItemFormatterBase::BYTES_CONVERSION) settings.value("byteConvert", ItemFormatterBase::BYTES_CONVERSION_IEC).toInt()); m_openPlot = settings.value("openplot", false).toBool(); m_openBrowser = settings.value("openbrowser", false).toBool(); m_openDirStat = settings.value("opendirstat", false).toBool(); settings.endGroup(); settings.beginGroup("RestoreTree"); m_rtPopDirDebug = settings.value("rtPopDirDebug", false).toBool(); m_rtDirCurICDebug = settings.value("rtDirCurICDebug", false).toBool(); m_rtDirICDebug = settings.value("rtDirCurICRetDebug", false).toBool(); m_rtFileTabICDebug = settings.value("rtFileTabICDebug", false).toBool(); m_rtVerTabICDebug = settings.value("rtVerTabICDebug", false).toBool(); m_rtUpdateFTDebug = settings.value("rtUpdateFTDebug", false).toBool(); m_rtUpdateVTDebug = settings.value("rtUpdateVTDebug", false).toBool(); m_rtChecksDebug = settings.value("rtChecksDebug", false).toBool(); m_rtIconStateDebug = settings.value("rtIconStateDebug", false).toBool(); m_rtRestore1Debug = 
settings.value("rtRestore1Debug", false).toBool(); m_rtRestore2Debug = settings.value("rtRestore2Debug", false).toBool(); m_rtRestore3Debug = settings.value("rtRestore3Debug", false).toBool(); settings.endGroup(); } void MainWin::setMessageIcon() { if (m_currentConsole->is_messagesPending()) actionMessages->setIcon(QIcon(QString::fromUtf8(":/images/mail-message-pending.png"))); else actionMessages->setIcon(QIcon(QString::fromUtf8(":/images/mail-message-new.png"))); } void MainWin::goToPreviousPage() { m_treeStackTrap = true; bool done = false; while (!done) { /* If stack list is emtpty, then done */ if (m_treeWidgetStack.isEmpty()) { done = true; } else { QTreeWidgetItem* testItem = m_treeWidgetStack.takeLast(); QTreeWidgetItemIterator it(treeWidget); /* lets avoid a segfault by setting an item current that no longer exists */ while (*it) { if (*it == testItem) { if (testItem != treeWidget->currentItem()) { treeWidget->setCurrentItem(testItem); done = true; } break; } ++it; } } } m_treeStackTrap = false; } bacula-15.0.3/src/qt-console/main.cpp0000644000175000017500000001564114771010173017174 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Main program for bat (qt-console) * * Written by Kern Sibbald, January MMVII * */ #include "bat.h" #include #include /* * We need Qt version 4.8.4 or later to be able to comple correctly */ #if QT_VERSION < 0x040804 #error "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" #error "You need Qt version 4.8.4 or later to build Bat" #error "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" 
#endif MainWin *mainWin; QApplication *app; /* Forward referenced functions */ void terminate_console(int sig); static void usage(); static int check_resources(); extern bool parse_bat_config(CONFIG *config, const char *configfile, int exit_code); extern void message_callback(int /* type */, char *msg); #define CONFIG_FILE "bat.conf" /* default configuration file */ /* Static variables */ static CONFIG *config; static char *configfile = NULL; int main(int argc, char *argv[]) { int ch; bool no_signals = true; bool test_config = false; app = new QApplication(argc, argv); app->setQuitOnLastWindowClosed(true); #if QT_VERSION < 0x050000 app->setStyle(new QPlastiqueStyle()); QTextCodec::setCodecForCStrings(QTextCodec::codecForName("UTF-8")); #endif QTranslator qtTranslator; qtTranslator.load(QString("qt_") + QLocale::system().name(),QLibraryInfo::location(QLibraryInfo::TranslationsPath)); app->installTranslator(&qtTranslator); QTranslator batTranslator; batTranslator.load(QString("bat_") + QLocale::system().name(),QLibraryInfo::location(QLibraryInfo::TranslationsPath)); app->installTranslator(&batTranslator); register_message_callback(message_callback); #ifdef xENABLE_NLS setlocale(LC_ALL, ""); bindtextdomain("bacula", LOCALEDIR); textdomain("bacula"); #endif #ifdef HAVE_WIN32 set_trace(true); /* output to trace file */ #endif init_stack_dump(); my_name_is(argc, argv, "bat"); lmgr_init_thread(); init_msg(NULL, NULL); working_directory = "/tmp"; #ifndef HAVE_WIN32 struct sigaction sigignore; sigignore.sa_flags = 0; sigignore.sa_handler = SIG_IGN; sigfillset(&sigignore.sa_mask); sigaction(SIGPIPE, &sigignore, NULL); sigaction(SIGUSR2, &sigignore, NULL); #endif while ((ch = getopt(argc, argv, "bc:d:r:st?")) != -1) { switch (ch) { case 'c': /* configuration file */ if (configfile != NULL) { free(configfile); } configfile = bstrdup(optarg); break; case 'd': debug_level = atoi(optarg); if (debug_level <= 0) debug_level = 1; break; case 's': /* turn off signals */ no_signals = true; break; case 't': test_config = true; break; case '?': default: usage(); } } argc -= optind; argv += optind; if (!no_signals) { init_signals(terminate_console); } if (argc) { usage(); } OSDependentInit(); #ifdef HAVE_WIN32 WSA_Init(); /* Initialize Windows sockets */ #endif if (configfile == NULL) { configfile = bstrdup(CONFIG_FILE); } config = New(CONFIG()); parse_bat_config(config, configfile, M_ERROR_TERM); if (init_crypto() != 0) { Emsg0(M_ERROR_TERM, 0, _("Cryptography library initialization failed.\n")); } if (!check_resources()) { Emsg1(M_ERROR_TERM, 0, _("Please correct configuration file: %s\n"), configfile); } if (test_config) { exit(0); } mainWin = new MainWin; mainWin->show(); return app->exec(); } void terminate_console(int /*sig*/) { #ifdef HAVE_WIN32 WSACleanup(); /* TODO: check when we have to call it */ #endif exit(0); } static void usage() { fprintf(stderr, _( PROG_COPYRIGHT "\n%sVersion: %s (%s) %s %s %s\n\n" "Usage: bat [-s] [-c config_file] [-d debug_level] [config_file]\n" " -c set configuration file to file\n" " -dnn set debug level to nn\n" " -s no signals\n" " -t test - read configuration and exit\n" " -? print this message.\n" "\n"), 2007, BDEMO, VERSION, BDATE, HOST_OS, DISTNAME, DISTVER); exit(1); } /* * Make a quick check to see that we have all the * resources needed. 
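 *
 * Specifically, check_resources() below walks every Director resource with
 * foreach_res(): when "TLS Require" is set it promotes "TLS Enable" (or fails
 * if Bacula was built without TLS support), and when TLS is enabled or used
 * for authentication it insists that either "TLS CA Certificate File" or
 * "TLS CA Certificate Dir" is configured.  The same checks are repeated for
 * every Console resource, and the function fails if the configuration file
 * defines no Director resource at all.
 *
 * For illustration only (this sketch is not part of the source tree, and all
 * names, hosts and paths are placeholders to adapt), a bat.conf Director
 * resource that would satisfy these checks with TLS enabled might look like:
 *
 *   Director {
 *     Name     = example-dir          # placeholder name
 *     DIRport  = 9101
 *     Address  = dir.example.org      # placeholder host
 *     Password = "console-password"   # placeholder password
 *     TLS Enable = yes
 *     TLS Require = yes
 *     TLS CA Certificate File = /etc/bacula/certs/ca.pem  # or TLS CA Certificate Dir
 *   }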
*/ static int check_resources() { bool ok = true; DIRRES *director; int numdir; bool tls_needed; LockRes(); numdir = 0; foreach_res(director, R_DIRECTOR) { numdir++; /* tls_require implies tls_enable */ if (director->tls_require) { if (have_tls) { director->tls_enable = true; } else { Jmsg(NULL, M_FATAL, 0, _("TLS required but not configured in Bacula.\n")); ok = false; continue; } } tls_needed = director->tls_enable || director->tls_authenticate; if ((!director->tls_ca_certfile && !director->tls_ca_certdir) && tls_needed) { Emsg2(M_FATAL, 0, _("Neither \"TLS CA Certificate File\"" " or \"TLS CA Certificate Dir\" are defined for Director \"%s\" in %s." " At least one CA certificate store is required.\n"), director->hdr.name, configfile); ok = false; } } if (numdir == 0) { Emsg1(M_FATAL, 0, _("No Director resource defined in %s\n" "Without that I don't how to speak to the Director :-(\n"), configfile); ok = false; } CONRES *cons; /* Loop over Consoles */ foreach_res(cons, R_CONSOLE) { /* tls_require implies tls_enable */ if (cons->tls_require) { if (have_tls) { cons->tls_enable = true; } else { Jmsg(NULL, M_FATAL, 0, _("TLS required but not configured in Bacula.\n")); ok = false; continue; } } tls_needed = cons->tls_enable || cons->tls_authenticate; if ((!cons->tls_ca_certfile && !cons->tls_ca_certdir) && tls_needed) { Emsg2(M_FATAL, 0, _("Neither \"TLS CA Certificate File\"" " or \"TLS CA Certificate Dir\" are defined for Console \"%s\" in %s.\n"), cons->hdr.name, configfile); ok = false; } } UnlockRes(); return ok; } bacula-15.0.3/src/qt-console/install_conf_file.in0000755000175000017500000000071114771010173021541 0ustar bsbuildbsbuild#!/bin/sh sbindir=@sbindir@ sysconfdir=@sysconfdir@ INSTALL_CONFIG="@INSTALL@ -m 640" DESTDIR=`echo ${DESTDIR}` srcconf=bat.conf if test -f ${DESTDIR}${sysconfdir}/${srcconf}; then destconf=${srcconf}.new echo " ==> Found existing $srcconf, installing new conf file as ${destconf}" else destconf=${srcconf} fi echo "${INSTALL_CONFIG} ${srcconf} ${DESTDIR}${sysconfdir}/${destconf}" ${INSTALL_CONFIG} ${srcconf} ${DESTDIR}${sysconfdir}/${destconf} bacula-15.0.3/src/qt-console/ts/0000755000175000017500000000000014771010173016163 5ustar bsbuildbsbuildbacula-15.0.3/src/qt-console/ts/bat_fr.ts0000644000175000017500000061415414771010173020003 0ustar bsbuildbsbuild ClientForm Client Tree Arborescence Client Refresh Client List Rafraichir la liste des Clients Requery the director for the list of clients. Interroger le director pour la liste des clients. List Jobs of Client Lister les travaux du client Open a joblist page selecting this client. Ouvrir une liste des travaux en sélectionnant ce client. Purge Jobs Purger les travaux Purge jobs peformed from this client. Purger les travaux de ce client. Prune Jobs Élaguer les travaux Open the diaolog to prune for this client. Ouvrir un dialogue pour élaguer (prune) ce client. 
Status Client État du client ClientStat Client Status %1 État du client %1 Job Id Id du travail Job Level Niveau Job Files Fichiers Job Bytes Octets Job Status État Job Time Heure Job Name Nom ClientStatForm Form Form Running En cours Header Entête Refresh Timer Sablier de MàJ Do Refresh Activer <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:13pt; font-weight:600;">Terminated Jobs</span></p></body></html> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:13pt; font-weight:600;">Travaux terminés</span></p></body></html> Refresh Actualiser Cancel Running Job Annuler le travail en cours Clients Clients Clients Client Name Nom du client File Retention Durée de rétention du fichier Job Retention Rétention du travail AutoPrune Élagage auto ClientId Id du client Uname Uname Are you sure you want to purge all jobs of client "%1" ? The Purge command will delete associated Catalog database records from Jobs and Volumes without considering the retention period. Purge works only on the Catalog database and does not affect data written to Volumes. This command can be dangerous because you can delete catalog records associated with current backups of files, and we recommend that you do not use it unless you know what you are doing. Is there any way I can get you to click Cancel here? You really don't want to do this Press OK to proceed with the purge operation? Êtes-vous sûr de vouloir purger tous les travaux du client "%1" ? La commande Purge va détruire les enregistrements du catalogue pour les travaux et les volumes sans prendre en considération les durées de rétention. Purger n'affecte que le catalogue, pas les données écrites dans les Volumes. Cette commande peut être dangereuse car vous pouvez détruire les enregistrements associés aux sauvegardes de fichiers actuelles, et nous vous recommandons de ne pas l'utiliser sauf si vous savez vraiment ce que vous faîtes. Comment vous persuader de clicker Annuler ? Vous ne désirez probablement pas faire ça ! Cliquer OK pour réaliser l'opération de purge ? Client Status %1 État du client %1 Console Console Console No Director found. Aucun Director trouvé. Director not connected. Click on connect button. Director déconnecté. Cliquer sur le bouton connexion. Director is currently disconnected Please reconnect! le Director est actuellement déconnecté Veuillez vous reconnecter ! ConsoleForm Console Console StatusDir Console Help Aide de la Console Request Messages Récupérer les messages Reload bacula-dir.conf Recharger bacula-dir.conf Content Storage Status %1 Etat du dépôt %1 ContentForm Form Form Actions Actions Update slots M.à.J. 
slots Label Étiquette Move to tray Charger Empty tray Décharger Mount Monter Unmount Démonter Status État Release Libérer Drives Lecteurs Drive Lecteur Volume Volume Import/Export Slot Slot Content Contenu Slot Slot Volume Volume Bytes Octets Status État Media Type Type de support Pool Groupe Last Written Dernière écriture When expire? Expire le ? DirComm Bat Bat Already connected. Déjà connecté. DirStat Job Id Id du travail Job Level Niveau Job Files Fichiers Job Bytes Octets Job Status État Job Time Heure Job Name Nom Job Type Type de travail Priority Priorité Volume Volume Job Data Données Job Info Information Director Status État du director DirStatForm Form Form <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:13pt; font-weight:600;">Scheduled Jobs</span></p></body></html> <html><head><meta name="qrichtext" content="1" /><style type="text/css">p, li { white-space: pre-wrap; }</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"><p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:13pt; font-weight:600;">Travaux prévus</span></p></body></html> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:13pt; font-weight:600;">Running Jobs</span></p></body></html> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:13pt; font-weight:600;">Travaux en cours</span></p></body></html> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:13pt; font-weight:600;">Terminated Jobs</span></p></body></html> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:13pt; font-weight:600;">Travaux Terminés</span></p></body></html> Refresh Timer Sablier de rafraîchissement Do Refresh Activer le sablier Refresh Actualiser Cancel Selected Running Jobs Annuler les travaux en cours sélectionnés Disable Scheduled Job Désactiver le job planifié FileSet 
FileSets Jeux de fichiers FileSet Name FileSet Name FileSet Id Id du jeu de fichiers Create Time Date de création FileSet FileSet FileSet Name Nom FileSetForm FileSet Tree Arbre des jeux de fichiers Refresh FileSet List Rafraichir la liste des jeux de fichier Requery the director for the list of storage objects. Réinterroger le director pour la liste des dépôts. ShowJobs Afficher les travaux Show FileSet In Console Afficher le jeu de fichiers dans la console Help Help: %1 Aide : %1 Job Job Travail Are you sure you want to delete?? !!!. This delete command is used to delete a Job record and all associated catalog records that were created. This command operates only on the Catalog database and has no effect on the actual data written to a Volume. This command can be dangerous and we strongly recommend that you do not use it unless you know what you are doing. The Job and all its associated records (File and JobMedia) will be deleted from the catalog.Press OK to proceed with delete operation.? Êtes-vous sûr de vouloir détruire?? !!!.Cette commande est utilisée pour détruire un enregistrement "travail" et tous les enregistrements associés du catalogue. Cette commande influe uniquement sur le catalogue et n'a pas d'effet sur les données écrites sur un volume. Cette commande peut être dangereuse et nous vous recommandons fortement de ne pas l'utiliser, sauf si vous savez exactement ce que vous faites. Le travail et les autres enregistrements associés (fichiers et supports) seront détruits du catalogue. Cliquer OK pour réaliser la destruction ? Are you sure you want to cancel this job? Êtes-vous sûr de vouloir annuler ce travail ? Bat Bat There were no results! It is possible you may need to add "catalog = all" to the Messages resource for this job. Aucun résultat ! Il se pourrait que vous deviez ajouter "catalog = all" à la ressource "Messages" de ce travail. Error: Erreur : JobForm Form Form Cancel Annuler Delete Supprimer View errors for this Job Voir les erreurs de ce travail Errors Erreurs Media Support History Historique Run again Ré-éxécuter Read doc FileSet Jeu de fichiers Stats Statistiques Refresh Actualiser Basic Information Informations de base JobId: Id Travail : 2 2 Job Name: Nom du Travail : Test Level: Niveau : VirtualFull Client: Client : client-fd FileSet: Jeu de fichier : TheFileSet Pool: Groupe : ThePool Status État Status: État : Errors: Erreurs : 0 0 Files: Fichiers : 1,924 1,924 Bytes: Octets : 109 MB Purged: Purgés : Times Temps Sched Time: Heure prévue : 2009-07-31 00:10:00 2009-07-31 00:10:00 Start Time: Heure début : End Time: Heure de fin : 2009-07-31 00:20:00 2009-07-31 00:20:00 Duration: Durée : 00:10:00 00:10:00 Volume Used Volume utilisé Vol0001 Running Information Informations en cours Speed: Vitesse : Files Examined: nb Fichers examinés : Current File: Fichier en cours : /var/www/bacula/spool 100,000 100,000 100 MB/s kB/s Bandwidth Limit: <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal;"> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:'DejaVu Sans'; font-size:10pt;"></p></body></html> JobList The Jobs query returned no results. Press OK to continue? La requête ne renvoie aucun travail. 
Cliquer OK pour continuer ? Are you sure you want to delete?? !!!. This delete command is used to delete a Job record and all associated catalog records that were created. This command operates only on the Catalog database and has no effect on the actual data written to a Volume. This command can be dangerous and we strongly recommend that you do not use it unless you know what you are doing. The Job and all its associated records (File and JobMedia) will be deleted from the catalog.Press OK to proceed with delete operation.? Êtes-vous sûr de vouloir détruire?? !!!.Cette commande est utilisée pour détruire un enregistrement "travail" et tous les enregistrements associés du catalogue. Cette commande influe uniquement sur le catalogue et n'a pas d'effet sur les données écrites sur un volume. Cette commande peut être dangereuse et nous vous recommandons fortement de ne pas l'utiliser, sauf si vous savez exactement ce que vous faites. Le travail et les autres enregistrements associés (fichiers et supports) seront détruits du catalogue. Cliquer OK pour réaliser la destruction ? Are you sure you want to purge ?? !!!. The Purge command will delete associated Catalog database records from Jobs and Volumes without considering the retention period. Purge works only on the Catalog database and does not affect data written to Volumes. This command can be dangerous because you can delete catalog records associated with current backups of files, and we recommend that you do not use it unless you know what you are doing. Press OK to proceed with the purge operation? Êtes-vous sûr de vouloir purger ?? !!!. La commande de purge va détruire les enregistrements du catalogue associés (travaux et volumes) sans prendre en compte la durée de rétention. La purge ne concerne que le catalogue et n'affecte pas les données écrites sur les volumes. Cette commande peut être dangereuse car vous pouvez détruire des enregistrements associés aux sauveardes actuelles, et nous vous recommandons de ne pas l'utiliser, sauf si vous savez exactement ce que vous faites. Cliquer OK pour réaliser la purge? Any Tous, mais peut-on le traduire ? Job Id Id du travail Job Name Nom Client Client Job Starttime Heure de début Job Type Type de travail Job Level Niveau Job Files Fichiers Job Bytes Octets Job Status État Purged Purgé File Set Jeu de fichiers Backup Sauvegarde .... mais peut-on le traduire ? Restore Restauration Pool Name Nom du Groupe First Volume Premier Volume VolCount nb de volumes Jobs Run on Volume %1 Exécutions sur le volume %1 Jobs Run from Client %1 Exécutions depuis le client %1 Jobs Run of Job %1 Exécutions du travail %1 Jobs Run with fileset %1 Exécutions avec le jeu de fichiers %1 Jobs Run Exécutions de travaux run job="%1" client="%2" level=%3 faut-il traduire les commandes envoyées à la console ? je ne crois pas. fileset="%1" faut-il traduire les commandes envoyées à la console ? je ne crois pas. Delete list of %1 Jobs Détruire %1 travaux Purge Files from list of %1 Jobs Purger les fichiers de %1 travaux Delete Single Job Détruire un travail Purge Files from single job Purger les fichiers d'un travail Running En cours ... mais faut-il le traduire ? Created, not yet running Créé, pas encore lancé ... mais faut-il le traduire ? 
JobListForm Form Form Refresh Actualiser Graph Graphique FileSet Jeu de fichiers Status État Purged Purgé Record Limit Limitation en nombre Days Limit Limitation en jours Clients Clients Volume Volume Job Travail Level Niveau Refresh Job List Actualiser la liste Requery the director for the list of jobs. Réinterroger le Director pour la liste des travaux. List Files On Job Lister les fichiers du travail Restore From Job Restaurer depuis ce travail Restore From Time Restaurer à cette date Cancel Currently Running Job Annuler le travail en cours Pool Groupe Filter Copy Jobs Filtrer les copies Filter Migration Jobs Filtrer les migrations List Job Volumes Lister les volumes du travail List Volumes Lister les volumes Delete Job Détruire le travail Purge Files Purger les fichiers Restart Job Relancer le travail Show Job Log Voir les traces du travail Show Job Info Voir les informations du travail List Job Totals on Console Lister les grands totaux des travaux dans la console JobLog Bat Bat JobLog Traces du travail There were no results! It is possible you may need to add "catalog = all" to the Messages resource for this job. Aucun résultat ! Il se pourrait que vous deviez ajouter "catalog = all" à la ressource "Messages" de ce travail. Log records for job %1 Enregistrements de trace du travail %1 Error: Erreur : ... mais faut-il le traduire ? JobLogForm Job Log Traces du travail JobPlot The Jobs query returned no results. Press OK to continue? La requête ne renvoie aucun travail. Cliquer OK pour continuer ? JobPlot Sticks Lines Steps None Any Tous Files and Bytes backed up <-- Bytes Kb date of backup --> Number of Files --> Files Fichiers Bytes Octets Fitted Ellipse Rect Diamond Triangle DTrianle UTriangle LTriangle RTriangle Cross XCross HLine Vline Star1 Star2 Hexagon JobPlotControlsForm Form Form File Data Byte Data Refresh Actualiser Status État Level Niveau Purged Purgé FileSet Jeu de fichiers Volume Volume Client Client Job Travail Days Limit Limitation en jour Record Limit Limitation en nombre Byte Symbol Type File Symbol Type Graph Type Type de graphique Jobs Jobs Travaux Job Name Nom du travail Pool Groupe Messages Messages Client Client Storage Dépôt Where Level Niveau Type Type FileSet Jeu de fichiers Catalog Catalogue Enabled Activé MainForm bat - Bacula Admin Tool Bacula Administration Tool Outil d'administration de Bacula It's a Dock widget to allow page selection Settings Options &Help &Aide &File &Fichier Current Status Barre d'état Tool Bar Barre d'outils Page Selector Choisir Page Selects panel window Choisir l'onglet actif Use items in this tree to select what window is in panel Cliquer sur un élément de cette liste pour choisir l'onglet actif du panneau Enter a bacula command Entrer une commande Bacula Command: Commande : Click here to enter command Cliquer ici pour entrer une commande &Quit &Quitter Ctrl+Q &About bat &À propos de bat &Copy Co&pier Cu&t &Couper new nouveau open ouvrir &Paste Co&ller &Print &Imprimer Print Imprimer &Save &Enregistrer Save (not implemented) Enregistrer (pas implémenté) Connect Connexion Connect/disconnect Connexion/Déconnexion Label Étiquette (Label) Label a Volume Étiqueter un volume Restore Restaurer Restore Files Restaurer des fichiers Run Job Lancer un travail Run a Job Lancer un nouveau travail Estimate Job Estimer un travail Estimate a Job Estimer un travail Status Dir Query status of director État du director Status Dir État du director Query status of director in console Interroger l'état du director dans la console &Select Font ... 
&Choisir la police ... Undock Window Déplacer vers une nouvelle fenêtre Undock Current Window Déplacer l'onglet courant vers une nouvelle fenêtre ToggleDock Toggle Dock Status Close Page Fermer l'onglet Close The Current Page Fermer l'onglet en cours Messages Messages Display any messages queued at the director Afficher tous les messages en attente dans le director &Preferences ... &Préférences ... Set Preferences Définir les préférences bat &Help &Aide de bat Browse Naviguer Browse Cataloged Files Naviguer dans les fichiers du catalogue JobPlot Plot Job Files and Bytes Status Dir Page État du director Director Status Page État du director 1 1 Repop Lists Remplir les listes Reload and Repop Recharger et remplir les listes back Previous Page MainWin Director not connected. Click on connect button. le Director n'est pas connecté. Cliquer le bouton de connexion. About bat À propos de bat <br><h2>bat 1.0, by Dirk H Bartley and Kern Sibbald</h2><p>Copyright &copy; 2007- <br><h2>bat 1.0, par Dirk H Bartley et Kern Sibbald</h2><p>Copyright &copy; 2007- Ready Prêt Select Page Sélection d'onglet Console Console Director Status Etat du director <br><h2>Bacula Bat %1 (%2)</h2><p>Copyright &copy; 2007-%3 Kern Sibbald<p>The <b>bat</b> is an administrative console interface to the Director. <br><h2>bat %1 (%2), by Dirk H Bartley and Kern Sibbald</h2><p>Copyright &copy; 2007-%3 Kern Sibbald<p>The <b>bat</b> is an administrative console interface to the Director. <br><h2>bat %1 (%2), par Dirk H Bartley et Kern Sibbald</h2><p>Copyright &copy; 2007-%3 Kern Sibbald<p><b>bat</b> est une console d'administration pour le "Director". MediaEdit Media Edit Édition du support No Volume name Pas de nom de volume No Volume name given Pas de nom de Volume donné MediaInfo Media Info Informations du support Are you sure you want to purge ?? !!!. The Purge command will delete associated Catalog database records from Jobs and Volumes without considering the retention period. Purge works only on the Catalog database and does not affect data written to Volumes. This command can be dangerous because you can delete catalog records associated with current backups of files, and we recommend that you do not use it unless you know what you are doing. Press OK to proceed with the purge operation? Êtes-vous sûr de vouloir purger ?? !!!. La commande de purge va détruire les enregistrements du catalogue associés (travaux et volumes) sans prendre en compte la durée de rétention. La purge ne concerne que le catalogue et n'affecte pas les données écrites sur les volumes. Cette commande peut être dangereuse car vous pouvez détruire des enregistrements associés aux sauvegardes actuelles, et nous vous recommandons de ne pas l'utiliser, sauf si vous savez exactement ce que vous faites. Cliquer OK pour réaliser la purge? Are you sure you want to delete?? !!!. This delete command is used to delete a Volume record and all associated catalog records that were created. This command operates only on the Catalog database and has no effect on the actual data written to a Volume. This command can be dangerous and we strongly recommend that you do not use it unless you know what you are doing. All Jobs and all associated records (File and JobMedia) will be deleted from the catalog.Press OK to proceed with delete operation.? Êtes-vous sûr de vouloir détruire?? !!!. Cette commande est utilisée pour détruire un enregistrement "volume" et tous les enregistrements associés du catalogue. 
Cette commande influe uniquement sur le catalogue et n'a pas d'effet sur les données écritessur un volume. Cette commande peut être dangereuse et nous vous recommandons fortement de ne pas l'utiliser, sauf si vous savez exactement ce que vous faites. Tous les travaux et autres enregistrements associés (fichiers et supports) seront détruits du catalogue. Cliquer OK pour réaliser la destruction ? MediaList Are you sure you want to delete?? !!!. This delete command is used to delete a Volume record and all associated catalog records that were created. This command operates only on the Catalog database and has no effect on the actual data written to a Volume. This command can be dangerous and we strongly recommend that you do not use it unless you know what you are doing. All Jobs and all associated records (File and JobMedia) will be deleted from the catalog.Press OK to proceed with delete operation.? Êtes-vous sûr de vouloir détruire?? !!!. Cette commande est utilisée pour détruire un enregistrement "volume" et tous les enregistrements associés du catalogue. Cette commande influe uniquement sur le catalogue et n'a pas d'effet sur les données écritessur un volume. Cette commande peut être dangereuse et nous vous recommandons fortement de ne pas l'utiliser, sauf si vous savez exactement ce que vous faites. Tous les travaux et autres enregistrements associés (fichiers et supports) seront détruits du catalogue. Cliquer OK pour réaliser la destruction ? Are you sure you want to purge ?? !!!. The Purge command will delete associated Catalog database records from Jobs and Volumes without considering the retention period. Purge works only on the Catalog database and does not affect data written to Volumes. This command can be dangerous because you can delete catalog records associated with current backups of files, and we recommend that you do not use it unless you know what you are doing. Press OK to proceed with the purge operation? Êtes-vous sûr de vouloir purger ?? !!!. La commande de purge va détruire les enregistrements du catalogue associés (travaux et volumes) sans prendre en compte la durée de rétention. La purge ne concerne que le catalogue et n'affecte pas les données écrites sur les volumes. Cette commande peut être dangereuse car vous pouvez détruire des enregistrements associés aux sauveardes actuelles, et nous vous recommandons de ne pas l'utiliser, sauf si vous savez exactement ce que vous faites. Cliquer OK pour réaliser la purge? Volume Name Nom de volume Id Status État Enabled Activé Bytes Octets Files Fichiers Jobs Travaux Retention Rétention Media Type Type de support Slot Slot Use Duration Durée d'utilisation Max Jobs nb max de travaux Max Files nb max de fichiers Max Bytes nb max d'octets Recycle Recycler Last Written Dernière écriture Pools Groupes First Written Première écriture Read Time Temps de lecture Write Time Temps d'écriture Recycle Count Compteur de recyclage Recycle Pool Groupe de recyclage MediaListForm Media Tree Arbre des supports Refresh Media List Actualiser la liste des supports Requery the director for the list of media. Interroger le director pour la liste des supports. 
Edit Volume Éditer le volume List Jobs On Volume Lister les travaux du volume Delete Volume Détruire le volume Prune Volume Élaguer le volume (prune) Purge Volume Purger le volume Relabel Volume Ré-étiqueter le volume Update all Volumes From Pool Mettre à jour tous les volumes du groupe Update all Volumes from all Pools Mettre à jour tous les volumes de tous les groupes Volume From Pool Volumes du groupe MediaView Media Support Are you sure you want to purge ?? !!!. The Purge command will delete associated Catalog database records from Jobs and Volumes without considering the retention period. Purge works only on the Catalog database and does not affect data written to Volumes. This command can be dangerous because you can delete catalog records associated with current backups of files, and we recommend that you do not use it unless you know what you are doing. Press OK to proceed with the purge operation? Êtes-vous sûr de vouloir purger ?? !!!. La commande de purge va détruire les enregistrements du catalogue associés (travaux et volumes) sans prendre en compte la durée de rétention. La purge ne concerne que le catalogue et n'affecte pas les données écrites sur les volumes. Cette commande peut être dangereuse car vous pouvez détruire des enregistrements associés aux sauvegardes actuelles, et nous vous recommandons de ne pas l'utiliser, sauf si vous savez exactement ce que vous faites. Cliquer OK pour réaliser la purge ? Are you sure you want to delete?? !!!. This delete command is used to delete a Volume record and all associated catalog records that were created. This command operates only on the Catalog database and has no effect on the actual data written to a Volume. This command can be dangerous and we strongly recommend that you do not use it unless you know what you are doing. All Jobs and all associated records (File and JobMedia) will be deleted from the catalog.Press OK to proceed with delete operation.? Êtes-vous sûr de vouloir détruire?? !!!. Cette commande est utilisée pour détruire un enregistrement "volume" et tous les enregistrements associés du catalogue. Cette commande influe uniquement sur le catalogue et n'a pas d'effet sur les données écritessur un volume. Cette commande peut être dangereuse et nous vous recommandons fortement de ne pas l'utiliser, sauf si vous savez exactement ce que vous faites. Tous les travaux et autres enregistrements associés (fichiers et supports) seront détruits du catalogue. Cliquer OK pour réaliser la destruction ? MediaViewForm Form Form Edit Éditer Purge Purger Delete Détruire Prune Élaguer (prune) Filter Filtre Media Type: Type de support : Status: État : Limit: Limite : Name: Nom : Pool: Groupe :Pool : Location: Lieu : Expired Expiré Apply Appliquer Volume Name Nom du volume Online Disponible Slot Emplacement Vol Bytes Octets Vol Usage Utilisation Vol Status ÉTAT Pool gROUPE Media Type Type de support Last Written Dernière écriture When expire? Expire le ? 
Pages %1 of Director %2 %1 du Director %2 UnDock %1 Window Déplacer %1 vers une nouvelle fenêtre ReDock %1 Window Remettre %1 en onglet Console Console PrefsForm Preferences Préférences Messages Messages Messages Options Options des Messages Message check interval in seconds Intervalle de vérification Check Messages Activer la vérification auto Joblist Travaux Joblist Limit Options Options de limitation sur Travaux Days Limit Limitation en jour Record Limit Limitation sur la taille Misc Autres Convert Conversion Convert Off Désactiver la conversion Convert Bytes with IEC 1000B = KB Convertir les octets avec IEC 1000B = KB Convert Bytes with 1024B = KB Convertir les octets avec 1024B = KB Context Sensitive List Commands Liste de commandes sensible au contexte Execute Long List Open Plot page on startup Open Browser page on startup Ouvrir l'onglet de navigation au démarrage Open Director Status page on startup Ouvrir l'onglet d'état du director au démarrage Debug Déboguer Debugging Options Options de débogage Debug comm Débogage communications avec le director Display all messages in console Afficher tous les messages dans la console Debug Commands Débogage des commandes Debug Sql queries Débogage des requêtes SQL Debug Miscelaneous Items Débogage divers Debug Miscellaneous Items RestoreTree Arbre de restauration Restore Debug 2 Directory Item Changed Restore Debug 1 Directory Current Item Changed Debug Update File Table Debug Version Table Item Changed Debug File Table Item Changed Debug Icon State Debug Update Checks Debug Restore Debug 3 Update Version Table Debug Populate Directory Debug <h2>Preferences</h2> <h2>Préférences</h2> Timers Sabliers Display Bytes using IEC units (1024B = 1 KiB) Afficher les octets avec les unités IEC (1 KiO = 1000 octets) Display Bytes using SI units (1000B = 1KB) Afficher les octets avec les unités SI (1 KO = 1000 octets) Open Pages Ouverture d'onglets Debug multiple connection QObject Any Tous No Non Yes Oui Invalid job status %1 État de travail invalide : %1 StorStat Storage Status %1 Etat du dépôt %1 Job Id Id du travail Job Level Niveau du Travail Job Files Fichiers Job Bytes Octets Job Status État Job Time Heure Job Name Nom StorStatForm Form Form Header Entête Waitreservation Attente de réservation Devices Lecteurs Volumes Volumes Spooling Running En cours Misc Autres Mount Monter UMount Démonter Label Étiquette Release Libérer Refresh Timer Sablier de rafraîchissement Do Refresh Activer le sablier <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:13pt; font-weight:600;">Terminated Jobs</span></p></body></html> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:13pt; font-weight:600;">Travaux Terminés</span></p></body></html> Refresh Actualiser Cancel Running Job Annuler le travail en cours Disable Scheduled Job Désactiver le job planifié Storage Storage Dépôt Name Nom Id Id Changer Changeur Slot Emplacement 
Status État Enabled Activé Pool Groupe Media Type Type de support Yes Oui Status Storage "%1" État du dépôt "%1" Status Storage "%1" in Window État du dépôt "%1" (dans une fenêtre) Label media in Storage "%1" Étiqueter le support dans le dépôt "%1" Mount media in Storage "%1" Monter le support dans le dépôt "%1" "UN" Mount media in Storage "%1" Démonter le support dans le dépôt "%1" Release media in Storage "%1" Libérer le support dans le dépôt "%1" Barcode Scan media in Storage "%1" Scanner le code-barre du support dans le dépôt "%1" Read scan media in Storage "%1" Storage Status %1 État du dépôt %1 StorageForm Storage Tree Arbre des dépôts Refresh Storage List Rafraichir la liste Requery the director for the list of storage objects. Réinterroger le director pour la liste des dépôts. Status Storage In Console État du dépôt (dans la console) Label Storage Étiqueter le dépôt MountStorage Monter UnMount Storage Démonter Update Slots Mettre à jour les slots Update Slots Scan Mettre à jour les slots (scan) Release Relâcher 1 1 Status Storage Window État du dépôt (dans une fenêtre) bRestore bRestore b-Restauration bRestoreForm brestore brestore Type Type File Name Nom de fichier Size Taille Date Date InChanger dans changeur Volume Volume JobId Id du travail Chksum Somme de contrôle Clear Effacer Estimate Estimer Restore Restaurer FileName Nom de fichier FileIndex Position du fichier Nb Files Nb de fichiers Merge Jobs Fusionner les travaux View all Versions Voir toutes les versions Current Directory Dossier courant File list -- drag below for restore Liste des fichiers -- glisser en-dessous pour restaurer Double Click File Name to decend Double-cliquer le dossier pour y entrer, double-cliquer le fichier pour voir ses versions Double Click to decend Double-cliquer pour entrer Selection Range Rang de sélection - - File Filter Filtre de fichiers File revisions -- drag below for restore Versions de fichier -- glisser en-dessous pour restaurer <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Drag and drop <span style=" font-weight:600;">File list</span> and/or <span style=" font-weight:600;">File revisions</span> items here for Restore</p></body></html> <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Glisser et déplacer ici depuis <span style=" font-weight:600;">"Liste des fichiers"</span> et/ou depuis <span style=" font-weight:600;">"Versions de fichier"</span> les éléments à restaurer.</p></body></html> Restore... 
Select from Client list drop down then select from Job list drop down Choisir un élément dans la liste déroulante "Client List" puis dans la liste déroulante "Job list" bRunRestoreForm Run restore Lancer la restauration Standard Standard Restore options Options de restauration Client: Client : Where: Où : Replace: Remplacer : Comment: Commentaire : Media needed Supports nécessaires InChanger dans Changeur Volume Volume Compute with directories Advanced Avancé File Relocation Déplacement de fichiers Use file relocation: Utiliser Strip prefix: préfixe à supprimer : Add prefix: préfixe à ajouter : Add suffix: suffixe à ajouter : Use regexp: utiliser une "regexp" : Where regexp: "regexp" : Other options Autres options When: Quand : Priority: Priorité : Storage: Dépôt : Job: Travail : estimateForm Form Form List Files Lister les fichiers Level: Niveau : Client: Client : Job: Travail : FileSet: Jeu de fichiers : <h3>Estimate a backup Job</h3> <h3>Estimer un travail de sauvegarde</h3> OK OK Cancel Annuler estimatePage Estimate Estimer helpForm Form Form &Home &Accueil &Back &En arrière Close Fermer jobsForm Client Tree Arborescence Client Refresh Jobs List Actualiser la liste des travaux Requery the director for the list of clients. Interroger le director pour la liste des clients. List Files Command commande "list files" : liste des fichiers List Volumes Command commande "list volumes" : liste des volumes List Next Volume Command commande "next volume" : volume suivant Enable Job Command commande "enable job" : activer le travail Disable Job Command commande "disable job" : désactiver le travail Open JobList on Job Ouvrie la liste des exécutions de ce travail Cancel Job Command commande "cancel job" : annuler le travail RunJob Èxécuter ce travail labelForm Form Form OK OK Cancel Annuler Volume Name: Nom du volume : Storage: Dépôt : Slot: Emplacement : Execute Automount Montage automatique On Activer Off Désactiver Pool: Groupe : <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:12pt; font-weight:600;">Label a Volume</span></p></body></html> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:12pt; font-weight:600;">Étiqueter un volume</span></p></body></html> Enter Name of Volume to create Entrer le nom du volume à créer mediaEditForm Form Form Pool: Groupe : Volume Status: État du volume : Max Volume Bytes: Nb Max d'octets : Slot: Emplacement : Max Volume Jobs: Nb Max de Travaux : Use Duration: Durée d'utilisation : OK OK Cancel Annuler Recycle Pool: Groupe de recyclage : Enabled Activé Max Volume Files: nb max de fichiers : Years Années Seconds Secondes Use Duration Durée d'utilisation Days Jours Hours Heures Months Mois Retention Rétention Minutes Minutes Volume : Volume : Recycle Recycler <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; 
font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:12pt; font-weight:600;">Edit a Volume</span></p></body></html> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:12pt; font-weight:600;">Éditer un Volume</span></p></body></html> Retention: Rétention : mediaInfoForm Form Form Edit Éditer Purge Purger Delete Supprimer Prune Élaguer (prune) Load Charger Unload Décharger Information Informations Name: Nom : Vol0001 Pool: Groupe : Default Online: Disponible : Enabled: Activé : yes Location: Lieu : Vault Status: État : Append Media Type: Type de support : File Recycle Pool: Groupe de recyclage : Scratch Statistics Statistiques Vol Bytes: Octets : 19.8 MB Vol Mounts: Montages : 10 10 Recycle count: Recyclages : 5 5 Read time: Temps de lecture : 10 mins Write time: Temps d'écriture : 20 mins Errors: Erreurs : 0 0 Last Written: Dernière écriture : 2009-07-05 12:23:00 2009-07-05 12:23:00 First Written: Première écriture : 2009-06-05 10:00:00 2009-06-05 10:00:00 Limits Limitations Use duration: Durée d'utilisation : Max jobs: Nb max de travaux : Max files: Nb max de fichiers : Max bytes: Nb max d'octets : Recycle: Recyclage : Retention: Rétention : 365 days Expire: Expire le : 2010-08-03 23:10:03 2010-08-03 23:10:03 Jobs Travaux JobId Id du travail Name Nom Start Time début le Type Type Level Niveau Files Fichiers Bytes Octets Status État mountDialog Storage : %1 Dépôt : %1 No Storage name Pas de nom de dépôt No Storage name given Pas de nom de dépôt indiqué Context sensitive command : Commande sensible au contexte : Director Response : Réponse du Director : mountForm Label Étiquette TextLabel TextLabel Slot: Emplacement : <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:12pt; font-weight:600;">Mount a Slot</span></p></body></html> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:12pt; font-weight:600;">Monter un emplacement</span></p></body></html> prefsDialog Canceled Annulé prerestoreForm Form Form All Files Tous les fichiers Select Files Fichiers sélectionnés Job Travail JobIds Travaux yyyy-mm-dd h:mm:ss aaaa-mm-jj h:mm:ss Use Most Recent Utiliser le plus récent File Set: File Set : Client: Client : Storage: Dépôt : Before: Avant : Pool: Pool : OK OK Cancel Annuler <h3>Select Jobs</h3> <h3>Sélectionner les Travaux</h3> prerestorePage Bat Bat There can be no spaces in the text for the joblist. Press OK to continue? 
Il ne peut pas y avoir d'espace dans la liste des travaux. Appuyer sur OK pour continuer ? Comma separated list of JobIds At least one of the jobs is not a valid job of type "Backup". Press OK to continue? Il y a au moins un travail qui n'est pas du type "Backup" (Sauvegarde). Appuyer sur OK pour continuer ? All jobs in the list must be of the same jobName and same client. Press OK to continue? Tous les travaux de la liste doivent avoir le même nom et le même Client. Appuyer sur OK pour continuer ? Restore Restaurer Any Tous Comma separted list of Job Ids Liste de "id de travail" séparés par des virgules Canceled Annulé The string is not a comma separated list of integers. Press OK to continue? La chaine n'est pas une liste de nombres séparés par des virgules. Appuyer sur OK pour continuer ? pruneForm Form Form Prune Files Élaguer (Prune) les fichiers Volume: Volume : <h3>Prune Files/Jobs/Volumes</h3> <h3>Élaguer (Prune) les Fichiers/Travaux/Volumes</h3> Prune Jobs Élaguer (prune) les travaux OK OK Cancel Annuler Client: Client : Prune Volumes Élaguer (Pruner) les Volumes prunePage Prune Élaguer (prune) Any Tous Canceled Annulé relabelDialog From Volume : Volume : No Volume name Pas de nom de volume No Volume name given Pas de nom de Volume donné New name must be different Le nouveau nom doit être différent relabelForm Label Label From Volume : Ancien Volume : Pool: Pool : Storage: Dépôt : New Volume Name: Nouveau nom de Volume : Slot: Slot : <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:12pt; font-weight:600;">Relabel a Volume</span></p></body></html> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:12pt; font-weight:600;">Relabéliser un Volume</span></p></body></html> restoreForm Form Form 1 1 2 2 3 3 4 4 5 5 6 6 <h2>Directories</h2> <h2>Répertoires</h2> <h3>Restore Select</h3> <h3>Restaurer la sélection</h3> Up Dossier parent Mark Sélectionner Unmark Désélectionner <h2>Files</h2> <h2>Fichiers</h2> Status: État : Current Dir: Dossier courant : OK OK Cancel Annuler UnMark Désélectionner Select a Directory Choisir un dossier Directories Dossiers restorePage Restore Select Sélection pour restauration Mark Sélectionner File Fichier Mode Mode User Propriétaire Group Groupe Size Taille Date Date In else of if parent cwd "%1" newdir "%2" Dans le "else" du "if parent" cwd "%1" newdir "%2" Canceled Annulé In addDirectory cwd "%1" newdir "%2" fullpath "%3" Dans "addDirectory" cwd "%1" newdir "%2" fullpath "%3" Nothing selected, nothing done Rien de sélectionné, rien de fait cd command failed la commande cd a échoué .pwd command failed la commande ".pwd" a échoué restoreTree Version Browser Navigateur temporel Directories Dossiers Any Tous Refresh From JobChecks Rafraichir depuis JobChecks Task Taches Querying Database Interrogation en cours Querying Jobs Interrogation des travaux Querying for Directories Interrogation des dossiers Processing 
Directories Analyse des dossiers File Name Nom de fichier Filename Id Id du nom de fichier Job Id Type Type End Time Heure de fin Hash FileId FileId RestoreTreePage Onglet de l'arbre de restauration Level Niveau Name Nom Purged Purgé TU TD Refresh From Re-Select Task %1 of %2 Tâche %1 sur %2 No jobs were selected in the job query !!!. Press OK to continue La requête ne renvoie aucun travail !!! Cliquer OK pour continuer Present Working Directory: %1 Dossier de travail actuel : %1 Job Type Type de travail First Volume Premier volume Task 1 of 3 Tâche 1 sur 3 Processing Checked directories Traitement des dossiers Task 2 of 3 Tâche 2 sur 3 Processing Exceptions Traitement des exceptions Task 3 of 3 Tâche 3 sur 3 Filling Database Table Remplissage de la base de données restoreTreeForm Form Form Jobs Travaux TextLabel TextLabel Files Fichiers Versions of File Versions du fichier FileName Nom de fichier Refresh Actualiser Restore Restaurer Job Travail Job List Job Criterion Selector Job List Job Criterion Selector Client Client Job List Client Criterion Selector Job List Client Criterion Selector FileSet Jeu de fichiers Job List Fileset Criterion Selector Job List Fileset Criterion Selector Record Limit Limitation en nombre Days Limit Limitation en jour Directory Dossier Select Directory Sélectionner le dossier UnselectDirectory Désélectionner le dossier runCmdForm Form Form Priority: Priorité : yyyy-mm-dd hh:mm:ss aaaa-mm-jj hh:mm:ss When: Quand : Where: Ou : Bootstrap: Bootstrap : Job: Travail : Storage: Dépôt : FileSet: Jeu de fichiers : Replace: Remplacer : To client: Vers le client : Catalog: Catalogue : OK OK Cancel Annuler <h3>Run Restore Job</h3> <h3>Lancer la restauration</h3> runCmdPage Restore Run Restauration never jamais ... mais il faudrait retraduire dans okButtonPushed. never always toujours ... mais il faudrait retraduire dans okButtonPushed. always ifnewer si nouveau ... mais il faudrait retraduire dans okButtonPushed. si nouveau ifolder si plus vieux ... mais il faudrait retraduire dans okButtonPushed. si ancien Canceled Annulé runForm Form Form Level: Niveau : Bootstrap: Bootstrap : yyyy-mm-dd hh:mm:ss aaaa-mm-jj h:mm:ss Job: Travail : Pool: Groupe : Type: Type : <h3>Backup<h3/> <h3>Sauvegarde</h3> FileSet: Jeu de fichiers : Messages: Messages : <h3>Run a Job</h3> <h3>Lancer un Travail</h3> Priority: Priorité : Client: Client : OK OK Cancel Annuler Storage: Dépôt : When: Quand : Run job Lancer le travail Job properties Propriétés du travail runPage Run Lancer Canceled Annulé selectDialog Canceled Annulé selectForm Selection dialog Dialogue de sélection textInputDialog Canceled Annulé textInputForm Selection dialog Dialogue de sélection TextLabel TextLabel Text input dialog Message yesnoPopUp Bat Question Bat Question bacula-15.0.3/src/qt-console/ts/bat_de.ts0000644000175000017500000055666014771010173017773 0ustar bsbuildbsbuild ClientForm Client Tree Refresh Client List Requery the director for the list of clients. List Jobs of Client Open a joblist page selecting this client. Status Client Purge Jobs Purge jobs peformed from this client. Prune Jobs Open the diaolog to prune for this client. 
ClientStat Client Status %1 Job Id Job Level Job Files Job Bytes Job Status Job Time Job Name ClientStatForm Form Running Header Refresh Timer Do Refresh <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:13pt; font-weight:600;">Terminated Jobs</span></p></body></html> Refresh Cancel Running Job Clients Clients Client Name File Retention Job Retention AutoPrune ClientId Uname Are you sure you want to purge all jobs of client "%1" ? The Purge command will delete associated Catalog database records from Jobs and Volumes without considering the retention period. Purge works only on the Catalog database and does not affect data written to Volumes. This command can be dangerous because you can delete catalog records associated with current backups of files, and we recommend that you do not use it unless you know what you are doing. Is there any way I can get you to click Cancel here? You really don't want to do this Press OK to proceed with the purge operation? Client Status %1 Console Console No Director found. Director not connected. Click on connect button. Director is currently disconnected Please reconnect! ConsoleForm Console StatusDir Console Help Request Messages Reload bacula-dir.conf Content Storage Status %1 ContentForm Form Actions Update slots Label Move to tray Empty tray Mount Unmount Status Release Drives Drive Volume Import/Export Slot Content Slot Volume Bytes Status Media Type Pool Last Written When expire? DirComm Already connected. DirStat Job Id Job Level Job Files Job Bytes Job Status Job Time Job Name Job Type Priority Volume Job Data Job Info Director Status DirStatForm Form Refresh Timer Do Refresh <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:13pt; font-weight:600;">Scheduled Jobs</span></p></body></html> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:13pt; font-weight:600;">Running Jobs</span></p></body></html> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:13pt; font-weight:600;">Terminated Jobs</span></p></body></html> Refresh Cancel Selected Running Jobs Disable Scheduled Job FileSet FileSets FileSet Id FileSet Name Create Time FileSetForm FileSet Tree Refresh FileSet List Requery the director for the list of storage objects. 
Show FileSet In Console ShowJobs Help Help: %1 Job Job Are you sure you want to delete?? !!!. This delete command is used to delete a Job record and all associated catalog records that were created. This command operates only on the Catalog database and has no effect on the actual data written to a Volume. This command can be dangerous and we strongly recommend that you do not use it unless you know what you are doing. The Job and all its associated records (File and JobMedia) will be deleted from the catalog.Press OK to proceed with delete operation.? Are you sure you want to cancel this job? Bat There were no results! It is possible you may need to add "catalog = all" to the Messages resource for this job. Error: JobForm Form Cancel Delete View errors for this Job Errors Media History Run again Read doc FileSet Stats Refresh Basic Information JobId: 2 Job Name: Test Level: VirtualFull Client: client-fd FileSet: TheFileSet Pool: ThePool Status Status: Errors: 0 Files: 1,924 Bytes: 109 MB Purged: Times Sched Time: 2009-07-31 00:10:00 Start Time: End Time: 2009-07-31 00:20:00 Duration: 00:10:00 Volume Used Vol0001 Running Information Speed: Files Examined: Current File: /var/www/bacula/spool 100,000 100 MB/s kB/s Bandwidth Limit: <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal;"> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:'DejaVu Sans'; font-size:10pt;"></p></body></html> JobList Any Job Id Job Name Client Job Starttime Job Type Job Level Job Files Job Bytes Job Status Purged File Set Pool Name First Volume VolCount Delete list of %1 Jobs Purge Files from list of %1 Jobs Delete Single Job Purge Files from single job Running Created, not yet running Backup The Jobs query returned no results. Press OK to continue? Jobs Run on Volume %1 Jobs Run from Client %1 Jobs Run of Job %1 Jobs Run with fileset %1 Jobs Run Are you sure you want to delete?? !!!. This delete command is used to delete a Job record and all associated catalog records that were created. This command operates only on the Catalog database and has no effect on the actual data written to a Volume. This command can be dangerous and we strongly recommend that you do not use it unless you know what you are doing. The Job and all its associated records (File and JobMedia) will be deleted from the catalog.Press OK to proceed with delete operation.? run job="%1" client="%2" level=%3 fileset="%1" Are you sure you want to purge ?? !!!. The Purge command will delete associated Catalog database records from Jobs and Volumes without considering the retention period. Purge works only on the Catalog database and does not affect data written to Volumes. This command can be dangerous because you can delete catalog records associated with current backups of files, and we recommend that you do not use it unless you know what you are doing. Press OK to proceed with the purge operation? JobListForm Form Refresh Graph FileSet Status Purged Record Limit Days Limit Clients Volume Job Level Pool Filter Copy Jobs Filter Migration Jobs Refresh Job List Requery the director for the list of jobs. 
List Job Volumes List Volumes Delete Job Purge Files Restart Job Show Job Log Show Job Info List Job Totals on Console List Files On Job Restore From Job Restore From Time Cancel Currently Running Job JobLog JobLog Bat There were no results! It is possible you may need to add "catalog = all" to the Messages resource for this job. Log records for job %1 Error: JobLogForm Job Log JobPlot JobPlot Sticks Lines Steps None Any The Jobs query returned no results. Press OK to continue? Files and Bytes backed up <-- Bytes Kb date of backup --> Number of Files --> Files Bytes Fitted Ellipse Rect Diamond Triangle DTrianle UTriangle LTriangle RTriangle Cross XCross HLine Vline Star1 Star2 Hexagon JobPlotControlsForm Form File Data Byte Data Refresh Status Level Purged FileSet Volume Client Job Days Limit Record Limit Byte Symbol Type File Symbol Type Graph Type Jobs Jobs Job Name Pool Messages Client Storage Where Level Type FileSet Catalog Enabled MainForm bat - Bacula Admin Tool Bacula Administration Tool It's a Dock widget to allow page selection Settings &Help &File Current Status Tool Bar Selects panel window Use items in this tree to select what window is in panel 1 Command: Click here to enter command &Quit Ctrl+Q &About bat &Copy Cu&t new open &Paste &Print Print &Save Save (not implemented) Connect Connect/disconnect Label Label a Volume Restore Restore Files Run Job Run a Job Estimate Job Estimate a Job Status Dir Query status of director Status Dir Query status of director in console &Select Font ... Undock Window Undock Current Window ToggleDock Toggle Dock Status Close Page Close The Current Page Messages Display any messages queued at the director &Preferences ... Set Preferences bat &Help Browse Browse Cataloged Files JobPlot Plot Job Files and Bytes Status Dir Page Director Status Page Repop Lists Reload and Repop back Previous Page MainWin Select Page Console Director Status Director not connected. Click on connect button. About bat <br><h2>Bacula Bat %1 (%2)</h2><p>Copyright &copy; 2007-%3 Kern Sibbald<p>The <b>bat</b> is an administrative console interface to the Director. Ready MediaEdit Media Edit No Volume name No Volume name given MediaInfo Media Info Are you sure you want to purge ?? !!!. The Purge command will delete associated Catalog database records from Jobs and Volumes without considering the retention period. Purge works only on the Catalog database and does not affect data written to Volumes. This command can be dangerous because you can delete catalog records associated with current backups of files, and we recommend that you do not use it unless you know what you are doing. Press OK to proceed with the purge operation? Are you sure you want to delete?? !!!. This delete command is used to delete a Volume record and all associated catalog records that were created. This command operates only on the Catalog database and has no effect on the actual data written to a Volume. This command can be dangerous and we strongly recommend that you do not use it unless you know what you are doing. All Jobs and all associated records (File and JobMedia) will be deleted from the catalog.Press OK to proceed with delete operation.? MediaList Volume Name Id Status Enabled Bytes Files Jobs Retention Media Type Slot Use Duration Max Jobs Max Files Max Bytes Recycle Last Written Pools First Written Read Time Write Time Recycle Count Recycle Pool Are you sure you want to delete?? !!!. This delete command is used to delete a Volume record and all associated catalog records that were created. 
This command operates only on the Catalog database and has no effect on the actual data written to a Volume. This command can be dangerous and we strongly recommend that you do not use it unless you know what you are doing. All Jobs and all associated records (File and JobMedia) will be deleted from the catalog.Press OK to proceed with delete operation.? Are you sure you want to purge ?? !!!. The Purge command will delete associated Catalog database records from Jobs and Volumes without considering the retention period. Purge works only on the Catalog database and does not affect data written to Volumes. This command can be dangerous because you can delete catalog records associated with current backups of files, and we recommend that you do not use it unless you know what you are doing. Press OK to proceed with the purge operation? MediaListForm Media Tree Refresh Media List Requery the director for the list of media. Edit Volume List Jobs On Volume Delete Volume Prune Volume Purge Volume Relabel Volume Update all Volumes From Pool Update all Volumes from all Pools Volume From Pool MediaView Media Are you sure you want to purge ?? !!!. The Purge command will delete associated Catalog database records from Jobs and Volumes without considering the retention period. Purge works only on the Catalog database and does not affect data written to Volumes. This command can be dangerous because you can delete catalog records associated with current backups of files, and we recommend that you do not use it unless you know what you are doing. Press OK to proceed with the purge operation? Are you sure you want to delete?? !!!. This delete command is used to delete a Volume record and all associated catalog records that were created. This command operates only on the Catalog database and has no effect on the actual data written to a Volume. This command can be dangerous and we strongly recommend that you do not use it unless you know what you are doing. All Jobs and all associated records (File and JobMedia) will be deleted from the catalog.Press OK to proceed with delete operation.? MediaViewForm Form Edit Purge Delete Prune Filter Media Type: Status: Limit: Name: Pool: Location: Expired Apply Volume Name Online Slot Vol Bytes Vol Usage Vol Status Pool Media Type Last Written When expire? 
Pages %1 of Director %2 UnDock %1 Window ReDock %1 Window Console PrefsForm Preferences Messages Options Message check interval in seconds Check Messages Joblist Joblist Limit Options Days Limit Record Limit Misc Convert Convert Off Context Sensitive List Commands Execute Long List Open Plot page on startup Open Browser page on startup Open Director Status page on startup Debug Debugging Options Debug comm Display all messages in console Debug Commands Debug Miscellaneous Items Debug Sql queries Timers Display Bytes using IEC units (1024B = 1 KiB) Display Bytes using SI units (1000B = 1KB) Open Pages Debug multiple connection RestoreTree Restore Debug 2 Directory Item Changed Restore Debug 1 Directory Current Item Changed Debug Update File Table Debug Version Table Item Changed Debug File Table Item Changed Debug Icon State Debug Update Checks Debug Restore Debug 3 Update Version Table Debug Populate Directory Debug <h2>Preferences</h2> QObject Any No Yes Invalid job status %1 StorStat Storage Status %1 Job Id Job Level Job Files Job Bytes Job Status Job Time Job Name StorStatForm Form Header Waitreservation Devices Volumes Spooling Running Misc Mount UMount Label Release Refresh Timer Do Refresh <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:13pt; font-weight:600;">Terminated Jobs</span></p></body></html> Refresh Cancel Running Job Disable Scheduled Job Storage Storage Name Id Changer Slot Status Enabled Pool Media Type Yes Status Storage "%1" Status Storage "%1" in Window Label media in Storage "%1" Mount media in Storage "%1" "UN" Mount media in Storage "%1" Release media in Storage "%1" Barcode Scan media in Storage "%1" Read scan media in Storage "%1" Storage Status %1 StorageForm Storage Tree 1 Refresh Storage List Requery the director for the list of storage objects. Status Storage In Console Label Storage MountStorage UnMount Storage Update Slots Update Slots Scan Release Status Storage Window bRestore bRestore bRestoreForm brestore Merge Jobs View all Versions Current Directory File list -- drag below for restore Double Click File Name to decend Type File Name Double Click to decend Size Date Selection Range - File Filter File revisions -- drag below for restore <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Drag and drop <span style=" font-weight:600;">File list</span> and/or <span style=" font-weight:600;">File revisions</span> items here for Restore</p></body></html> Restore... 
Select from Client list drop down then select from Job list drop down InChanger Volume JobId Chksum Clear Estimate FileName FileIndex Nb Files bRunRestoreForm Run restore Standard Restore options Client: Where: Replace: Comment: Media needed InChanger Volume Compute with directories Advanced File Relocation Use file relocation: Strip prefix: Add prefix: Add suffix: Use regexp: Where regexp: Other options When: Priority: Storage: Job: estimateForm Form List Files Level: Client: Job: FileSet: <h3>Estimate a backup Job</h3> OK Cancel estimatePage Estimate helpForm Form &Home &Back Close jobsForm Client Tree Refresh Jobs List Requery the director for the list of clients. List Files Command List Volumes Command List Next Volume Command Enable Job Command Disable Job Command Open JobList on Job Cancel Job Command RunJob labelForm Form OK Cancel Volume Name: Storage: Enter Name of Volume to create Slot: Execute Automount On Off Pool: <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:12pt; font-weight:600;">Label a Volume</span></p></body></html> mediaEditForm Form Pool: Volume Status: Max Volume Bytes: Slot: Max Volume Jobs: Use Duration: OK Cancel Retention: Recycle Pool: Enabled Max Volume Files: Years Seconds Use Duration Days Hours Months Retention Minutes Volume : Recycle <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:12pt; font-weight:600;">Edit a Volume</span></p></body></html> mediaInfoForm Form Edit Purge Delete Prune Load Unload Information Name: Vol0001 Pool: Default Online: Enabled: yes Location: Vault Status: Append Media Type: File Recycle Pool: Scratch Statistics Vol Bytes: 19.8 MB Vol Mounts: 10 Recycle count: 5 Read time: 10 mins Write time: 20 mins Errors: 0 Last Written: 2009-07-05 12:23:00 First Written: 2009-06-05 10:00:00 Limits Use duration: Max jobs: Max files: Max bytes: Recycle: Retention: 365 days Expire: 2010-08-03 23:10:03 Jobs JobId Name Start Time Type Level Files Bytes Status mountDialog Storage : %1 No Storage name No Storage name given Context sensitive command : Director Response : mountForm Label TextLabel Slot: <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:12pt; font-weight:600;">Mount a Slot</span></p></body></html> prefsDialog Canceled prerestoreForm Form All Files Select Files Job JobIds yyyy-mm-dd h:mm:ss Use Most Recent File Set: Client: Storage: Before: Pool: OK Cancel <h3>Select Jobs</h3> prerestorePage Restore Any Comma separated list of JobIds Canceled There can be no spaces in the text for the joblist. Press OK to continue? 
The string is not a comma separated list of integers. Press OK to continue? Bat At least one of the jobs is not a valid job of type "Backup". Press OK to continue? All jobs in the list must be of the same jobName and same client. Press OK to continue? pruneForm Form Prune Files Volume: <h3>Prune Files/Jobs/Volumes</h3> Prune Jobs OK Cancel Client: Prune Volumes prunePage Prune Any Canceled relabelDialog From Volume : No Volume name No Volume name given New name must be different relabelForm Label From Volume : Pool: Storage: New Volume Name: Slot: <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:12pt; font-weight:600;">Relabel a Volume</span></p></body></html> restoreForm Form Select a Directory Directories 1 2 3 4 5 6 <h3>Restore Select</h3> Up Mark Unmark <h2>Files</h2> Status: Current Dir: OK Cancel UnMark restorePage Restore Select Mark File Mode User Group Size Date In addDirectory cwd "%1" newdir "%2" fullpath "%3" Nothing selected, nothing done cd command failed .pwd command failed In else of if parent cwd "%1" newdir "%2" Canceled restoreTree Version Browser Directories Any Refresh From Re-Select Refresh From JobChecks Querying Database Querying Jobs Querying for Directories Processing Directories File Name Filename Id Task %1 of %2 No jobs were selected in the job query !!!. Press OK to continue Present Working Directory: %1 Job Id Type End Time Hash FileId Job Type First Volume RestoreTreePage Level Name Purged TU TD Task 1 of 3 Processing Checked directories Task 2 of 3 Processing Exceptions Task 3 of 3 Filling Database Table restoreTreeForm Form Jobs TextLabel Files Versions of File FileName Refresh Restore Job Job List Job Criterion Selector Client Job List Client Criterion Selector FileSet Job List Fileset Criterion Selector Record Limit Days Limit Directory Select Directory UnselectDirectory runCmdForm Form Priority: yyyy-mm-dd hh:mm:ss When: Where: Bootstrap: Job: Storage: FileSet: Replace: To client: Catalog: OK Cancel <h3>Run Restore Job</h3> runCmdPage Restore Run never always ifnewer ifolder Canceled runForm Level: Bootstrap: yyyy-mm-dd hh:mm:ss Job: Pool: Type: Run job Job properties <h3>Backup<h3/> FileSet: Messages: <h3>Run a Job</h3> Priority: Client: OK Cancel Storage: When: runPage Run Canceled selectDialog Canceled selectForm Selection dialog textInputDialog Canceled textInputForm Text input dialog Message yesnoPopUp Bat Question bacula-15.0.3/src/qt-console/jobgraphs/0000755000175000017500000000000014771010173017514 5ustar bsbuildbsbuildbacula-15.0.3/src/qt-console/jobgraphs/jobplotcontrols.ui0000644000175000017500000002061514771010173023314 0ustar bsbuildbsbuild JobPlotControlsForm 0 0 308 535 5 0 0 0 0 0 Form 9 6 Qt::Vertical 236 16 0 6 Qt::Horizontal 21 20 0 6 File Data Byte Data Qt::Horizontal 21 20 0 6 Qt::Horizontal 40 20 4 0 0 0 82 30 Refresh :/images/view-refresh.png Qt::Horizontal 40 20 5 0 0 0 7 5 0 0 0 10000 1 25 Status Level Purged 16777215 20 FileSet Volume Client Job Days Limit 0 0 0 0 Record Limit Byte Symbol Type File Symbol Type Graph Type bacula-15.0.3/src/qt-console/jobgraphs/jobplot.cpp0000644000175000017500000004531314771010173021677 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 
2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * JobPlots Class * * Dirk Bartley, March 2007 * */ #include "bat.h" #if QT_VERSION >= 0x050000 #include #else #include #endif #include "util/comboutil.h" #include "jobgraphs/jobplot.h" JobPlotPass::JobPlotPass() { use = false; } JobPlotPass& JobPlotPass::operator=(const JobPlotPass &cp) { use = cp.use; recordLimitCheck = cp.recordLimitCheck; daysLimitCheck = cp.daysLimitCheck; recordLimitSpin = cp.recordLimitSpin; daysLimitSpin = cp.daysLimitSpin; jobCombo = cp.jobCombo; clientCombo = cp.clientCombo; volumeCombo = cp.volumeCombo; fileSetCombo = cp.fileSetCombo; purgedCombo = cp.purgedCombo; levelCombo = cp.levelCombo; statusCombo = cp.statusCombo; return *this; } /* * Constructor for the controls class which inherits QScrollArea and a ui header */ JobPlotControls::JobPlotControls() { setupUi(this); } /* * Constructor, this class does not inherit anything but pages. */ JobPlot::JobPlot(QTreeWidgetItem *parentTreeWidgetItem, JobPlotPass &passVals) : Pages() { setupUserInterface(); pgInitialize(tr("JobPlot"), parentTreeWidgetItem); readSplitterSettings(); QTreeWidgetItem* thisitem = mainWin->getFromHash(this); thisitem->setIcon(0,QIcon(QString::fromUtf8(":images/applications-graphics.png"))); m_drawn = false; /* this invokes the pass values = operator function */ m_pass = passVals; dockPage(); /* If the values of the controls are predetermined (from joblist), then set * this class as current window at the front of the stack */ if (m_pass.use) setCurrent(); m_jobPlot->replot(); } /* * Kill, crush Destroy */ JobPlot::~JobPlot() { if (m_drawn) writeSettings(); m_pjd.clear(); } /* * This is called when the page selector has this page selected */ void JobPlot::currentStackItem() { if (!m_drawn) { setupControls(); reGraph(); m_drawn=true; } } /* * Slot for the refresh push button, also called from constructor. 
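 * It is wired to the button in setupControls() with
 *   connect(controls->refreshButton, SIGNAL(pressed()), this, SLOT(reGraph()));
 * and is also invoked from currentStackItem() the first time the page is drawn.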
*/ void JobPlot::reGraph() { /* clear m_pjd */ m_pjd.clear(); runQuery(); m_jobPlot->clear(); addCurve(); m_jobPlot->replot(); } /* * Setup the control widgets for the graph, this are the objects from JobPlotControls */ void JobPlot::setupControls() { QStringList graphType = QStringList() << /* tr("Fitted") <<*/ tr("Sticks") << tr("Lines") << tr("Steps") << tr("None"); controls->plotTypeCombo->addItems(graphType); fillSymbolCombo(controls->fileSymbolTypeCombo); fillSymbolCombo(controls->byteSymbolTypeCombo); readControlSettings(); controls->fileCheck->setCheckState(Qt::Checked); controls->byteCheck->setCheckState(Qt::Checked); connect(controls->plotTypeCombo, SIGNAL(currentIndexChanged(QString)), this, SLOT(setPlotType(QString))); connect(controls->fileSymbolTypeCombo, SIGNAL(currentIndexChanged(int)), this, SLOT(setFileSymbolType(int))); connect(controls->byteSymbolTypeCombo, SIGNAL(currentIndexChanged(int)), this, SLOT(setByteSymbolType(int))); connect(controls->fileCheck, SIGNAL(stateChanged(int)), this, SLOT(fileCheckChanged(int))); connect(controls->byteCheck, SIGNAL(stateChanged(int)), this, SLOT(byteCheckChanged(int))); connect(controls->refreshButton, SIGNAL(pressed()), this, SLOT(reGraph())); controls->clientComboBox->addItem(tr("Any")); controls->clientComboBox->addItems(m_console->client_list); QStringList volumeList; getVolumeList(volumeList); controls->volumeComboBox->addItem(tr("Any")); controls->volumeComboBox->addItems(volumeList); controls->jobComboBox->addItem(tr("Any")); controls->jobComboBox->addItems(m_console->job_list); levelComboFill(controls->levelComboBox); boolComboFill(controls->purgedComboBox); controls->fileSetComboBox->addItem(tr("Any")); controls->fileSetComboBox->addItems(m_console->fileset_list); QStringList statusLongList; getStatusList(statusLongList); controls->statusComboBox->addItem(tr("Any")); controls->statusComboBox->addItems(statusLongList); if (m_pass.use) { controls->limitCheckBox->setCheckState(m_pass.recordLimitCheck); controls->limitSpinBox->setValue(m_pass.recordLimitSpin); controls->daysCheckBox->setCheckState(m_pass.daysLimitCheck); controls->daysSpinBox->setValue(m_pass.daysLimitSpin); comboSel(controls->jobComboBox, m_pass.jobCombo); comboSel(controls->clientComboBox, m_pass.clientCombo); comboSel(controls->volumeComboBox, m_pass.volumeCombo); comboSel(controls->fileSetComboBox, m_pass.fileSetCombo); comboSel(controls->purgedComboBox, m_pass.purgedCombo); comboSel(controls->levelComboBox, m_pass.levelCombo); comboSel(controls->statusComboBox, m_pass.statusCombo); } else { /* Set Defaults for check and spin for limits */ controls->limitCheckBox->setCheckState(mainWin->m_recordLimitCheck ? Qt::Checked : Qt::Unchecked); controls->limitSpinBox->setValue(mainWin->m_recordLimitVal); controls->daysCheckBox->setCheckState(mainWin->m_daysLimitCheck ? 
Qt::Checked : Qt::Unchecked); controls->daysSpinBox->setValue(mainWin->m_daysLimitVal); } } /* * Setup the control widgets for the graph, this are the objects from JobPlotControls */ void JobPlot::runQuery() { /* Set up query */ QString query(""); query += "SELECT DISTINCT " " Job.Starttime AS JobStart," " Job.Jobfiles AS FileCount," " Job.JobBytes AS Bytes," " Job.JobId AS JobId" " FROM Job" " JOIN Client ON (Client.ClientId=Job.ClientId)" " JOIN Status ON (Job.JobStatus=Status.JobStatus)" " LEFT OUTER JOIN FileSet ON (FileSet.FileSetId=Job.FileSetId)"; QStringList conditions; comboCond(conditions, controls->jobComboBox, "Job.Name"); comboCond(conditions, controls->clientComboBox, "Client.Name"); int volumeIndex = controls->volumeComboBox->currentIndex(); if ((volumeIndex != -1) && (controls->volumeComboBox->itemText(volumeIndex) != tr("Any"))) { query += " LEFT OUTER JOIN JobMedia ON (JobMedia.JobId=Job.JobId)" " LEFT OUTER JOIN Media ON (JobMedia.MediaId=Media.MediaId)"; conditions.append("Media.VolumeName='" + controls->volumeComboBox->itemText(volumeIndex) + "'"); } comboCond(conditions, controls->fileSetComboBox, "FileSet.FileSet"); boolComboCond(conditions, controls->purgedComboBox, "Job.PurgedFiles"); levelComboCond(conditions, controls->levelComboBox, "Job.Level"); comboCond(conditions, controls->statusComboBox, "Status.JobStatusLong"); /* If Limit check box For limit by days is checked */ if (controls->daysCheckBox->checkState() == Qt::Checked) { QDateTime stamp = QDateTime::currentDateTime().addDays(-controls->daysSpinBox->value()); QString since = stamp.toString(Qt::ISODate); conditions.append("Job.Starttime>'" + since + "'"); } bool first = true; foreach (QString condition, conditions) { if (first) { query += " WHERE " + condition; first = false; } else { query += " AND " + condition; } } /* Descending */ query += " ORDER BY Job.Starttime DESC, Job.JobId DESC"; /* If Limit check box for limit records returned is checked */ if (controls->limitCheckBox->checkState() == Qt::Checked) { QString limit; limit.setNum(controls->limitSpinBox->value()); query += " LIMIT " + limit; } if (mainWin->m_sqlDebug) { Pmsg1(000, "Query cmd : %s\n",query.toUtf8().data()); } QString resultline; QStringList results; if (m_console->sql_cmd(query, results)) { QString field; QStringList fieldlist; int row = 0; /* Iterate through the record returned from the query */ foreach (resultline, results) { PlotJobData *plotJobData = new PlotJobData(); fieldlist = resultline.split("\t"); int column = 0; QString statusCode(""); /* Iterate through fields in the record */ foreach (field, fieldlist) { field = field.trimmed(); /* strip leading & trailing spaces */ if (column == 0) { plotJobData->dt = QDateTime::fromString(field, mainWin->m_dtformat); } else if (column == 1) { plotJobData->files = field.toDouble(); } else if (column == 2) { plotJobData->bytes = field.toDouble(); } column++; m_pjd.prepend(plotJobData); } row++; } } if ((controls->volumeComboBox->itemText(volumeIndex) != tr("Any")) && (results.count() == 0)){ /* for context sensitive searches, let the user know if there were no * * results */ QMessageBox::warning(this, "Bat", tr("The Jobs query returned no results.\n" "Press OK to continue?"), QMessageBox::Ok ); } } /* * The user interface that used to be in the ui header. I wanted to have a * scroll area which is not in designer. 
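 * The hand-built layout is a QGridLayout holding a horizontal QSplitter,
 * with the QwtPlot on one side and the JobPlotControls panel wrapped in a
 * QScrollArea on the other.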
*/ void JobPlot::setupUserInterface() { QSizePolicy sizePolicy(static_cast(1), static_cast(5)); sizePolicy.setHorizontalStretch(0); sizePolicy.setVerticalStretch(0); sizePolicy.setVerticalStretch(0); sizePolicy.setVerticalPolicy(QSizePolicy::Ignored); sizePolicy.setHorizontalPolicy(QSizePolicy::Ignored); m_gridLayout = new QGridLayout(this); m_gridLayout->setSpacing(6); m_gridLayout->setMargin(9); m_gridLayout->setObjectName(QString::fromUtf8("m_gridLayout")); m_splitter = new QSplitter(this); m_splitter->setObjectName(QString::fromUtf8("m_splitter")); m_splitter->setOrientation(Qt::Horizontal); m_jobPlot = new QwtPlot(m_splitter); m_jobPlot->setObjectName(QString::fromUtf8("m_jobPlot")); m_jobPlot->setSizePolicy(sizePolicy); m_jobPlot->setMinimumSize(QSize(0, 0)); QScrollArea *area = new QScrollArea(m_splitter); area->setObjectName(QString::fromUtf8("area")); controls = new JobPlotControls(); area->setWidget(controls); m_splitter->addWidget(m_jobPlot); m_splitter->addWidget(area); m_gridLayout->addWidget(m_splitter, 0, 0, 1, 1); } /* * Add the curves to the plot */ void JobPlot::addCurve() { m_jobPlot->setTitle(tr("Files and Bytes backed up")); m_jobPlot->insertLegend(new QwtLegend(), QwtPlot::RightLegend); // Set axis titles m_jobPlot->enableAxis(QwtPlot::yRight); m_jobPlot->setAxisTitle(QwtPlot::yRight, tr("<-- Bytes Kb")); m_jobPlot->setAxisTitle(m_jobPlot->xBottom, tr("date of backup -->")); m_jobPlot->setAxisTitle(m_jobPlot->yLeft, tr("Number of Files -->")); m_jobPlot->setAxisScaleDraw(QwtPlot::xBottom, new DateTimeScaleDraw()); // Insert new curves m_fileCurve = new QwtPlotCurve( tr("Files") ); m_fileCurve->setPen(QPen(Qt::red)); m_fileCurve->setCurveType(m_fileCurve->Yfx); m_fileCurve->setYAxis(QwtPlot::yLeft); m_byteCurve = new QwtPlotCurve(tr("Bytes")); m_byteCurve->setPen(QPen(Qt::blue)); m_byteCurve->setCurveType(m_byteCurve->Yfx); m_byteCurve->setYAxis(QwtPlot::yRight); setPlotType(controls->plotTypeCombo->currentText()); setFileSymbolType(controls->fileSymbolTypeCombo->currentIndex()); setByteSymbolType(controls->byteSymbolTypeCombo->currentIndex()); m_fileCurve->attach(m_jobPlot); m_byteCurve->attach(m_jobPlot); // attach data int size = m_pjd.count(); int j = 0; #if defined(__GNU_C) double tval[size]; double fval[size]; double bval[size]; #else double *tval; double *fval; double *bval; tval = (double *)malloc(size * sizeof(double)); fval = (double *)malloc(size * sizeof(double)); bval = (double *)malloc(size * sizeof(double)); #endif foreach (PlotJobData* plotJobData, m_pjd) { // printf("%.0f %.0f %s\n", plotJobData->bytes, plotJobData->files, // plotJobData->dt.toString(mainWin->m_dtformat).toUtf8().data()); fval[j] = plotJobData->files; bval[j] = plotJobData->bytes / 1024; tval[j] = plotJobData->dt.toTime_t(); // printf("%i %.0f %.0f %.0f\n", j, tval[j], fval[j], bval[j]); j++; } m_fileCurve->setData(tval,fval,size); m_byteCurve->setData(tval,bval,size); for (int year=2000; year<2010; year++) { for (int month=1; month<=12; month++) { QString monthBegin; if (month > 9) { QTextStream(&monthBegin) << year << "-" << month << "-01 00:00:00"; } else { QTextStream(&monthBegin) << year << "-0" << month << "-01 00:00:00"; } QDateTime mdt = QDateTime::fromString(monthBegin, mainWin->m_dtformat); double monbeg = mdt.toTime_t(); // ...a vertical line at the first of each month QwtPlotMarker *mX = new QwtPlotMarker(); mX->setLabel(mdt.toString("MMM-d")); mX->setLabelAlignment(Qt::AlignRight|Qt::AlignTop); mX->setLineStyle(QwtPlotMarker::VLine); QPen pen(Qt::darkGray); 
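      // a dashed dark-gray pen so the month-boundary markers stand out
      // from the red (files) and blue (bytes) data curves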
pen.setStyle(Qt::DashDotDotLine); mX->setLinePen(pen); mX->setXValue(monbeg); mX->attach(m_jobPlot); } } #if !defined(__GNU_C) free(tval); free(fval); free(bval); #endif } /* * slot to respond to the plot type combo changing */ void JobPlot::setPlotType(QString currentText) { QwtPlotCurve::CurveStyle style = QwtPlotCurve::NoCurve; if (currentText == tr("Fitted")) { style = QwtPlotCurve::Lines; m_fileCurve->setCurveAttribute(QwtPlotCurve::Fitted); m_byteCurve->setCurveAttribute(QwtPlotCurve::Fitted); } else if (currentText == tr("Sticks")) { style = QwtPlotCurve::Sticks; } else if (currentText == tr("Lines")) { style = QwtPlotCurve::Lines; m_fileCurve->setCurveAttribute(QwtPlotCurve::Fitted); m_byteCurve->setCurveAttribute(QwtPlotCurve::Fitted); } else if (currentText == tr("Steps")) { style = QwtPlotCurve::Steps; } else if (currentText == tr("None")) { style = QwtPlotCurve::NoCurve; } m_fileCurve->setStyle(style); m_byteCurve->setStyle(style); m_jobPlot->replot(); } void JobPlot::fillSymbolCombo(QComboBox *q) { q->addItem( tr("Ellipse"), (int)QwtSymbol::Ellipse); q->addItem( tr("Rect"), (int)QwtSymbol::Rect); q->addItem( tr("Diamond"), (int)QwtSymbol::Diamond); q->addItem( tr("Triangle"), (int)QwtSymbol::Triangle); q->addItem( tr("DTrianle"), (int)QwtSymbol::DTriangle); q->addItem( tr("UTriangle"), (int)QwtSymbol::UTriangle); q->addItem( tr("LTriangle"), (int)QwtSymbol::LTriangle); q->addItem( tr("RTriangle"), (int)QwtSymbol::RTriangle); q->addItem( tr("Cross"), (int)QwtSymbol::Cross); q->addItem( tr("XCross"), (int)QwtSymbol::XCross); q->addItem( tr("HLine"), (int)QwtSymbol::HLine); q->addItem( tr("Vline"), (int)QwtSymbol::VLine); q->addItem( tr("Star1"), (int)QwtSymbol::Star1); q->addItem( tr("Star2"), (int)QwtSymbol::Star2); q->addItem( tr("Hexagon"), (int)QwtSymbol::Hexagon); q->addItem( tr("None"), (int)QwtSymbol::NoSymbol); } /* * slot to respond to the symbol type combo changing */ void JobPlot::setFileSymbolType(int index) { setSymbolType(index, 0); } void JobPlot::setByteSymbolType(int index) { setSymbolType(index, 1); } void JobPlot::setSymbolType(int index, int type) { QwtSymbol sym; sym.setPen(QColor(Qt::black)); sym.setSize(7); QVariant style; if (0 == type) { style = controls->fileSymbolTypeCombo->itemData(index); sym.setStyle( (QwtSymbol::Style)style.toInt() ); sym.setBrush(QColor(Qt::yellow)); m_fileCurve->setSymbol(sym); } else { style = controls->byteSymbolTypeCombo->itemData(index); sym.setStyle( (QwtSymbol::Style)style.toInt() ); sym.setBrush(QColor(Qt::blue)); m_byteCurve->setSymbol(sym); } m_jobPlot->replot(); } /* * slot to respond to the file check box changing state */ void JobPlot::fileCheckChanged(int newstate) { if (newstate == Qt::Unchecked) { m_fileCurve->detach(); m_jobPlot->enableAxis(QwtPlot::yLeft, false); } else { m_fileCurve->attach(m_jobPlot); m_jobPlot->enableAxis(QwtPlot::yLeft); } m_jobPlot->replot(); } /* * slot to respond to the byte check box changing state */ void JobPlot::byteCheckChanged(int newstate) { if (newstate == Qt::Unchecked) { m_byteCurve->detach(); m_jobPlot->enableAxis(QwtPlot::yRight, false); } else { m_byteCurve->attach(m_jobPlot); m_jobPlot->enableAxis(QwtPlot::yRight); } m_jobPlot->replot(); } /* * Save user settings associated with this page */ void JobPlot::writeSettings() { QSettings settings(m_console->m_dir->name(), "bat"); settings.beginGroup("JobPlot"); settings.setValue("m_splitterSizes", m_splitter->saveState()); settings.setValue("fileSymbolTypeCombo", controls->fileSymbolTypeCombo->currentText()); 
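   // the symbol and plot-type choices saved in this group are restored by
   // readControlSettings(), which falls back to fixed default indexes when
   // a key is missing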
settings.setValue("byteSymbolTypeCombo", controls->byteSymbolTypeCombo->currentText()); settings.setValue("plotTypeCombo", controls->plotTypeCombo->currentText()); settings.endGroup(); } /* * Read settings values for Controls */ void JobPlot::readControlSettings() { QSettings settings(m_console->m_dir->name(), "bat"); settings.beginGroup("JobPlot"); int fileSymbolTypeIndex = controls->fileSymbolTypeCombo->findText(settings.value("fileSymbolTypeCombo").toString(), Qt::MatchExactly); if (fileSymbolTypeIndex == -1) fileSymbolTypeIndex = 2; controls->fileSymbolTypeCombo->setCurrentIndex(fileSymbolTypeIndex); int byteSymbolTypeIndex = controls->byteSymbolTypeCombo->findText(settings.value("byteSymbolTypeCombo").toString(), Qt::MatchExactly); if (byteSymbolTypeIndex == -1) byteSymbolTypeIndex = 3; controls->byteSymbolTypeCombo->setCurrentIndex(byteSymbolTypeIndex); int plotTypeIndex = controls->plotTypeCombo->findText(settings.value("plotTypeCombo").toString(), Qt::MatchExactly); if (plotTypeIndex == -1) plotTypeIndex = 2; controls->plotTypeCombo->setCurrentIndex(plotTypeIndex); settings.endGroup(); } /* * Read and restore user settings associated with this page */ void JobPlot::readSplitterSettings() { QSettings settings(m_console->m_dir->name(), "bat"); settings.beginGroup("JobPlot"); if (settings.contains("m_splitterSizes")) { m_splitter->restoreState(settings.value("m_splitterSizes").toByteArray()); } settings.endGroup(); } bacula-15.0.3/src/qt-console/jobgraphs/jobplot.h0000644000175000017500000000662714771010173021351 0ustar bsbuildbsbuild#ifndef _JOBPLOT_H_ #define _JOBPLOT_H_ /* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Dirk Bartley, March 2007 */ #if QT_VERSION >= 0x050000 #include #else #include #endif #include "pages.h" #include "ui_jobplotcontrols.h" #include #include #include #include #include #include #include #include #include #include /* * Structure to hold data items of jobs when and how much. * If I worked at it I could eliminate this. It's just the way it evolved. */ struct PlotJobData { double files; double bytes; QDateTime dt; }; /* * Class for the purpose of having a single object to pass data to the JobPlot * Constructor. The other option was a constructor with this many passed * values or some sort of code to parse a list. I liked this best at the time. */ class JobPlotPass { public: JobPlotPass(); JobPlotPass& operator=(const JobPlotPass&); bool use; Qt::CheckState recordLimitCheck; Qt::CheckState daysLimitCheck; int recordLimitSpin; int daysLimitSpin; QString jobCombo; QString clientCombo; QString volumeCombo; QString fileSetCombo; QString purgedCombo; QString levelCombo; QString statusCombo; }; /* * Class to Change the display of the time scale to display dates. */ class DateTimeScaleDraw : public QwtScaleDraw { public: virtual QwtText label(double v) const { QDateTime dtlabel(QDateTime::fromTime_t((uint)v)); return dtlabel.toString("M-d-yy"); } }; /* * These are the user interface control widgets as a separate class. 
* Separately for the purpos of having the controls in a Scroll Area. */ class JobPlotControls : public QWidget, public Ui::JobPlotControlsForm { Q_OBJECT public: JobPlotControls(); }; /* * The main class */ class JobPlot : public Pages { Q_OBJECT public: JobPlot(QTreeWidgetItem *parentTreeWidgetItem, JobPlotPass &); ~JobPlot(); virtual void currentStackItem(); private slots: void setPlotType(QString); void setFileSymbolType(int); void setByteSymbolType(int); void fileCheckChanged(int); void byteCheckChanged(int); void reGraph(); private: void fillSymbolCombo(QComboBox *q); void setSymbolType(int, int type); void addCurve(); void writeSettings(); void readSplitterSettings(); void readControlSettings(); void setupControls(); void runQuery(); bool m_drawn; JobPlotPass m_pass; JobPlotControls* controls; QList m_pjd; QwtPlotCurve *m_fileCurve; QwtPlotCurve *m_byteCurve; /* from the user interface before using scroll area */ void setupUserInterface(); QGridLayout *m_gridLayout; QSplitter *m_splitter; QwtPlot *m_jobPlot; }; #endif /* _JOBPLOT_H_ */ bacula-15.0.3/src/qt-console/select/0000755000175000017500000000000014771010173017014 5ustar bsbuildbsbuildbacula-15.0.3/src/qt-console/select/textinput.ui0000644000175000017500000000705214771010173021423 0ustar bsbuildbsbuild textInputForm Qt::ApplicationModal 0 0 430 96 Qt::StrongFocus Text input dialog 0 0 50 0 100 16777215 Qt::TabFocus 0 0 Message 5 Qt::Horizontal QSizePolicy::Expanding 26 9 0 0 16777215 38 Qt::Horizontal QDialogButtonBox::Cancel|QDialogButtonBox::Ok buttonBox accepted() textInputForm accept() 248 254 157 274 buttonBox rejected() textInputForm reject() 316 260 286 274 bacula-15.0.3/src/qt-console/select/select.cpp0000644000175000017500000000561714771010173021010 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * Select dialog class * * Kern Sibbald, March MMVII * */ #include "bat.h" #include "select.h" /* * Read the items for the selection */ selectDialog::selectDialog(Console *console, int conn) : QDialog() { m_conn = conn; QDateTime dt; int stat; QListWidgetItem *item; int row = 0; m_console = console; m_console->notify(m_conn, false); setupUi(this); connect(listBox, SIGNAL(currentRowChanged(int)), this, SLOT(index_change(int))); setAttribute(Qt::WA_DeleteOnClose); m_console->read(m_conn); /* get title */ labelWidget->setText(m_console->msg(m_conn)); while ((stat=m_console->read(m_conn)) > 0) { item = new QListWidgetItem; item->setText(m_console->msg(m_conn)); listBox->insertItem(row++, item); } m_console->displayToPrompt(m_conn); this->show(); } void selectDialog::accept() { char cmd[100]; this->hide(); bsnprintf(cmd, sizeof(cmd), "%d", m_index+1); m_console->write_dir(m_conn, cmd); m_console->displayToPrompt(m_conn); this->close(); mainWin->resetFocus(); m_console->displayToPrompt(m_conn); m_console->notify(m_conn, true); } void selectDialog::reject() { this->hide(); mainWin->set_status(tr(" Canceled")); this->close(); mainWin->resetFocus(); m_console->beginNewCommand(m_conn); m_console->notify(m_conn, true); } /* * Called here when the jobname combo box is changed. * We load the default values for the new job in the * other combo boxes. */ void selectDialog::index_change(int index) { m_index = index; } /* * Handle yesno PopUp when Bacula asks a yes/no question. */ /* * Read the items for the selection */ yesnoPopUp::yesnoPopUp(Console *console, int conn) : QDialog() { QMessageBox msgBox; setAttribute(Qt::WA_DeleteOnClose); console->read(conn); /* get yesno question */ msgBox.setWindowTitle(tr("Bat Question")); msgBox.setText(console->msg(conn)); msgBox.setStandardButtons(QMessageBox::Yes | QMessageBox::No); console->displayToPrompt(conn); switch (msgBox.exec()) { case QMessageBox::Yes: console->write_dir(conn, "yes"); break; case QMessageBox::No: console->write_dir(conn, "no"); break; } console->displayToPrompt(conn); mainWin->resetFocus(); } bacula-15.0.3/src/qt-console/select/select.ui0000644000175000017500000000355014771010173020635 0ustar bsbuildbsbuild selectForm Qt::NonModal 0 0 377 323 Selection dialog 9 6 Qt::Horizontal QDialogButtonBox::Cancel|QDialogButtonBox::NoButton|QDialogButtonBox::Ok buttonBox accepted() selectForm accept() 248 254 157 274 buttonBox rejected() selectForm reject() 316 260 286 274 bacula-15.0.3/src/qt-console/select/textinput.cpp0000644000175000017500000000317114771010173021566 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * Select dialog class * * Kern Sibbald, March MMVII * */ #include "bat.h" #include "textinput.h" /* * Read input text box */ textInputDialog::textInputDialog(Console *console, int conn) { m_conn = conn; QDateTime dt; m_console = console; m_console->notify(m_conn, false); setupUi(this); setAttribute(Qt::WA_DeleteOnClose); m_console->read(m_conn); /* get title */ labelWidget->setText(m_console->msg(m_conn)); this->show(); } void textInputDialog::accept() { this->hide(); m_console->write_dir(m_conn, lineEdit->text().toUtf8().data()); /* Do not displayToPrompt because there may be another Text Input required */ this->close(); mainWin->resetFocus(); m_console->notify(m_conn, true); } void textInputDialog::reject() { this->hide(); mainWin->set_status(tr(" Canceled")); m_console->write_dir(m_conn, "."); this->close(); mainWin->resetFocus(); m_console->beginNewCommand(m_conn); m_console->notify(m_conn, true); } bacula-15.0.3/src/qt-console/select/textinput.h0000644000175000017500000000206214771010173021231 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #ifndef _TEXTENTRY_H_ #define _TEXTENTRY_H_ #if QT_VERSION >= 0x050000 #include #else #include #endif #include "ui_textinput.h" #include "console.h" class textInputDialog : public QDialog, public Ui::textInputForm { Q_OBJECT public: textInputDialog(Console *console, int conn); public slots: void accept(); void reject(); private: Console *m_console; int m_conn; }; #endif /* _TEXTENTRY_H_ */ bacula-15.0.3/src/qt-console/select/select.h0000644000175000017500000000227114771010173020446 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #ifndef _SELECT_H_ #define _SELECT_H_ #if QT_VERSION >= 0x050000 #include #else #include #endif #include "ui_select.h" #include "console.h" class selectDialog : public QDialog, public Ui::selectForm { Q_OBJECT public: selectDialog(Console *console, int conn); public slots: void accept(); void reject(); void index_change(int index); private: Console *m_console; int m_index; int m_conn; }; class yesnoPopUp : public QDialog { Q_OBJECT public: yesnoPopUp(Console *console, int conn); }; #endif /* _SELECT_H_ */ bacula-15.0.3/src/qt-console/qstd.h0000644000175000017500000000463314771010173016667 0ustar bsbuildbsbuild#ifndef QSTD_H #define QSTD_H #include #include #include /** @short helper objects and functions which help reduce the need for char[] and the standard library. 
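    A hypothetical usage sketch (not taken from the Bacula sources; only the
    prompt text and values are invented, the helpers are the ones declared
    in this header):
    @code
    if (qstd::yes("Overwrite the output file")) {
       int days = qstd::promptInt();            // validated integer from stdin
       qstd::cout << "Keeping " << days << " days\n";
    }
    @endcode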
defines three @ref QTextStream instances which behave like the c++ standard iostreams, bound to the standard in/out/error. Also provided, some helper functions for writing interactive stdin/stdout applications. */ //start namespace qstd { /** @short An alias for standard input */ extern QTextStream cin; /* declared only, defined in the .cpp file */ /** @short An alias for standard output */ extern QTextStream cout; /** @short An alias for standard error */ extern QTextStream cerr; /** yes/no prompt interactive stdin UI - prompts user with a yes/no question. Repeatedly-asks until user supplies a valid answer. @param yesNoQuestion the yes/no question @return true/false depending on what the user responded. */ bool yes(QString yesNoQuestion); /** Convenience function that feeds a specific question to the yes() function. @usage do {.....} while(more ("foobar")); so that user sees the question: "Another foobar (y/n)? " @param name of the item being handled by the loop. */ bool more(QString prompt); /** A function for safely taking an int from the keyboard. Takes data into a QString and tests to make sure it can be converted to int before returning. @param base allows choice of number base. @return returns validated int. */ int promptInt(int base = 10); /** A function for safely taking a double from the keyboard. Takes data into a QString and tests to make sure it can be converted to double before returning. @return returns validated int. */ double promptDouble(); /** Complete dialog for opening a file for output. Asks user for file name, checks to see if file already exists and, if so, asks the user if it is ok to overwrite. @param Reference QFile parameter is set to point to the (eventually) opened file. */ /** @short Dialog for a output file prompt */ void promptOutputFile(QFile& outfile); /** @short Dialog for input file prompt */ void promptInputFile(QFile& infile); //end } #endif bacula-15.0.3/src/qt-console/mount/0000755000175000017500000000000014771010173016677 5ustar bsbuildbsbuildbacula-15.0.3/src/qt-console/mount/mount.h0000644000175000017500000000214114771010173020210 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Kern Sibbald, February MMVII */ #ifndef _MOUNT_H_ #define _MOUNT_H_ #if QT_VERSION >= 0x050000 #include #else #include #endif #include "ui_mount.h" #include "console.h" class mountDialog : public QDialog, public Ui::mountForm { Q_OBJECT public: mountDialog(Console *console, QString &storage); private slots: void accept(); void reject(); private: Console *m_console; QString m_storageName; int m_conn; }; #endif /* _MOUNT_H_ */ bacula-15.0.3/src/qt-console/mount/mount.cpp0000644000175000017500000000410114771010173020541 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. 
You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Label Dialog class * * Kern Sibbald, February MMVII * */ #include "bat.h" #include "mount/mount.h" #include /* * A constructor */ mountDialog::mountDialog(Console *console, QString &storageName) : QDialog() { m_console = console; m_storageName = storageName; m_conn = m_console->notifyOff(); setupUi(this); this->show(); QString labelText( tr("Storage : %1").arg(storageName) ); storageLabel->setText(labelText); } void mountDialog::accept() { QString scmd; if (m_storageName == "") { QMessageBox::warning(this, tr("No Storage name"), tr("No Storage name given"), QMessageBox::Ok, QMessageBox::Ok); return; } this->hide(); scmd = QString("mount storage=\"%1\" slot=%2") .arg(m_storageName) .arg(slotSpin->value()); if (mainWin->m_commandDebug) { Pmsg1(000, "sending command : %s\n",scmd.toUtf8().data()); } m_console->display_text( tr("Context sensitive command :\n\n")); m_console->display_text("**** "); m_console->display_text(scmd + " ****\n"); m_console->display_text(tr("Director Response :\n\n")); m_console->write_dir(scmd.toUtf8().data()); m_console->displayToPrompt(m_conn); m_console->notify(m_conn, true); delete this; mainWin->resetFocus(); } void mountDialog::reject() { this->hide(); m_console->notify(m_conn, true); delete this; mainWin->resetFocus(); } bacula-15.0.3/src/qt-console/mount/mount.ui0000644000175000017500000001167114771010173020406 0ustar bsbuildbsbuild mountForm Qt::WindowModal 0 0 400 300 Label 9 6 16777215 20 TextLabel Qt::AlignCenter 0 6 Slot: slotSpin 10000 Qt::Horizontal QDialogButtonBox::Cancel|QDialogButtonBox::NoButton|QDialogButtonBox::Ok Qt::Vertical QSizePolicy::Maximum 21 16 0 6 Qt::Horizontal 71 21 16777215 30 <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:12pt; font-weight:600;">Mount a Slot</span></p></body></html> Qt::Horizontal 81 20 Qt::Vertical QSizePolicy::Maximum 382 16 buttonBox accepted() mountForm accept() 248 254 157 274 buttonBox rejected() mountForm reject() 316 260 286 274 bacula-15.0.3/src/qt-console/joblist/0000755000175000017500000000000014771010173017203 5ustar bsbuildbsbuildbacula-15.0.3/src/qt-console/joblist/joblist.ui0000644000175000017500000003233014771010173021211 0ustar bsbuildbsbuild JobListForm 0 0 696 456 Form 60 10 457 131 50 190 541 171 900 172 QFrame::StyledPanel QFrame::Raised 6 3 6 3 Record Limit 1 10000 25 6 3 Days Limit 7 6 3 6 3 Clients 6 3 Volume 6 3 6 3 Job 6 3 Level 6 3 6 3 Status 6 3 Purged 3 6 3 FileSet 6 3 Pool 3 3 Refresh :/images/view-refresh.png:/images/view-refresh.png Graph :/images/applications-graphics.png:/images/applications-graphics.png Filter Copy Jobs Filter Migration Jobs :/images/view-refresh.png:/images/view-refresh.png Refresh Job List Requery the director for the list of jobs. 
:/images/utilities-terminal.png:/images/utilities-terminal.png List Files On Job :/images/utilities-terminal.png:/images/utilities-terminal.png List Job Volumes :/images/utilities-terminal.png:/images/utilities-terminal.png List Volumes :/images/weather-severe-alert.png:/images/weather-severe-alert.png Delete Job :/images/weather-severe-alert.png:/images/weather-severe-alert.png Purge Files :/images/weather-severe-alert.png:/images/weather-severe-alert.png Restart Job :/images/restore.png:/images/restore.png Restore From Job :/images/restore.png:/images/restore.png Restore From Time :/images/joblog.png:/images/joblog.png Show Job Log :/images/joblog.png:/images/joblog.png Show Job Info :/images/unmark.png:/images/unmark.png Cancel Currently Running Job :/images/utilities-terminal.png:/images/utilities-terminal.png List Job Totals on Console bacula-15.0.3/src/qt-console/joblist/joblist.cpp0000644000175000017500000006064314771010173021366 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Dirk Bartley, March 2007 */ #include "bat.h" #include #include #include "joblist.h" #include "restore.h" #include "job/job.h" #include "joblog/joblog.h" #ifdef HAVE_QWT #include "jobgraphs/jobplot.h" #endif #include "util/fmtwidgetitem.h" #include "util/comboutil.h" /* * Constructor for the class */ JobList::JobList(const QString &mediaName, const QString &clientName, const QString &jobName, const QString &filesetName, QTreeWidgetItem *parentTreeWidgetItem) : Pages() { setupUi(this); m_name = "Jobs Run"; /* treeWidgetName has a virtual override in this class */ m_mediaName = mediaName; m_clientName = clientName; m_jobName = jobName; m_filesetName = filesetName; pgInitialize("", parentTreeWidgetItem); QTreeWidgetItem* thisitem = mainWin->getFromHash(this); thisitem->setIcon(0,QIcon(QString::fromUtf8(":images/emblem-system.png"))); m_resultCount = 0; m_populated = false; m_closeable = false; if ((m_mediaName != "") || (m_clientName != "") || (m_jobName != "") || (m_filesetName != "")) { m_closeable=true; } m_checkCurrentWidget = true; /* Set Defaults for check and spin for limits */ limitCheckBox->setCheckState(mainWin->m_recordLimitCheck ? Qt::Checked : Qt::Unchecked); limitSpinBox->setValue(mainWin->m_recordLimitVal); daysCheckBox->setCheckState(mainWin->m_daysLimitCheck ? 
Qt::Checked : Qt::Unchecked); daysSpinBox->setValue(mainWin->m_daysLimitVal); QGridLayout *gridLayout = new QGridLayout(this); gridLayout->setSpacing(6); gridLayout->setMargin(9); gridLayout->setObjectName(QString::fromUtf8("gridLayout")); m_splitter = new QSplitter(Qt::Vertical, this); QScrollArea *area = new QScrollArea(); area->setObjectName(QString::fromUtf8("area")); area->setWidget(frame); area->setWidgetResizable(true); m_splitter->addWidget(area); m_splitter->addWidget(mp_tableWidget); gridLayout->addWidget(m_splitter, 0, 0, 1, 1); createConnections(); readSettings(); if (m_closeable) { dockPage(); } } /* * Write the m_splitter settings in the destructor */ JobList::~JobList() { writeSettings(); } /* * The Meat of the class. * This function will populate the QTableWidget, mp_tablewidget, with * QTableWidgetItems representing the results of a query for what jobs exist on * the media name passed from the constructor stored in m_mediaName. */ void JobList::populateTable() { /* Can't do this in constructor because not neccesarily conected in constructor */ prepareFilterWidgets(); m_populated = true; Freeze frz(*mp_tableWidget); /* disable updating*/ /* Set up query */ QString query; fillQueryString(query); /* Set up the Header for the table */ QStringList headerlist = (QStringList() << tr("Job Id") << tr("Job Name") << tr("Client") << tr("Job Starttime") << tr("Job Type") << tr("Job Level") << tr("Job Files") << tr("Job Bytes") << tr("Job Status") << tr("Purged") << tr("File Set") << tr("Pool Name") << tr("First Volume") << tr("VolCount")); m_jobIdIndex = headerlist.indexOf(tr("Job Id")); m_purgedIndex = headerlist.indexOf(tr("Purged")); m_typeIndex = headerlist.indexOf(tr("Job Type")); m_statusIndex = headerlist.indexOf(tr("Job Status")); m_startIndex = headerlist.indexOf(tr("Job Starttime")); m_filesIndex = headerlist.indexOf(tr("Job Files")); m_bytesIndex = headerlist.indexOf(tr("Job Bytes")); m_levelIndex = headerlist.indexOf(tr("Job Level")); m_nameIndex = headerlist.indexOf(tr("Job Name")); m_filesetIndex = headerlist.indexOf(tr("File Set")); m_clientIndex = headerlist.indexOf(tr("Client")); /* Initialize the QTableWidget */ m_checkCurrentWidget = false; mp_tableWidget->clear(); m_checkCurrentWidget = true; mp_tableWidget->setColumnCount(headerlist.size()); mp_tableWidget->setHorizontalHeaderLabels(headerlist); mp_tableWidget->horizontalHeader()->setHighlightSections(false); mp_tableWidget->setSelectionBehavior(QAbstractItemView::SelectRows); mp_tableWidget->setSortingEnabled(false); /* rows move on insert if sorting enabled */ if (mainWin->m_sqlDebug) { Pmsg1(000, "Query cmd : %s\n",query.toUtf8().data()); } QStringList results; if (m_console->sql_cmd(query, results)) { m_resultCount = results.count(); QStringList fieldlist; mp_tableWidget->setRowCount(results.size()); int row = 0; /* Iterate through the record returned from the query */ QString resultline; foreach (resultline, results) { fieldlist = resultline.split("\t"); if (fieldlist.size() < 13) continue; /* some fields missing, ignore row */ TableItemFormatter jobitem(*mp_tableWidget, row); /* Iterate through fields in the record */ QStringListIterator fld(fieldlist); int col = 0; /* job id */ jobitem.setNumericFld(col++, fld.next()); /* job name */ jobitem.setTextFld(col++, fld.next()); /* client */ jobitem.setTextFld(col++, fld.next()); /* job starttime */ jobitem.setTextFld(col++, fld.next(), true); /* job type */ jobitem.setJobTypeFld(col++, fld.next()); /* job level */ jobitem.setJobLevelFld(col++, fld.next()); /* job 
files */ jobitem.setNumericFld(col++, fld.next()); /* job bytes */ jobitem.setBytesFld(col++, fld.next()); /* job status */ jobitem.setJobStatusFld(col++, fld.next()); /* purged */ jobitem.setBoolFld(col++, fld.next()); /* fileset */ jobitem.setTextFld(col++, fld.next()); /* pool name */ jobitem.setTextFld(col++, fld.next()); /* First Media */ jobitem.setTextFld(col++, fld.next()); /* Medias count */ jobitem.setNumericFld(col++, fld.next()); row++; } } /* set default sorting */ mp_tableWidget->sortByColumn(m_jobIdIndex, Qt::DescendingOrder); mp_tableWidget->setSortingEnabled(true); /* Resize the columns */ mp_tableWidget->resizeColumnsToContents(); mp_tableWidget->resizeRowsToContents(); mp_tableWidget->verticalHeader()->hide(); if ((m_mediaName != tr("Any")) && (m_resultCount == 0)){ /* for context sensitive searches, let the user know if there were no * results */ QMessageBox::warning(this, "Bat", tr("The Jobs query returned no results.\n" "Press OK to continue?"), QMessageBox::Ok ); } /* make read only */ mp_tableWidget->setEditTriggers(QAbstractItemView::NoEditTriggers); } void JobList::prepareFilterWidgets() { if (!m_populated) { clientComboBox->addItem(tr("Any")); clientComboBox->addItems(m_console->client_list); comboSel(clientComboBox, m_clientName); QStringList volumeList; getVolumeList(volumeList); volumeComboBox->addItem(tr("Any")); volumeComboBox->addItems(volumeList); comboSel(volumeComboBox, m_mediaName); jobComboBox->addItem(tr("Any")); jobComboBox->addItems(m_console->job_list); comboSel(jobComboBox, m_jobName); levelComboFill(levelComboBox); boolComboFill(purgedComboBox); fileSetComboBox->addItem(tr("Any")); fileSetComboBox->addItems(m_console->fileset_list); comboSel(fileSetComboBox, m_filesetName); poolComboBox->addItem(tr("Any")); poolComboBox->addItems(m_console->pool_list); jobStatusComboFill(statusComboBox); } } void JobList::fillQueryString(QString &query) { query = ""; int volumeIndex = volumeComboBox->currentIndex(); if (volumeIndex != -1) m_mediaName = volumeComboBox->itemText(volumeIndex); QString distinct = ""; if (m_mediaName != tr("Any")) { distinct = "DISTINCT "; } query += "SELECT " + distinct + "Job.JobId AS JobId, Job.Name AS JobName, " " Client.Name AS Client," " Job.Starttime AS JobStart, Job.Type AS JobType," " Job.Level AS BackupLevel, Job.Jobfiles AS FileCount," " Job.JobBytes AS Bytes, Job.JobStatus AS Status," " Job.PurgedFiles AS Purged, FileSet.FileSet," " Pool.Name AS Pool," " (SELECT Media.VolumeName FROM JobMedia JOIN Media ON JobMedia.MediaId=Media.MediaId WHERE JobMedia.JobId=Job.JobId ORDER BY JobMediaId LIMIT 1) AS FirstVolume," " (SELECT count(DISTINCT MediaId) FROM JobMedia WHERE JobMedia.JobId=Job.JobId) AS Volumes" " FROM Job" " JOIN Client ON (Client.ClientId=Job.ClientId)" " LEFT OUTER JOIN FileSet ON (FileSet.FileSetId=Job.FileSetId) " " LEFT OUTER JOIN Pool ON Job.PoolId = Pool.PoolId "; QStringList conditions; if (m_mediaName != tr("Any")) { query += " LEFT OUTER JOIN JobMedia ON (JobMedia.JobId=Job.JobId) " " LEFT OUTER JOIN Media ON (JobMedia.MediaId=Media.MediaId) "; conditions.append("Media.VolumeName='" + m_mediaName + "'"); } comboCond(conditions, clientComboBox, "Client.Name"); comboCond(conditions, jobComboBox, "Job.Name"); levelComboCond(conditions, levelComboBox, "Job.Level"); jobStatusComboCond(conditions, statusComboBox, "Job.JobStatus"); boolComboCond(conditions, purgedComboBox, "Job.PurgedFiles"); comboCond(conditions, fileSetComboBox, "FileSet.FileSet"); comboCond(conditions, poolComboBox, "Pool.Name"); /* If Limit 
check box For limit by days is checked */ if (daysCheckBox->checkState() == Qt::Checked) { QDateTime stamp = QDateTime::currentDateTime().addDays(-daysSpinBox->value()); QString since = stamp.toString(Qt::ISODate); conditions.append("Job.Starttime > '" + since + "'"); } if (filterCopyCheckBox->checkState() == Qt::Checked) { conditions.append("Job.Type != 'c'" ); } if (filterMigrationCheckBox->checkState() == Qt::Checked) { conditions.append("Job.Type != 'g'" ); } bool first = true; foreach (QString condition, conditions) { if (first) { query += " WHERE " + condition; first = false; } else { query += " AND " + condition; } } /* Descending */ query += " ORDER BY Job.JobId DESC"; /* If Limit check box for limit records returned is checked */ if (limitCheckBox->checkState() == Qt::Checked) { QString limit; limit.setNum(limitSpinBox->value()); query += " LIMIT " + limit; } } /* * When the treeWidgetItem in the page selector tree is singleclicked, Make sure * The tree has been populated. */ void JobList::PgSeltreeWidgetClicked() { if (!m_populated) { populateTable(); /* Lets make sure the splitter is not all the way to size index 0 == 0 */ QList sizes = m_splitter->sizes(); if (sizes[0] == 0) { int frameMax = frame->maximumHeight(); int sizeSum = 0; foreach(int size, sizes) { sizeSum += size; } int tabHeight = mainWin->tabWidget->geometry().height(); sizes[0] = frameMax; sizes[1] = tabHeight - frameMax; m_splitter->setSizes(sizes); } } if (!isOnceDocked()) { dockPage(); } } /* * Virtual function override of pages function which is called when this page * is visible on the stack */ void JobList::currentStackItem() { /* if (!m_populated) populate every time user comes back to this object */ populateTable(); } /* * Virtual Function to return the name for the medialist tree widget */ void JobList::treeWidgetName(QString &desc) { if (m_mediaName != "" ) { desc = tr("Jobs Run on Volume %1").arg(m_mediaName); } else if (m_clientName != "" ) { desc = tr("Jobs Run from Client %1").arg(m_clientName); } else if (m_jobName != "" ) { desc = tr("Jobs Run of Job %1").arg(m_jobName); } else if (m_filesetName != "" ) { desc = tr("Jobs Run with fileset %1").arg(m_filesetName); } else { desc = tr("Jobs Run"); } } /* * Function to create connections for context sensitive menu for this and * the page selector */ void JobList::createConnections() { /* connect to the action specific to this pages class that shows up in the * page selector tree */ connect(actionRefreshJobList, SIGNAL(triggered()), this, SLOT(populateTable())); connect(refreshButton, SIGNAL(pressed()), this, SLOT(populateTable())); #ifdef HAVE_QWT connect(graphButton, SIGNAL(pressed()), this, SLOT(graphTable())); #else graphButton->setEnabled(false); graphButton->setVisible(false); #endif /* for the selectionChanged to maintain m_currentJob and a delete selection */ connect(mp_tableWidget, SIGNAL(itemSelectionChanged()), this, SLOT(selectionChanged())); connect(mp_tableWidget, SIGNAL(itemDoubleClicked(QTableWidgetItem*)), this, SLOT(showInfoForJob())); /* Do what is required for the local context sensitive menu */ /* setContextMenuPolicy is required */ mp_tableWidget->setContextMenuPolicy(Qt::ActionsContextMenu); connect(actionListFilesOnJob, SIGNAL(triggered()), this, SLOT(consoleListFilesOnJob())); connect(actionListJobMedia, SIGNAL(triggered()), this, SLOT(consoleListJobMedia())); connect(actionDeleteJob, SIGNAL(triggered()), this, SLOT(consoleDeleteJob())); connect(actionRestartJob, SIGNAL(triggered()), this, SLOT(consoleRestartJob())); 
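   /* The remaining context-menu actions are wired the same way, using the
    * string-based SIGNAL()/SLOT() macros.  As an illustrative sketch only
    * (assuming Qt >= 5.0; not part of the original source), the equivalent
    * compile-time-checked form would be:
    *    connect(actionRestartJob, &QAction::triggered,
    *            this, &JobList::consoleRestartJob);
    */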
connect(actionPurgeFiles, SIGNAL(triggered()), this, SLOT(consolePurgeFiles())); connect(actionRestoreFromJob, SIGNAL(triggered()), this, SLOT(preRestoreFromJob())); connect(actionRestoreFromTime, SIGNAL(triggered()), this, SLOT(preRestoreFromTime())); connect(actionShowLogForJob, SIGNAL(triggered()), this, SLOT(showLogForJob())); connect(actionShowInfoForJob, SIGNAL(triggered()), this, SLOT(showInfoForJob())); connect(actionCancelJob, SIGNAL(triggered()), this, SLOT(consoleCancelJob())); connect(actionListJobTotals, SIGNAL(triggered()), this, SLOT(consoleListJobTotals())); connect(m_splitter, SIGNAL(splitterMoved(int, int)), this, SLOT(splitterMoved(int, int))); m_contextActions.append(actionRefreshJobList); m_contextActions.append(actionListJobTotals); } /* * Functions to respond to local context sensitive menu sending console commands * If I could figure out how to make these one function passing a string, Yaaaaaa */ void JobList::consoleListFilesOnJob() { QString cmd("list files jobid="); cmd += m_currentJob; if (mainWin->m_longList) { cmd.prepend("l"); } consoleCommand(cmd); } void JobList::consoleListJobMedia() { QString cmd("list jobmedia jobid="); cmd += m_currentJob; if (mainWin->m_longList) { cmd.prepend("l"); } consoleCommand(cmd); } void JobList::consoleListJobTotals() { QString cmd("list jobtotals"); if (mainWin->m_longList) { cmd.prepend("l"); } consoleCommand(cmd); } void JobList::consoleDeleteJob() { if (QMessageBox::warning(this, "Bat", tr("Are you sure you want to delete?? !!!.\n" "This delete command is used to delete a Job record and all associated catalog" " records that were created. This command operates only on the Catalog" " database and has no effect on the actual data written to a Volume. This" " command can be dangerous and we strongly recommend that you do not use" " it unless you know what you are doing. The Job and all its associated" " records (File and JobMedia) will be deleted from the catalog." "Press OK to proceed with delete operation.?"), QMessageBox::Ok | QMessageBox::Cancel) == QMessageBox::Cancel) { return; } QString cmd("delete job jobid="); cmd += m_selectedJobs; consoleCommand(cmd, false); populateTable(); } void JobList::consoleRestartJob() { QString cmd; cmd = tr("run job=\"%1\" client=\"%2\" level=%3").arg(m_jobName).arg(m_clientName).arg(m_levelName); if (m_filesetName != "" && m_filesetName != "*None*") { cmd += tr(" fileset=\"%1\"").arg(m_filesetName); } if (mainWin->m_commandDebug) Pmsg1(000, "Run cmd : %s\n",cmd.toUtf8().data()); consoleCommand(cmd, false); populateTable(); } void JobList::consolePurgeFiles() { if (QMessageBox::warning(this, "Bat", tr("Are you sure you want to purge ?? !!!.\n" "The Purge command will delete associated Catalog database records from Jobs and" " Volumes without considering the retention period. Purge works only on the" " Catalog database and does not affect data written to Volumes. 
This command can" " be dangerous because you can delete catalog records associated with current" " backups of files, and we recommend that you do not use it unless you know what" " you are doing.\n" "Press OK to proceed with the purge operation?"), QMessageBox::Ok | QMessageBox::Cancel) == QMessageBox::Cancel) { return; } m_console->m_warningPrevent = true; foreach(QString job, m_selectedJobsList) { QString cmd("purge files jobid="); cmd += job; consoleCommand(cmd, false); } m_console->m_warningPrevent = false; populateTable(); } /* * Subroutine to call preRestore to restore from a select job */ void JobList::preRestoreFromJob() { new prerestorePage(m_currentJob, R_JOBIDLIST); } /* * Subroutine to call preRestore to restore from a select job */ void JobList::preRestoreFromTime() { new prerestorePage(m_currentJob, R_JOBDATETIME); } /* * Subroutine to call class to show the log in the database from that job */ void JobList::showLogForJob() { QTreeWidgetItem* pageSelectorTreeWidgetItem = mainWin->getFromHash(this); new JobLog(m_currentJob, pageSelectorTreeWidgetItem); } /* * Subroutine to call class to show the log in the database from that job */ void JobList::showInfoForJob(QTableWidgetItem * /*item*/) { QTreeWidgetItem* pageSelectorTreeWidgetItem = mainWin->getFromHash(this); new Job(m_currentJob, pageSelectorTreeWidgetItem); } /* * Cancel a running job */ void JobList::consoleCancelJob() { QString cmd("cancel jobid="); cmd += m_currentJob; consoleCommand(cmd); } /* * Graph this table */ void JobList::graphTable() { #ifdef HAVE_QWT JobPlotPass pass; pass.recordLimitCheck = limitCheckBox->checkState(); pass.daysLimitCheck = daysCheckBox->checkState(); pass.recordLimitSpin = limitSpinBox->value(); pass.daysLimitSpin = daysSpinBox->value(); pass.jobCombo = jobComboBox->currentText(); pass.clientCombo = clientComboBox->currentText(); pass.volumeCombo = volumeComboBox->currentText(); pass.fileSetCombo = fileSetComboBox->currentText(); pass.purgedCombo = purgedComboBox->currentText(); pass.levelCombo = levelComboBox->currentText(); pass.statusCombo = statusComboBox->currentText(); pass.use = true; QTreeWidgetItem* pageSelectorTreeWidgetItem = mainWin->getFromHash(this); new JobPlot(pageSelectorTreeWidgetItem, pass); #endif } /* * Save user settings associated with this page */ void JobList::writeSettings() { QSettings settings(m_console->m_dir->name(), "bat"); settings.beginGroup(m_groupText); settings.setValue(m_splitText, m_splitter->saveState()); settings.setValue("FilterCopyCheckState", filterCopyCheckBox->checkState()); settings.setValue("FilterMigrationCheckState", filterMigrationCheckBox->checkState()); settings.endGroup(); } /* * Read and restore user settings associated with this page */ void JobList::readSettings() { m_groupText = "JobListPage"; m_splitText = "splitterSizes_2"; QSettings settings(m_console->m_dir->name(), "bat"); settings.beginGroup(m_groupText); if (settings.contains(m_splitText)) { m_splitter->restoreState(settings.value(m_splitText).toByteArray()); } filterCopyCheckBox->setCheckState((Qt::CheckState)settings.value("FilterCopyCheckState").toInt()); filterMigrationCheckBox->setCheckState((Qt::CheckState)settings.value("FilterMigrationCheckState").toInt()); settings.endGroup(); } /* * Function to fill m_selectedJobsCount and m_selectedJobs with selected values */ void JobList::selectionChanged() { QList rowList; QList sitems = mp_tableWidget->selectedItems(); foreach (QTableWidgetItem *sitem, sitems) { int row = sitem->row(); if (!rowList.contains(row)) { 
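         /* selectedItems() returns one item per selected cell, so several
          * items can map to the same row; record each row number only once. */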
rowList.append(row); } } m_selectedJobs = ""; m_selectedJobsList.clear(); bool first = true; foreach(int row, rowList) { QTableWidgetItem * sitem = mp_tableWidget->item(row, m_jobIdIndex); if (!first) m_selectedJobs.append(","); else first = false; m_selectedJobs.append(sitem->text()); m_selectedJobsList.append(sitem->text()); } m_selectedJobsCount = rowList.count(); if (m_selectedJobsCount > 1) { QString text = QString( tr("Delete list of %1 Jobs")).arg(m_selectedJobsCount); actionDeleteJob->setText(text); text = QString( tr("Purge Files from list of %1 Jobs")).arg(m_selectedJobsCount); actionPurgeFiles->setText(text); } else { actionDeleteJob->setText(tr("Delete Single Job")); actionPurgeFiles->setText(tr("Purge Files from single job")); } /* remove all actions */ foreach(QAction* mediaAction, mp_tableWidget->actions()) { mp_tableWidget->removeAction(mediaAction); } /* Add Actions */ mp_tableWidget->addAction(actionRefreshJobList); if (m_selectedJobsCount == 1) { mp_tableWidget->addAction(actionListFilesOnJob); mp_tableWidget->addAction(actionListJobMedia); mp_tableWidget->addAction(actionRestartJob); mp_tableWidget->addAction(actionRestoreFromJob); mp_tableWidget->addAction(actionRestoreFromTime); mp_tableWidget->addAction(actionShowLogForJob); mp_tableWidget->addAction(actionShowInfoForJob); } if (m_selectedJobsCount >= 1) { mp_tableWidget->addAction(actionDeleteJob); mp_tableWidget->addAction(actionPurgeFiles); } /* Make Connections */ if (m_checkCurrentWidget) { int row = mp_tableWidget->currentRow(); QTableWidgetItem* jobitem = mp_tableWidget->item(row, 0); m_currentJob = jobitem->text(); /* get JobId */ jobitem = mp_tableWidget->item(row, m_clientIndex); m_clientName = jobitem->text(); /* get Client Name */ jobitem = mp_tableWidget->item(row, m_nameIndex); m_jobName = jobitem->text(); /* get Job Name */ jobitem = mp_tableWidget->item(row, m_levelIndex); m_levelName = jobitem->text(); /* get level */ jobitem = mp_tableWidget->item(row, m_filesetIndex); if (jobitem) { m_filesetName = jobitem->text(); /* get FileSet Name */ } else { m_filesetName = ""; } /* include purged action or not */ jobitem = mp_tableWidget->item(row, m_purgedIndex); QString purged = jobitem->text(); /* mp_tableWidget->removeAction(actionPurgeFiles); if (purged == tr("No") ) { mp_tableWidget->addAction(actionPurgeFiles); }*/ /* include restore from time and job action or not */ jobitem = mp_tableWidget->item(row, m_typeIndex); QString type = jobitem->text(); if (m_selectedJobsCount == 1) { mp_tableWidget->removeAction(actionRestoreFromJob); mp_tableWidget->removeAction(actionRestoreFromTime); if (type == tr("Backup")) { mp_tableWidget->addAction(actionRestoreFromJob); mp_tableWidget->addAction(actionRestoreFromTime); } } /* include cancel action or not */ jobitem = mp_tableWidget->item(row, m_statusIndex); QString status = jobitem->text(); mp_tableWidget->removeAction(actionCancelJob); if (status == tr("Running") || status == tr("Created, not yet running")) { mp_tableWidget->addAction(actionCancelJob); } } } /* * Function to prevent the splitter from making index 0 of the size larger than it * needs to be */ void JobList::splitterMoved(int /*pos*/, int /*index*/) { int frameMax = frame->maximumHeight(); QList sizes = m_splitter->sizes(); int sizeSum = 0; foreach(int size, sizes) { sizeSum += size; } if (sizes[0] > frameMax) { sizes[0] = frameMax; sizes[1] = sizeSum - frameMax; m_splitter->setSizes(sizes); } } bacula-15.0.3/src/qt-console/joblist/joblist.h0000644000175000017500000000473314771010173021031 0ustar 
bsbuildbsbuild#ifndef _JOBLIST_H_ #define _JOBLIST_H_ /* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Dirk Bartley, March 2007 */ #if QT_VERSION >= 0x050000 #include #else #include #endif #include "ui_joblist.h" #include "console.h" #include "pages.h" class JobList : public Pages, public Ui::JobListForm { Q_OBJECT public: JobList(const QString &medianame, const QString &clientname, const QString &jobname, const QString &filesetname, QTreeWidgetItem *); ~JobList(); virtual void PgSeltreeWidgetClicked(); virtual void currentStackItem(); int m_resultCount; public slots: void populateTable(); virtual void treeWidgetName(QString &); void selectionChanged(); private slots: void consoleListFilesOnJob(); void consoleListJobMedia(); void consoleListJobTotals(); void consoleDeleteJob(); void consoleRestartJob(); void consolePurgeFiles(); void preRestoreFromJob(); void preRestoreFromTime(); void showLogForJob(); void showInfoForJob(QTableWidgetItem * item=NULL); void consoleCancelJob(); void graphTable(); void splitterMoved(int pos, int index); private: void createConnections(); void writeSettings(); void readSettings(); void prepareFilterWidgets(); void fillQueryString(QString &query); QSplitter *m_splitter; QString m_groupText; QString m_splitText; QString m_mediaName; QString m_clientName; QString m_jobName; QString m_filesetName; QString m_currentJob; QString m_levelName; bool m_populated; bool m_checkCurrentWidget; int m_jobIdIndex; int m_purgedIndex; int m_typeIndex; int m_levelIndex; int m_clientIndex; int m_nameIndex; int m_filesetIndex; int m_statusIndex; int m_startIndex; int m_bytesIndex; int m_filesIndex; int m_selectedJobsCount; QString m_selectedJobs; QStringList m_selectedJobsList; }; #endif /* _JOBLIST_H_ */ bacula-15.0.3/src/qt-console/restore/0000755000175000017500000000000014771010173017220 5ustar bsbuildbsbuildbacula-15.0.3/src/qt-console/restore/restore.cpp0000644000175000017500000003713714771010173021422 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * Restore Class * * Kern Sibbald, February MMVII * */ #include "bat.h" #include "restore.h" static const int dbglvl = 100; restorePage::restorePage(int conn) : Pages() { Dmsg1(dbglvl, "Construcing restorePage Instance connection %i\n", conn); m_conn = conn; QStringList titles; setupUi(this); m_name = tr("Restore Select"); pgInitialize(); QTreeWidgetItem* thisitem = mainWin->getFromHash(this); thisitem->setIcon(0,QIcon(QString::fromUtf8(":images/restore.png"))); m_console->notify(m_conn, false); /* this should already be off */ connect(fileWidget, SIGNAL(itemDoubleClicked(QTreeWidgetItem*, int)), this, SLOT(fileDoubleClicked(QTreeWidgetItem *, int))); connect(directoryWidget, SIGNAL( currentItemChanged(QTreeWidgetItem *, QTreeWidgetItem *)), this, SLOT(directoryItemChanged(QTreeWidgetItem *, QTreeWidgetItem *))); connect(upButton, SIGNAL(pressed()), this, SLOT(upButtonPushed())); connect(markButton, SIGNAL(pressed()), this, SLOT(markButtonPushed())); connect(unmarkButton, SIGNAL(pressed()), this, SLOT(unmarkButtonPushed())); connect(okButton, SIGNAL(pressed()), this, SLOT(okButtonPushed())); connect(cancelButton, SIGNAL(pressed()), this, SLOT(cancelButtonPushed())); fileWidget->setContextMenuPolicy(Qt::ActionsContextMenu); fileWidget->addAction(actionMark); fileWidget->addAction(actionUnMark); connect(actionMark, SIGNAL(triggered()), this, SLOT(markButtonPushed())); connect(actionUnMark, SIGNAL(triggered()), this, SLOT(unmarkButtonPushed())); setFont(m_console->get_font()); m_console->displayToPrompt(m_conn); titles << tr("Mark") << tr("File") << tr("Mode") << tr("User") << tr("Group") << tr("Size") << tr("Date"); fileWidget->setHeaderLabels(titles); get_cwd(); readSettings(); /* wait was entered from pre-restore * will exit, but will reenter in fillDirectory */ mainWin->waitExit(); fillDirectory(); dockPage(); setCurrent(); this->show(); if (mainWin->m_miscDebug) Pmsg0(000, "Leave restorePage\n"); } restorePage::~restorePage() { writeSettings(); } /* * Fill the fileWidget box with the contents of the current directory */ void restorePage::fillDirectory() { mainWin->waitEnter(); char modes[20], user[20], group[20], size[20], date[30]; char marked[10]; int pnl, fnl; POOLMEM *file = get_pool_memory(PM_FNAME); POOLMEM *path = get_pool_memory(PM_FNAME); fileWidget->clear(); m_console->write_dir(m_conn, "dir", false); QList treeItemList; QStringList item; m_rx.setPattern("has no children\\.$"); bool first = true; while (m_console->read(m_conn) > 0) { char *p = m_console->msg(m_conn); char *l; strip_trailing_junk(p); if (*p == '$' || !*p) { continue; } if (first) { if (m_rx.indexIn(QString(p)) != -1) { continue; } first = false; } l = p; skip_nonspaces(&p); /* permissions */ *p++ = 0; bstrncpy(modes, l, sizeof(modes)); skip_spaces(&p); skip_nonspaces(&p); /* link count */ *p++ = 0; skip_spaces(&p); l = p; skip_nonspaces(&p); /* user */ *p++ = 0; skip_spaces(&p); bstrncpy(user, l, sizeof(user)); l = p; skip_nonspaces(&p); /* group */ *p++ = 0; bstrncpy(group, l, sizeof(group)); skip_spaces(&p); l = p; skip_nonspaces(&p); /* size */ *p++ = 0; bstrncpy(size, l, sizeof(size)); skip_spaces(&p); l = p; skip_nonspaces(&p); /* date/time */ skip_spaces(&p); skip_nonspaces(&p); *p++ = 0; bstrncpy(date, l, sizeof(date)); skip_spaces(&p); if (*p == '*') { bstrncpy(marked, "*", sizeof(marked)); p++; } else { bstrncpy(marked, " ", sizeof(marked)); } split_path_and_filename(p, &path, &pnl, &file, &fnl); item.clear(); item << "" << file << modes << user << group << size << date; if (item[1].endsWith("/")) 
{ addDirectory(item[1]); } QTreeWidgetItem *ti = new QTreeWidgetItem((QTreeWidget *)0, item); ti->setTextAlignment(5, Qt::AlignRight); /* right align size */ if (strcmp(marked, "*") == 0) { ti->setIcon(0, QIcon(QString::fromUtf8(":images/check.png"))); ti->setData(0, Qt::UserRole, true); } else { ti->setIcon(0, QIcon(QString::fromUtf8(":images/unchecked.png"))); ti->setData(0, Qt::UserRole, false); } treeItemList.append(ti); } fileWidget->clear(); fileWidget->insertTopLevelItems(0, treeItemList); for (int i=0; i<7; i++) { fileWidget->resizeColumnToContents(i); } free_pool_memory(file); free_pool_memory(path); mainWin->waitExit(); } /* * Function called from fill directory when a directory is found to see if this * directory exists in the directory pane and then add it to the directory pane */ void restorePage::addDirectory(QString &newdirr) { QString newdir = newdirr; QString fullpath = m_cwd + newdirr; bool ok = true; if (mainWin->m_miscDebug) { QString msg = QString(tr("In addDirectory cwd \"%1\" newdir \"%2\" fullpath \"%3\"\n")) .arg(m_cwd) .arg(newdir) .arg(fullpath); Pmsg1(dbglvl, "%s\n", msg.toUtf8().data()); } if (isWin32Path(fullpath)) { if (mainWin->m_miscDebug) Pmsg0(dbglvl, "Windows drive\n"); if (fullpath.left(1) == "/") { fullpath.replace(0, 1, ""); /* strip leading / */ } /* If drive and not already in add it */ if (fullpath.length() == 3 && !m_dirPaths.contains(fullpath)) { QTreeWidgetItem *item = new QTreeWidgetItem(directoryWidget); item->setIcon(0,QIcon(QString::fromUtf8(":images/folder.png"))); item->setText(0, fullpath.toUtf8().data()); if (mainWin->m_miscDebug) { Pmsg1(dbglvl, "Pre Inserting %s\n",fullpath.toUtf8().data()); } m_dirPaths.insert(fullpath, item); m_dirTreeItems.insert(item, fullpath); directoryWidget->setCurrentItem(NULL); } } else { // Unix add / first if not already there if (m_dirPaths.empty()) { QTreeWidgetItem *item = new QTreeWidgetItem(directoryWidget); item->setIcon(0,QIcon(QString::fromUtf8(":images/folder.png"))); QString text("/"); item->setText(0, text.toUtf8().data()); if (mainWin->m_miscDebug) { Pmsg1(dbglvl, "Pre Inserting %s\n",text.toUtf8().data()); } m_dirPaths.insert(text, item); m_dirTreeItems.insert(item, text); } } /* Does it already exist ?? */ if (!m_dirPaths.contains(fullpath)) { QTreeWidgetItem *item = NULL; if (isWin32Path(fullpath)) { /* this is the base widget */ item = new QTreeWidgetItem(directoryWidget); item->setText(0, fullpath.toUtf8().data()); if (mainWin->m_miscDebug) Pmsg1(dbglvl, "Windows: %s\n", fullpath.toUtf8().data()); item->setIcon(0,QIcon(QString::fromUtf8(":images/folder.png"))); } else { QTreeWidgetItem *parent = m_dirPaths.value(m_cwd); if (parent) { /* new directories to add */ item = new QTreeWidgetItem(parent); item->setText(0, newdir.toUtf8().data()); item->setIcon(0,QIcon(QString::fromUtf8(":images/folder.png"))); directoryWidget->expandItem(parent); if (mainWin->m_miscDebug) { Pmsg1(dbglvl, "%s\n", newdir.toUtf8().data()); } } else { ok = false; if (mainWin->m_miscDebug) { QString msg = QString(tr("In else of if parent cwd \"%1\" newdir \"%2\"\n")) .arg(m_cwd) .arg(newdir); Pmsg1(dbglvl, "%s\n", msg.toUtf8().data()); } } } /* insert into both forward and reverse hash */ if (ok) { if (mainWin->m_miscDebug) { Pmsg1(dbglvl, "Inserting %s\n",fullpath.toUtf8().data()); } m_dirPaths.insert(fullpath, item); m_dirTreeItems.insert(item, fullpath); } } } /* * Executed when the tree item in the directory pane is changed. This will * allow us to populate the file pane and make this the cwd. 
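 * The current item is mapped back to its full path through m_dirTreeItems;
 * an empty result means the item is unknown here and nothing is done.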
*/ void restorePage::directoryItemChanged(QTreeWidgetItem *currentitem, QTreeWidgetItem * /*previousitem*/) { QString fullpath = m_dirTreeItems.value(currentitem); statusLine->setText(""); if (fullpath != "") { cwd(fullpath.toUtf8().data()); fillDirectory(); } } void restorePage::okButtonPushed() { this->hide(); m_console->write(m_conn, "done"); m_console->notify(m_conn, true); setConsoleCurrent(); closeStackPage(); mainWin->resetFocus(); } void restorePage::cancelButtonPushed() { this->hide(); m_console->write(m_conn, "quit"); m_console->displayToPrompt(m_conn); mainWin->set_status(tr("Canceled")); closeStackPage(); m_console->notify(m_conn, true); mainWin->resetFocus(); } void restorePage::fileDoubleClicked(QTreeWidgetItem *item, int column) { char cmd[1000]; statusLine->setText(""); if (column == 0) { /* mark/unmark */ mainWin->waitEnter(); if (item->data(0, Qt::UserRole).toBool()) { bsnprintf(cmd, sizeof(cmd), "unmark \"%s\"", item->text(1).toUtf8().data()); item->setIcon(0, QIcon(QString::fromUtf8(":images/unchecked.png"))); item->setData(0, Qt::UserRole, false); } else { bsnprintf(cmd, sizeof(cmd), "mark \"%s\"", item->text(1).toUtf8().data()); item->setIcon(0, QIcon(QString::fromUtf8(":images/check.png"))); item->setData(0, Qt::UserRole, true); } m_console->write_dir(m_conn, cmd, false); if (m_console->read(m_conn) > 0) { strip_trailing_junk(m_console->msg(m_conn)); statusLine->setText(m_console->msg(m_conn)); } m_console->displayToPrompt(m_conn); mainWin->waitExit(); return; } /* * Double clicking other than column 0 means to decend into * the directory -- or nothing if it is not a directory. */ if (item->text(1).endsWith("/")) { QString fullpath = m_cwd + item->text(1); QTreeWidgetItem *item = m_dirPaths.value(fullpath); if (mainWin->m_miscDebug) { Pmsg1(dbglvl, "%s\n", fullpath.toUtf8().data()); } if (item) { directoryWidget->setCurrentItem(item); } else { QString msg = QString("DoubleClick else of item column %1 fullpath %2\n") .arg(column,10) .arg(fullpath); if (mainWin->m_miscDebug) Pmsg1(dbglvl, "%s\n", msg.toUtf8().data()); } } } /* * If up button pushed, making the parent tree widget current will call fill * directory. 
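 * The "cd .." is sent to the Director first so m_cwd points to the parent;
 * selecting the matching tree item then fires directoryItemChanged(), which
 * repopulates the file pane.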
*/ void restorePage::upButtonPushed() { cwd(".."); QTreeWidgetItem *item = m_dirPaths.value(m_cwd); if (item) { directoryWidget->setCurrentItem(item); } statusLine->setText(""); } /* * Mark selected items */ void restorePage::markButtonPushed() { mainWin->waitEnter(); QList treeItemList = fileWidget->selectedItems(); QTreeWidgetItem *item; char cmd[1000]; int count = 0; statusLine->setText(""); foreach (item, treeItemList) { count++; bsnprintf(cmd, sizeof(cmd), "mark \"%s\"", item->text(1).toUtf8().data()); item->setIcon(0, QIcon(QString::fromUtf8(":images/check.png"))); m_console->write_dir(m_conn, cmd, false); if (m_console->read(m_conn) > 0) { strip_trailing_junk(m_console->msg(m_conn)); statusLine->setText(m_console->msg(m_conn)); } Dmsg1(dbglvl, "cmd=%s\n", cmd); m_console->discardToPrompt(m_conn); } if (count == 0) { mainWin->set_status("Nothing selected, nothing done"); statusLine->setText("Nothing selected, nothing done"); } mainWin->waitExit(); } /* * Unmark selected items */ void restorePage::unmarkButtonPushed() { mainWin->waitEnter(); QList treeItemList = fileWidget->selectedItems(); QTreeWidgetItem *item; char cmd[1000]; int count = 0; statusLine->setText(""); foreach (item, treeItemList) { count++; bsnprintf(cmd, sizeof(cmd), "unmark \"%s\"", item->text(1).toUtf8().data()); item->setIcon(0, QIcon(QString::fromUtf8(":images/unchecked.png"))); m_console->write_dir(m_conn, cmd, false); if (m_console->read(m_conn) > 0) { strip_trailing_junk(m_console->msg(m_conn)); statusLine->setText(m_console->msg(m_conn)); } Dmsg1(dbglvl, "cmd=%s\n", cmd); m_console->discardToPrompt(m_conn); } if (count == 0) { mainWin->set_status(tr("Nothing selected, nothing done")); statusLine->setText(tr("Nothing selected, nothing done")); } mainWin->waitExit(); } /* * Change current working directory */ bool restorePage::cwd(const char *dir) { int stat; char cd_cmd[MAXSTRING]; mainWin->waitEnter(); statusLine->setText(""); bsnprintf(cd_cmd, sizeof(cd_cmd), "cd \"%s\"", dir); Dmsg2(dbglvl, "dir=%s cmd=%s\n", dir, cd_cmd); m_console->write_dir(m_conn, cd_cmd, false); lineEdit->clear(); if ((stat = m_console->read(m_conn)) > 0) { m_cwd = m_console->msg(m_conn); lineEdit->insert(m_cwd); Dmsg2(dbglvl, "cwd=%s msg=%s\n", m_cwd.toUtf8().data(), m_console->msg(m_conn)); } else { Dmsg1(dbglvl, "stat=%d\n", stat); QMessageBox::critical(this, "Error", tr("cd command failed"), QMessageBox::Ok); } m_console->discardToPrompt(m_conn); mainWin->waitExit(); return true; /* ***FIXME*** return real status */ } /* * Return cwd when in tree restore mode */ char *restorePage::get_cwd() { int stat; mainWin->waitEnter(); m_console->write_dir(m_conn, ".pwd", false); Dmsg0(dbglvl, "send: .pwd\n"); if ((stat = m_console->read(m_conn)) > 0) { m_cwd = m_console->msg(m_conn); Dmsg2(dbglvl, "cwd=%s msg=%s\n", m_cwd.toUtf8().data(), m_console->msg(m_conn)); } else { Dmsg1(dbglvl, "Something went wrong read stat=%d\n", stat); QMessageBox::critical(this, "Error", tr(".pwd command failed"), QMessageBox::Ok); } m_console->discardToPrompt(m_conn); mainWin->waitExit(); return m_cwd.toUtf8().data(); } /* * Save user settings associated with this page */ void restorePage::writeSettings() { QSettings settings(m_console->m_dir->name(), "bat"); settings.beginGroup("RestorePage"); settings.setValue(m_splitText, splitter->saveState()); settings.endGroup(); } /* * Read and restore user settings associated with this page */ void restorePage::readSettings() { m_splitText = "splitterSizes_2"; QSettings settings(m_console->m_dir->name(), "bat"); 
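   /* Settings are keyed by the Director name (organization) and "bat"
    * (application); the saved splitter state lives in the "RestorePage" group. */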
settings.beginGroup("RestorePage"); if (settings.contains(m_splitText)) { splitter->restoreState(settings.value(m_splitText).toByteArray()); } settings.endGroup(); } bacula-15.0.3/src/qt-console/restore/restore.h0000644000175000017500000001041114771010173021051 0ustar bsbuildbsbuild#ifndef _RESTORE_H_ #define _RESTORE_H_ /* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * * Kern Sibbald, February 2007 */ #include #include /* Needed for some systems */ #if QT_VERSION >= 0x050000 #include #else #include #endif #include "pages.h" #include "ui_runrestore.h" class bRestoreTable : public QTableWidget { Q_OBJECT private: QPoint dragStartPosition; public: bRestoreTable(QWidget *parent) : QTableWidget(parent) { } void mousePressEvent(QMouseEvent *event); void mouseMoveEvent(QMouseEvent *event); void dragEnterEvent(QDragEnterEvent *event); void dragMoveEvent(QDragMoveEvent *event); void dropEvent(QDropEvent *event); }; #include "ui_brestore.h" #include "ui_restore.h" #include "ui_prerestore.h" enum { R_NONE, R_JOBIDLIST, R_JOBDATETIME }; /* * The pre-restore dialog selects the Job/Client to be restored * It really could use considerable enhancement. */ class prerestorePage : public Pages, public Ui::prerestoreForm { Q_OBJECT public: prerestorePage(); prerestorePage(QString &data, unsigned int); private slots: void okButtonPushed(); void cancelButtonPushed(); void job_name_change(int index); void recentChanged(int); void jobRadioClicked(bool); void jobidsRadioClicked(bool); void jobIdEditFinished(); private: int m_conn; int jobdefsFromJob(QStringList &, QString &); void buildPage(); bool checkJobIdList(); QString m_dataIn; unsigned int m_dataInType; }; /* * The restore dialog is brought up once we are in the Bacula * restore tree routines. It handles putting up a GUI tree * representation of the files to be restored. 
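 * Two hashes keep the directory pane and the Director's current working
 * directory in sync: m_dirPaths maps a full path to its tree item, and
 * m_dirTreeItems maps the tree item back to the path.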
*/ class restorePage : public Pages, public Ui::restoreForm { Q_OBJECT public: restorePage(int conn); ~restorePage(); void fillDirectory(); char *get_cwd(); bool cwd(const char *); private slots: void okButtonPushed(); void cancelButtonPushed(); void fileDoubleClicked(QTreeWidgetItem *item, int column); void directoryItemChanged(QTreeWidgetItem *, QTreeWidgetItem *); void upButtonPushed(); void unmarkButtonPushed(); void markButtonPushed(); void addDirectory(QString &); private: int m_conn; void writeSettings(); void readSettings(); QString m_cwd; QHash m_dirPaths; QHash m_dirTreeItems; QRegExp m_rx; QString m_splitText; }; class bRestore : public Pages, public Ui::bRestoreForm { Q_OBJECT public: bRestore(); ~bRestore(); void PgSeltreeWidgetClicked(); QString m_client; QString m_jobids; void get_info_from_selection(QStringList &fileids, QStringList &jobids, QStringList &dirids, QStringList &fileindexes); public slots: void setClient(); void setJob(); void showInfoForFile(QTableWidgetItem *); void applyLocation(); void clearVersions(QTableWidgetItem *); void clearRestoreList(); void runRestore(); void refreshView(); private: QString m_path; int64_t m_pathid; QTableWidgetItem *m_current; void setupPage(); bool m_populated; void displayFiles(int64_t pathid, QString path); void displayFileVersion(QString pathid, QString fnid, QString client, QString filename); }; class bRunRestore : public QDialog, public Ui::bRunRestoreForm { Q_OBJECT private: bRestore *brestore; QStringList m_fileids, m_jobids, m_dirids, m_findexes; public: bRunRestore(bRestore *parent); ~bRunRestore() {} void computeVolumeList(); int64_t runRestore(QString tablename); public slots: void useRegexp(); void UFRcb(); void computeRestore(); }; #endif /* _RESTORE_H_ */ bacula-15.0.3/src/qt-console/restore/prerestore.cpp0000644000175000017500000003072714771010173022127 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * preRestore -> dialog put up to determine the restore type * * Kern Sibbald, February MMVII * */ #include "bat.h" #include "restore.h" /* Constructor to have jobid list default in */ prerestorePage::prerestorePage(QString &data, unsigned int datatype) : Pages() { m_dataIn = data; m_dataInType = datatype; buildPage(); } /* Basic Constructor */ prerestorePage::prerestorePage() { m_dataIn = ""; m_dataInType = R_NONE; buildPage(); } /* * This is really the constructor */ void prerestorePage::buildPage() { m_name = tr("Restore"); setupUi(this); pgInitialize(); m_conn = m_console->notifyOff(); QTreeWidgetItem* thisitem = mainWin->getFromHash(this); thisitem->setIcon(0,QIcon(QString::fromUtf8(":images/restore.png"))); jobCombo->addItems(m_console->job_list); filesetCombo->addItems(m_console->fileset_list); clientCombo->addItems(m_console->client_list); poolCombo->addItem(tr("Any")); poolCombo->addItems(m_console->pool_list); storageCombo->addItems(m_console->storage_list); /* current or before . . 
Start out with current checked */ recentCheckBox->setCheckState(Qt::Checked); beforeDateTime->setDisplayFormat(mainWin->m_dtformat); beforeDateTime->setDateTime(QDateTime::currentDateTime()); beforeDateTime->setEnabled(false); selectFilesRadio->setChecked(true); if (m_dataInType == R_NONE) { selectJobRadio->setChecked(true); selectJobIdsRadio->setChecked(false); jobIdEdit->setText(tr("Comma separated list of JobIds")); jobIdEdit->setEnabled(false); } else if (m_dataInType == R_JOBIDLIST) { selectJobIdsRadio->setChecked(true); selectJobRadio->setChecked(false); jobIdEdit->setText(m_dataIn); jobRadioClicked(false); QStringList fieldlist; if (jobdefsFromJob(fieldlist, m_dataIn) == 1) { filesetCombo->setCurrentIndex(filesetCombo->findText(fieldlist[2], Qt::MatchExactly)); clientCombo->setCurrentIndex(clientCombo->findText(fieldlist[1], Qt::MatchExactly)); jobCombo->setCurrentIndex(jobCombo->findText(fieldlist[0], Qt::MatchExactly)); } } else if (m_dataInType == R_JOBDATETIME) { selectJobRadio->setChecked(true); selectJobIdsRadio->setChecked(false); jobIdEdit->setText(tr("Comma separated list of JobIds")); jobIdEdit->setEnabled(false); recentCheckBox->setCheckState(Qt::Unchecked); jobRadioClicked(true); QStringList fieldlist; if (jobdefsFromJob(fieldlist, m_dataIn) == 1) { filesetCombo->setCurrentIndex(filesetCombo->findText(fieldlist[2], Qt::MatchExactly)); clientCombo->setCurrentIndex(clientCombo->findText(fieldlist[1], Qt::MatchExactly)); jobCombo->setCurrentIndex(jobCombo->findText(fieldlist[0], Qt::MatchExactly)); beforeDateTime->setDateTime(QDateTime::fromString(fieldlist[3], mainWin->m_dtformat)); } } job_name_change(0); connect(jobCombo, SIGNAL(currentIndexChanged(int)), this, SLOT(job_name_change(int))); connect(okButton, SIGNAL(pressed()), this, SLOT(okButtonPushed())); connect(cancelButton, SIGNAL(pressed()), this, SLOT(cancelButtonPushed())); connect(recentCheckBox, SIGNAL(stateChanged(int)), this, SLOT(recentChanged(int))); connect(selectJobRadio, SIGNAL(clicked(bool)), this, SLOT(jobRadioClicked(bool))); connect(selectJobIdsRadio, SIGNAL(clicked(bool)), this, SLOT(jobidsRadioClicked(bool))); connect(jobIdEdit, SIGNAL(editingFinished()), this, SLOT(jobIdEditFinished())); dockPage(); setCurrent(); this->show(); if (mainWin->m_miscDebug) Pmsg0(000, "Leave preRestore\n"); } /* * Check to make sure all is ok then start either the select window or the restore * run window */ void prerestorePage::okButtonPushed() { if (!selectJobRadio->isChecked()) { if (!checkJobIdList()) { return; } } QString cmd; this->hide(); cmd = QString("restore"); cmd += " fileset=\"" + filesetCombo->currentText() + "\""; cmd += " client=\"" + clientCombo->currentText() + "\""; if (selectJobRadio->isChecked()) { if (poolCombo->currentText() != tr("Any") ){ cmd += " pool=\"" + poolCombo->currentText() + "\""; } cmd += " storage=\"" + storageCombo->currentText() + "\""; if (recentCheckBox->checkState() == Qt::Checked) { cmd += " current"; } else { QDateTime stamp = beforeDateTime->dateTime(); QString before = stamp.toString(mainWin->m_dtformat); cmd += " before=\"" + before + "\""; } } else { cmd += " jobid=\"" + jobIdEdit->text() + "\""; } if (selectFilesRadio->isChecked()) { if (!selectJobIdsRadio->isChecked()) cmd += " select"; } else { cmd += " all done"; } if (mainWin->m_commandDebug) { Pmsg1(000, "preRestore command \'%s\'\n", cmd.toUtf8().data()); } /* * Send off command that looks something like: * * restore fileset="Full Set" client="timmy-fd" * storage="File" current select */ m_console->write_dir(m_conn, 
cmd.toUtf8().data()); /* Note, do not turn notifier back on here ... */ if (selectFilesRadio->isChecked()) { setConsoleCurrent(); closeStackPage(); /* wait will be exited in the restore page constructor */ mainWin->waitEnter(); } else { closeStackPage(); mainWin->resetFocus(); } m_console->notify(m_conn, true); if (mainWin->m_miscDebug) Pmsg0(000, "preRestore OK pressed\n"); } /* * Destroy the instace of the class */ void prerestorePage::cancelButtonPushed() { mainWin->set_status(tr("Canceled")); this->hide(); m_console->notify(m_conn, true); closeStackPage(); } /* * Handle updating the other widget with job defaults when the job combo is changed. */ void prerestorePage::job_name_change(int index) { job_defaults job_defs; (void)index; job_defs.job_name = jobCombo->currentText(); if (m_console->get_job_defaults(m_conn, job_defs)) { filesetCombo->setCurrentIndex(filesetCombo->findText(job_defs.fileset_name, Qt::MatchExactly)); clientCombo->setCurrentIndex(clientCombo->findText(job_defs.client_name, Qt::MatchExactly)); poolCombo->setCurrentIndex(poolCombo->findText(tr("Any"), Qt::MatchExactly)); storageCombo->setCurrentIndex(storageCombo->findText(job_defs.store_name, Qt::MatchExactly)); } } /* * Handle the change of enabled of input widgets when the recent checkbox state * is changed. */ void prerestorePage::recentChanged(int state) { if ((state == Qt::Unchecked) && (selectJobRadio->isChecked())) { beforeDateTime->setEnabled(true); } else { beforeDateTime->setEnabled(false); } } /* * For when jobs list is to be used, return a list which is the needed items from * the job record */ int prerestorePage::jobdefsFromJob(QStringList &fieldlist, QString &jobId) { QString job, client, fileset; QString query(""); query = "SELECT DISTINCT Job.Name AS JobName, Client.Name AS Client," " FileSet.FileSet AS FileSet, Job.EndTime AS JobEnd," " Job.Type AS JobType" " From Job, Client, FileSet" " WHERE Job.FileSetId=FileSet.FileSetId AND Job.ClientId=Client.ClientId" " AND JobId=\'" + jobId + "\'"; if (mainWin->m_sqlDebug) { Pmsg1(000, "query = %s\n", query.toUtf8().data()); } QStringList results; if (m_console->sql_cmd(m_conn, query, results)) { QString field; /* Iterate through the lines of results, there should only be one. 
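 * Each line is tab-separated as JobName, Client, FileSet, JobEnd, JobType;
 * when more than one line comes back only the last one is kept, which is
 * acceptable only while the JobId is in fact unique (see FIXME below).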
*/ foreach (QString resultline, results) { fieldlist = resultline.split("\t"); } /* foreach resultline */ } /* if results from query */ /* ***FIXME*** This should not ever be getting more than one */ return results.count() >= 1; } /* * Function to handle when the jobidlist line edit input loses focus or is entered */ void prerestorePage::jobIdEditFinished() { checkJobIdList(); } bool prerestorePage::checkJobIdList() { /* Need to check and make sure the text is a comma separated list of integers */ QString line = jobIdEdit->text(); if (line.contains(" ")) { QMessageBox::warning(this, "Bat", tr("There can be no spaces in the text for the joblist.\n" "Press OK to continue?"), QMessageBox::Ok ); return false; } QStringList joblist = line.split(",", QString::SkipEmptyParts); bool allintokay = true, alljobok = true, allisjob = true; QString jobName(""), clientName(""); foreach (QString job, joblist) { bool intok; job.toInt(&intok, 10); if (intok) { /* are the integers representing a list of jobs all with the same job * and client */ QStringList fields; if (jobdefsFromJob(fields, job) == 1) { if (jobName == "") jobName = fields[0]; else if (jobName != fields[0]) alljobok = false; if (clientName == "") clientName = fields[1]; else if (clientName != fields[1]) alljobok = false; } else { allisjob = false; } } else { allintokay = false; } } if (!allintokay){ QMessageBox::warning(this, "Bat", tr("The string is not a comma separated list of integers.\n" "Press OK to continue?"), QMessageBox::Ok ); return false; } if (!allisjob){ QMessageBox::warning(this, tr("Bat"), tr("At least one of the jobs is not a valid job of type \"Backup\".\n" "Press OK to continue?"), QMessageBox::Ok ); return false; } if (!alljobok){ QMessageBox::warning(this, "Bat", tr("All jobs in the list must be of the same jobName and same client.\n" "Press OK to continue?"), QMessageBox::Ok ); return false; } return true; } /* * Handle the change of enabled of input widgets when the job radio buttons * are changed. 
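 * jobRadioClicked(true) enables the Job/FileSet/Client/Pool/Storage widgets
 * and disables the JobIds line edit; jobidsRadioClicked() below does the
 * mirror image, keeping the two radio buttons mutually exclusive.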
*/ void prerestorePage::jobRadioClicked(bool checked) { if (checked) { jobCombo->setEnabled(true); filesetCombo->setEnabled(true); clientCombo->setEnabled(true); poolCombo->setEnabled(true); storageCombo->setEnabled(true); recentCheckBox->setEnabled(true); if (!recentCheckBox->isChecked()) { beforeDateTime->setEnabled(true); } jobIdEdit->setEnabled(false); selectJobRadio->setChecked(true); selectJobIdsRadio->setChecked(false); } else { jobCombo->setEnabled(false); filesetCombo->setEnabled(false); clientCombo->setEnabled(false); poolCombo->setEnabled(false); storageCombo->setEnabled(false); recentCheckBox->setEnabled(false); beforeDateTime->setEnabled(false); jobIdEdit->setEnabled(true); selectJobRadio->setChecked(false); selectJobIdsRadio->setChecked(true); } if (mainWin->m_miscDebug) { Pmsg2(000, "jobRadio=%d jobidsRadio=%d\n", selectJobRadio->isChecked(), selectJobIdsRadio->isChecked()); } } void prerestorePage::jobidsRadioClicked(bool checked) { if (checked) { jobCombo->setEnabled(false); filesetCombo->setEnabled(false); clientCombo->setEnabled(false); poolCombo->setEnabled(false); storageCombo->setEnabled(false); recentCheckBox->setEnabled(false); beforeDateTime->setEnabled(false); jobIdEdit->setEnabled(true); selectJobRadio->setChecked(false); selectJobIdsRadio->setChecked(true); } else { jobCombo->setEnabled(true); filesetCombo->setEnabled(true); clientCombo->setEnabled(true); poolCombo->setEnabled(true); storageCombo->setEnabled(true); recentCheckBox->setEnabled(true); if (!recentCheckBox->isChecked()) { beforeDateTime->setEnabled(true); } jobIdEdit->setEnabled(false); selectJobRadio->setChecked(true); selectJobIdsRadio->setChecked(false); } if (mainWin->m_miscDebug) { Pmsg2(000, "jobRadio=%d jobidsRadio=%d\n", selectJobRadio->isChecked(), selectJobIdsRadio->isChecked()); } } bacula-15.0.3/src/qt-console/restore/prerestore.ui0000644000175000017500000002564314771010173021763 0ustar bsbuildbsbuild prerestoreForm 0 0 589 897 Form true 9 6 0 6 QFrame::NoFrame QFrame::Plain 0 6 All Files false Select Files false Qt::Horizontal 40 20 Qt::Horizontal 40 20 0 6 7 0 0 0 600 600 Job JobIds Qt::Vertical QSizePolicy::Fixed 20 21 0 6 2000 1 1 yyyy-mm-dd h:mm:ss true Qt::LeftToRight Use Most Recent File Set: filesetCombo Client: clientCombo Storage: storageCombo Before: beforeDateTime Pool: poolCombo Qt::Horizontal 262 21 0 6 Qt::Horizontal 171 20 OK Cancel Qt::Vertical 71 41 Qt::Vertical 387 161 Qt::Horizontal 41 139 Qt::Horizontal 21 139 0 6 Qt::Horizontal 71 21 16777215 30 <h3>Select Jobs</h3> Qt::Horizontal 81 20 bacula-15.0.3/src/qt-console/restore/restoretree.cpp0000644000175000017500000021527714771010173022305 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * * Restore Class * * Kern Sibbald, February MMVII * */ #include "bat.h" #include "restoretree.h" #include "pages.h" restoreTree::restoreTree() : Pages() { setupUi(this); m_name = tr("Version Browser"); pgInitialize(); QTreeWidgetItem* thisitem = mainWin->getFromHash(this); thisitem->setIcon(0, QIcon(QString::fromUtf8(":images/browse.png"))); m_populated = false; m_debugCnt = 0; m_debugTrap = true; QGridLayout *gridLayout = new QGridLayout(this); gridLayout->setSpacing(6); gridLayout->setMargin(9); gridLayout->setObjectName(QString::fromUtf8("gridLayout")); m_splitter = new QSplitter(Qt::Vertical, this); QScrollArea *area = new QScrollArea(); area->setObjectName(QString::fromUtf8("area")); area->setWidget(widget); area->setWidgetResizable(true); m_splitter->addWidget(area); m_splitter->addWidget(splitter); splitter->setChildrenCollapsible(false); gridLayout->addWidget(m_splitter, 0, 0, 1, 1); /* progress widgets */ prBar1->setVisible(false); prBar2->setVisible(false); prLabel1->setVisible(false); prLabel2->setVisible(false); /* Set Defaults for check and spin for limits */ limitCheckBox->setCheckState(mainWin->m_recordLimitCheck ? Qt::Checked : Qt::Unchecked); limitSpinBox->setValue(mainWin->m_recordLimitVal); daysCheckBox->setCheckState(mainWin->m_daysLimitCheck ? Qt::Checked : Qt::Unchecked); daysSpinBox->setValue(mainWin->m_daysLimitVal); readSettings(); dockPage(); setCurrent(); } restoreTree::~restoreTree() { writeSettings(); } /* * Called from the constructor to set up the page widgets and connections. */ void restoreTree::setupPage() { connect(refreshButton, SIGNAL(pressed()), this, SLOT(refreshButtonPushed())); connect(restoreButton, SIGNAL(pressed()), this, SLOT(restoreButtonPushed())); connect(jobCombo, SIGNAL(currentIndexChanged(int)), this, SLOT(jobComboChanged(int))); connect(jobCombo, SIGNAL(currentIndexChanged(int)), this, SLOT(updateRefresh())); connect(clientCombo, SIGNAL(currentIndexChanged(int)), this, SLOT(updateRefresh())); connect(fileSetCombo, SIGNAL(currentIndexChanged(int)), this, SLOT(updateRefresh())); connect(limitCheckBox, SIGNAL(stateChanged(int)), this, SLOT(updateRefresh())); connect(daysCheckBox, SIGNAL(stateChanged(int)), this, SLOT(updateRefresh())); connect(daysSpinBox, SIGNAL(valueChanged(int)), this, SLOT(updateRefresh())); connect(limitSpinBox, SIGNAL(valueChanged(int)), this, SLOT(updateRefresh())); connect(directoryTree, SIGNAL(currentItemChanged(QTreeWidgetItem *, QTreeWidgetItem *)), this, SLOT(directoryCurrentItemChanged(QTreeWidgetItem *, QTreeWidgetItem *))); connect(directoryTree, SIGNAL(itemExpanded(QTreeWidgetItem *)), this, SLOT(directoryItemExpanded(QTreeWidgetItem *))); connect(directoryTree, SIGNAL(itemChanged(QTreeWidgetItem *, int)), this, SLOT(directoryItemChanged(QTreeWidgetItem *, int))); connect(fileTable, SIGNAL(currentItemChanged(QTableWidgetItem *, QTableWidgetItem *)), this, SLOT(fileCurrentItemChanged(QTableWidgetItem *, QTableWidgetItem *))); connect(jobTable, SIGNAL(cellClicked(int, int)), this, SLOT(jobTableCellClicked(int, int))); QStringList titles = QStringList() << tr("Directories"); directoryTree->setHeaderLabels(titles); clientCombo->addItems(m_console->client_list); fileSetCombo->addItem(tr("Any")); fileSetCombo->addItems(m_console->fileset_list); jobCombo->addItem(tr("Any")); jobCombo->addItems(m_console->job_list); directoryTree->setContextMenuPolicy(Qt::ActionsContextMenu); } void restoreTree::updateRefresh() { if (mainWin->m_rtPopDirDebug) Pmsg2(000, "testing prev=\"%s\" current=\"%s\"\n", 
m_prevJobCombo.toUtf8().data(), jobCombo->currentText().toUtf8().data()); m_dropdownChanged = (m_prevJobCombo != jobCombo->currentText()) || (m_prevClientCombo != clientCombo->currentText()) || (m_prevFileSetCombo != fileSetCombo->currentText() || (m_prevLimitSpinBox != limitSpinBox->value()) || (m_prevDaysSpinBox != daysSpinBox->value()) || (m_prevLimitCheckState != limitCheckBox->checkState()) || (m_prevDaysCheckState != daysCheckBox->checkState()) ); if (m_dropdownChanged) { if (mainWin->m_rtPopDirDebug) Pmsg0(000, "In restoreTree::updateRefresh Is CHANGED\n"); refreshLabel->setText(tr("Refresh From Re-Select")); } else { if (mainWin->m_rtPopDirDebug) Pmsg0(000, "In restoreTree::updateRefresh Is not Changed\n"); refreshLabel->setText(tr("Refresh From JobChecks")); } } /* * When refresh button is pushed, perform a query getting the directories and * use parseDirectory and addDirectory to populate the directory tree with items. */ void restoreTree::populateDirectoryTree() { m_debugTrap = true; m_debugCnt = 0; m_slashTrap = false; m_dirPaths.clear(); directoryTree->clear(); fileTable->clear(); fileTable->setRowCount(0); fileTable->setColumnCount(0); versionTable->clear(); versionTable->setRowCount(0); versionTable->setColumnCount(0); m_fileExceptionHash.clear(); m_fileExceptionMulti.clear(); m_versionExceptionHash.clear(); m_directoryIconStateHash.clear(); updateRefresh(); int taskcount = 3, ontask = 1; if (m_dropdownChanged) taskcount += 1; /* Set progress bars and repaint */ prBar1->setVisible(true); prBar1->setRange(0,taskcount); prBar1->setValue(0); prLabel1->setText(tr("Task %1 of %2").arg(ontask).arg(taskcount)); prLabel1->setVisible(true); prBar2->setVisible(true); prBar2->setRange(0,0); prLabel2->setText(tr("Querying Database")); prLabel2->setVisible(true); repaint(); if (m_dropdownChanged) { m_prevJobCombo = jobCombo->currentText(); m_prevClientCombo = clientCombo->currentText(); m_prevFileSetCombo = fileSetCombo->currentText(); m_prevLimitSpinBox = limitSpinBox->value(); m_prevDaysSpinBox = daysSpinBox->value(); m_prevLimitCheckState = limitCheckBox->checkState(); m_prevDaysCheckState = daysCheckBox->checkState(); updateRefresh(); prBar1->setValue(ontask++); prLabel1->setText(tr("Task %1 of %2").arg(ontask).arg(taskcount)); prBar2->setValue(0); prBar2->setRange(0,0); prLabel2->setText(tr("Querying Jobs")); repaint(); populateJobTable(); } setJobsCheckedList(); if (mainWin->m_rtPopDirDebug) Pmsg0(000, "Repopulating from checks in Job Table\n"); if (m_checkedJobs != "") { /* now create the query to get the list of paths */ QString cmd = "SELECT DISTINCT Path.Path AS Path, File.PathId AS PathId" " FROM File" " INNER JOIN Path ON (File.PathId=Path.PathId)" " WHERE File.Filename='' AND File.Jobid IN (" + m_checkedJobs + ")"; if (mainWin->m_sqlDebug) Pmsg1(000, "Query cmd : %s\n", cmd.toUtf8().data()); prBar1->setValue(ontask++); prLabel1->setText(tr("Task %1 of %2").arg(ontask).arg(taskcount)); prBar2->setValue(0); prBar2->setRange(0,0); prLabel2->setText(tr("Querying for Directories")); repaint(); QStringList results; m_directoryPathIdHash.clear(); bool querydone = false; if (m_console->sql_cmd(cmd, results)) { if (!querydone) { querydone = true; prLabel2->setText(tr("Processing Directories")); prBar2->setRange(0,results.count()); repaint(); } if (mainWin->m_miscDebug) Pmsg1(000, "Done with query %i results\n", results.count()); QStringList fieldlist; foreach(const QString &resultline, results) { /* Update progress bar periodically */ if ((++m_debugCnt && 0x3FF) == 0) { 
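/* Throttled progress update: presumably intended as a bitwise mask,
 * (++m_debugCnt & 0x3FF) == 0, so the bar advances once per 1024 result
 * rows; with the logical '&&' as written the condition never holds and
 * this branch is not taken. */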
prBar2->setValue(m_debugCnt); } fieldlist = resultline.split("\t"); int fieldcnt = 0; /* Iterate through fields in the record */ foreach (const QString &field, fieldlist) { if (fieldcnt == 0 ) { parseDirectory(field); } else if (fieldcnt == 1) { bool ok; int pathid = field.toInt(&ok, 10); if (ok) m_directoryPathIdHash.insert(fieldlist[0], pathid); } fieldcnt += 1; } } } else { return; } } else { QMessageBox::warning(this, "Bat", tr("No jobs were selected in the job query !!!.\n" "Press OK to continue"), QMessageBox::Ok ); } prBar1->setVisible(false); prBar2->setVisible(false); prLabel1->setVisible(false); prLabel2->setVisible(false); } /* * Function to set m_checkedJobs from the jobs that are checked in the table * of jobs */ void restoreTree::setJobsCheckedList() { m_JobsCheckedList = ""; bool first = true; /* Update the items in the version table */ int cnt = jobTable->rowCount(); for (int row=0; rowitem(row, 0); if (jobItem->checkState() == Qt::Checked) { if (!first) m_JobsCheckedList += ","; m_JobsCheckedList += jobItem->text(); first = false; jobItem->setBackground(Qt::green); } else { if (jobItem->flags()) jobItem->setBackground(Qt::gray); else jobItem->setBackground(Qt::darkYellow); } } m_checkedJobs = m_JobsCheckedList; } /* * Function to parse a directory into all possible subdirectories, then add to * The tree. */ void restoreTree::parseDirectory(const QString &dir_in) { // bail out if already processed if (m_dirPaths.contains(dir_in)) return; // search for parent... int pos=dir_in.lastIndexOf("/",-2); if (pos != -1) { QString parent=dir_in.left(pos+1); QString subdir=dir_in.mid(pos+1); QTreeWidgetItem *item = NULL; QTreeWidgetItem *parentItem = m_dirPaths.value(parent); if (parentItem==0) { // recurse to build parent... parseDirectory(parent); parentItem = m_dirPaths.value(parent); } /* new directories to add */ item = new QTreeWidgetItem(parentItem); item->setText(0, subdir); item->setData(0, Qt::UserRole, QVariant(dir_in)); item->setCheckState(0, Qt::Unchecked); /* Store the current state of the check status in column 1, which at * this point has no text*/ item->setData(1, Qt::UserRole, QVariant(Qt::Unchecked)); m_dirPaths.insert(dir_in,item); } else { QTreeWidgetItem *item = new QTreeWidgetItem(directoryTree); item->setText(0, dir_in); item->setData(0, Qt::UserRole, QVariant(dir_in)); item->setData(1, Qt::UserRole, QVariant(Qt::Unchecked)); item->setIcon(0, QIcon(QString::fromUtf8(":images/folder.png"))); m_dirPaths.insert(dir_in,item); } } /* * Virtual function which is called when this page is visible on the stack */ void restoreTree::currentStackItem() { if(!m_populated) { setupPage(); m_populated = true; } } /* * Populate the tree when refresh button pushed. 
*/ void restoreTree::refreshButtonPushed() { populateDirectoryTree(); } /* * Set the values of non-job combo boxes to the job defaults */ void restoreTree::jobComboChanged(int) { if (jobCombo->currentText() == tr("Any")) { fileSetCombo->setCurrentIndex(fileSetCombo->findText(tr("Any"), Qt::MatchExactly)); return; } job_defaults job_defs; //(void)index; job_defs.job_name = jobCombo->currentText(); if (m_console->get_job_defaults(job_defs)) { fileSetCombo->setCurrentIndex(fileSetCombo->findText(job_defs.fileset_name, Qt::MatchExactly)); clientCombo->setCurrentIndex(clientCombo->findText(job_defs.client_name, Qt::MatchExactly)); } } /* * Function to populate the file list table */ void restoreTree::directoryCurrentItemChanged(QTreeWidgetItem *item, QTreeWidgetItem *) { if (item == NULL) return; fileTable->clear(); /* Also clear the version table here */ versionTable->clear(); versionFileLabel->setText(""); versionTable->setRowCount(0); versionTable->setColumnCount(0); QStringList headerlist = (QStringList() << tr("File Name") << tr("Filename Id")); fileTable->setColumnCount(headerlist.size()); fileTable->setHorizontalHeaderLabels(headerlist); fileTable->setRowCount(0); m_fileCheckStateList.clear(); disconnect(fileTable, SIGNAL(itemChanged(QTableWidgetItem *)), this, SLOT(fileTableItemChanged(QTableWidgetItem *))); QBrush blackBrush(Qt::black); QString directory = item->data(0, Qt::UserRole).toString(); directoryLabel->setText(tr("Present Working Directory: %1").arg(directory)); int pathid = m_directoryPathIdHash.value(directory, -1); if (pathid != -1) { QString cmd = "SELECT DISTINCT File.Filename AS FileName, File.FileId AS FilenameId" " FROM File " " WHERE File.PathId=" + QString("%1").arg(pathid) + " AND File.Jobid IN (" + m_checkedJobs + ")" " AND File.Filename!=''" " ORDER BY File.FileName"; if (mainWin->m_sqlDebug) Pmsg1(000, "Query cmd : %s\n", cmd.toUtf8().data()); QStringList results; if (m_console->sql_cmd(cmd, results)) { QTableWidgetItem* tableItem; QString field; QStringList fieldlist; fileTable->setRowCount(results.size()); int row = 0; /* Iterate through the record returned from the query */ foreach (QString resultline, results) { /* Iterate through fields in the record */ int column = 0; fieldlist = resultline.split("\t"); foreach (field, fieldlist) { field = field.trimmed(); /* strip leading & trailing spaces */ tableItem = new QTableWidgetItem(field, 1); /* Possible flags are Qt::ItemFlags flag = Qt::ItemIsSelectable | Qt::ItemIsEditablex * | Qt::ItemIsDragEnabled | Qt::ItemIsDropEnabled | Qt::ItemIsUserCheckable * | Qt::ItemIsEnabled | Qt::ItemIsTristate; */ tableItem->setForeground(blackBrush); /* Just in case a column ever gets added */ if (mainWin->m_sqlDebug) Pmsg1(000, "Column=%d\n", column); if (column == 0) { Qt::ItemFlags flag = Qt::ItemIsUserCheckable | Qt::ItemIsEnabled | Qt::ItemIsTristate; tableItem->setFlags(flag); tableItem->setData(Qt::UserRole, QVariant(directory)); fileTable->setItem(row, column, tableItem); m_fileCheckStateList.append(Qt::Unchecked); tableItem->setCheckState(Qt::Unchecked); } else if (column == 1) { Qt::ItemFlags flag = Qt::ItemIsEnabled; tableItem->setFlags(flag); bool ok; // We use FileId instead of the old FilenameId int filenameid = field.toInt(&ok, 10); if (!ok) filenameid = -1; tableItem->setData(Qt::UserRole, QVariant(filenameid)); fileTable->setItem(row, column, tableItem); } column++; } row++; } fileTable->setRowCount(row); } fileTable->resizeColumnsToContents(); fileTable->resizeRowsToContents(); 
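/*
 * For illustration only (pathid and job ids are hypothetical): with pathid 42
 * and jobs 101,102 checked, the query built above expands to
 *
 *   SELECT DISTINCT File.Filename AS FileName, File.FileId AS FilenameId
 *     FROM File
 *    WHERE File.PathId=42 AND File.Jobid IN (101,102)
 *      AND File.Filename!=''
 *    ORDER BY File.FileName
 *
 * i.e. one row per distinct file name in the selected directory, restricted
 * to the jobs checked in the job table.  The FileId lands in (hidden)
 * column 1 and is only used later to look up the versions of a file.
 */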
fileTable->verticalHeader()->hide(); fileTable->hideColumn(1); if (mainWin->m_rtDirCurICDebug) Pmsg0(000, "will update file table checks\n"); updateFileTableChecks(); } else if (mainWin->m_sqlDebug) Pmsg1(000, "did not perform query, pathid=%i not found\n", pathid); connect(fileTable, SIGNAL(itemChanged(QTableWidgetItem *)), this, SLOT(fileTableItemChanged(QTableWidgetItem *))); } /* * Function to populate the version table */ void restoreTree::fileCurrentItemChanged(QTableWidgetItem *currentFileTableItem, QTableWidgetItem *) { if (currentFileTableItem == NULL) return; int currentRow = fileTable->row(currentFileTableItem); QTableWidgetItem *fileTableItem = fileTable->item(currentRow, 0); QTableWidgetItem *fileNameIdTableItem = fileTable->item(currentRow, 1); int fileNameId = fileNameIdTableItem->data(Qt::UserRole).toInt(); m_versionCheckStateList.clear(); disconnect(versionTable, SIGNAL(itemChanged(QTableWidgetItem *)), this, SLOT(versionTableItemChanged(QTableWidgetItem *))); QString file = fileTableItem->text(); versionFileLabel->setText(file); QString directory = fileTableItem->data(Qt::UserRole).toString(); QBrush blackBrush(Qt::black); QStringList headerlist = (QStringList() << tr("Job Id") << tr("Type") << tr("End Time") << tr("Hash") << tr("FileId") << tr("Job Type") << tr("First Volume")); versionTable->clear(); versionTable->setColumnCount(headerlist.size()); versionTable->setHorizontalHeaderLabels(headerlist); versionTable->setRowCount(0); int pathid = m_directoryPathIdHash.value(directory, -1); if ((pathid != -1) && (fileNameId != -1)) { QString cmd = "SELECT Job.JobId AS JobId, Job.Level AS Type," " Job.EndTime AS EndTime, File.MD5 AS MD5," " File.FileId AS FileId, Job.Type AS JobType," " (SELECT Media.VolumeName FROM JobMedia JOIN Media ON JobMedia.MediaId=Media.MediaId WHERE JobMedia.JobId=Job.JobId ORDER BY JobMediaId LIMIT 1) AS FirstVolume" " FROM File" " INNER JOIN Path ON (Path.PathId=File.PathId)" " INNER JOIN Job ON (File.JobId=Job.JobId)" " WHERE Path.PathId=" + QString("%1").arg(pathid) + //" AND Filename.Name='" + file + "'" " AND File.Filename = (SELECT Filename FROM File AS F1 WHERE FileId = " + QString("%1").arg(fileNameId) + ") " + " AND Job.Jobid IN (" + m_checkedJobs + ")" " ORDER BY Job.EndTime DESC"; if (mainWin->m_sqlDebug) Pmsg1(000, "Query cmd : %s\n", cmd.toUtf8().data()); QStringList results; if (m_console->sql_cmd(cmd, results)) { QTableWidgetItem* tableItem; QString field; QStringList fieldlist; versionTable->setRowCount(results.size()); int row = 0; /* Iterate through the record returned from the query */ foreach (QString resultline, results) { fieldlist = resultline.split("\t"); int column = 0; /* remove directory */ if (fieldlist[0].trimmed() != "") { /* Iterate through fields in the record */ foreach (field, fieldlist) { field = field.trimmed(); /* strip leading & trailing spaces */ if (column == 5 ) { QByteArray jtype(field.trimmed().toLatin1()); if (jtype.size()) { field = job_type_to_str(jtype[0]); } } tableItem = new QTableWidgetItem(field, 1); tableItem->setFlags(0); tableItem->setForeground(blackBrush); tableItem->setData(Qt::UserRole, QVariant(directory)); versionTable->setItem(row, column, tableItem); if (mainWin->m_sqlDebug) Pmsg1(000, "Column=%d\n", column); if (column == 0) { Qt::ItemFlags flag = Qt::ItemIsUserCheckable | Qt::ItemIsEnabled | Qt::ItemIsTristate; tableItem->setFlags(flag); m_versionCheckStateList.append(Qt::Unchecked); tableItem->setCheckState(Qt::Unchecked); } column++; } row++; } } } 
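/*
 * For illustration only (ids are hypothetical): with pathid 42, fileNameId 555
 * and jobs 101,102 checked, the version query above expands to
 *
 *   SELECT Job.JobId AS JobId, Job.Level AS Type, Job.EndTime AS EndTime,
 *          File.MD5 AS MD5, File.FileId AS FileId, Job.Type AS JobType,
 *          (SELECT Media.VolumeName FROM JobMedia
 *             JOIN Media ON JobMedia.MediaId=Media.MediaId
 *            WHERE JobMedia.JobId=Job.JobId
 *            ORDER BY JobMediaId LIMIT 1) AS FirstVolume
 *     FROM File
 *     INNER JOIN Path ON (Path.PathId=File.PathId)
 *     INNER JOIN Job ON (File.JobId=Job.JobId)
 *    WHERE Path.PathId=42
 *      AND File.Filename = (SELECT Filename FROM File AS F1 WHERE FileId = 555)
 *      AND Job.Jobid IN (101,102)
 *    ORDER BY Job.EndTime DESC
 *
 * i.e. one row per checked backup job that contains a version of this file,
 * newest first.
 */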
versionTable->resizeColumnsToContents(); versionTable->resizeRowsToContents(); versionTable->verticalHeader()->hide(); updateVersionTableChecks(); } else { if (mainWin->m_sqlDebug) Pmsg2(000, "not querying : pathid=%i fileNameId=%i\n", pathid, fileNameId); } connect(versionTable, SIGNAL(itemChanged(QTableWidgetItem *)), this, SLOT(versionTableItemChanged(QTableWidgetItem *))); } /* * Save user settings associated with this page */ void restoreTree::writeSettings() { QSettings settings(m_console->m_dir->name(), "bat"); settings.beginGroup(m_groupText); settings.setValue(m_splitText1, m_splitter->saveState()); settings.setValue(m_splitText2, splitter->saveState()); settings.endGroup(); } /* * Read and restore user settings associated with this page */ void restoreTree::readSettings() { m_groupText = tr("RestoreTreePage"); m_splitText1 = "splitterSizes1_3"; m_splitText2 = "splitterSizes2_3"; QSettings settings(m_console->m_dir->name(), "bat"); settings.beginGroup(m_groupText); if (settings.contains(m_splitText1)) { m_splitter->restoreState(settings.value(m_splitText1).toByteArray()); } if (settings.contains(m_splitText2)) { splitter->restoreState(settings.value(m_splitText2).toByteArray()); } settings.endGroup(); } /* * This is a funcion to accomplish the one thing I struggled to figure out what * was taking so long. It add the icons, but after the tree is made. Seemed to * work fast after changing from png to png file for graphic. */ void restoreTree::directoryItemExpanded(QTreeWidgetItem *item) { int childCount = item->childCount(); for (int i=0; ichild(i); if (child->icon(0).isNull()) child->setIcon(0, QIcon(QString::fromUtf8(":images/folder.png"))); } } /* * Show what jobs meet the criteria and are being used to * populate the directory tree and file and version tables. 
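 *
 * For illustration only (client, date and limits are hypothetical): with
 * client "zog-fd", Job and FileSet left at "Any", a 7 day window and a
 * record limit of 150, the query built below takes roughly this form:
 *
 *   SELECT Job.Jobid AS Id, Job.EndTime AS EndTime, Job.Level AS Level,
 *          Job.Type AS Type, Job.Name AS JobName, Job.purgedfiles AS Purged
 *     FROM Job
 *     INNER JOIN Client ON (Job.ClientId=Client.ClientId)
 *     INNER JOIN FileSet ON (Job.FileSetId=FileSet.FileSetId)
 *    WHERE Job.JobStatus IN ('T','W') AND Job.Type='B'
 *      AND Client.Name='zog-fd'
 *      AND Job.Starttime>'2025-03-01T00:00:00'
 *    ORDER BY Job.EndTime DESC LIMIT 150
 *
 * Jobs whose files have been purged are still listed, but are left unchecked
 * and cannot be selected.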
*/ void restoreTree::populateJobTable() { QBrush blackBrush(Qt::black); if (mainWin->m_rtPopDirDebug) Pmsg0(000, "Repopulating the Job Table\n"); QStringList headerlist = (QStringList() << tr("Job Id") << tr("End Time") << tr("Level") << tr("Type") << tr("Name") << tr("Purged") << tr("TU") << tr("TD")); m_toggleUpIndex = headerlist.indexOf(tr("TU")); m_toggleDownIndex = headerlist.indexOf(tr("TD")); int purgedIndex = headerlist.indexOf(tr("Purged")); int typeIndex = headerlist.indexOf(tr("Type")); jobTable->clear(); jobTable->setColumnCount(headerlist.size()); jobTable->setHorizontalHeaderLabels(headerlist); QString jobQuery = "SELECT Job.Jobid AS Id, Job.EndTime AS EndTime," " Job.Level AS Level, Job.Type AS Type," " Job.Name AS JobName, Job.purgedfiles AS Purged" " FROM Job" /* INNER JOIN FileSet eliminates all restore jobs */ " INNER JOIN Client ON (Job.ClientId=Client.ClientId)" " INNER JOIN FileSet ON (Job.FileSetId=FileSet.FileSetId)" " WHERE" " Job.JobStatus IN ('T','W') AND Job.Type='B' AND" " Client.Name='" + clientCombo->currentText() + "'"; if ((jobCombo->currentIndex() >= 0) && (jobCombo->currentText() != tr("Any"))) { jobQuery += " AND Job.name = '" + jobCombo->currentText() + "'"; } if ((fileSetCombo->currentIndex() >= 0) && (fileSetCombo->currentText() != tr("Any"))) { jobQuery += " AND FileSet.FileSet='" + fileSetCombo->currentText() + "'"; } /* If Limit check box For limit by days is checked */ if (daysCheckBox->checkState() == Qt::Checked) { QDateTime stamp = QDateTime::currentDateTime().addDays(-daysSpinBox->value()); QString since = stamp.toString(Qt::ISODate); jobQuery += " AND Job.Starttime>'" + since + "'"; } //jobQuery += " AND Job.purgedfiles=0"; jobQuery += " ORDER BY Job.EndTime DESC"; /* If Limit check box for limit records returned is checked */ if (limitCheckBox->checkState() == Qt::Checked) { QString limit; limit.setNum(limitSpinBox->value()); jobQuery += " LIMIT " + limit; } if (mainWin->m_sqlDebug) Pmsg1(000, "Query cmd : %s\n", jobQuery.toUtf8().data()); QStringList results; if (m_console->sql_cmd(jobQuery, results)) { QTableWidgetItem* tableItem; QString field; QStringList fieldlist; jobTable->setRowCount(results.size()); int row = 0; /* Iterate through the record returned from the query */ foreach (QString resultline, results) { fieldlist = resultline.split("\t"); int column = 0; /* remove directory */ if (fieldlist[0].trimmed() != "") { /* Iterate through fields in the record */ foreach (field, fieldlist) { field = field.trimmed(); /* strip leading & trailing spaces */ if (field != "") { if (column == typeIndex) { QByteArray jtype(field.trimmed().toLatin1()); if (jtype.size()) { field = job_type_to_str(jtype[0]); } } tableItem = new QTableWidgetItem(field, 1); tableItem->setFlags(0); tableItem->setForeground(blackBrush); jobTable->setItem(row, column, tableItem); if (mainWin->m_sqlDebug) Pmsg1(000, "Column=%d\n", column); if (column == 0) { bool ok; int purged = fieldlist[purgedIndex].toInt(&ok, 10); if (!((ok) && (purged == 1))) { Qt::ItemFlags flag = Qt::ItemIsUserCheckable | Qt::ItemIsEnabled | Qt::ItemIsTristate; tableItem->setFlags(flag); tableItem->setCheckState(Qt::Checked); tableItem->setBackground(Qt::green); } else { tableItem->setFlags(0); tableItem->setCheckState(Qt::Unchecked); } } column++; } } tableItem = new QTableWidgetItem(QIcon(QString::fromUtf8(":images/go-up.png")), "", 1); tableItem->setFlags(0); tableItem->setForeground(blackBrush); jobTable->setItem(row, column, tableItem); column++; tableItem = new 
QTableWidgetItem(QIcon(QString::fromUtf8(":images/go-down.png")), "", 1); tableItem->setFlags(0); tableItem->setForeground(blackBrush); jobTable->setItem(row, column, tableItem); row++; } } } jobTable->resizeColumnsToContents(); jobTable->resizeRowsToContents(); jobTable->verticalHeader()->hide(); jobTable->hideColumn(purgedIndex); } void restoreTree::jobTableCellClicked(int row, int column) { if (column == m_toggleUpIndex){ int cnt; for (cnt=0; cntitem(cnt, 0); if (item->flags()) { Qt::CheckState state = item->checkState(); if (state == Qt::Checked) item->setCheckState(Qt::Unchecked); else if (state == Qt::Unchecked) item->setCheckState(Qt::Checked); } } } if (column == m_toggleDownIndex){ int cnt, max = jobTable->rowCount(); for (cnt=row; cntitem(cnt, 0); if (item->flags()) { Qt::CheckState state = item->checkState(); if (state == Qt::Checked) item->setCheckState(Qt::Unchecked); else if (state == Qt::Unchecked) item->setCheckState(Qt::Checked); } } } } /* * When a directory item is "changed" check the state of the checkable item * to see if it is different than what it was which is stored in Qt::UserRole * of the 2nd column, column 1, of the tree widget. */ void restoreTree::directoryItemChanged(QTreeWidgetItem *item, int /*column*/) { Qt::CheckState prevState = (Qt::CheckState)item->data(1, Qt::UserRole).toInt(); Qt::CheckState curState = item->checkState(0); QTreeWidgetItem* parent = item->parent(); Qt::CheckState parState; if (parent) parState = parent->checkState(0); else parState = (Qt::CheckState)3; if (mainWin->m_rtDirICDebug) { QString msg = QString("directory item OBJECT has changed prev=%1 cur=%2 par=%3 dir=%4\n") .arg(prevState).arg(curState).arg(parState).arg(item->text(0)); Pmsg1(000, "%s", msg.toUtf8().data()); } /* I only care when the check state changes */ if (prevState == curState) { if (mainWin->m_rtDirICDebug) Pmsg0(000, "Returning Early\n"); return; } if ((prevState == Qt::Unchecked) && (curState == Qt::Checked) && (parState != Qt::Unchecked)) { if (mainWin->m_rtDirICDebug) Pmsg0(000, "Disconnected Setting to Qt::PartiallyChecked\n"); directoryTreeDisconnectedSet(item, Qt::PartiallyChecked); curState = Qt::PartiallyChecked; } if ((prevState == Qt::PartiallyChecked) && (curState == Qt::Checked)) { if (mainWin->m_rtDirICDebug) Pmsg0(000, "Disconnected Setting to Qt::Unchecked\n"); directoryTreeDisconnectedSet(item, Qt::Unchecked); curState = Qt::Unchecked; } if (mainWin->m_rtDirICDebug) { QString msg = QString("directory item CHECKSTATE has changed prev=%1 cur=%2 par=%3 dir=%4\n") .arg(prevState).arg(curState).arg(parState).arg(item->text(0)); Pmsg1(000, "%s", msg.toUtf8().data()); } item->setData(1, Qt::UserRole, QVariant(curState)); Qt::CheckState childState = curState; if (childState == Qt::Checked) childState = Qt::PartiallyChecked; setCheckofChildren(item, childState); /* Remove items from the exception lists. 
The multi exception list is my index * of what exceptions can be removed when the directory is known*/ QString directory = item->data(0, Qt::UserRole).toString(); QStringList fullPathList = m_fileExceptionMulti.values(directory); int fullPathListCount = fullPathList.count(); if ((mainWin->m_rtDirICDebug) && fullPathListCount) Pmsg2(000, "Will attempt to remove file exceptions for %s count %i\n", directory.toUtf8().data(), fullPathListCount); foreach (QString fullPath, fullPathList) { /* If there is no value in the hash for the key fullPath a value of 3 will be returned * which will match no Qt::xxx values */ Qt::CheckState hashState = m_fileExceptionHash.value(fullPath, (Qt::CheckState)3); if (mainWin->m_rtDirICDebug) Pmsg2(000, "hashState=%i childState=%i\n", hashState, childState); if (hashState == Qt::Unchecked) { fileExceptionRemove(fullPath, directory); m_versionExceptionHash.remove(fullPath); if (mainWin->m_rtDirICDebug) Pmsg0(000, "Attempted Removal A\n"); } if (hashState == Qt::Checked) { fileExceptionRemove(fullPath, directory); m_versionExceptionHash.remove(fullPath); if (mainWin->m_rtDirICDebug) Pmsg0(000, "Attempted Removal B\n"); } } if (item == directoryTree->currentItem()) { if (mainWin->m_rtDirICDebug) Pmsg0(000, "Will attempt to update File Table Checks\n"); updateFileTableChecks(); versionTable->clear(); versionTable->setRowCount(0); versionTable->setColumnCount(0); } if (mainWin->m_rtDirICDebug) Pmsg0(000, "Returning At End\n"); } /* * When a directory item check state is changed, this function iterates through * all subdirectories and sets all to the passed state, which is either partially * checked or unchecked. */ void restoreTree::setCheckofChildren(QTreeWidgetItem *item, Qt::CheckState state) { int childCount; childCount = item->childCount(); for (int i=0; ichild(i); child->setData(1, Qt::UserRole, QVariant(state)); child->setCheckState(0, state); setCheckofChildren(child, state); } } /* * When a File Table Item is "changed" check to see if the state of the checkable * item has changed which is stored in m_fileCheckStateList * If changed store in a hash m_fileExceptionHash that whether this file should be * restored or not. 
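 * The hash maps the file's full path to the desired Qt::CheckState and only
 * holds entries whose state differs from the default implied by the parent
 * directory; m_fileExceptionMulti indexes the same entries by directory so
 * they can be removed again when the directory's own state changes.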
* Called as a slot, connected after populated (after directory current changed called) */ void restoreTree::fileTableItemChanged(QTableWidgetItem *item) { /* get the previous and current check states */ int row = fileTable->row(item); Qt::CheckState prevState; /* prevent a segfault */ prevState = m_fileCheckStateList[row]; Qt::CheckState curState = item->checkState(); /* deterimine the default state from the state of the directory */ QTreeWidgetItem *dirTreeItem = directoryTree->currentItem(); Qt::CheckState dirState = (Qt::CheckState)dirTreeItem->data(1, Qt::UserRole).toInt(); Qt::CheckState defState = Qt::PartiallyChecked; if (dirState == Qt::Unchecked) defState = Qt::Unchecked; /* determine if it is already in the m_fileExceptionHash */ QString directory = directoryTree->currentItem()->data(0, Qt::UserRole).toString(); QString file = item->text(); QString fullPath = directory + file; Qt::CheckState hashState = m_fileExceptionHash.value(fullPath, (Qt::CheckState)3); int verJobNum = m_versionExceptionHash.value(fullPath, 0); if (mainWin->m_rtFileTabICDebug) { QString msg = QString("filerow=%1 prev=%2 cur=%3 def=%4 hash=%5 dir=%6 verJobNum=%7\n") .arg(row).arg(prevState).arg(curState).arg(defState).arg(hashState).arg(dirState).arg(verJobNum); Pmsg1(000, "%s", msg.toUtf8().data()); } /* Remove the hash if currently checked previously unchecked and directory is checked or partial */ if ((prevState == Qt::Checked) && (curState == Qt::Unchecked) && (dirState == Qt::Unchecked)) { /* it can behave as defaulted so current of unchecked is fine */ if (mainWin->m_rtFileTabICDebug) Pmsg0(000, "Will fileExceptionRemove and m_versionExceptionHash.remove here\n"); fileExceptionRemove(fullPath, directory); m_versionExceptionHash.remove(fullPath); } else if ((prevState == Qt::PartiallyChecked) && (curState == Qt::Checked) && (dirState != Qt::Unchecked) && (verJobNum == 0)) { if (mainWin->m_rtFileTabICDebug) Pmsg0(000, "Will fileExceptionInsert here\n"); fileExceptionInsert(fullPath, directory, Qt::Unchecked); } else if ((prevState == Qt::Unchecked) && (curState == Qt::Checked) && (dirState != Qt::Unchecked) && (verJobNum == 0) && (defState == Qt::PartiallyChecked)) { /* filerow=2 prev=0 cur=2 def=1 hash=0 dir=2 verJobNum=0 */ if (mainWin->m_rtFileTabICDebug) Pmsg0(000, "Will fileExceptionRemove here\n"); fileExceptionRemove(fullPath, directory); } else if ((prevState == Qt::Checked) && (curState == Qt::Unchecked) && (defState == Qt::PartiallyChecked) && (verJobNum != 0) && (hashState == Qt::Checked)) { /* Check dir, check version, attempt uncheck in file * filerow=4 prev=2 cur=0 def=1 hash=2 dir=2 verJobNum=53 */ if (mainWin->m_rtFileTabICDebug) Pmsg0(000, "Will fileExceptionRemove and m_versionExceptionHash.remove here\n"); fileExceptionRemove(fullPath, directory); m_versionExceptionHash.remove(fullPath); } else if ((prevState == Qt::Unchecked) && (curState == Qt::Checked) && (dirState != Qt::Unchecked) && (verJobNum == 0)) { /* filerow=0 prev=0 cur=2 def=1 hash=0 dirState=2 verJobNum */ if (mainWin->m_rtFileTabICDebug) Pmsg0(000, "Will Not remove here\n"); } else if (prevState != curState) { if (mainWin->m_rtFileTabICDebug) Pmsg2(000, " THE STATE OF THE Check has changed, Setting StateList[%i] to %i\n", row, curState); /* A user did not set the check state to Partially checked, ignore if so */ if (curState != Qt::PartiallyChecked) { if ((defState == Qt::Unchecked) && (prevState == Qt::PartiallyChecked) && (curState == Qt::Unchecked)) { if (mainWin->m_rtFileTabICDebug) Pmsg0(000, " got here\n"); } else { 
if (mainWin->m_rtFileTabICDebug) Pmsg2(000, " Inserting into m_fileExceptionHash %s, %i\n", fullPath.toUtf8().data(), curState); fileExceptionInsert(fullPath, directory, curState); } } else { if (mainWin->m_rtFileTabICDebug) Pmsg1(000, "Removing version hash for %s\n", fullPath.toUtf8().data()); /* programattically been changed back to a default state of Qt::PartiallyChecked remove the version hash here */ m_versionExceptionHash.remove(fullPath); } } updateFileTableChecks(); updateVersionTableChecks(); } /* * function to insert keys and values to both m_fileExceptionHash and m_fileExceptionMulti */ void restoreTree::fileExceptionInsert(QString &fullPath, QString &direcotry, Qt::CheckState state) { m_fileExceptionHash.insert(fullPath, state); m_fileExceptionMulti.insert(direcotry, fullPath); directoryIconStateInsert(fullPath, state); } /* * function to remove keys from both m_fileExceptionHash and m_fileExceptionMulti */ void restoreTree::fileExceptionRemove(QString &fullPath, QString &directory) { m_fileExceptionHash.remove(fullPath); /* pull the list of values in the multi */ QStringList fullPathList = m_fileExceptionMulti.values(directory); /* get the index of the fullpath to remove */ int index = fullPathList.indexOf(fullPath); if (index != -1) { /* remove the desired item in the list */ fullPathList.removeAt(index); /* remove the entire list from the multi */ m_fileExceptionMulti.remove(directory); /* readd the remaining */ foreach (QString fp, fullPathList) { m_fileExceptionMulti.insert(directory, fp); } } directoryIconStateRemove(); } /* * Overloaded function to be called from the slot and from other places to set the state * of the check marks in the version table */ void restoreTree::versionTableItemChanged(QTableWidgetItem *item) { /* get the previous and current check states */ int row = versionTable->row(item); QTableWidgetItem *colZeroItem = versionTable->item(row, 0); Qt::CheckState prevState = m_versionCheckStateList[row]; Qt::CheckState curState = (Qt::CheckState)colZeroItem->checkState(); m_versionCheckStateList[row] = curState; /* deterimine the default state from the state of the file */ QTableWidgetItem *fileTableItem = fileTable->currentItem(); Qt::CheckState fileState = (Qt::CheckState)fileTableItem->checkState(); /* determine the default state */ Qt::CheckState defState; if (mainWin->m_sqlDebug) Pmsg1(000, "row=%d\n", row); if (row == 0) { defState = Qt::PartiallyChecked; if (fileState == Qt::Unchecked) defState = Qt::Unchecked; } else { defState = Qt::Unchecked; } /* determine if it is already in the versionExceptionHash */ QString directory = directoryTree->currentItem()->data(0, Qt::UserRole).toString(); Qt::CheckState dirState = directoryTree->currentItem()->checkState(0); QString file = fileTableItem->text(); QString fullPath = directory + file; int thisJobNum = colZeroItem->text().toInt(); int hashJobNum = m_versionExceptionHash.value(fullPath, 0); if (mainWin->m_rtVerTabICDebug) { QString msg = QString("versrow=%1 prev=%2 cur=%3 def=%4 dir=%5 hashJobNum=%6 thisJobNum=%7 filestate=%8 fec=%9 vec=%10\n") .arg(row).arg(prevState).arg(curState).arg(defState).arg(dirState).arg(hashJobNum).arg(thisJobNum).arg(fileState) .arg(m_fileExceptionHash.count()).arg(m_versionExceptionHash.count()); Pmsg1(000, "%s", msg.toUtf8().data()); } /* if changed from partially checked to checked, make it unchecked */ if ((curState == Qt::Checked) && (row == 0) && (fileState == Qt::Unchecked)) { if (mainWin->m_rtVerTabICDebug) Pmsg0(000, "Setting to Qt::Checked\n"); 
fileTableItem->setCheckState(Qt::Checked); } else if ((prevState == Qt::PartiallyChecked) && (curState == Qt::Checked) && (row == 0) && (fileState == Qt::Checked) && (dirState == Qt::Unchecked)) { //versrow=0 prev=1 cur=2 def=1 dir=0 hashJobNum=0 thisJobNum=64 filestate=2 fec=1 vec=0 if (mainWin->m_rtVerTabICDebug) Pmsg1(000, "fileExceptionRemove %s, %i\n", fullPath.toUtf8().data()); fileExceptionRemove(fullPath, directory); } else if ((curState == Qt::Checked) && (row == 0) && (hashJobNum != 0) && (dirState != Qt::Unchecked)) { //versrow=0 prev=0 cur=2 def=1 dir=2 hashJobNum=53 thisJobNum=64 filestate=2 fec=1 vec=1 if (mainWin->m_rtVerTabICDebug) Pmsg1(000, "m_versionExceptionHash.remove %s\n", fullPath.toUtf8().data()); m_versionExceptionHash.remove(fullPath); fileExceptionRemove(fullPath, directory); } else if ((curState == Qt::Checked) && (row == 0)) { if (mainWin->m_rtVerTabICDebug) Pmsg1(000, "m_versionExceptionHash.remove %s\n", fullPath.toUtf8().data()); m_versionExceptionHash.remove(fullPath); } else if (prevState != curState) { if (mainWin->m_rtVerTabICDebug) Pmsg2(000, " THE STATE OF THE version Check has changed, Setting StateList[%i] to %i\n", row, curState); if ((curState == Qt::Checked) || (curState == Qt::PartiallyChecked)) { if (mainWin->m_rtVerTabICDebug) Pmsg2(000, "Inserting into m_versionExceptionHash %s, %i\n", fullPath.toUtf8().data(), thisJobNum); m_versionExceptionHash.insert(fullPath, thisJobNum); if (fileState != Qt::Checked) { if (mainWin->m_rtVerTabICDebug) Pmsg2(000, "Inserting into m_fileExceptionHash %s, %i\n", fullPath.toUtf8().data(), curState); fileExceptionInsert(fullPath, directory, curState); } } else { if (mainWin->m_rtVerTabICDebug) Pmsg0(000, "got here\n"); } } else { if (mainWin->m_rtVerTabICDebug) Pmsg0(000, "no conditions met\n"); } updateFileTableChecks(); updateVersionTableChecks(); } /* * Simple function to set the check state in the file table by disconnecting the * signal/slot the setting then reconnecting the signal/slot */ void restoreTree::fileTableDisconnectedSet(QTableWidgetItem *item, Qt::CheckState state, bool color) { disconnect(fileTable, SIGNAL(itemChanged(QTableWidgetItem *)), this, SLOT(fileTableItemChanged(QTableWidgetItem *))); item->setCheckState(state); if (color) item->setBackground(Qt::yellow); else item->setBackground(Qt::white); connect(fileTable, SIGNAL(itemChanged(QTableWidgetItem *)), this, SLOT(fileTableItemChanged(QTableWidgetItem *))); } /* * Simple function to set the check state in the version table by disconnecting the * signal/slot the setting then reconnecting the signal/slot */ void restoreTree::versionTableDisconnectedSet(QTableWidgetItem *item, Qt::CheckState state) { disconnect(versionTable, SIGNAL(itemChanged(QTableWidgetItem *)), this, SLOT(versionTableItemChanged(QTableWidgetItem *))); item->setCheckState(state); connect(versionTable, SIGNAL(itemChanged(QTableWidgetItem *)), this, SLOT(versionTableItemChanged(QTableWidgetItem *))); } /* * Simple function to set the check state in the directory tree by disconnecting the * signal/slot the setting then reconnecting the signal/slot */ void restoreTree::directoryTreeDisconnectedSet(QTreeWidgetItem *item, Qt::CheckState state) { disconnect(directoryTree, SIGNAL(itemChanged(QTreeWidgetItem *, int)), this, SLOT(directoryItemChanged(QTreeWidgetItem *, int))); item->setCheckState(0, state); connect(directoryTree, SIGNAL(itemChanged(QTreeWidgetItem *, int)), this, SLOT(directoryItemChanged(QTreeWidgetItem *, int))); } /* * Simplify the updating of the check state 
in the File table by iterating through * each item in the file table to determine it's appropriate state. * !! Will probably want to concoct a way to do this without iterating for the possibility * of the very large directories. */ void restoreTree::updateFileTableChecks() { /* deterimine the default state from the state of the directory */ QTreeWidgetItem *dirTreeItem = directoryTree->currentItem(); Qt::CheckState dirState = dirTreeItem->checkState(0); QString dirName = dirTreeItem->data(0, Qt::UserRole).toString(); /* Update the items in the version table */ int rcnt = fileTable->rowCount(); for (int row=0; rowitem(row, 0); if (!item) { return; } Qt::CheckState curState = item->checkState(); Qt::CheckState newState = Qt::PartiallyChecked; if (dirState == Qt::Unchecked) newState = Qt::Unchecked; /* determine if it is already in the m_fileExceptionHash */ QString file = item->text(); QString fullPath = dirName + file; Qt::CheckState hashState = m_fileExceptionHash.value(fullPath, (Qt::CheckState)3); int hashJobNum = m_versionExceptionHash.value(fullPath, 0); if (hashState != 3) newState = hashState; if (mainWin->m_rtUpdateFTDebug) { QString msg = QString("file row=%1 cur=%2 hash=%3 new=%4 dirState=%5\n") .arg(row).arg(curState).arg(hashState).arg(newState).arg(dirState); Pmsg1(000, "%s", msg.toUtf8().data()); } bool docolor = false; if (hashJobNum != 0) docolor = true; bool isyellow = item->background().color() == QColor(Qt::yellow); if ((newState != curState) || (hashState == 3) || ((isyellow && !docolor) || (!isyellow && docolor))) fileTableDisconnectedSet(item, newState, docolor); m_fileCheckStateList[row] = newState; } } /* * Simplify the updating of the check state in the Version table by iterating through * each item in the file table to determine it's appropriate state. 
*/ void restoreTree::updateVersionTableChecks() { /* deterimine the default state from the state of the directory */ QTreeWidgetItem *dirTreeItem = directoryTree->currentItem(); Qt::CheckState dirState = dirTreeItem->checkState(0); QString dirName = dirTreeItem->data(0, Qt::UserRole).toString(); /* deterimine the default state from the state of the file */ QTableWidgetItem *fileTableItem = fileTable->item(fileTable->currentRow(), 0); if (!fileTableItem) { return; } Qt::CheckState fileState = fileTableItem->checkState(); QString file = fileTableItem->text(); QString fullPath = dirName + file; int hashJobNum = m_versionExceptionHash.value(fullPath, 0); /* Update the items in the version table */ int cnt = versionTable->rowCount(); for (int row=0; rowitem(row, 0); if (!item) { break; } Qt::CheckState curState = item->checkState(); Qt::CheckState newState = Qt::Unchecked; if ((row == 0) && (fileState != Qt::Unchecked) && (hashJobNum == 0)) newState = Qt::PartiallyChecked; /* determine if it is already in the versionExceptionHash */ if (hashJobNum) { int thisJobNum = item->text().toInt(); if (thisJobNum == hashJobNum) newState = Qt::Checked; } if (mainWin->m_rtChecksDebug) { QString msg = QString("ver row=%1 cur=%2 hashJobNum=%3 new=%4 dirState=%5\n") .arg(row).arg(curState).arg(hashJobNum).arg(newState).arg(dirState); Pmsg1(000, "%s", msg.toUtf8().data()); } if (newState != curState) versionTableDisconnectedSet(item, newState); m_versionCheckStateList[row] = newState; } } /* * Quick subroutine to "return via subPaths" a list of subpaths when passed a fullPath */ void restoreTree::fullPathtoSubPaths(QStringList &subPaths, QString &fullPath_in) { int index; bool done = false; QString fullPath = fullPath_in; QString direct, path; while (((index = fullPath.lastIndexOf("/", -2)) != -1) && (!done)) { direct = path = fullPath; path.replace(index+1, fullPath.length()-index-1, ""); direct.replace(0, index+1, ""); if (false) { QString msg = QString("length = \"%1\" index = \"%2\" Considering \"%3\" \"%4\"\n") .arg(fullPath.length()).arg(index).arg(path).arg(direct); Pmsg0(000, msg.toUtf8().data()); } fullPath = path; subPaths.append(fullPath); } } /* * A Function to set the icon state and insert a record into * m_directoryIconStateHash when an exception is added by the user */ void restoreTree::directoryIconStateInsert(QString &fullPath, Qt::CheckState excpState) { QStringList paths; fullPathtoSubPaths(paths, fullPath); /* an exception that causes the item in the file table to be "Checked" has occured */ if (excpState == Qt::Checked) { bool foundAsUnChecked = false; QTreeWidgetItem *firstItem = m_dirPaths.value(paths[0]); if (firstItem) { if (firstItem->checkState(0) == Qt::Unchecked) foundAsUnChecked = true; } if (foundAsUnChecked) { /* as long as directory item is Unchecked, set icon state to "green check" */ bool done = false; QListIterator siter(paths); while (siter.hasNext() && !done) { QString path = siter.next(); QTreeWidgetItem *item = m_dirPaths.value(path); if (item) { if (item->checkState(0) != Qt::Unchecked) done = true; else { directorySetIcon(1, FolderGreenChecked, path, item); if (mainWin->m_rtIconStateDebug) Pmsg1(000, "In restoreTree::directoryIconStateInsert inserting %s\n", path.toUtf8().data()); } } } } else { /* if it is partially checked or fully checked insert green Check until a unchecked is found in the path */ if (mainWin->m_rtIconStateDebug) Pmsg1(000, "In restoreTree::directoryIconStateInsert Aqua %s\n", paths[0].toUtf8().data()); bool done = false; QListIterator 
siter(paths); while (siter.hasNext() && !done) { QString path = siter.next(); QTreeWidgetItem *item = m_dirPaths.value(path); if (item) { /* if the directory item is checked, set icon state to unchecked "green check" */ if (item->checkState(0) == Qt::Checked) done = true; directorySetIcon(1, FolderGreenChecked, path, item); if (mainWin->m_rtIconStateDebug) Pmsg1(000, "In restoreTree::directoryIconStateInsert boogie %s\n", path.toUtf8().data()); } } } } /* an exception that causes the item in the file table to be "Unchecked" has occured */ if (excpState == Qt::Unchecked) { bool done = false; QListIterator siter(paths); while (siter.hasNext() && !done) { QString path = siter.next(); QTreeWidgetItem *item = m_dirPaths.value(path); if (item) { /* if the directory item is checked, set icon state to unchecked "white check" */ if (item->checkState(0) == Qt::Checked) done = true; directorySetIcon(1, FolderWhiteChecked, path, item); if (mainWin->m_rtIconStateDebug) Pmsg1(000, "In restoreTree::directoryIconStateInsert boogie %s\n", path.toUtf8().data()); } } } } /* * A function to set the icon state back to "folder" and to remove a record from * m_directoryIconStateHash when an exception is removed by a user. */ void restoreTree::directoryIconStateRemove() { QHash shouldBeIconStateHash; /* First determine all paths with icons that should be checked with m_fileExceptionHash */ /* Use iterator tera to iterate through m_fileExceptionHash */ QHashIterator tera(m_fileExceptionHash); while (tera.hasNext()) { tera.next(); if (mainWin->m_rtIconStateDebug) Pmsg2(000, "Alpha Key %s value %i\n", tera.key().toUtf8().data(), tera.value()); QString keyPath = tera.key(); Qt::CheckState state = tera.value(); QStringList paths; fullPathtoSubPaths(paths, keyPath); /* if the state of the item in m_fileExceptionHash is checked * each of the subpaths should be "Checked Green" */ if (state == Qt::Checked) { bool foundAsUnChecked = false; QTreeWidgetItem *firstItem = m_dirPaths.value(paths[0]); if (firstItem) { if (firstItem->checkState(0) == Qt::Unchecked) foundAsUnChecked = true; } if (foundAsUnChecked) { /* The right most directory is Unchecked, iterate leftwards * as long as directory item is Unchecked, set icon state to "green check" */ bool done = false; QListIterator siter(paths); while (siter.hasNext() && !done) { QString path = siter.next(); QTreeWidgetItem *item = m_dirPaths.value(path); if (item) { if (item->checkState(0) != Qt::Unchecked) done = true; else { shouldBeIconStateHash.insert(path, FolderGreenChecked); if (mainWin->m_rtIconStateDebug) Pmsg1(000, "In restoreTree::directoryIconStateInsert inserting %s\n", path.toUtf8().data()); } } } } else { /* The right most directory is Unchecked, iterate leftwards * until directory item is Checked, set icon state to "green check" */ bool done = false; QListIterator siter(paths); while (siter.hasNext() && !done) { QString path = siter.next(); QTreeWidgetItem *item = m_dirPaths.value(path); if (item) { if (item->checkState(0) == Qt::Checked) done = true; shouldBeIconStateHash.insert(path, FolderGreenChecked); } } } } /* if the state of the item in m_fileExceptionHash is UNChecked * each of the subpaths should be "Checked white" until the tree item * which represents that path is Qt::Checked */ if (state == Qt::Unchecked) { bool done = false; QListIterator siter(paths); while (siter.hasNext() && !done) { QString path = siter.next(); QTreeWidgetItem *item = m_dirPaths.value(path); if (item) { if (item->checkState(0) == Qt::Checked) done = true; 
shouldBeIconStateHash.insert(path, FolderWhiteChecked); } } } } /* now iterate through m_directoryIconStateHash which are the items that are checked * and remove all of those that are not in shouldBeIconStateHash */ QHashIterator iter(m_directoryIconStateHash); while (iter.hasNext()) { iter.next(); if (mainWin->m_rtIconStateDebug) Pmsg2(000, "Beta Key %s value %i\n", iter.key().toUtf8().data(), iter.value()); QString keyPath = iter.key(); if (shouldBeIconStateHash.value(keyPath)) { if (mainWin->m_rtIconStateDebug) Pmsg1(000, "WAS found in shouldBeStateHash %s\n", keyPath.toUtf8().data()); //newval = m_directoryIconStateHash.value(path, FolderUnchecked) & (~change); int newval = shouldBeIconStateHash.value(keyPath); newval = ~newval; newval = newval & FolderBothChecked; QTreeWidgetItem *item = m_dirPaths.value(keyPath); if (item) directorySetIcon(0, newval, keyPath, item); } else { if (mainWin->m_rtIconStateDebug) Pmsg1(000, "NOT found in shouldBeStateHash %s\n", keyPath.toUtf8().data()); QTreeWidgetItem *item = m_dirPaths.value(keyPath); if (item) directorySetIcon(0, FolderBothChecked, keyPath, item); //item->setIcon(0,QIcon(QString::fromUtf8(":images/folder.png"))); //m_directoryIconStateHash.remove(keyPath); } } } void restoreTree::directorySetIcon(int operation, int change, QString &path, QTreeWidgetItem* item) { int newval; /* we are adding a check type white or green */ if (operation > 0) { /* get the old val and "bitwise OR" with the change */ newval = m_directoryIconStateHash.value(path, FolderUnchecked) | change; if (mainWin->m_rtIconStateDebug) Pmsg2(000, "Inserting into m_directoryIconStateHash path=%s newval=%i\n", path.toUtf8().data(), newval); m_directoryIconStateHash.insert(path, newval); } else { /* we are removing a check type white or green */ newval = m_directoryIconStateHash.value(path, FolderUnchecked) & (~change); if (newval == 0) { if (mainWin->m_rtIconStateDebug) Pmsg2(000, "Removing from m_directoryIconStateHash path=%s newval=%i\n", path.toUtf8().data(), newval); m_directoryIconStateHash.remove(path); } else { if (mainWin->m_rtIconStateDebug) Pmsg2(000, "Inserting into m_directoryIconStateHash path=%s newval=%i\n", path.toUtf8().data(), newval); m_directoryIconStateHash.insert(path, newval); } } if (newval == FolderUnchecked) item->setIcon(0, QIcon(QString::fromUtf8(":images/folder.png"))); else if (newval == FolderGreenChecked) item->setIcon(0, QIcon(QString::fromUtf8(":images/folderchecked.png"))); else if (newval == FolderWhiteChecked) item->setIcon(0, QIcon(QString::fromUtf8(":images/folderunchecked.png"))); else if (newval == FolderBothChecked) item->setIcon(0, QIcon(QString::fromUtf8(":images/folderbothchecked.png"))); } /* * Restore Button */ void restoreTree::restoreButtonPushed() { /* Set progress bars and repaint */ prLabel1->setVisible(true); prLabel1->setText(tr("Task 1 of 3")); prLabel2->setVisible(true); prLabel2->setText(tr("Processing Checked directories")); prBar1->setVisible(true); prBar1->setRange(0, 3); prBar1->setValue(0); prBar2->setVisible(true); prBar2->setRange(0, 0); repaint(); QMultiHash versionFilesMulti; int vFMCounter = 0; QHash fullPathDone; QHash fileIndexHash; if ((mainWin->m_rtRestore1Debug) || (mainWin->m_rtRestore2Debug) || (mainWin->m_rtRestore3Debug)) Pmsg0(000, "In restoreTree::restoreButtonPushed\n"); /* Use a tree widget item iterator to count directories for the progress bar */ QTreeWidgetItemIterator diterc(directoryTree, QTreeWidgetItemIterator::Checked); int ditcount = 0; while (*diterc) { ditcount += 1; ++diterc; } /* 
while (*diterc) */ prBar2->setRange(0, ditcount); prBar2->setValue(0); ditcount = 0; /* Use a tree widget item iterator filtering for Checked Items */ QTreeWidgetItemIterator diter(directoryTree, QTreeWidgetItemIterator::Checked); while (*diter) { QString directory = (*diter)->data(0, Qt::UserRole).toString(); int pathid = m_directoryPathIdHash.value(directory, -1); if (pathid != -1) { if (mainWin->m_rtRestore1Debug) Pmsg1(000, "Directory Checked=\"%s\"\n", directory.toUtf8().data()); /* With a checked directory, query for the files in the directory */ QString cmd = "SELECT File.Filename AS Filename, t1.JobId AS JobId, File.FileIndex AS FileIndex" " FROM" " ( SELECT File.Filename AS Filename, MAX(Job.JobId) AS JobId" " FROM File" " INNER JOIN Job ON (Job.JobId=File.JobId)" " WHERE File.PathId=" + QString("%1").arg(pathid) + " AND Job.Jobid IN (" + m_checkedJobs + ")" " GROUP BY File.Filename" ") t1, File " " INNER JOIN Job ON (Job.JobId=File.JobId)" " WHERE File.PathId=" + QString("%1").arg(pathid) + " AND Job.Jobid=t1.JobId" " ORDER BY Filename"; if (mainWin->m_sqlDebug) Pmsg1(000, "Query cmd : %s\n", cmd.toUtf8().data()); QStringList results; if (m_console->sql_cmd(cmd, results)) { QStringList fieldlist; int row = 0; /* Iterate through the record returned from the query */ foreach (QString resultline, results) { /* Iterate through fields in the record */ int column = 0; QString fullPath = ""; Qt::CheckState fileExcpState = (Qt::CheckState)4; fieldlist = resultline.split("\t"); int version = 0; int fileIndex = 0; foreach (QString field, fieldlist) { if (column == 0) { fullPath = directory + field; } if (column == 1) { version = field.toInt(); } if (column == 2) { fileIndex = field.toInt(); } column++; } fileExcpState = m_fileExceptionHash.value(fullPath, (Qt::CheckState)3); int excpVersion = m_versionExceptionHash.value(fullPath, 0); if (fileExcpState != Qt::Unchecked) { QString debugtext; if (excpVersion != 0) { debugtext = QString("*E* version=%1").arg(excpVersion); version = excpVersion; fileIndex = queryFileIndex(fullPath, excpVersion); } else debugtext = QString("___ version=%1").arg(version); if (mainWin->m_rtRestore1Debug) Pmsg2(000, "Restoring %s File %s\n", debugtext.toUtf8().data(), fullPath.toUtf8().data()); fullPathDone.insert(fullPath, 1); fileIndexHash.insert(fullPath, fileIndex); versionFilesMulti.insert(version, fullPath); vFMCounter += 1; } row++; } } } ditcount += 1; prBar2->setValue(ditcount); ++diter; } /* while (*diter) */ prBar1->setValue(1); prLabel1->setText( tr("Task 2 of 3")); prLabel2->setText(tr("Processing Exceptions")); prBar2->setRange(0, 0); repaint(); /* There may be some exceptions not accounted for yet with fullPathDone */ QHashIterator ftera(m_fileExceptionHash); while (ftera.hasNext()) { ftera.next(); QString fullPath = ftera.key(); Qt::CheckState state = ftera.value(); if (state != 0) { /* now we don't want the ones already done */ if (fullPathDone.value(fullPath, 0) == 0) { int version = m_versionExceptionHash.value(fullPath, 0); int fileIndex = 0; QString debugtext = ""; if (version != 0) { fileIndex = queryFileIndex(fullPath, version); debugtext = QString("E1* version=%1 fileid=%2").arg(version).arg(fileIndex); } else { version = mostRecentVersionfromFullPath(fullPath); if (version) { fileIndex = queryFileIndex(fullPath, version); debugtext = QString("E2* version=%1 fileid=%2").arg(version).arg(fileIndex); } else debugtext = QString("Error det vers").arg(version); } if (mainWin->m_rtRestore1Debug) Pmsg2(000, "Restoring %s file %s\n", 
debugtext.toUtf8().data(), fullPath.toUtf8().data()); versionFilesMulti.insert(version, fullPath); vFMCounter += 1; fileIndexHash.insert(fullPath, fileIndex); } /* if fullPathDone.value(fullPath, 0) == 0 */ } /* if state != 0 */ } /* while ftera.hasNext */ /* The progress bars for the next step */ prBar1->setValue(2); prLabel1->setText(tr("Task 3 of 3")); prLabel2->setText(tr("Filling Database Table")); prBar2->setRange(0, vFMCounter); vFMCounter = 0; prBar2->setValue(vFMCounter); repaint(); /* now for the final spit out of the versions and lists of files for each version */ QHash doneKeys; QHashIterator vFMiter(versionFilesMulti); QString tempTable = ""; QList jobList; while (vFMiter.hasNext()) { vFMiter.next(); int fversion = vFMiter.key(); /* did not succeed in getting an iterator to work as expected on versionFilesMulti so use doneKeys */ if (doneKeys.value(fversion, 0) == 0) { if (tempTable == "") { QSettings settings("www.bacula.org", "bat"); settings.beginGroup("Restore"); int counter = settings.value("Counter", 1).toInt(); settings.setValue("Counter", counter+1); settings.endGroup(); tempTable = "restore_" + QString("%1").arg(qrand()) + "_" + QString("%1").arg(counter); QString sqlcmd = "CREATE TEMPORARY TABLE " + tempTable + " (JobId INTEGER, FileIndex INTEGER)"; if (mainWin->m_sqlDebug) Pmsg1(000, "Query cmd : %s ;\n", sqlcmd.toUtf8().data()); QStringList results; if (!m_console->sql_cmd(sqlcmd, results)) Pmsg1(000, "CREATE TABLE FAILED!!!! %s\n", sqlcmd.toUtf8().data()); } if (mainWin->m_rtRestore2Debug) Pmsg1(000, "Version->%i\n", fversion); QStringList fullPathList = versionFilesMulti.values(fversion); /* create the command to perform the restore */ foreach(QString ffullPath, fullPathList) { int fileIndex = fileIndexHash.value(ffullPath); if (mainWin->m_rtRestore2Debug) Pmsg2(000, " file->%s id %i\n", ffullPath.toUtf8().data(), fileIndex); QString sqlcmd = "INSERT INTO " + tempTable + " (JobId, FileIndex) VALUES (" + QString("%1").arg(fversion) + ", " + QString("%1").arg(fileIndex) + ")"; if (mainWin->m_rtRestore3Debug) Pmsg1(000, "Insert cmd : %s\n", sqlcmd.toUtf8().data()); QStringList results; if (!m_console->sql_cmd(sqlcmd, results)) Pmsg1(000, "INSERT INTO FAILED!!!! %s\n", sqlcmd.toUtf8().data()); prBar2->setValue(++vFMCounter); } /* foreach fullPathList */ doneKeys.insert(fversion,1); jobList.append(fversion); } /* if (doneKeys.value(fversion, 0) == 0) */ } /* while (vFMiter.hasNext()) */ if (tempTable != "") { /* a table was made, lets run the job */ QString jobOption = " jobid=\""; bool first = true; /* create a list of jobs comma separated */ foreach (int job, jobList) { if (first) first = false; else jobOption += ","; jobOption += QString("%1").arg(job); } jobOption += "\""; QString cmd = QString("restore"); cmd += jobOption + " client=\"" + m_prevClientCombo + "\"" + " file=\"?" 
+ tempTable + "\" done"; if (mainWin->m_commandDebug) Pmsg1(000, "preRestore command \'%s\'\n", cmd.toUtf8().data()); consoleCommand(cmd); } /* turn off the progress widgets */ prBar1->setVisible(false); prBar2->setVisible(false); prLabel1->setVisible(false); prLabel2->setVisible(false); } int restoreTree::mostRecentVersionfromFullPath(QString &fullPath) { int qversion = 0; QString directory, fileName; int index = fullPath.lastIndexOf("/", -2); if (index != -1) { directory = fileName = fullPath; directory.replace(index+1, fullPath.length()-index-1, ""); fileName.replace(0, index+1, ""); if (false) { QString msg = QString("length = \"%1\" index = \"%2\" Considering \"%3\" \"%4\"\n") .arg(fullPath.length()).arg(index).arg(fileName).arg(directory); Pmsg0(000, msg.toUtf8().data()); } int pathid = m_directoryPathIdHash.value(directory, -1); if (pathid != -1) { /* so now we need the latest version from the database */ QString cmd = "SELECT MAX(Job.JobId)" " FROM File " " INNER JOIN Job ON (File.JobId=Job.JobId)" " WHERE File.PathId=" + QString("%1").arg(pathid) + " AND Job.Jobid IN (" + m_checkedJobs + ")" " AND File.Filename='" + fileName + "'" " GROUP BY File.Filename"; if (mainWin->m_sqlDebug) Pmsg1(000, "Query cmd : %s\n", cmd.toUtf8().data()); QStringList results; if (m_console->sql_cmd(cmd, results)) { QStringList fieldlist; int row = 0; /* Iterate through the record returned from the query */ foreach (QString resultline, results) { /* Iterate through fields in the record */ int column = 0; fieldlist = resultline.split("\t"); foreach (QString field, fieldlist) { if (column == 0) { qversion = field.toInt(); } column++; } row++; } } } } /* if (index != -1) */ return qversion; } int restoreTree::queryFileIndex(QString &fullPath, int jobId) { int qfileIndex = 0; QString directory, fileName; int index = fullPath.lastIndexOf("/", -2); if (mainWin->m_sqlDebug) Pmsg1(000, "Index=%d\n", index); if (index != -1) { directory = fileName = fullPath; directory.replace(index+1, fullPath.length()-index-1, ""); fileName.replace(0, index+1, ""); if (false) { QString msg = QString("length = \"%1\" index = \"%2\" Considering \"%3\" \"%4\"\n") .arg(fullPath.length()).arg(index).arg(fileName).arg(directory); Pmsg0(000, msg.toUtf8().data()); } int pathid = m_directoryPathIdHash.value(directory, -1); if (pathid != -1) { /* so now we need the latest version from the database */ QString cmd = "SELECT" " File.FileIndex" " FROM File" " INNER JOIN Job ON (File.JobId=Job.JobId)" " WHERE File.PathId=" + QString("%1").arg(pathid) + " AND File.Filename='" + fileName + "'" " AND Job.Jobid='" + QString("%1").arg(jobId) + "'" " GROUP BY File.FileIndex"; if (mainWin->m_sqlDebug) Pmsg1(000, "Query cmd : %s\n", cmd.toUtf8().data()); QStringList results; if (m_console->sql_cmd(cmd, results)) { QStringList fieldlist; int row = 0; /* Iterate through the record returned from the query */ foreach (QString resultline, results) { /* Iterate through fields in the record */ int column = 0; fieldlist = resultline.split("\t"); foreach (QString field, fieldlist) { if (column == 0) { qfileIndex = field.toInt(); } column++; } row++; } } } } /* if (index != -1) */ if (mainWin->m_sqlDebug) Pmsg1(000, "qfileIndex=%d\n", qfileIndex); return qfileIndex; } void restoreTree::PgSeltreeWidgetClicked() { if (!isOnceDocked()) { dockPage(); } } bacula-15.0.3/src/qt-console/restore/brestore.cpp0000644000175000017500000005410614771010173021557 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original 
author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * * bRestore Class (Eric's brestore) * * Kern Sibbald, January MMVII * */ #include "bat.h" #include "restore.h" #include "util/fmtwidgetitem.h" bRestore::bRestore() : Pages() { m_name = tr("bRestore"); m_client = ""; setupUi(this); pgInitialize(); QTreeWidgetItem* thisitem = mainWin->getFromHash(this); thisitem->setIcon(0, QIcon(QString::fromUtf8(":images/browse.png"))); m_populated = false; m_closeable = false; m_current = NULL; RestoreList->setAcceptDrops(true); } // Populate client table and job associated void bRestore::setClient() { // Select the same client, don't touch if (m_client == ClientList->currentText()) { return; } m_client = ClientList->currentText(); FileList->clearContents(); FileRevisions->clearContents(); JobList->clear(); JobList->setEnabled(true); LocationEntry->clear(); m_path = ""; m_pathid = 0; if (ClientList->currentIndex() < 1) { JobList->setEnabled(false); return; } JobList->addItem("Job list for " + m_client); QString jobQuery = "SELECT Job.Jobid AS JobId, Job.StartTime AS StartTime," " Job.Level AS Level," " Job.Name AS Name" " FROM Job JOIN Client USING (ClientId)" " WHERE" " Job.JobStatus IN ('T','W') AND Job.Type='B' AND" " Client.Name='" + m_client + "' ORDER BY StartTime DESC" ; QString job; QStringList results; QStringList fieldlist; if (m_console->sql_cmd(jobQuery, results)) { /* Iterate through the record returned from the query */ foreach (QString resultline, results) { // 0 1 2 3 // JobId, StartTime, Level, Name fieldlist = resultline.split("\t"); job = fieldlist[1] + " " + fieldlist[3] + "(" + fieldlist[2] + ") " + fieldlist[0]; JobList->addItem(job, QVariant(fieldlist[0])); // set also private value } } } // Compute job associated and update the job cache if needed void bRestore::setJob() { if (JobList->currentIndex() < 1) { FileList->clearContents(); FileList->setRowCount(0); FileRevisions->clearContents(); FileRevisions->setRowCount(0); return ; } QStringList results; QVariant tmp = JobList->itemData(JobList->currentIndex(), Qt::UserRole); m_jobids = tmp.toString(); QString cmd = ".bvfs_get_jobids jobid=" + m_jobids; if (MergeChk->checkState() == Qt::Checked) { cmd.append(" all"); } m_console->dir_cmd(cmd, results); if (results.size() < 1) { FileList->clearContents(); FileList->setRowCount(0); FileRevisions->clearContents(); FileRevisions->setRowCount(0); return; } // TODO: Can take some time if the job contains many dirs m_jobids = results.at(0); cmd = ".bvfs_update jobid=" + m_jobids; m_console->dir_cmd(cmd, results); Pmsg1(0, "jobids=%s\n", m_jobids.toLocal8Bit().constData()); displayFiles(m_pathid, QString("")); Pmsg0(000, "update done\n"); } extern int decode_stat(char *buf, struct stat *statp, int stat_size, int32_t *LinkFI); // refresh button with a filter or limit/offset change void bRestore::refreshView() { displayFiles(m_pathid, m_path); } void bRestore::displayFiles(int64_t pathid, QString path) { QString arg; QStringList results; QStringList fieldlist; struct stat statp; int32_t LinkFI; int nb = 0; int row = 0; Freeze frz_lst(*FileList); 
/* disable updating*/ Freeze frz_rev(*FileRevisions); /* disable updating*/ FileList->clearContents(); FileRevisions->clearContents(); FileRevisions->setRowCount(0); // If we provide pathid, use it (path can be altered by encoding conversion) if (pathid > 0) { arg = " pathid=" + QString().setNum(pathid); // Choose .. update current path to parent dir if (path == "..") { if (m_path == "/") { m_path = ""; } else { m_path.remove(QRegExp("[^/]+/$")); } } else if (path == "/" && m_path == "") { m_path += path; } else if (path != "/" && path != ".") { m_path += path; } } else { m_path = path; arg = " path=\"" + m_path + "\""; } // If a filter is set, add it to the current query if (FilterEntry->text() != "") { QString tmp = FilterEntry->text(); tmp.replace("\"", "."); // basic escape of " arg += " pattern=\"" + tmp + "\""; } LocationEntry->setText(m_path); QString offset = QString().setNum(Offset1Spin->value()); QString limit=QString().setNum(Offset2Spin->value() - Offset1Spin->value()); QString q = ".bvfs_lsdir jobid=" + m_jobids + arg + " limit=" + limit + " offset=" + offset ; if (mainWin->m_miscDebug) qDebug() << q; if (m_console->dir_cmd(q, results)) { nb = results.size(); FileList->setRowCount(nb); foreach (QString resultline, results) { int col=0; //PathId, FilenameId, fileid, jobid, lstat, path fieldlist = resultline.split("\t"); /* * Note, the next line zaps variable "item", probably * because the input data in fieldlist is bad. */ decode_stat(fieldlist.at(4).toLocal8Bit().data(), &statp, sizeof(statp), &LinkFI); TableItemFormatter item(*FileList, row++); item.setFileType(col++, QString("folder")); // folder or file item.setTextFld(col++, fieldlist.at(5)); // path item.setBytesFld(col++, QString().setNum(statp.st_size)); item.setDateFld(col++, statp.st_mtime); // date fieldlist.replace(3, m_jobids); // use current jobids selection // keep original info on the first cel that is never empty item.widget(1)->setData(Qt::UserRole, fieldlist.join("\t")); } } results.clear(); q = ".bvfs_lsfiles jobid=" + m_jobids + arg + " limit=" + limit + " offset=" + offset ; if (m_console->dir_cmd(q, results)) { FileList->setRowCount(results.size() + nb); foreach (QString resultline, results) { int col=1; // skip icon //PathId, FilenameId, fileid, jobid, lstat, name fieldlist = resultline.split("\t"); TableItemFormatter item(*FileList, row++); item.setTextFld(col++, fieldlist.at(5)); // name decode_stat(fieldlist.at(4).toLocal8Bit().data(), &statp, sizeof(statp), &LinkFI); item.setBytesFld(col++, QString().setNum(statp.st_size)); item.setDateFld(col++, statp.st_mtime); // keep original info on the first cel that is never empty item.widget(1)->setData(Qt::UserRole, fieldlist.join("\t")); // keep info } } FileList->verticalHeader()->hide(); FileList->resizeColumnsToContents(); FileList->resizeRowsToContents(); FileList->setEditTriggers(QAbstractItemView::NoEditTriggers); } void bRestore::PgSeltreeWidgetClicked() { if(!m_populated) { setupPage(); } if (!isOnceDocked()) { dockPage(); } } // Display all versions of a file for this client void bRestore::displayFileVersion(QString pathid, QString fnid, QString client, QString filename) { int row=0; struct stat statp; int32_t LinkFI; Freeze frz_rev(*FileRevisions); /* disable updating*/ FileRevisions->clearContents(); QString q = ".bvfs_versions jobid=" + m_jobids + " pathid=" + pathid + " fnid=" + fnid + " client=" + client; if (VersionsChk->checkState() == Qt::Checked) { q.append(" versions"); } QStringList results; QStringList fieldlist; QString tmp; if 
(m_console->dir_cmd(q, results)) { FileRevisions->setRowCount(results.size()); foreach (QString resultline, results) { int col=0; // 0 1 2 3 4 5 6 7 //PathId, FilenameId, fileid, jobid, lstat, Md5, VolName, Inchanger fieldlist = resultline.split("\t"); TableItemFormatter item(*FileRevisions, row++); item.setInChanger(col++, fieldlist.at(7)); // inchanger item.setTextFld(col++, fieldlist.at(6)); // Volume item.setNumericFld(col++, fieldlist.at(3)); // JobId decode_stat(fieldlist.at(4).toLocal8Bit().data(), &statp, sizeof(statp), &LinkFI); item.setBytesFld(col++, QString().setNum(statp.st_size)); // size item.setDateFld(col++, statp.st_mtime); // date item.setTextFld(col++, fieldlist.at(5)); // chksum // Adjust the fieldlist for drag&drop fieldlist.removeLast(); // inchanger fieldlist.removeLast(); // volname fieldlist.removeLast(); // md5 fieldlist << m_path + filename; // keep original info on the first cel that is never empty item.widget(1)->setData(Qt::UserRole, fieldlist.join("\t")); } } FileRevisions->verticalHeader()->hide(); FileRevisions->resizeColumnsToContents(); FileRevisions->resizeRowsToContents(); FileRevisions->setEditTriggers(QAbstractItemView::NoEditTriggers); } void bRestore::showInfoForFile(QTableWidgetItem *widget) { m_current = widget; QTableWidgetItem *first = FileList->item(widget->row(), 1); QStringList lst = first->data(Qt::UserRole).toString().split("\t"); if (lst.at(1) == "0") { // no filenameid, should be a path displayFiles(lst.at(0).toLongLong(), lst.at(5)); } else { displayFileVersion(lst.at(0), lst.at(1), m_client, lst.at(5)); } } void bRestore::applyLocation() { displayFiles(0, LocationEntry->text()); } void bRestore::clearVersions(QTableWidgetItem *item) { if (item != m_current) { FileRevisions->clearContents(); FileRevisions->setRowCount(0); } m_current = item ; } void bRestore::clearRestoreList() { RestoreList->clearContents(); RestoreList->setRowCount(0); } void bRestore::runRestore() { bRunRestore *r = new bRunRestore(this); r->setVisible(true); } void bRestore::setupPage() { ClientList->addItem("Client list"); ClientList->addItems(m_console->client_list); connect(ClientList, SIGNAL(currentIndexChanged(int)), this, SLOT(setClient())); connect(JobList, SIGNAL(currentIndexChanged(int)), this, SLOT(setJob())); connect(FileList, SIGNAL(itemClicked(QTableWidgetItem*)), this, SLOT(clearVersions(QTableWidgetItem *))); connect(FileList, SIGNAL(itemDoubleClicked(QTableWidgetItem*)), this, SLOT(showInfoForFile(QTableWidgetItem *))); connect(LocationBp, SIGNAL(pressed()), this, SLOT(applyLocation())); connect(MergeChk, SIGNAL(clicked()), this, SLOT(setJob())); connect(ClearBp, SIGNAL(clicked()), this, SLOT(clearRestoreList())); connect(RestoreBp, SIGNAL(clicked()), this, SLOT(runRestore())); connect(FilterBp, SIGNAL(clicked()), this, SLOT(refreshView())); m_populated = true; } bRestore::~bRestore() { } // Drag & Drop handling, not so easy... 
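
/*
 * Illustrative sketch (added; not part of the original sources and kept
 * under #if 0 so it cannot affect the build): the bRestoreTable drag & drop
 * code below ships one table row per drag by copying the row's hidden
 * Qt::UserRole payload (the tab separated bvfs fields kept on column 1)
 * into a QMimeData text block.  The helper names below are invented; they
 * only restate the round trip performed by mouseMoveEvent() and dropEvent().
 */
#if 0
#include <QMimeData>
#include <QString>
#include <QStringList>

/* Pack a row the way mouseMoveEvent() does before starting the QDrag. */
static QMimeData *packRestoreRow(const QString &userRolePayload)
{
   QMimeData *mime = new QMimeData;
   /* payload layout: "PathId\tFilenameId\tFileId\tJobId\tLStat\tName" */
   mime->setText(userRolePayload);
   return mime;
}

/* Unpack it the way dropEvent() does; anything but 6 fields is rejected. */
static QStringList unpackRestoreRow(const QMimeData *mime)
{
   QStringList fields = mime->text().split("\t");
   return (fields.size() == 6) ? fields : QStringList();
}
#endif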
void bRestoreTable::mousePressEvent(QMouseEvent *event) { QTableWidget::mousePressEvent(event); if (event->button() == Qt::LeftButton) { dragStartPosition = event->pos(); } } // This event permits to send set custom data on drag&drop // Don't forget to call original class if we are not interested void bRestoreTable::mouseMoveEvent(QMouseEvent *event) { int lastrow=-1; // Look just for drag&drop if (!(event->buttons() & Qt::LeftButton)) { QTableWidget::mouseMoveEvent(event); return; } if ((event->pos() - dragStartPosition).manhattanLength() < QApplication::startDragDistance()) { QTableWidget::mouseMoveEvent(event); return; } QList lst = selectedItems(); if (mainWin->m_miscDebug) qDebug() << this << " selectedItems: " << lst; if (lst.isEmpty()) { return; } QDrag *drag = new QDrag(this); QMimeData *mimeData = new QMimeData; for (int i=0; i < lst.size(); i++) { if (lastrow != lst[i]->row()) { lastrow = lst[i]->row(); QTableWidgetItem *it = item(lastrow, 1); mimeData->setText(it->data(Qt::UserRole).toString()); break; // at this time, we do it one by one } } drag->setMimeData(mimeData); drag->exec(); } // This event is called when the drag item enters in the destination area void bRestoreTable::dragEnterEvent(QDragEnterEvent *event) { if (event->source() == this) { event->ignore(); return; } if (event->mimeData()->hasText()) { event->acceptProposedAction(); } else { event->ignore(); } } // It should not be essential to redefine this event, but it // doesn't work if not defined void bRestoreTable::dragMoveEvent(QDragMoveEvent *event) { if (event->mimeData()->hasText()) { event->acceptProposedAction(); } else { event->ignore(); } } // When user releases the button void bRestoreTable::dropEvent(QDropEvent *event) { int col=1; struct stat statp; int32_t LinkFI; if (event->mimeData()->hasText()) { TableItemFormatter item(*this, rowCount()); setRowCount(rowCount() + 1); QStringList fields = event->mimeData()->text().split("\t"); if (fields.size() != 6) { event->ignore(); return; } if (fields.at(1) == "0") { item.setFileType(0, "folder"); } item.setTextFld(col++, fields.at(5)); // filename decode_stat(fields.at(4).toLocal8Bit().data(), &statp, sizeof(statp), &LinkFI); item.setBytesFld(col++, QString().setNum(statp.st_size)); // size item.setDateFld(col++, statp.st_mtime); // date item.setNumericFld(col++, fields.at(3)); // jobid item.setNumericFld(col++, fields.at(2)); // fileid // keep original info on the first cel that is never empty item.widget(1)->setData(Qt::UserRole, event->mimeData()->text()); event->acceptProposedAction(); } else { event->ignore(); } } // Use File Relocation bp void bRunRestore::UFRcb() { if (UseFileRelocationChk->checkState() == Qt::Checked) { WhereEntry->setEnabled(false); UseRegexpChk->setEnabled(true); if (UseRegexpChk->checkState() == Qt::Checked) { AddSuffixEntry->setEnabled(false); AddPrefixEntry->setEnabled(false); StripPrefixEntry->setEnabled(false); WhereRegexpEntry->setEnabled(true); } else { AddSuffixEntry->setEnabled(true); AddPrefixEntry->setEnabled(true); StripPrefixEntry->setEnabled(true); WhereRegexpEntry->setEnabled(false); } } else { WhereEntry->setEnabled(true); AddSuffixEntry->setEnabled(false); AddPrefixEntry->setEnabled(false); StripPrefixEntry->setEnabled(false); UseRegexpChk->setEnabled(false); WhereRegexpEntry->setEnabled(false); } } // Expert mode for file relocation void bRunRestore::useRegexp() { if (UseRegexpChk->checkState() == Qt::Checked) { AddSuffixEntry->setEnabled(false); AddPrefixEntry->setEnabled(false); 
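      /*
       * The prefix/suffix entries toggled here and in UFRcb() are later
       * combined by runRestore() into a single bregexp-style regexwhere
       * option:
       *   strip prefix <p>   becomes   !<p>!!i
       *   add prefix   <p>   becomes   !^!<p>!
       *   add suffix   <s>   becomes   !([^/])$!$1<s>!
       * For example, stripping /old and adding /new yields
       *   regexwhere="!/old!!i,!^!/new!"
       */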
StripPrefixEntry->setEnabled(false); WhereRegexpEntry->setEnabled(true); } else { AddSuffixEntry->setEnabled(true); AddPrefixEntry->setEnabled(true); StripPrefixEntry->setEnabled(true); WhereRegexpEntry->setEnabled(false); } } // Display Form to run the restore job bRunRestore::bRunRestore(bRestore *parent) { brestore = parent; setupUi(this); ClientCb->addItems(parent->console()->client_list); int i = ClientCb->findText(parent->m_client); if (i >= 0) { ClientCb->setCurrentIndex(i); } StorageCb->addItem(QString("")); RestoreCb->addItems(parent->console()->restore_list); WhenEditor->setDateTime(QDateTime::currentDateTime()); StorageCb->addItems(parent->console()->storage_list); connect(UseFileRelocationChk, SIGNAL(clicked()), this, SLOT(UFRcb())); connect(UseRegexpChk, SIGNAL(clicked()), this, SLOT(useRegexp())); connect(ActionBp, SIGNAL(accepted()), this, SLOT(computeRestore())); // TODO: handle multiple restore job struct job_defaults jd; if (parent->console()->restore_list.size() > 0) { jd.job_name = parent->console()->restore_list[0]; brestore->console()->get_job_defaults(jd); WhereEntry->setText(jd.where); } computeVolumeList(); } void bRestore::get_info_from_selection(QStringList &fileids, QStringList &jobids, QStringList &dirids, QStringList &findexes) { struct stat statp; int32_t LinkFI; for (int i=0; i < RestoreList->rowCount(); i++) { QTableWidgetItem *item = RestoreList->item(i, 1); QString data = item->data(Qt::UserRole).toString(); QStringList lst = data.split("\t"); if (lst.at(1) != "0") { // skip path fileids << lst.at(2); jobids << lst.at(3); decode_stat(lst.at(4).toLocal8Bit().data(), &statp, sizeof(statp), &LinkFI); if (LinkFI) { findexes << lst.at(3) + "," + QString().setNum(LinkFI); } } else { dirids << lst.at(0); jobids << lst.at(3).split(","); // Can have multiple jobids } } fileids.removeDuplicates(); jobids.removeDuplicates(); dirids.removeDuplicates(); findexes.removeDuplicates(); } // To compute volume list with directories, query is much slower void bRunRestore::computeVolumeList() { brestore->get_info_from_selection(m_fileids, m_jobids, m_dirids, m_findexes); if (m_fileids.size() == 0) { return; } Freeze frz_lst(*TableMedia); /* disable updating*/ QString q = " SELECT DISTINCT VolumeName, Enabled, InChanger " " FROM File, " " ( " // -- Get all media from this job " SELECT MIN(FirstIndex) AS FirstIndex, MAX(LastIndex) AS LastIndex, " " VolumeName, Enabled, Inchanger " " FROM JobMedia JOIN Media USING (MediaId) " " WHERE JobId IN (" + m_jobids.join(",") + ") " " GROUP BY VolumeName,Enabled,InChanger " " ) AS allmedia " " WHERE File.FileId IN (" + m_fileids.join(",") + ") " " AND File.FileIndex >= allmedia.FirstIndex " " AND File.FileIndex <= allmedia.LastIndex "; int row=0; QStringList results; if (brestore->console()->sql_cmd(q, results)) { QStringList fieldlist; TableMedia->setRowCount(results.size()); /* Iterate through the record returned from the query */ foreach (QString resultline, results) { // 0 1 2 //volname, enabled, inchanger fieldlist = resultline.split("\t"); int col=0; TableItemFormatter item(*TableMedia, row++); item.setInChanger(col++, fieldlist.at(2)); // inchanger item.setTextFld(col++, fieldlist.at(0)); // Volume } } TableMedia->verticalHeader()->hide(); TableMedia->resizeColumnsToContents(); TableMedia->resizeRowsToContents(); TableMedia->setEditTriggers(QAbstractItemView::NoEditTriggers); } int64_t bRunRestore::runRestore(QString tablename) { QString q; QString tmp; tmp = ClientCb->currentText(); if (tmp == "") { return 0; } q = "restore 
client=" + tmp; tmp = CommentEntry->text(); if (tmp != "") { tmp.replace("\"", " "); q += " comment=\"" + tmp + "\""; } tmp = StorageCb->currentText(); if (tmp != "") { q += " storage=" + tmp; } if (UseFileRelocationChk->checkState() == Qt::Checked) { if (UseRegexpChk->checkState() == Qt::Checked) { tmp = WhereRegexpEntry->text(); if (tmp != "") { tmp.replace("\"", ""); q += " regexwhere=\"" + tmp + "\""; } } else { QStringList lst; tmp = StripPrefixEntry->text(); if (tmp != "") { tmp.replace("\"", ""); lst.append("!" + tmp + "!!i"); } tmp = AddPrefixEntry->text(); if (tmp != "") { tmp.replace("\"", ""); lst.append("!^!" + tmp + "!"); } tmp = AddSuffixEntry->text(); if (tmp != "") { tmp.replace("\"", ""); lst.append("!([^/])$!$1" + tmp + "!"); } if (lst.size() > 0) { q += " regexwhere=\"" + lst.join(",") + "\""; } } } else { tmp = WhereEntry->text(); if (tmp != "") { tmp.replace("\"", ""); q += " where=\"" + tmp + "\""; } } // q += " priority=" + tmp.setNum(PrioritySb->value()); // q += " job=\"" + RestoreCb->currentText() + "\""; q += " file=\"?" + tablename + "\""; q += " when=\"" + WhenEditor->dateTime().toString("yyyy-MM-dd hh:mm:ss") + "\""; q += " done yes"; if (mainWin->m_miscDebug) qDebug() << q; QStringList results; if (brestore->console()->dir_cmd(q, results)) { foreach (QString resultline, results) { QStringList fieldlist = resultline.split("="); if (fieldlist.size() == 2) { return fieldlist.at(1).toLongLong(); } } } return 0; } void bRunRestore::computeRestore() { QString q = ".bvfs_restore path=b2123 jobid=" + m_jobids.join(","); if (m_fileids.size() > 0) { q += " fileid=" + m_fileids.join(","); } if (m_dirids.size() > 0) { q += " dirid=" + m_dirids.join(","); } if (m_findexes.size() > 0) { q += " hardlink=" + m_findexes.join(","); } if (mainWin->m_miscDebug) qDebug() << q; QStringList results; if (brestore->console()->dir_cmd(q, results)) { if (results.size() == 1 && results[0] == "OK") { int64_t jobid = runRestore("b2123"); if (mainWin->m_miscDebug) qDebug() << "jobid=" << jobid; q = ".bvfs_cleanup path=b2123"; brestore->console()->dir_cmd(q, results); } } } bacula-15.0.3/src/qt-console/restore/runrestore.ui0000644000175000017500000002535314771010173021777 0ustar bsbuildbsbuild bRunRestoreForm 0 0 385 438 Run restore 0 Standard Restore options QFormLayout::ExpandingFieldsGrow Client: Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter Where: Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter Replace: Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter Comment: Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter Media needed false Qt::NoPen InChanger Volume Compute with directories false Advanced File Relocation Use file relocation: Strip prefix: false Add prefix: false Add suffix: false Use regexp: false Where regexp: false Other options When: Priority: 10 Storage: true Job: Qt::Horizontal QDialogButtonBox::Cancel|QDialogButtonBox::Ok ActionBp accepted() bRunRestoreForm accept() 222 422 157 274 ActionBp rejected() bRunRestoreForm reject() 290 428 286 274 UseFileRelocationChk clicked(bool) WhereEntry setDisabled(bool) 107 72 225 102 bacula-15.0.3/src/qt-console/restore/restoretree.h0000644000175000017500000000724014771010173021737 0ustar bsbuildbsbuild#ifndef _RESTORETREE_H_ #define _RESTORETREE_H_ /* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. 
You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Kern Sibbald, February 2007 */ #if QT_VERSION >= 0x050000 #include #else #include #endif #include "pages.h" #include "ui_restoretree.h" /* * A restore tree to view files in the catalog */ class restoreTree : public Pages, public Ui::restoreTreeForm { Q_OBJECT public: restoreTree(); ~restoreTree(); virtual void PgSeltreeWidgetClicked(); virtual void currentStackItem(); enum folderCheckState { FolderUnchecked = 0, FolderGreenChecked = 1, FolderWhiteChecked = 2, FolderBothChecked = 3 }; private slots: void refreshButtonPushed(); void restoreButtonPushed(); void jobComboChanged(int); void directoryCurrentItemChanged(QTreeWidgetItem *, QTreeWidgetItem *); void fileCurrentItemChanged(QTableWidgetItem *,QTableWidgetItem *); void directoryItemExpanded(QTreeWidgetItem *); void setCheckofChildren(QTreeWidgetItem *item, Qt::CheckState); void directoryItemChanged(QTreeWidgetItem *, int); void fileTableItemChanged(QTableWidgetItem *); void versionTableItemChanged(QTableWidgetItem *); void updateRefresh(); void jobTableCellClicked(int, int); private: void populateDirectoryTree(); void populateJobTable(); void parseDirectory(const QString &dir_in); void setupPage(); void writeSettings(); void readSettings(); void fileExceptionInsert(QString &, QString &, Qt::CheckState); void fileExceptionRemove(QString &, QString &); void directoryTreeDisconnectedSet(QTreeWidgetItem *, Qt::CheckState); void fileTableDisconnectedSet(QTableWidgetItem *, Qt::CheckState, bool color); void versionTableDisconnectedSet(QTableWidgetItem *, Qt::CheckState); void updateFileTableChecks(); void updateVersionTableChecks(); void directoryIconStateInsert(QString &, Qt::CheckState); void directoryIconStateRemove(); void directorySetIcon(int operation, int change, QString &, QTreeWidgetItem* item); void fullPathtoSubPaths(QStringList &, QString &); int mostRecentVersionfromFullPath(QString &); void setJobsCheckedList(); int queryFileIndex(QString &fullPath, int jobID); QSplitter *m_splitter; QString m_groupText; QString m_splitText1; QString m_splitText2; bool m_populated; bool m_dropdownChanged; bool m_slashTrap; QHash m_dirPaths; QString m_checkedJobs, m_prevJobCombo, m_prevClientCombo, m_prevFileSetCombo; int m_prevLimitSpinBox, m_prevDaysSpinBox; Qt::CheckState m_prevLimitCheckState, m_prevDaysCheckState; QString m_JobsCheckedList; int m_debugCnt; bool m_debugTrap; QList m_fileCheckStateList; QList m_versionCheckStateList; QHash m_fileExceptionHash; QMultiHash m_fileExceptionMulti; QHash m_versionExceptionHash; QHash m_directoryIconStateHash; QHash m_directoryPathIdHash; int m_toggleUpIndex, m_toggleDownIndex, m_nullFileNameId; }; #endif /* _RESTORETREE_H_ */ bacula-15.0.3/src/qt-console/restore/restore.ui0000644000175000017500000002423214771010173021245 0ustar bsbuildbsbuild restoreForm 0 0 796 597 Form Qt::Horizontal QSizePolicy::Expanding 81 20 16777215 41 <h3>Restore Select</h3> Qt::Horizontal QSizePolicy::Expanding 51 21 0 0 35 0 Up :/images/up.png 0 0 35 0 Mark :/images/check.png 0 0 50 0 Unmark :/images/unchecked.png Qt::Horizontal QSizePolicy::Expanding 81 21 <h2>Files</h2> Qt::Horizontal 71 21 0 0 Qt::Horizontal 1 1 200 0 16777215 16777215 1 0 
50 0 Select a Directory true 1 Directories 144 1 400 0 0 0 true QAbstractItemView::ExtendedSelection QAbstractItemView::ScrollPerItem true 7 1 1 2 3 4 5 6 0 0 Current Dir: lineEdit 0 0 100 0 Qt::Horizontal QSizePolicy::Fixed 30 32 OK Cancel 0 0 Status: :/images/check.png Mark :/images/unchecked.png UnMark bacula-15.0.3/src/qt-console/restore/restoretree.ui0000644000175000017500000002656014771010173022133 0ustar bsbuildbsbuild restoreTreeForm 0 0 695 432 Form 120 190 382 221 Qt::Horizontal 6 0 Jobs Qt::AlignCenter TextLabel Qt::AlignCenter true 24 Qt::Horizontal TextLabel Qt::AlignCenter true 24 Qt::Horizontal 1 6 0 Files Qt::AlignCenter 6 0 Versions of File Qt::AlignCenter FileName Qt::AlignCenter 20 59 669 95 900 95 3 2 4 2 2 3 Refresh :/images/view-refresh.png:/images/view-refresh.png TextLabel Restore :/images/restore.png:/images/restore.png 3 2 Job 0 0 Job List Job Criterion Selector Job List Job Criterion Selector 3 2 Client 0 0 Job List Client Criterion Selector Job List Client Criterion Selector 3 2 FileSet 0 0 Job List Fileset Criterion Selector Job List Fileset Criterion Selector 3 2 Record Limit 1 10000 25 3 2 Days Limit 7 0 0 PointingHandCursor Directory Select Directory UnselectDirectory bacula-15.0.3/src/qt-console/restore/brestore.ui0000644000175000017500000004707214771010173021416 0ustar bsbuildbsbuild bRestoreForm 0 0 1011 650 brestore 9 6 6 0 0 0 QComboBox::AdjustToContents false 0 0 QComboBox::AdjustToContents Qt::Horizontal QSizePolicy::Minimum 40 20 true Merge Jobs false View all Versions Current Directory 150 0 150 0 Qt::Vertical Qt::Horizontal 0 0 File list -- drag below for restore 5 5 Double Click File Name to decend true QAbstractItemView::DragOnly true QAbstractItemView::SelectRows false false false false Type File Name Double Click to decend Size Date QLayout::SetMinimumSize Selection Range 0 0 25 25 :/images/page-prev.gif:/images/page-prev.gif QAbstractSpinBox::NoButtons 9000000 500 0 0 16 16 - QAbstractSpinBox::NoButtons false 10 9999999 500 500 0 0 25 25 :/images/page-next.gif:/images/page-next.gif Qt::Horizontal 40 20 File Filter 150 0 111 23 25 25 :/images/view-refresh.png:/images/view-refresh.png 0 0 File revisions -- drag below for restore 6 9 0 0 true QAbstractItemView::DragOnly true QAbstractItemView::SingleSelection QAbstractItemView::SelectRows false InChanger Volume JobId Size Date Chksum 6 0 6 0 <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Drag and drop <span style=" font-weight:600;">File list</span> and/or <span style=" font-weight:600;">File revisions</span> items here for Restore</p></body></html> Qt::Horizontal 40 20 Clear false Estimate Restore... true false QAbstractItemView::DropOnly Qt::MoveAction QAbstractItemView::MultiSelection QAbstractItemView::SelectRows false false Type FileName Size Date JobId FileIndex Nb Files Select from Client list drop down then select from Job list drop down bRestoreTable QTableWidget

restore.h
OffsetNextBp clicked() Offset2Spin stepUp() 275 279 232 279 OffsetNextBp clicked() Offset1Spin stepUp() 272 281 92 279 OffsetPrevBp clicked() Offset1Spin stepDown() 44 287 123 282 OffsetPrevBp clicked() Offset2Spin stepDown() 50 284 221 282 bacula-15.0.3/src/qt-console/README.mingw320000644000175000017500000000503414771010173017704 0ustar bsbuildbsbuild BUILD SYSTEM: Ubuntu gutsy STATUS: Works REQUIRE: - Bacula cross compilation tool (must be able to compile bacula-fd.exe) - wine (apt-get install wine) - qt mingw32 distribution ORIGINAL HOWTO (french): http://doc.qtfr.org/post/2007/04/10/Cross-Compilation-Native-dapplication-Qt-depuis-Linux Legend: # comment $ shell command * tips Directory setup --------------- $ cd bacula/src/win32 $ ./build-win32-cross-tools $ ./build-depkgs-mingw32 It will result something like : ./ |-- depkgs-mingw32 |-- cross-tools `-- bacula Linux bacula setup -- Note: I believe that this is *required* before trying to build the Win32 bat. ------------ $ cd bacula $ ./configure $ cd bacula/src/win32 $ make Make sure that bacula/src/win32/release/bacula.dll is built QT4 setup ---------- Install QT for mingw Get the mingw installation from http://trolltech.com/developer/downloads/qt/windows (Try to get the same version than your linux installation) ftp://ftp.qtsoftware.com/qt/source $ wine qt-win-opensource-4.3.5-mingw.exe * Install under c:\Qt (no space) * no worry about mingw installation $ cp -r ~/.wine/drive_c/Qt/4.3.5/src/ depkgs-mingw32/include $ cp -r ~/.wine/drive_c/Qt/4.3.5/include depkgs-mingw32/include/qt $ cp -r ~/.wine/drive_c/Qt/4.3.5/lib depkgs-mingw32/lib/qt # cp ~/.wine/drive_c/Qt/4.3.5/bin/QtCore4.dll src/win32/release32 # cp ~/.wine/drive_c/Qt/4.3.5/bin/QtGui4.dll src/win32/release32 MINGW setup ----------- I think this only needs to be done once ... --- cross-tools/mingw32/mingw32/include/wchar.h.org 2008-07-13 15:18:52.000000000 +0200 +++ cross-tools/mingw32/mingw32/include/wchar.h 2008-07-12 14:47:10.000000000 +0200 @@ -394,7 +394,7 @@ time_t st_ctime; /* Creation time */ }; -#ifndef _NO_OLDNAMES +#ifdef _NO_OLDNAMES_DISABLE /* NOTE: Must be the same as _stat above. */ struct stat { Compile bat ----------- $ cd bacula/src/qt-console $ export DEPKGS="directory above cross-tools and depkgs" $ ./make-win32 clean $ ./make-win32 Cleanup ------- $ cd bacula/src/qt-console $ ./make-win32 clean The bat.exe will be in src/qt-console/debug/bat.exe Run Bat on Windows ------------------ You'll need zlib1.dll ssleay32.dll libeay32.dll QtCore4.dll QtGui4.dll bacula.dll pthreadGCE.dll mingwm10.dll bat.conf You can find the Qt dlls in ~/.wine/drive_c/Qt/4.3.5/bin Run Bat with wine ----------------- $ cd bacula/src/qt-console/debug # configure a bat.conf # copy all dlls to this directory $ wine bat That all, easy isn't it ? bacula-15.0.3/src/qt-console/status/0000755000175000017500000000000014771010173017060 5ustar bsbuildbsbuildbacula-15.0.3/src/qt-console/status/storstat.cpp0000644000175000017500000003034314771010173021452 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. 
This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * * Dirk Bartley, March 2007 */ #include "bat.h" #include #include #include "storstat.h" #include "mount/mount.h" #include "label/label.h" static bool working = false; /* prevent timer recursion */ /* .status storage= where is the storage name in the Director, and is one of the following: header running terminated waitreservation devices volumes spooling */ /* * Constructor for the class */ StorStat::StorStat(QString &storage, QTreeWidgetItem *parentTreeWidgetItem) : Pages() { m_storage = storage; setupUi(this); pgInitialize(tr("Storage Status %1").arg(m_storage), parentTreeWidgetItem); QTreeWidgetItem* thisitem = mainWin->getFromHash(this); thisitem->setIcon(0,QIcon(QString::fromUtf8(":images/status.png"))); m_cursor = new QTextCursor(textEditHeader->document()); m_timer = new QTimer(this); readSettings(); createConnections(); m_timer->start(1000); setCurrent(); dockPage(); } void StorStat::getFont() { QFont font = textEditHeader->font(); QString dirname; m_console->getDirResName(dirname); QSettings settings(dirname, "bat"); settings.beginGroup("Console"); font.setFamily(settings.value("consoleFont", "Courier").value()); font.setPointSize(settings.value("consolePointSize", 10).toInt()); font.setFixedPitch(settings.value("consoleFixedPitch", true).toBool()); settings.endGroup(); textEditHeader->setFont(font); } /* * Write the m_splitter settings in the destructor */ StorStat::~StorStat() { writeSettings(); } /* * Populate all tables and header widgets */ void StorStat::populateAll() { populateTerminated(); populateCurrentTab(tabWidget->currentIndex()); } /* * Timer is triggered, see if is current and repopulate. 
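 *
 * timerDisplay counts down once per second; when it reaches zero (and no
 * other refresh is in flight, see the static "working" flag) populateAll()
 * is called, but only while this page is the current tab or is undocked,
 * and only if the auto-refresh checkbox is ticked.  The counter is then
 * reset to the spinBox interval.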
*/ void StorStat::timerTriggered() { double value = timerDisplay->value(); value -= 1; if (value <= 0 && !working) { working = true; value = spinBox->value(); bool iscurrent = mainWin->tabWidget->currentIndex() == mainWin->tabWidget->indexOf(this); if (((isDocked() && iscurrent) || (!isDocked())) && (checkBox->checkState() == Qt::Checked)) { populateAll(); } working = false; } timerDisplay->display(value); } /* * Populate header text widget */ void StorStat::populateHeader() { QString command = QString(".status storage=\"" + m_storage + "\" header"); if (mainWin->m_commandDebug) Pmsg1(000, "sending command : %s\n",command.toUtf8().data()); QStringList results; textEditHeader->clear(); if (m_console->dir_cmd(command, results)) { foreach (QString line, results) { line += "\n"; textEditHeader->insertPlainText(line); } } } void StorStat::populateWaitReservation() { QString command = QString(".status storage=\"" + m_storage + "\" waitreservation"); if (mainWin->m_commandDebug) Pmsg1(000, "sending command : %s\n",command.toUtf8().data()); QStringList results; textEditWaitReservation->clear(); if (m_console->dir_cmd(command, results)) { foreach (QString line, results) { line += "\n"; textEditWaitReservation->insertPlainText(line); } } } void StorStat::populateDevices() { QString command = QString(".status storage=\"" + m_storage + "\" devices"); if (mainWin->m_commandDebug) Pmsg1(000, "sending command : %s\n",command.toUtf8().data()); QStringList results; textEditDevices->clear(); if (m_console->dir_cmd(command, results)) { foreach (QString line, results) { line += "\n"; textEditDevices->insertPlainText(line); } } } void StorStat::populateVolumes() { QString command = QString(".status storage=\"" + m_storage + "\" volumes"); if (mainWin->m_commandDebug) Pmsg1(000, "sending command : %s\n",command.toUtf8().data()); QStringList results; textEditVolumes->clear(); if (m_console->dir_cmd(command, results)) { foreach (QString line, results) { line += "\n"; textEditVolumes->insertPlainText(line); } } } void StorStat::populateSpooling() { QString command = QString(".status storage=\"" + m_storage + "\" spooling"); if (mainWin->m_commandDebug) Pmsg1(000, "sending command : %s\n",command.toUtf8().data()); QStringList results; textEditSpooling->clear(); if (m_console->dir_cmd(command, results)) { foreach (QString line, results) { line += "\n"; textEditSpooling->insertPlainText(line); } } } void StorStat::populateRunning() { QString command = QString(".status storage=\"" + m_storage + "\" running"); if (mainWin->m_commandDebug) Pmsg1(000, "sending command : %s\n",command.toUtf8().data()); QStringList results; textEditRunning->clear(); if (m_console->dir_cmd(command, results)) { foreach (QString line, results) { line += "\n"; textEditRunning->insertPlainText(line); } } } /* * Populate teminated table */ void StorStat::populateTerminated() { QString command = QString(".status storage=\"" + m_storage + "\" terminated"); if (mainWin->m_commandDebug) Pmsg1(000, "sending command : %s\n",command.toUtf8().data()); QStringList results; QBrush blackBrush(Qt::black); terminatedTable->clear(); QStringList headerlist = (QStringList() << tr("Job Id") << tr("Job Level") << tr("Job Files") << tr("Job Bytes") << tr("Job Status") << tr("Job Time") << tr("Job Name")); QStringList flaglist = (QStringList() << "R" << "L" << "R" << "R" << "LC" << "L" << "L"); terminatedTable->setColumnCount(headerlist.size()); terminatedTable->setHorizontalHeaderLabels(headerlist); if (m_console->dir_cmd(command, results)) { int row = 0; 
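      /*
       * Each line returned by ".status storage=<name> terminated" is tab
       * separated in the same order as headerlist above, for example
       * (illustrative values only):
       *   1234  Incr  1045  5368709120  OK  22-Mar-25 02:05  NightlySave
       * The "C" flag in flaglist marks the status column, which drives the
       * green/red background colouring below.
       */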
QTableWidgetItem* p_tableitem; terminatedTable->setRowCount(results.size()); foreach (QString line, results) { /* Iterate through the record returned from the query */ QStringList fieldlist = line.split("\t"); int column = 0; QString statusCode(""); /* Iterate through fields in the record */ foreach (QString field, fieldlist) { field = field.trimmed(); /* strip leading & trailing spaces */ p_tableitem = new QTableWidgetItem(field, 1); p_tableitem->setForeground(blackBrush); p_tableitem->setFlags(0); if (flaglist[column].contains("R")) p_tableitem->setTextAlignment(Qt::AlignRight); if (flaglist[column].contains("C")) { if (field == "OK") p_tableitem->setBackground(Qt::green); else p_tableitem->setBackground(Qt::red); } terminatedTable->setItem(row, column, p_tableitem); column += 1; } row += 1; } } terminatedTable->resizeColumnsToContents(); terminatedTable->resizeRowsToContents(); terminatedTable->verticalHeader()->hide(); } /* * When the treeWidgetItem in the page selector tree is singleclicked, Make sure * The tree has been populated. */ void StorStat::PgSeltreeWidgetClicked() { if (!m_populated) { populateAll(); m_populated=true; } } /* * Virtual function override of pages function which is called when this page * is visible on the stack */ void StorStat::currentStackItem() { populateAll(); timerDisplay->display(spinBox->value()); if (!m_populated) { m_populated=true; } } /* * Function to create connections for context sensitive menu for this and * the page selector */ void StorStat::createConnections() { connect(actionRefresh, SIGNAL(triggered()), this, SLOT(populateAll())); connect(tabWidget, SIGNAL(currentChanged(int)), this, SLOT(populateCurrentTab(int))); connect(mountButton, SIGNAL(pressed()), this, SLOT(mountButtonPushed())); connect(umountButton, SIGNAL(pressed()), this, SLOT(umountButtonPushed())); connect(labelButton, SIGNAL(pressed()), this, SLOT(labelButtonPushed())); connect(releaseButton, SIGNAL(pressed()), this, SLOT(releaseButtonPushed())); terminatedTable->setContextMenuPolicy(Qt::ActionsContextMenu); terminatedTable->addAction(actionRefresh); connect(m_timer, SIGNAL(timeout()), this, SLOT(timerTriggered())); } /* * Save user settings associated with this page */ void StorStat::writeSettings() { QSettings settings(m_console->m_dir->name(), "bat"); settings.beginGroup(m_groupText); settings.setValue(m_splitText, splitter->saveState()); settings.setValue("refreshInterval", spinBox->value()); settings.setValue("refreshCheck", checkBox->checkState()); settings.endGroup(); settings.beginGroup("OpenOnExit"); QString toWrite = "StorageStatus_" + m_storage; settings.setValue(toWrite, 1); settings.endGroup(); } /* * Read and restore user settings associated with this page */ void StorStat::readSettings() { m_groupText = "StorStatPage"; m_splitText = "splitterSizes_0"; QSettings settings(m_console->m_dir->name(), "bat"); settings.beginGroup(m_groupText); if (settings.contains(m_splitText)) { splitter->restoreState(settings.value(m_splitText).toByteArray()); } spinBox->setValue(settings.value("refreshInterval", 28).toInt()); checkBox->setCheckState((Qt::CheckState)settings.value("refreshCheck", Qt::Checked).toInt()); settings.endGroup(); timerDisplay->display(spinBox->value()); } /* * Populate the text edit window in the current tab */ void StorStat::populateCurrentTab(int index) { if (index == 0) populateHeader(); if (index == 1) populateWaitReservation(); if (index == 2) populateDevices(); if (index == 3) populateVolumes(); if (index == 4) populateSpooling(); if (index == 5) 
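         /* tab order: 0 Header, 1 Waitreservation, 2 Devices, 3 Volumes,
          * 4 Spooling, 5 Running */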
populateRunning(); } /* * execute mount in console */ void StorStat::mountButtonPushed() { int haschanger = 3; /* Set up query QString and header QStringList */ QString query("SELECT AutoChanger AS Changer" " FROM Storage WHERE Name='" + m_storage + "'" " ORDER BY Name" ); QStringList results; /* This could be a log item */ if (mainWin->m_sqlDebug) { Pmsg1(000, "Storage query cmd : %s\n",query.toUtf8().data()); } if (m_console->sql_cmd(query, results)) { int resultCount = results.count(); if (resultCount == 1){ QString resultline; QString field; QStringList fieldlist; /* there will only be one of these */ foreach (resultline, results) { fieldlist = resultline.split("\t"); int index = 0; /* Iterate through fields in the record */ foreach (field, fieldlist) { field = field.trimmed(); /* strip leading & trailing spaces */ haschanger = field.toInt(); index++; } } } } Pmsg1(000, "haschanger is : %i\n", haschanger); if (haschanger == 0){ /* no autochanger, just execute the command in the console */ QString cmd("mount storage=" + m_storage); consoleCommand(cmd); } else if (haschanger != 3) { setConsoleCurrent(); /* if this storage is an autochanger, lets ask for the slot */ new mountDialog(m_console, m_storage); } } /* * execute umount in console */ void StorStat::umountButtonPushed() { QString cmd("umount storage=" + m_storage); consoleCommand(cmd); } /* Release a tape in the drive */ void StorStat::releaseButtonPushed() { QString cmd("release storage="); cmd += m_storage; consoleCommand(cmd); } /* Label Media populating current storage by default */ void StorStat::labelButtonPushed() { new labelPage(m_storage); } bacula-15.0.3/src/qt-console/status/clientstat.ui0000644000175000017500000001542014771010173021573 0ustar bsbuildbsbuild ClientStatForm 0 0 557 350 Form Qt::Vertical 0 Running Header 200 0 0 0 16777215 16777215 1 0 Qt::StrongFocus false Qt::ScrollBarAsNeeded QTextEdit::AutoNone false QTextEdit::NoWrap true 141 0 141 16777215 Refresh Timer 20 50 111 24 5 999 20 20 101 20 Do Refresh 20 80 101 31 Qt::LeftToRight <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:13pt; font-weight:600;">Terminated Jobs</span></p></body></html> Qt::AlignCenter :/images/view-refresh.png Refresh :/images/utilities-terminal.png Cancel Running Job bacula-15.0.3/src/qt-console/status/clientstat.cpp0000644000175000017500000001760314771010173021745 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * Dirk Bartley, March 2007 */ #include "bat.h" #include #include #include "clientstat.h" /* This probably should be on a mutex */ static bool working = false; /* prevent timer recursion */ /* * Constructor for the class */ ClientStat::ClientStat(QString &client, QTreeWidgetItem *parentTreeWidgetItem) : Pages() { m_client = client; setupUi(this); pgInitialize(tr("Client Status %1").arg(m_client), parentTreeWidgetItem); QTreeWidgetItem* thisitem = mainWin->getFromHash(this); thisitem->setIcon(0,QIcon(QString::fromUtf8(":images/status.png"))); m_cursor = new QTextCursor(textEditHeader->document()); readSettings(); dockPage(); m_timer = new QTimer(this); createConnections(); m_timer->start(1000); setCurrent(); } void ClientStat::getFont() { QFont font = textEditHeader->font(); QString dirname; m_console->getDirResName(dirname); QSettings settings(dirname, "bat"); settings.beginGroup("Console"); font.setFamily(settings.value("consoleFont", "Courier").value()); font.setPointSize(settings.value("consolePointSize", 10).toInt()); font.setFixedPitch(settings.value("consoleFixedPitch", true).toBool()); settings.endGroup(); textEditHeader->setFont(font); } /* * Write the m_splitter settings in the destructor */ ClientStat::~ClientStat() { writeSettings(); } /* * Populate all tables and header widgets */ void ClientStat::populateAll() { populateTerminated(); populateCurrentTab(tabWidget->currentIndex()); } /* * Timer is triggered, see if is current and repopulate. */ void ClientStat::timerTriggered() { double value = timerDisplay->value(); value -= 1; if (value <= 0 && !working) { working = true; value = spinBox->value(); bool iscurrent = mainWin->tabWidget->currentIndex() == mainWin->tabWidget->indexOf(this); if (((isDocked() && iscurrent) || (!isDocked())) && (checkBox->checkState() == Qt::Checked)) { populateAll(); } working = false; } timerDisplay->display(value); } void ClientStat::populateCurrentTab(int index) { if (index == 0) populateRunning(); if (index == 1) populateHeader(); } /* * Populate header text widget */ void ClientStat::populateHeader() { QString command = QString(".status client=\"" + m_client + "\" header"); if (mainWin->m_commandDebug) Pmsg1(000, "sending command : %s\n",command.toUtf8().data()); QStringList results; textEditHeader->clear(); if (m_console->dir_cmd(command, results)) { foreach (QString line, results) { line += "\n"; textEditHeader->insertPlainText(line); } } } /* * Populate teminated table */ void ClientStat::populateTerminated() { QString command = QString(".status client=\"" + m_client + "\" terminated"); if (mainWin->m_commandDebug) Pmsg1(000, "sending command : %s\n",command.toUtf8().data()); QStringList results; QBrush blackBrush(Qt::black); terminatedTable->clear(); QStringList headerlist = (QStringList() << tr("Job Id") << tr("Job Level") << tr("Job Files") << tr("Job Bytes") << tr("Job Status") << tr("Job Time") << tr("Job Name")); QStringList flaglist = (QStringList() << "R" << "L" << "R" << "R" << "LC" << "L" << "L"); terminatedTable->setColumnCount(headerlist.size()); terminatedTable->setHorizontalHeaderLabels(headerlist); if (m_console->dir_cmd(command, results)) { int row = 0; QTableWidgetItem* p_tableitem; terminatedTable->setRowCount(results.size()); foreach (QString line, results) { /* Iterate through the record returned from the query */ QStringList fieldlist = line.split("\t"); int column = 0; QString statusCode(""); /* Iterate through fields in the record */ foreach (QString field, fieldlist) { field = field.trimmed(); /* strip leading & 
trailing spaces */ p_tableitem = new QTableWidgetItem(field, 1); p_tableitem->setForeground(blackBrush); p_tableitem->setFlags(0); if (flaglist[column].contains("R")) p_tableitem->setTextAlignment(Qt::AlignRight); if (flaglist[column].contains("C")) { if (field == "OK") p_tableitem->setBackground(Qt::green); else p_tableitem->setBackground(Qt::red); } terminatedTable->setItem(results.size() - row - 1, column, p_tableitem); column += 1; } row += 1; } } terminatedTable->resizeColumnsToContents(); terminatedTable->resizeRowsToContents(); terminatedTable->verticalHeader()->hide(); } /* * Populate running text */ void ClientStat::populateRunning() { QString command = QString(".status client=\"" + m_client + "\" running"); if (mainWin->m_commandDebug) Pmsg1(000, "sending command : %s\n",command.toUtf8().data()); QStringList results; textEditRunning->clear(); if (m_console->dir_cmd(command, results)) { foreach (QString line, results) { line += "\n"; textEditRunning->insertPlainText(line); } } } /* * When the treeWidgetItem in the page selector tree is single clicked, Make sure * The tree has been populated. */ void ClientStat::PgSeltreeWidgetClicked() { if (!m_populated) { populateAll(); m_populated=true; } } /* * Virtual function override of pages function which is called when this page * is visible on the stack */ void ClientStat::currentStackItem() { populateAll(); timerDisplay->display(spinBox->value()); if (!m_populated) { m_populated=true; } } /* * Function to create connections for context sensitive menu for this and * the page selector */ void ClientStat::createConnections() { connect(actionRefresh, SIGNAL(triggered()), this, SLOT(populateAll())); connect(tabWidget, SIGNAL(currentChanged(int)), this, SLOT(populateCurrentTab(int))); connect(m_timer, SIGNAL(timeout()), this, SLOT(timerTriggered())); terminatedTable->setContextMenuPolicy(Qt::ActionsContextMenu); terminatedTable->addAction(actionRefresh); } /* * Save user settings associated with this page */ void ClientStat::writeSettings() { QSettings settings(m_console->m_dir->name(), "bat"); settings.beginGroup(m_groupText); settings.setValue(m_splitText, splitter->saveState()); settings.setValue("refreshInterval", spinBox->value()); settings.setValue("refreshCheck", checkBox->checkState()); settings.endGroup(); settings.beginGroup("OpenOnExit"); QString toWrite = "ClientStatus_" + m_client; settings.setValue(toWrite, 1); settings.endGroup(); } /* * Read and restore user settings associated with this page */ void ClientStat::readSettings() { m_groupText = "ClientStatPage"; m_splitText = "splitterSizes_1"; QSettings settings(m_console->m_dir->name(), "bat"); settings.beginGroup(m_groupText); if (settings.contains(m_splitText)) { splitter->restoreState(settings.value(m_splitText).toByteArray()); } spinBox->setValue(settings.value("refreshInterval", 28).toInt()); checkBox->setCheckState((Qt::CheckState)settings.value("refreshCheck", Qt::Checked).toInt()); settings.endGroup(); timerDisplay->display(spinBox->value()); } bacula-15.0.3/src/qt-console/status/dirstat.cpp0000644000175000017500000002675514771010173021255 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. 
You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * * Dirk Bartley, March 2007 */ #include "bat.h" #include #include #include "dirstat.h" static bool working = false; /* prevent timer recursion */ /* * Constructor for the class */ DirStat::DirStat() : Pages() { setupUi(this); m_name = tr("Director Status"); pgInitialize(); QTreeWidgetItem* thisitem = mainWin->getFromHash(this); thisitem->setIcon(0,QIcon(QString::fromUtf8(":images/status.png"))); m_cursor = new QTextCursor(textEdit->document()); m_timer = new QTimer(this); readSettings(); m_timer->start(1000); createConnections(); setCurrent(); } void DirStat::getFont() { QFont font = textEdit->font(); QString dirname; m_console->getDirResName(dirname); QSettings settings(dirname, "bat"); settings.beginGroup("Console"); font.setFamily(settings.value("consoleFont", "Courier").value()); font.setPointSize(settings.value("consolePointSize", 10).toInt()); font.setFixedPitch(settings.value("consoleFixedPitch", true).toBool()); settings.endGroup(); textEdit->setFont(font); } /* * Write the m_splitter settings in the destructor */ DirStat::~DirStat() { writeSettings(); } /* * Populate all tables and header widgets */ void DirStat::populateAll() { populateHeader(); populateTerminated(); populateScheduled(); populateRunning(); } /* * Timer is triggered, see if is current and repopulate. */ void DirStat::timerTriggered() { double value = timerDisplay->value(); value -= 1; if (value <= 0 && !working) { working = true; value = spinBox->value(); bool iscurrent = mainWin->tabWidget->currentIndex() == mainWin->tabWidget->indexOf(this); if (((isDocked() && iscurrent) || ((!isDocked()) && isOnceDocked())) && (checkBox->checkState() == Qt::Checked)) { populateAll(); } working = false; } timerDisplay->display(value); } /* * Populate header text widget */ void DirStat::populateHeader() { QString command = QString(".status dir header"); if (mainWin->m_commandDebug) Pmsg1(000, "sending command : %s\n",command.toUtf8().data()); QStringList results; textEdit->clear(); if (m_console->dir_cmd(command, results)) { foreach (QString line, results) { line += "\n"; textEdit->insertPlainText(line); } } } /* * Populate teminated table */ void DirStat::populateTerminated() { QString command = QString(".status dir terminated"); if (mainWin->m_commandDebug) Pmsg1(000, "sending command : %s\n",command.toUtf8().data()); QStringList results; QBrush blackBrush(Qt::black); terminatedTable->clear(); QStringList headerlist = (QStringList() << tr("Job Id") << tr("Job Level") << tr("Job Files") << tr("Job Bytes") << tr("Job Status") << tr("Job Time") << tr("Job Name")); QStringList flaglist = (QStringList() << "R" << "L" << "R" << "R" << "LC" << "L" << "L"); terminatedTable->setColumnCount(headerlist.size()); terminatedTable->setHorizontalHeaderLabels(headerlist); if (m_console->dir_cmd(command, results)) { int row = 0; QTableWidgetItem* p_tableitem; terminatedTable->setRowCount(results.size()); foreach (QString line, results) { /* Iterate through the record returned from the query */ QStringList fieldlist = line.split("\t"); int column = 0; QString statusCode(""); /* Iterate through fields in the record */ foreach (QString field, fieldlist) { field = 
field.trimmed(); /* strip leading & trailing spaces */ p_tableitem = new QTableWidgetItem(field, 1); p_tableitem->setForeground(blackBrush); p_tableitem->setFlags(0); if (flaglist[column].contains("R")) p_tableitem->setTextAlignment(Qt::AlignRight); if (flaglist[column].contains("C")) { if (field == "OK") p_tableitem->setBackground(Qt::green); else p_tableitem->setBackground(Qt::red); } terminatedTable->setItem(results.size() - row - 1, column, p_tableitem); column += 1; } row += 1; } } terminatedTable->resizeColumnsToContents(); terminatedTable->resizeRowsToContents(); terminatedTable->verticalHeader()->hide(); } /* * Populate scheduled table */ void DirStat::populateScheduled() { QString command = QString(".status dir scheduled"); if (mainWin->m_commandDebug) Pmsg1(000, "sending command : %s\n",command.toUtf8().data()); QStringList results; QBrush blackBrush(Qt::black); scheduledTable->clear(); QStringList headerlist = (QStringList() << tr("Job Level") << tr("Job Type") << tr("Priority") << tr("Job Time") << tr("Job Name") << tr("Volume")); QStringList flaglist = (QStringList() << "L" << "L" << "R" << "L" << "L" << "L"); scheduledTable->setColumnCount(headerlist.size()); scheduledTable->setHorizontalHeaderLabels(headerlist); scheduledTable->setSelectionBehavior(QAbstractItemView::SelectRows); scheduledTable->setSelectionMode(QAbstractItemView::SingleSelection); if (m_console->dir_cmd(command, results)) { int row = 0; QTableWidgetItem* p_tableitem; scheduledTable->setRowCount(results.size()); foreach (QString line, results) { /* Iterate through the record returned from the query */ QStringList fieldlist = line.split("\t"); int column = 0; QString statusCode(""); /* Iterate through fields in the record */ foreach (QString field, fieldlist) { field = field.trimmed(); /* strip leading & trailing spaces */ p_tableitem = new QTableWidgetItem(field, 1); p_tableitem->setForeground(blackBrush); scheduledTable->setItem(row, column, p_tableitem); column += 1; } row += 1; } } scheduledTable->resizeColumnsToContents(); scheduledTable->resizeRowsToContents(); scheduledTable->verticalHeader()->hide(); } /* * Populate running table */ void DirStat::populateRunning() { QString command = QString(".status dir running"); if (mainWin->m_commandDebug) Pmsg1(000, "sending command : %s\n",command.toUtf8().data()); QStringList results; QBrush blackBrush(Qt::black); runningTable->clear(); QStringList headerlist = (QStringList() << tr("Job Id") << tr("Job Level") << tr("Job Data") << tr("Job Info")); runningTable->setColumnCount(headerlist.size()); runningTable->setHorizontalHeaderLabels(headerlist); runningTable->setSelectionBehavior(QAbstractItemView::SelectRows); if (m_console->dir_cmd(command, results)) { int row = 0; QTableWidgetItem* p_tableitem; runningTable->setRowCount(results.size()); foreach (QString line, results) { /* Iterate through the record returned from the query */ QStringList fieldlist = line.split("\t"); int column = 0; QString statusCode(""); /* Iterate through fields in the record */ foreach (QString field, fieldlist) { field = field.trimmed(); /* strip leading & trailing spaces */ p_tableitem = new QTableWidgetItem(field, 1); p_tableitem->setForeground(blackBrush); runningTable->setItem(row, column, p_tableitem); column += 1; } row += 1; } } runningTable->resizeColumnsToContents(); runningTable->resizeRowsToContents(); runningTable->verticalHeader()->hide(); } /* * When the treeWidgetItem in the page selector tree is singleclicked, Make sure * The tree has been populated. 
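 *
 * Population is deferred until the page is first selected (m_populated
 * prevents re-querying the Director on every click), and the page docks
 * itself the first time it is shown (see isOnceDocked() and dockPage()).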
*/ void DirStat::PgSeltreeWidgetClicked() { if (!m_populated) { populateAll(); m_populated=true; } if (!isOnceDocked()) { dockPage(); } } /* * Virtual function override of pages function which is called when this page * is visible on the stack */ void DirStat::currentStackItem() { populateAll(); timerDisplay->display(spinBox->value()); if (!m_populated) { m_populated=true; } } /* * Function to create connections for context sensitive menu for this and * the page selector */ void DirStat::createConnections() { connect(actionRefresh, SIGNAL(triggered()), this, SLOT(populateAll())); connect(actionCancelRunning, SIGNAL(triggered()), this, SLOT(consoleCancelJob())); connect(actionDisableScheduledJob, SIGNAL(triggered()), this, SLOT(consoleDisableJob())); connect(m_timer, SIGNAL(timeout()), this, SLOT(timerTriggered())); scheduledTable->setContextMenuPolicy(Qt::ActionsContextMenu); scheduledTable->addAction(actionRefresh); scheduledTable->addAction(actionDisableScheduledJob); terminatedTable->setContextMenuPolicy(Qt::ActionsContextMenu); terminatedTable->addAction(actionRefresh); runningTable->setContextMenuPolicy(Qt::ActionsContextMenu); runningTable->addAction(actionRefresh); runningTable->addAction(actionCancelRunning); } /* * Save user settings associated with this page */ void DirStat::writeSettings() { QSettings settings(m_console->m_dir->name(), "bat"); settings.beginGroup(m_groupText); settings.setValue(m_splitText, splitter->saveState()); settings.setValue("refreshInterval", spinBox->value()); settings.setValue("refreshCheck", checkBox->checkState()); settings.endGroup(); } /* * Read and restore user settings associated with this page */ void DirStat::readSettings() { m_groupText = "DirStatPage"; m_splitText = "splitterSizes_0"; QSettings settings(m_console->m_dir->name(), "bat"); settings.beginGroup(m_groupText); if (settings.contains(m_splitText)) { splitter->restoreState(settings.value(m_splitText).toByteArray()); } spinBox->setValue(settings.value("refreshInterval", 28).toInt()); checkBox->setCheckState((Qt::CheckState)settings.value("refreshCheck", Qt::Checked).toInt()); settings.endGroup(); timerDisplay->display(spinBox->value()); } /* * Cancel a running job */ void DirStat::consoleCancelJob() { QList rowList; QList sitems = runningTable->selectedItems(); foreach (QTableWidgetItem *sitem, sitems) { int row = sitem->row(); if (!rowList.contains(row)) { rowList.append(row); } } QStringList selectedJobsList; foreach(int row, rowList) { QTableWidgetItem * sitem = runningTable->item(row, 0); selectedJobsList.append(sitem->text()); } foreach( QString job, selectedJobsList ) { QString cmd("cancel jobid="); cmd += job; consoleCommand(cmd); } } /* * Disable a scheduled Job */ void DirStat::consoleDisableJob() { int currentrow = scheduledTable->currentRow(); QTableWidgetItem *item = scheduledTable->item(currentrow, 4); if (item) { QString text = item->text(); QString cmd("disable job=\""); cmd += text + '"'; consoleCommand(cmd); } } bacula-15.0.3/src/qt-console/status/storstat.ui0000644000175000017500000001746514771010173021317 0ustar bsbuildbsbuild StorStatForm 0 0 694 393 Form Qt::Vertical 6 Header Waitreservation Devices Volumes Spooling Running Misc 10 10 168 60 Mount UMount Label Release 141 0 141 16777215 Refresh Timer 20 50 111 24 5 999 20 20 101 20 Do Refresh 20 80 101 31 Qt::LeftToRight <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; 
font-style:normal; text-decoration:none;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:13pt; font-weight:600;">Terminated Jobs</span></p></body></html> Qt::AlignCenter :/images/view-refresh.png Refresh :/images/utilities-terminal.png Cancel Running Job :/images/utilities-terminal.png Disable Scheduled Job bacula-15.0.3/src/qt-console/status/dirstat.h0000644000175000017500000000277114771010173020712 0ustar bsbuildbsbuild#ifndef _DIRSTAT_H_ #define _DIRSTAT_H_ /* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Dirk Bartley, March 2007 */ #if QT_VERSION >= 0x050000 #include #else #include #endif #include "ui_dirstat.h" #include "console.h" #include "pages.h" class DirStat : public Pages, public Ui::DirStatForm { Q_OBJECT public: DirStat(); ~DirStat(); virtual void PgSeltreeWidgetClicked(); virtual void currentStackItem(); public slots: void populateHeader(); void populateTerminated(); void populateScheduled(); void populateRunning(); void populateAll(); private slots: void timerTriggered(); void consoleCancelJob(); void consoleDisableJob(); private: void createConnections(); void writeSettings(); void readSettings(); bool m_populated; QTextCursor *m_cursor; void getFont(); QString m_groupText, m_splitText; QTimer *m_timer; }; #endif /* _DIRSTAT_H_ */ bacula-15.0.3/src/qt-console/status/dirstat.ui0000644000175000017500000002276614771010173021106 0ustar bsbuildbsbuild DirStatForm 0 0 514 425 Form Qt::Vertical 0 0 0 0 16777215 100 1 0 Qt::StrongFocus false Qt::ScrollBarAsNeeded QTextEdit::AutoNone false QTextEdit::NoWrap true 0 0 221 100 221 100 Refresh Timer 20 60 111 24 5 999 20 20 101 20 Do Refresh 110 20 101 31 6 0 0 0 0 Qt::LeftToRight <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:13pt; font-weight:600;">Scheduled Jobs</span></p></body></html> Qt::AlignCenter 6 0 0 0 0 Qt::LeftToRight <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:13pt; font-weight:600;">Running Jobs</span></p></body></html> Qt::AlignCenter 6 0 0 0 0 Qt::LeftToRight <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> <p style=" margin-top:0px; 
margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:13pt; font-weight:600;">Terminated Jobs</span></p></body></html> Qt::AlignCenter :/images/view-refresh.png Refresh :/images/utilities-terminal.png Cancel Selected Running Jobs :/images/utilities-terminal.png Disable Scheduled Job bacula-15.0.3/src/qt-console/status/storstat.h0000644000175000017500000000336514771010173021123 0ustar bsbuildbsbuild#ifndef _STORSTAT_H_ #define _STORSTAT_H_ /* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Dirk Bartley, March 2007 */ #if QT_VERSION >= 0x050000 #include #else #include #endif #include "ui_storstat.h" #include "console.h" #include "pages.h" class StorStat : public Pages, public Ui::StorStatForm { Q_OBJECT public: StorStat(QString &, QTreeWidgetItem *); ~StorStat(); virtual void PgSeltreeWidgetClicked(); virtual void currentStackItem(); public slots: void populateHeader(); void populateTerminated(); void populateRunning(); void populateWaitReservation(); void populateDevices(); void populateVolumes(); void populateSpooling(); void populateAll(); private slots: void timerTriggered(); void populateCurrentTab(int); void mountButtonPushed(); void umountButtonPushed(); void releaseButtonPushed(); void labelButtonPushed(); private: void createConnections(); void writeSettings(); void readSettings(); bool m_populated; QTextCursor *m_cursor; void getFont(); QString m_groupText; QString m_splitText; QTimer *m_timer; QString m_storage; }; #endif /* _STORSTAT_H_ */ bacula-15.0.3/src/qt-console/status/clientstat.h0000644000175000017500000000301314771010173021400 0ustar bsbuildbsbuild#ifndef _CLIENTSTAT_H_ #define _CLIENTSTAT_H_ /* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * Dirk Bartley, March 2007 */ #if QT_VERSION >= 0x050000 #include #else #include #endif #include "ui_clientstat.h" #include "console.h" #include "pages.h" class ClientStat : public Pages, public Ui::ClientStatForm { Q_OBJECT public: ClientStat(QString&, QTreeWidgetItem*); ~ClientStat(); virtual void PgSeltreeWidgetClicked(); virtual void currentStackItem(); public slots: void populateHeader(); void populateTerminated(); void populateRunning(); void populateAll(); void populateCurrentTab(int); private slots: void timerTriggered(); private: void createConnections(); void writeSettings(); void readSettings(); bool m_populated; QTextCursor *m_cursor; void getFont(); QString m_groupText, m_splitText; QTimer *m_timer; QString m_client; }; #endif /* _CLIENTSTAT_H_ */ bacula-15.0.3/src/qt-console/pages.cpp0000644000175000017500000002727214771010173017352 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Dirk Bartley, March 2007 */ #include "bat.h" #include "pages.h" /* A global function */ bool isWin32Path(QString &fullPath) { if (fullPath.size()<2) { return false; } bool toret = fullPath[1].toLatin1() == ':' && fullPath[0].isLetter(); if (mainWin->m_miscDebug) { if (toret) Pmsg1(000, "returning from isWin32Path true %s\n", fullPath.toUtf8().data()); else Pmsg1(000, "returning from isWin32Path false %s\n", fullPath.toUtf8().data()); } return toret; } /* Need to initialize variables here */ Pages::Pages() : QWidget() { m_docked = false; m_onceDocked = false; m_closeable = true; m_dockOnFirstUse = true; m_console = NULL; m_parent = NULL; } /* first Use Dock */ void Pages::firstUseDock() { if (!m_onceDocked && m_dockOnFirstUse) { dockPage(); } } /* * dockPage * This function is intended to be called from within the Pages class to pull * a window from floating to in the stack widget. */ void Pages::dockPage() { if (isDocked()) { return; } /* These two lines are for making sure if it is being changed from a window * that it has the proper window flag and parent. */ setWindowFlags(Qt::Widget); /* calculate the index that the tab should be inserted into */ int tabPos = 0; QTreeWidgetItemIterator it(mainWin->treeWidget); while (*it) { Pages *somepage = mainWin->getFromHash(*it); if (this == somepage) { tabPos += 1; break; } int pageindex = mainWin->tabWidget->indexOf(somepage); if (pageindex != -1) { tabPos = pageindex; } ++it; } /* This was being done already */ m_parent->insertTab(tabPos, this, m_name); /* Set docked flag */ m_docked = true; m_onceDocked = true; mainWin->tabWidget->setCurrentWidget(this); /* lets set the page selectors action for docking or undocking */ setContextMenuDockText(); } /* * undockPage * This function is intended to be called from within the Pages class to put * a window from the stack widget to a floating window. 
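 * The page is removed from the tab widget, its window flags are reset to
 * Qt::Window, and it is shown again as a top-level window.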
*/ void Pages::undockPage() { if (!isDocked()) { return; } /* Change from a stacked widget to a normal window */ m_parent->removeTab(m_parent->indexOf(this)); setWindowFlags(Qt::Window); show(); /* Clear docked flag */ m_docked = false; /* The window has been undocked, lets change the context menu */ setContextMenuDockText(); } /* * This function is intended to be called with the subclasses. When it is * called the specific sublclass does not have to be known to Pages. When it * is called this function will change the page from it's current state of being * docked or undocked and change it to the other. */ void Pages::togglePageDocking() { if (m_docked) { undockPage(); } else { dockPage(); } } /* * This function is because I wanted for some reason to keep it protected but still * give any subclasses the ability to find out if it is currently stacked or not. */ bool Pages::isDocked() { return m_docked; } /* * This function is because after the tabbed widget was added I could not tell * from is docked if it had been docked yet. To prevent status pages from requesting * status from the director */ bool Pages::isOnceDocked() { return m_onceDocked; } /* * To keep m_closeable protected as well */ bool Pages::isCloseable() { return m_closeable; } void Pages::hidePage() { if (!m_parent || (m_parent->indexOf(this) <= 0)) { return; } /* Remove any tab that may exist */ m_parent->removeTab(m_parent->indexOf(this)); hide(); /* Clear docked flag */ m_docked = false; /* The window has been undocked, lets change the context menu */ setContextMenuDockText(); } /* * When a window is closed, this slot is called. The idea is to put it back in the * stack here, and it works. I wanted to get it to the top of the stack so that the * user immediately sees where his window went. Also, if he undocks the window, then * closes it with the tree item highlighted, it may be confusing that the highlighted * treewidgetitem is not the stack item in the front. */ void Pages::closeEvent(QCloseEvent* event) { /* A Widget was closed, lets toggle it back into the window, and set it in front. */ dockPage(); /* this fixes my woes of getting the widget to show up on top when closed */ event->ignore(); /* Set the current tree widget item in the Page Selector window to the item * which represents "this" * Which will also bring "this" to the top of the stacked widget */ setCurrent(); } /* * The next three are virtual functions. The idea here is that each subclass will have the * built in virtual function to override if the programmer wants to populate the window * when it it is first clicked. */ void Pages::PgSeltreeWidgetClicked() { } /* * Virtual function which is called when this page is visible on the stack. * This will be overridden by classes that want to populate if they are on the * top. 
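 * The base implementation below is intentionally empty.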
*/ void Pages::currentStackItem() { } /* * Function to close the stacked page and remove the widget from the * Page selector window */ void Pages::closeStackPage() { /* First get the tree widget item and destroy it */ QTreeWidgetItem *item=mainWin->getFromHash(this); /* remove the QTreeWidgetItem <-> page from the hash */ if (item) { mainWin->hashRemove(item, this); /* remove the item from the page selector by destroying it */ delete item; } /* remove this */ delete this; } /* * Function to set members from the external mainwin and it's overload being * passed a specific QTreeWidgetItem to be it's parent on the tree */ void Pages::pgInitialize() { pgInitialize(QString(), NULL); } void Pages::pgInitialize(const QString &name) { pgInitialize(name, NULL); } void Pages::pgInitialize(const QString &tname, QTreeWidgetItem *parentTreeWidgetItem) { m_docked = false; m_onceDocked = false; if (tname.size()) { m_name = tname; } m_parent = mainWin->tabWidget; m_console = mainWin->currentConsole(); if (!parentTreeWidgetItem) { parentTreeWidgetItem = m_console->directorTreeItem(); } QTreeWidgetItem *item = new QTreeWidgetItem(parentTreeWidgetItem); QString name; treeWidgetName(name); item->setText(0, name); mainWin->hashInsert(item, this); setTitle(); } /* * Virtual Function to return a name * All subclasses should override this function */ void Pages::treeWidgetName(QString &name) { name = m_name; } /* * Function to simplify executing a console command and bringing the * console to the front of the stack */ void Pages::consoleCommand(QString &command) { consoleCommand(command, true); } void Pages::consoleCommand(QString &command, bool setCurrent) { int conn; if (m_console->getDirComm(conn)) { consoleCommand(command, conn, setCurrent, true); } } /* * Lowest level of console command method. * "notify" parameter default is set to true by higher level console command call. * In most cases "notify" parameter should be set to true value because after console * command sent, notifier should be always enabled for catch all Director responses. */ void Pages::consoleCommand(QString &command, int conn, bool setCurrent, bool notify) { if (notify) { m_console->notify(conn, true); } /* Bring this director's console to the front of the stack */ if (setCurrent) { setConsoleCurrent(); } QString displayhtml(""); displayhtml += command + "\n"; m_console->display_html(displayhtml); m_console->display_text("\n"); mainWin->waitEnter(); m_console->write_dir(conn, command.toUtf8().data(), false); m_console->displayToPrompt(conn); mainWin->waitExit(); } /* * Function for handling undocked windows becoming active. * Change the currently selected item in the page selector window to the now * active undocked window. This will also make the console for the undocked * window m_currentConsole. */ void Pages::changeEvent(QEvent *event) { if ((event->type() == QEvent::ActivationChange) && (isActiveWindow())) { setCurrent(); } } /* * Function to simplify getting the name of the class and the director into * the caption or title of the window */ void Pages::setTitle() { QString wdgname, director; treeWidgetName(wdgname); m_console->getDirResName(director); QString title = tr("%1 of Director %2").arg(wdgname).arg(director); setWindowTitle(title); } /* * Bring the current directors console window to the top of the stack. */ void Pages::setConsoleCurrent() { mainWin->treeWidget->setCurrentItem(mainWin->getFromHash(m_console)); } /* * Bring this window to the top of the stack. 
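 * Selecting the matching page-selector tree item is what raises the page.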
*/ void Pages::setCurrent() { mainWin->treeWidget->setCurrentItem(mainWin->getFromHash(this)); } /* * Function to set the text of the toggle dock context menu when page and * widget item are NOT known. */ void Pages::setContextMenuDockText() { QTreeWidgetItem *item = mainWin->getFromHash(this); QString docktext; if (isDocked()) { docktext = tr("UnDock %1 Window").arg(item->text(0)); } else { docktext = tr("ReDock %1 Window").arg(item->text(0)); } mainWin->actionToggleDock->setText(docktext); setTreeWidgetItemDockColor(); } /* * Function to set the color of the tree widget item based on whether it is * docked or not. */ void Pages::setTreeWidgetItemDockColor() { QTreeWidgetItem* item = mainWin->getFromHash(this); if (item) { if (item->text(0) != tr("Console")) { if (isDocked()) { /* Set the brush to blue if undocked */ QBrush blackBrush(Qt::black); item->setForeground(0, blackBrush); } else { /* Set the brush back to black if docked */ QBrush blueBrush(Qt::blue); item->setForeground(0, blueBrush); } } } } /* Function to get a list of volumes */ void Pages::getVolumeList(QStringList &volumeList) { QString query("SELECT VolumeName AS Media FROM Media ORDER BY Media"); if (mainWin->m_sqlDebug) { Pmsg1(000, "Query cmd : %s\n",query.toUtf8().data()); } QStringList results; if (m_console->sql_cmd(query, results)) { QString field; QStringList fieldlist; /* Iterate through the lines of results. */ foreach (QString resultline, results) { fieldlist = resultline.split("\t"); volumeList.append(fieldlist[0]); } /* foreach resultline */ } /* if results from query */ } /* Function to get a list of volumes */ void Pages::getStatusList(QStringList &statusLongList) { QString statusQuery("SELECT JobStatusLong FROM Status"); if (mainWin->m_sqlDebug) { Pmsg1(000, "Query cmd : %s\n",statusQuery.toUtf8().data()); } QStringList statusResults; if (m_console->sql_cmd(statusQuery, statusResults)) { QString field; QStringList fieldlist; /* Iterate through the lines of results. 
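 * Each result line is tab-separated; only the first field (JobStatusLong)
 * is appended to the caller's list.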
*/ foreach (QString resultline, statusResults) { fieldlist = resultline.split("\t"); statusLongList.append(fieldlist[0]); } /* foreach resultline */ } /* if results from statusquery */ } bacula-15.0.3/src/qt-console/prefs.ui0000644000175000017500000005304514771010173017222 0ustar bsbuildbsbuild PrefsForm 0 0 470 533 0 0 Preferences images/bat.png 9 9 9 9 6 6 0 Timers 60 60 180 106 0 0 Messages Options 11 81 158 16 Message check interval in seconds 11 25 158 20 Check Messages 11 51 158 24 3600 Joblist 9 9 9 9 6 6 0 0 Joblist Limit Options 9 9 9 9 6 6 0 0 0 0 6 6 Days Limit 1 10000 7 1 10000 25 Record Limit Misc 30 220 311 111 Convert 20 20 231 22 Convert Off 20 50 231 22 Display Bytes using IEC units (1024B = 1 KiB) 20 80 231 22 Display Bytes using SI units (1000B = 1KB) 30 10 311 61 Context Sensitive List Commands 9 9 9 9 6 6 Execute Long List 30 80 311 121 Open Pages 10 30 241 20 Open Plot page on startup 10 60 241 20 Open Browser page on startup 10 90 241 20 Open Director Status page on startup Debug 9 9 9 9 6 6 0 0 Debugging Options Debug comm Debug multiple connection Display all messages in console Debug Sql queries Debug Commands Debug Miscellaneous Items RestoreTree 9 9 9 9 6 6 0 0 Debugging Options 9 9 9 9 6 6 0 0 0 0 6 6 Restore Debug 2 Directory Item Changed Restore Debug 1 Directory Current Item Changed Debug Update File Table Debug Version Table Item Changed Debug File Table Item Changed Debug Icon State Debug Update Checks Debug Restore Debug 3 Update Version Table Debug Populate Directory Debug Qt::Horizontal QDialogButtonBox::Cancel|QDialogButtonBox::NoButton|QDialogButtonBox::Ok 6 0 0 0 0 Qt::Horizontal 81 20 0 0 <h2>Preferences</h2> Qt::Horizontal 101 20 buttonBox accepted() PrefsForm accept() 248 254 157 274 buttonBox rejected() PrefsForm reject() 316 260 286 274 bacula-15.0.3/src/qt-console/mainwin.h0000644000175000017500000001073214771010173017353 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * qt-console main window class definition. 
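 * MainWin owns the page selector tree, the tabbed page stack, and the
 * hashes mapping tree widget items to Pages objects.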
* * Written by Kern Sibbald, January MMVII */ #ifndef _MAINWIN_H_ #define _MAINWIN_H_ #if QT_VERSION >= 0x050000 #include #else #include #endif #include #include "ui_main.h" class Console; class Pages; class MainWin : public QMainWindow, public Ui::MainForm { Q_OBJECT public: MainWin(QWidget *parent = 0); void set_statusf(const char *fmt, ...); void set_status_ready(); void set_status(const char *buf); void set_status(const QString &str); void writeSettings(); void readSettings(); void resetFocus() { lineEdit->setFocus(); }; void hashInsert(QTreeWidgetItem *, Pages *); void hashRemove(Pages *); void hashRemove(QTreeWidgetItem *, Pages *); void setMessageIcon(); bool getWaitState() {return m_waitState; }; bool isClosing() {return m_isClosing; }; Console *currentConsole(); QTreeWidgetItem *currentTopItem(); Pages* getFromHash(QTreeWidgetItem *); QTreeWidgetItem* getFromHash(Pages *); /* This hash is to get the page when the page selector widget is known */ QHash m_pagehash; /* This hash is to get the page selector widget when the page is known */ QHash m_widgethash; /* This is a list of consoles */ QHash m_consoleHash; void createPageJobList(const QString &, const QString &, const QString &, const QString &, QTreeWidgetItem *); QString m_dtformat; /* Begin Preferences variables */ bool m_commDebug; bool m_connDebug; bool m_displayAll; bool m_sqlDebug; bool m_commandDebug; bool m_miscDebug; bool m_recordLimitCheck; int m_recordLimitVal; bool m_daysLimitCheck; int m_daysLimitVal; bool m_checkMessages; int m_checkMessagesInterval; bool m_longList; bool m_rtPopDirDebug; bool m_rtDirCurICDebug; bool m_rtDirICDebug; bool m_rtFileTabICDebug; bool m_rtVerTabICDebug; bool m_rtUpdateFTDebug; bool m_rtUpdateVTDebug; bool m_rtChecksDebug; bool m_rtIconStateDebug; bool m_rtRestore1Debug; bool m_rtRestore2Debug; bool m_rtRestore3Debug; bool m_openBrowser; bool m_openPlot; bool m_openDirStat; /* Global */ bool m_notify; /* global flag to turn on/off all notifiers */ public slots: void input_line(); void about(); void help(); void treeItemClicked(QTreeWidgetItem *item, int column); void labelButtonClicked(); void runButtonClicked(); void estimateButtonClicked(); void browseButtonClicked(); void statusPageButtonClicked(); void jobPlotButtonClicked(); void restoreButtonClicked(); void undockWindowButton(); void treeItemChanged(QTreeWidgetItem *, QTreeWidgetItem *); void stackItemChanged(int); void toggleDockContextWindow(); void closePage(int item); void closeCurrentPage(); void setPreferences(); void readPreferences(); void waitEnter(); void waitExit(); void repopLists(); void reloadRepopLists(); void popLists(); void goToPreviousPage(); protected: void closeEvent(QCloseEvent *event); void keyPressEvent(QKeyEvent *event); private: void connectConsole(); void createPages(); void connectSignals(); void disconnectSignals(); void connectConsoleSignals(); void disconnectConsoleSignals(Console *console); private: Console *m_currentConsole; Pages *m_pagespophold; QStringList m_cmd_history; int m_cmd_last; QTreeWidgetItem *m_firstItem; QTreeWidgetItem *m_waitTreeItem; bool m_isClosing; bool m_waitState; bool m_doConnect; QList m_treeWidgetStack; bool m_treeStackTrap; }; #include "ui_prefs.h" class prefsDialog : public QDialog, public Ui::PrefsForm { Q_OBJECT public: prefsDialog(); private slots: void accept(); void reject(); }; #endif /* _MAINWIN_H_ */ bacula-15.0.3/src/qt-console/qwtconfig.pri0000644000175000017500000000600414771010173020252 0ustar bsbuildbsbuild # # Inserted by build script # # unix { # 
INSTALLBASE = /usr #} # #win32 { # INSTALLBASE = C:/Qwt-5.0.2 #} target.path = $$INSTALLBASE/lib headers.path = $$INSTALLBASE/include # doc.path = $$INSTALLBASE/doc ###################################################################### # qmake internal options ###################################################################### CONFIG += qt # Also for Qtopia Core! CONFIG += warn_on CONFIG += thread ###################################################################### # release/debug mode # The designer plugin is always built in release mode. # If want to change this, you have to edit designer/designer.pro. ###################################################################### CONFIG += release # release/debug ###################################################################### # Build the static/shared libraries. # If QwtDll is enabled, a shared library is built, otherwise # it will be a static library. ###################################################################### # CONFIG += QwtDll ###################################################################### # QwtPlot enables all classes, that are needed to use the QwtPlot # widget. ###################################################################### CONFIG += QwtPlot ###################################################################### # QwtWidgets enables all classes, that are needed to use the all other # widgets (sliders, dials, ...), beside QwtPlot. ###################################################################### # CONFIG += QwtWidgets ###################################################################### # If you want to display svg imageson the plot canvas, enable the # line below. Note that Qwt needs the svg+xml, when enabling # QwtSVGItem. ###################################################################### #CONFIG += QwtSVGItem ###################################################################### # If you have a commercial license you can use the MathML renderer # of the Qt solutions package to enable MathML support in Qwt. # So if you want this, copy qtmmlwidget.h + qtmmlwidget.cpp to # textengines/mathml and enable the line below. ###################################################################### #CONFIG += QwtMathML ###################################################################### # If you want to build the Qwt designer plugin, # enable the line below. # Otherwise you have to build it from the designer directory. ###################################################################### # CONFIG += QwtDesigner ###################################################################### # If you want to auto build the examples, enable the line below # Otherwise you have to build them from the examples directory. ###################################################################### # CONFIG += QwtExamples unix { INSTALLBASE = /home/kern/bacula/x/src/qt-console/qwt } bacula-15.0.3/src/qt-console/pages.h0000644000175000017500000000553614771010173017016 0ustar bsbuildbsbuild#ifndef _PAGES_H_ #define _PAGES_H_ /* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. 
This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Dirk Bartley, March 2007 */ #if QT_VERSION >= 0x050000 #include #else #include #endif #include /* * The Pages Class * * This class is inherited by all widget windows which are on the stack * It is for the purpose of having a consistent set of functions and properties * in all of the subclasses to accomplish tasks such as pulling a window out * of or into the stack. It also provides virtual functions called * from in mainwin so that subclasses can contain functions to allow them * to populate the screens at the time of first viewing, (when selected) as * opposed to the first creation of the console connection. The * console is not connected until after the page selector tree has been * populated. */ class Console; class Pages : public QWidget { public: /* methods */ Pages(); void dockPage(); void undockPage(); void hidePage(); void togglePageDocking(); bool isDocked(); bool isOnceDocked(); bool isCloseable(); virtual void PgSeltreeWidgetClicked(); virtual void currentStackItem(); void closeStackPage(); Console *console() { return m_console; }; void setCurrent(); void setContextMenuDockText(); void setTreeWidgetItemDockColor(); void consoleCommand(QString &); void consoleCommand(QString &, bool setCurrent); void consoleCommand(QString &, int conn, bool setCurrent=true, bool notify=true); QString &name() { return m_name; }; void getVolumeList(QStringList &); void getStatusList(QStringList &); void firstUseDock(); /* members */ QTabWidget *m_parent; QList m_contextActions; public slots: /* closeEvent is a virtual function inherited from QWidget */ virtual void closeEvent(QCloseEvent* event); protected: /* methods */ void pgInitialize(); void pgInitialize(const QString &); void pgInitialize(const QString &, QTreeWidgetItem *); virtual void treeWidgetName(QString &); virtual void changeEvent(QEvent *event); void setConsoleCurrent(); void setTitle(); /* members */ bool m_closeable; bool m_docked; bool m_onceDocked; bool m_dockOnFirstUse; Console *m_console; QString m_name; }; #endif /* _PAGES_H_ */ bacula-15.0.3/src/qt-console/storage/0000755000175000017500000000000014771010173017201 5ustar bsbuildbsbuildbacula-15.0.3/src/qt-console/storage/content.h0000644000175000017500000000303014771010173021020 0ustar bsbuildbsbuild#ifndef _CONTENT_H_ #define _CONTENT_H_ /* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ #if QT_VERSION >= 0x050000 #include #else #include #endif #include "ui_content.h" #include "console.h" #include "pages.h" class Content : public Pages, public Ui::ContentForm { Q_OBJECT public: Content(QString storage, QTreeWidgetItem *parentWidget); // virtual void PgSeltreeWidgetClicked(); virtual void currentStackItem(); public slots: void treeItemChanged(QTreeWidgetItem *, QTreeWidgetItem *); void consoleRelease(); void consoleUpdateSlots(); void consoleLabelStorage(); void consoleMountStorage(); void statusStorageWindow(); void consoleUnMountStorage(); void showMediaInfo(QTableWidgetItem * item); private slots: void populateContent(); private: bool m_currentAutoChanger; bool m_populated; bool m_firstpopulation; bool m_checkcurwidget; QString m_currentStorage; }; #endif /* _STORAGE_H_ */ bacula-15.0.3/src/qt-console/storage/content.cpp0000644000175000017500000002163314771010173021364 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #include "bat.h" #include #include #include "content.h" #include "label/label.h" #include "mediainfo/mediainfo.h" #include "mount/mount.h" #include "util/fmtwidgetitem.h" #include "status/storstat.h" // // TODO: List tray // List drives in autochanger // use user selection to add slot= argument // Content::Content(QString storage, QTreeWidgetItem *parentWidget) : Pages() { setupUi(this); pgInitialize(storage, parentWidget); QTreeWidgetItem* thisitem = mainWin->getFromHash(this); thisitem->setIcon(0,QIcon(QString::fromUtf8(":images/package-x-generic.png"))); m_populated = false; m_firstpopulation = true; m_checkcurwidget = true; m_currentStorage = storage; connect(pbUpdate, SIGNAL(clicked()), this, SLOT(consoleUpdateSlots())); connect(pbLabel, SIGNAL(clicked()), this, SLOT(consoleLabelStorage())); connect(pbMount, SIGNAL(clicked()), this, SLOT(consoleMountStorage())); connect(pbUnmount, SIGNAL(clicked()), this, SLOT(consoleUnMountStorage())); connect(pbStatus, SIGNAL(clicked()), this, SLOT(statusStorageWindow())); connect(pbRelease, SIGNAL(clicked()), this, SLOT(consoleRelease())); connect(tableContent, SIGNAL(itemDoubleClicked(QTableWidgetItem*)), this, SLOT(showMediaInfo(QTableWidgetItem *))); dockPage(); setCurrent(); } /* * Subroutine to call class to show the log in the database from that job */ void Content::showMediaInfo(QTableWidgetItem * item) { QTreeWidgetItem* pageSelectorTreeWidgetItem = mainWin->getFromHash(this); int row = item->row(); QString vol = tableContent->item(row, 1)->text(); if (vol != "") { new MediaInfo(pageSelectorTreeWidgetItem, vol); } } void table_get_selection(QTableWidget *table, QString &sel) { QTableWidgetItem *item; int current; /* The QT selection returns each cell, so you * have x times the same row number... * We take only one instance */ int s = table->rowCount(); if (s == 0) { /* No selection?? 
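 * An empty table means nothing can be selected, so 'sel' is left untouched.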
*/ return; } bool *tab = (bool *)malloc(s * sizeof(bool)); memset(tab, 0, s); foreach (item, table->selectedItems()) { current = item->row(); tab[current]=true; } sel += "="; for(int i=0; iitem(i, 0)->text(); sel += ","; } } sel.chop(1); // remove trailing , or useless = free(tab); } /* Label Media populating current storage by default */ void Content::consoleLabelStorage() { QString sel; table_get_selection(tableContent, sel); if (sel == "") { new labelPage(m_currentStorage); } else { QString cmd = "label barcodes slots"; cmd += sel; cmd += " storage=" + m_currentStorage; consoleCommand(cmd); } } /* Mount currently selected storage */ void Content::consoleMountStorage() { setConsoleCurrent(); /* if this storage is an autochanger, lets ask for the slot */ new mountDialog(m_console, m_currentStorage); } /* Unmount Currently selected storage */ void Content::consoleUnMountStorage() { QString cmd("umount storage="); cmd += m_currentStorage; consoleCommand(cmd); } void Content::statusStorageWindow() { /* if one exists, then just set it current */ bool found = false; foreach(Pages *page, mainWin->m_pagehash) { if (mainWin->currentConsole() == page->console()) { if (page->name() == tr("Storage Status %1").arg(m_currentStorage)) { found = true; page->setCurrent(); } } } if (!found) { QTreeWidgetItem *parentItem = mainWin->getFromHash(this); new StorStat(m_currentStorage, parentItem); } } /* * The main meat of the class!! The function that querries the director and * creates the widgets with appropriate values. */ void Content::populateContent() { char buf[200]; time_t tim; struct tm tm; QStringList results_all; QString cmd("status slots drive=0 storage=\"" + m_currentStorage + "\""); m_console->dir_cmd(cmd, results_all); Freeze frz(*tableContent); /* disable updating*/ Freeze frz2(*tableTray); Freeze frz3(*tableDrive); tableContent->clearContents(); tableTray->clearContents(); tableTray->clearContents(); // take only valid records, TODO: Add D to get drive status QStringList results = results_all.filter(QRegExp("^[IS]\\|[0-9]+\\|")); tableContent->setRowCount(results.size()); QStringList io_results = results_all.filter(QRegExp("^I\\|[0-9]+\\|")); tableTray->setRowCount(io_results.size()); QString resultline; QStringList fieldlist; int row = 0, row_io=0; foreach (resultline, results) { fieldlist = resultline.split("|"); if (fieldlist.size() < 10) { Pmsg1(0, "Discarding %s\n", resultline.data()); continue; /* some fields missing, ignore row */ } int index=0; QStringListIterator fld(fieldlist); TableItemFormatter slotitem(*tableContent, row); /* Slot type */ if (fld.next() == "I") { TableItemFormatter ioitem(*tableTray, row_io++); ioitem.setNumericFld(0, fieldlist[1]); ioitem.setTextFld(1, fieldlist[3]); } /* Slot */ slotitem.setNumericFld(index++, fld.next()); /* Real Slot */ if (fld.next() != "") { /* Volume */ slotitem.setTextFld(index++, fld.next()); /* Bytes */ slotitem.setBytesFld(index++, fld.next()); /* Status */ slotitem.setVolStatusFld(index++, fld.next()); /* MediaType */ slotitem.setTextFld(index++, fld.next()); /* Pool */ slotitem.setTextFld(index++, fld.next()); tim = fld.next().toInt(); if (tim > 0) { /* LastW */ localtime_r(&tim, &tm); strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", &tm); slotitem.setTextFld(index++, QString(buf)); /* Expire */ tim = fld.next().toInt(); localtime_r(&tim, &tm); strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", &tm); slotitem.setTextFld(index++, QString(buf)); } } row++; } tableContent->verticalHeader()->hide(); tableContent->sortByColumn(0, 
Qt::AscendingOrder); tableContent->setSortingEnabled(true); tableContent->resizeColumnsToContents(); tableContent->resizeRowsToContents(); tableContent->setEditTriggers(QAbstractItemView::NoEditTriggers); m_populated = true; tableTray->verticalHeader()->hide(); tableTray->setEditTriggers(QAbstractItemView::NoEditTriggers); tableDrive->verticalHeader()->hide(); /* Get count of rows needed (Drives) */ QStringList drives = results_all.filter(QRegExp("^D\\|[0-9]+\\|")); /* Ensure we have sufficient rows for Drive display */ tableDrive->setRowCount(drives.size()); row = 0; foreach (resultline, drives) { fieldlist = resultline.split("|"); if (fieldlist.size() < 4) { continue; /* some fields missing, ignore row */ } int index=0; QStringListIterator fld(fieldlist); TableItemFormatter slotitem(*tableDrive, row); /* Drive type */ fld.next(); /* Number */ slotitem.setNumericFld(index++, fld.next()); /* Slot */ fld.next(); /* Volume */ slotitem.setTextFld(index++, fld.next()); row++; } tableDrive->resizeRowsToContents(); tableDrive->setEditTriggers(QAbstractItemView::NoEditTriggers); } /* * Virtual function which is called when this page is visible on the stack */ void Content::currentStackItem() { if(!m_populated) { populateContent(); } } void Content::treeItemChanged(QTreeWidgetItem *, QTreeWidgetItem *) { } /* Update Slots */ void Content::consoleUpdateSlots() { QString sel = ""; table_get_selection(tableContent, sel); QString cmd("update slots"); if (sel != "") { cmd += sel; } cmd += " drive=0 storage=" + m_currentStorage; Pmsg1(0, "cmd=%s\n", cmd.toUtf8().data()); consoleCommand(cmd); populateContent(); } /* Release a tape in the drive */ void Content::consoleRelease() { QString cmd("release storage="); cmd += m_currentStorage; consoleCommand(cmd); } bacula-15.0.3/src/qt-console/storage/storage.h0000644000175000017500000000352214771010173021020 0ustar bsbuildbsbuild#ifndef _STORAGE_H_ #define _STORAGE_H_ /* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * Dirk Bartley, March 2007 */ #if QT_VERSION >= 0x050000 #include #else #include #endif #include "ui_storage.h" #include "console.h" #include "pages.h" class Storage : public Pages, public Ui::StorageForm { Q_OBJECT public: Storage(); ~Storage(); virtual void PgSeltreeWidgetClicked(); virtual void currentStackItem(); public slots: void treeItemChanged(QTreeWidgetItem *, QTreeWidgetItem *); private slots: void populateTree(); void consoleStatusStorage(); void consoleLabelStorage(); void consoleMountStorage(); void consoleUnMountStorage(); void consoleUpdateSlots(); void consoleUpdateSlotsScan(); void consoleRelease(); void statusStorageWindow(); void contentWindow(); private: void createContextMenu(); void mediaList(QTreeWidgetItem *parent, const QString &storageID); void settingsOpenStatus(QString& storage); QString m_currentStorage; bool m_currentAutoChanger; bool m_populated; bool m_firstpopulation; bool m_checkcurwidget; void writeExpandedSettings(); QTreeWidgetItem *m_topItem; }; void table_get_selection(QTableWidget *table, QString &sel); #endif /* _STORAGE_H_ */ bacula-15.0.3/src/qt-console/storage/storage.cpp0000644000175000017500000003547314771010173021365 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * * Storage Class * * Dirk Bartley, March 2007 * */ #include "bat.h" #include #include #include "storage.h" #include "content.h" #include "label/label.h" #include "mount/mount.h" #include "status/storstat.h" #include "util/fmtwidgetitem.h" Storage::Storage() : Pages() { setupUi(this); pgInitialize(tr("Storage")); QTreeWidgetItem* thisitem = mainWin->getFromHash(this); thisitem->setIcon(0,QIcon(QString::fromUtf8(":images/package-x-generic.png"))); /* mp_treeWidget, Storage Tree Tree Widget inherited from ui_storage.h */ m_populated = false; m_firstpopulation = true; m_checkcurwidget = true; m_closeable = false; m_currentStorage = ""; /* add context sensitive menu items specific to this classto the page * selector tree. m_contextActions is QList of QActions */ m_contextActions.append(actionRefreshStorage); } Storage::~Storage() { if (m_populated) writeExpandedSettings(); } /* * The main meat of the class!! The function that querries the director and * creates the widgets with appropriate values. 
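 * The storage names known to the console are folded into one SQL query, and
 * mediaList() is called for each storage flagged as an autochanger.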
*/ void Storage::populateTree() { if (m_populated) writeExpandedSettings(); m_populated = true; Freeze frz(*mp_treeWidget); /* disable updating */ m_checkcurwidget = false; mp_treeWidget->clear(); m_checkcurwidget = true; QStringList headerlist = (QStringList() << tr("Name") << tr("Id") << tr("Changer") << tr("Slot") << tr("Status") << tr("Enabled") << tr("Pool") << tr("Media Type") ); m_topItem = new QTreeWidgetItem(mp_treeWidget); m_topItem->setText(0, tr("Storage")); m_topItem->setData(0, Qt::UserRole, 0); m_topItem->setExpanded(true); mp_treeWidget->setColumnCount(headerlist.count()); mp_treeWidget->setHeaderLabels(headerlist); QSettings settings(m_console->m_dir->name(), "bat"); settings.beginGroup("StorageTreeExpanded"); bool first = true; QString storage_comsep(""); QString storageName; foreach(storageName, m_console->storage_list){ if (first) { storage_comsep += "'" + storageName + "'"; first = false; } else storage_comsep += ",'" + storageName + "'"; } if (storage_comsep != "") { /* Set up query QString and header QStringList */ QString query("SELECT" " Name AS StorageName," " StorageId AS ID, AutoChanger AS Changer" " FROM Storage " " WHERE StorageId IN (SELECT MAX(StorageId) FROM Storage WHERE"); query += " Name IN (" + storage_comsep + ")"; query += " GROUP BY Name) ORDER BY Name"; QStringList results; /* This could be a log item */ if (mainWin->m_sqlDebug) { Pmsg1(000, "Storage query cmd : %s\n",query.toUtf8().data()); } if (m_console->sql_cmd(query, results)) { QStringList fieldlist; foreach (QString resultline, results) { fieldlist = resultline.split("\t"); if (fieldlist.size() != 3) { Pmsg1(000, "Unexpected line %s", resultline.toUtf8().data()); continue; } storageName = fieldlist.takeFirst(); if (m_firstpopulation) { settingsOpenStatus(storageName); } TreeItemFormatter storageItem(*m_topItem, 1); storageItem.setTextFld(0, storageName); if(settings.contains(storageName)) storageItem.widget()->setExpanded(settings.value(storageName).toBool()); else storageItem.widget()->setExpanded(true); int index = 1; QStringListIterator fld(fieldlist); /* storage id */ storageItem.setNumericFld(index++, fld.next() ); /* changer */ QString changer = fld.next(); storageItem.setBoolFld(index++, changer); if (changer == "1") mediaList(storageItem.widget(), fieldlist.first()); } } } /* Resize the columns */ for(int cnter=0; cnterresizeColumnToContents(cnter); } m_firstpopulation = false; } /* * For autochangers A query to show the tapes in the changer. 
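 * Only volumes currently in the changer (Media.InChanger<>0) for the given
 * StorageId are listed, ordered by slot.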
*/ void Storage::mediaList(QTreeWidgetItem *parent, const QString &storageID) { QString query("SELECT Media.VolumeName AS Media, Media.Slot AS Slot," " Media.VolStatus AS VolStatus, Media.Enabled AS Enabled," " Pool.Name AS MediaPool, Media.MediaType AS MediaType" " From Media" " JOIN Pool ON (Media.PoolId=Pool.PoolId)" " WHERE Media.StorageId='" + storageID + "'" " AND Media.InChanger<>0" " ORDER BY Media.Slot"); QStringList results; /* This could be a log item */ if (mainWin->m_sqlDebug) { Pmsg1(000, "Storage query cmd : %s\n",query.toUtf8().data()); } if (m_console->sql_cmd(query, results)) { QString resultline; QString field; QStringList fieldlist; foreach (resultline, results) { fieldlist = resultline.split("\t"); if (fieldlist.size() < 6) continue; /* Iterate through fields in the record */ QStringListIterator fld(fieldlist); int index = 0; TreeItemFormatter fmt(*parent, 2); /* volname */ fmt.setTextFld(index++, fld.next()); /* skip the next two columns, unused by media */ index += 2; /* slot */ fmt.setNumericFld(index++, fld.next()); /* status */ fmt.setVolStatusFld(index++, fld.next()); /* enabled */ fmt.setBoolFld(index++, fld.next()); /* pool */ fmt.setTextFld(index++, fld.next()); /* media type */ fmt.setTextFld(index++, fld.next()); } } } /* * When the treeWidgetItem in the page selector tree is singleclicked, Make sure * The tree has been populated. */ void Storage::PgSeltreeWidgetClicked() { if(!m_populated) { populateTree(); createContextMenu(); } if (!isOnceDocked()) { dockPage(); } } /* * Added to set the context menu policy based on currently active treeWidgetItem * signaled by currentItemChanged */ void Storage::treeItemChanged(QTreeWidgetItem *currentwidgetitem, QTreeWidgetItem *previouswidgetitem ) { /* m_checkcurwidget checks to see if this is during a refresh, which will segfault */ if (m_checkcurwidget) { /* The Previous item */ if (previouswidgetitem) { /* avoid a segfault if first time */ int treedepth = previouswidgetitem->data(0, Qt::UserRole).toInt(); if (treedepth == 1){ mp_treeWidget->removeAction(actionStatusStorageInConsole); mp_treeWidget->removeAction(actionStatusStorageWindow); mp_treeWidget->removeAction(actionLabelStorage); mp_treeWidget->removeAction(actionMountStorage); mp_treeWidget->removeAction(actionUnMountStorage); mp_treeWidget->removeAction(actionUpdateSlots); mp_treeWidget->removeAction(actionUpdateSlotsScan); mp_treeWidget->removeAction(actionRelease); } } int treedepth = currentwidgetitem->data(0, Qt::UserRole).toInt(); if (treedepth == 1){ /* set a hold variable to the storage name in case the context sensitive * menu is used */ m_currentStorage = currentwidgetitem->text(0); m_currentAutoChanger = currentwidgetitem->text(2) == tr("Yes"); mp_treeWidget->addAction(actionStatusStorageInConsole); mp_treeWidget->addAction(actionStatusStorageWindow); mp_treeWidget->addAction(actionLabelStorage); mp_treeWidget->addAction(actionMountStorage); mp_treeWidget->addAction(actionUnMountStorage); mp_treeWidget->addAction(actionRelease); QString text; text = tr("Status Storage \"%1\"").arg(m_currentStorage);; actionStatusStorageInConsole->setText(text); text = tr("Status Storage \"%1\" in Window").arg(m_currentStorage);; actionStatusStorageWindow->setText(text); text = tr("Label media in Storage \"%1\"").arg(m_currentStorage); actionLabelStorage->setText(text); text = tr("Mount media in Storage \"%1\"").arg(m_currentStorage); actionMountStorage->setText(text); text = tr("\"UN\" Mount media in Storage \"%1\"").arg(m_currentStorage); text = tr("Release 
media in Storage \"%1\"").arg(m_currentStorage); actionRelease->setText(text); if (m_currentAutoChanger) { mp_treeWidget->addAction(actionUpdateSlots); mp_treeWidget->addAction(actionUpdateSlotsScan); text = tr("Barcode Scan media in Storage \"%1\"").arg(m_currentStorage); actionUpdateSlots->setText(text); text = tr("Read scan media in Storage \"%1\"").arg( m_currentStorage); actionUpdateSlotsScan->setText(text); } } } } /* * Setup a context menu * Made separate from populate so that it would not create context menu over and * over as the tree is repopulated. */ void Storage::createContextMenu() { mp_treeWidget->setContextMenuPolicy(Qt::ActionsContextMenu); mp_treeWidget->addAction(actionRefreshStorage); connect(mp_treeWidget, SIGNAL( currentItemChanged(QTreeWidgetItem *, QTreeWidgetItem *)), this, SLOT(treeItemChanged(QTreeWidgetItem *, QTreeWidgetItem *))); /* connect to the action specific to this pages class */ connect(actionRefreshStorage, SIGNAL(triggered()), this, SLOT(populateTree())); connect(actionStatusStorageInConsole, SIGNAL(triggered()), this, SLOT(consoleStatusStorage())); connect(actionLabelStorage, SIGNAL(triggered()), this, SLOT(consoleLabelStorage())); connect(actionMountStorage, SIGNAL(triggered()), this, SLOT(consoleMountStorage())); connect(actionUnMountStorage, SIGNAL(triggered()), this, SLOT(consoleUnMountStorage())); connect(actionUpdateSlots, SIGNAL(triggered()), this, SLOT(consoleUpdateSlots())); connect(actionUpdateSlotsScan, SIGNAL(triggered()), this, SLOT(consoleUpdateSlotsScan())); connect(actionRelease, SIGNAL(triggered()), this, SLOT(consoleRelease())); connect(actionStatusStorageWindow, SIGNAL(triggered()), this, SLOT(statusStorageWindow())); connect(mp_treeWidget, SIGNAL(itemDoubleClicked(QTreeWidgetItem *, int)), this, SLOT(contentWindow())); } void Storage::contentWindow() { if (m_currentStorage != "" && m_currentAutoChanger) { QTreeWidgetItem *parentItem = mainWin->getFromHash(this); new Content(m_currentStorage, parentItem); } } /* * Virtual function which is called when this page is visible on the stack */ void Storage::currentStackItem() { if(!m_populated) { populateTree(); /* Create the context menu for the storage tree */ createContextMenu(); } } /* * Functions to respond to local context sensitive menu sending console commands * If I could figure out how to make these one function passing a string, Yaaaaaa */ void Storage::consoleStatusStorage() { QString cmd("status storage="); cmd += m_currentStorage; consoleCommand(cmd); } /* Label Media populating current storage by default */ void Storage::consoleLabelStorage() { new labelPage(m_currentStorage); } /* Mount currently selected storage */ void Storage::consoleMountStorage() { if (m_currentAutoChanger == 0){ /* no autochanger, just execute the command in the console */ QString cmd("mount storage="); cmd += m_currentStorage; consoleCommand(cmd); } else { setConsoleCurrent(); /* if this storage is an autochanger, lets ask for the slot */ new mountDialog(m_console, m_currentStorage); } } /* Unmount Currently selected storage */ void Storage::consoleUnMountStorage() { QString cmd("umount storage="); cmd += m_currentStorage; consoleCommand(cmd); } /* Update Slots */ void Storage::consoleUpdateSlots() { QString cmd("update slots storage="); cmd += m_currentStorage; consoleCommand(cmd); } /* Update Slots Scan*/ void Storage::consoleUpdateSlotsScan() { QString cmd("update slots scan storage="); cmd += m_currentStorage; consoleCommand(cmd); } /* Release a tape in the drive */ void 
Storage::consoleRelease() { QString cmd("release storage="); cmd += m_currentStorage; consoleCommand(cmd); } /* * Open a status storage window */ void Storage::statusStorageWindow() { /* if one exists, then just set it current */ bool found = false; foreach(Pages *page, mainWin->m_pagehash) { if (mainWin->currentConsole() == page->console()) { if (page->name() == tr("Storage Status %1").arg(m_currentStorage)) { found = true; page->setCurrent(); } } } if (!found) { QTreeWidgetItem *parentItem = mainWin->getFromHash(this); new StorStat(m_currentStorage, parentItem); } } /* * Write settings to save expanded states of the pools */ void Storage::writeExpandedSettings() { QSettings settings(m_console->m_dir->name(), "bat"); settings.beginGroup("StorageTreeExpanded"); int childcount = m_topItem->childCount(); for (int cnt=0; cntchild(cnt); settings.setValue(item->text(0), item->isExpanded()); } settings.endGroup(); } /* * If first time, then check to see if there were status pages open the last time closed * if so open */ void Storage::settingsOpenStatus(QString &storage) { QSettings settings(m_console->m_dir->name(), "bat"); settings.beginGroup("OpenOnExit"); QString toRead = "StorageStatus_" + storage; if (settings.value(toRead) == 1) { if (mainWin->m_sqlDebug) { Pmsg1(000, "Do open Storage Status window for : %s\n", storage.toUtf8().data()); } new StorStat(storage, mainWin->getFromHash(this)); setCurrent(); mainWin->getFromHash(this)->setExpanded(true); } else { if (mainWin->m_sqlDebug) { Pmsg1(000, "Do NOT open Storage Status window for : %s\n", storage.toUtf8().data()); } } settings.endGroup(); } bacula-15.0.3/src/qt-console/storage/storage.ui0000644000175000017500000000755414771010173021217 0ustar bsbuildbsbuild StorageForm 0 0 467 383 Storage Tree 9 9 9 9 6 6 1 :/images/view-refresh.png Refresh Storage List Requery the director for the list of storage objects. :/images/status-console.png Status Storage In Console Status Storage In Console :/images/label.png Label Storage Label Storage :/images/cartridge.png MountStorage MountStorage :/images/cartridge.png UnMount Storage UnMount Storage :/images/package-x-generic.png Update Slots Update Slots :/images/package-x-generic.png Update Slots Scan Update Slots Scan :/images/cartridge.png Release :/images/status.png Status Storage Window bacula-15.0.3/src/qt-console/storage/content.ui0000644000175000017500000002052614771010173021217 0ustar bsbuildbsbuild ContentForm 0 0 949 695 Form 0 0 240 110 Actions Update slots :/images/view-refresh.png Label :/images/label.png false Move to tray :/images/extern.png false Empty tray :/images/intern.png Mount Unmount Status Release 0 0 241 221 241 16777215 Drives 0 0 220 192 QAbstractItemView::SelectRows Drive Volume 0 0 240 16777215 Import/Export 0 0 100 0 QAbstractItemView::SelectRows false Slot Volume Content true QAbstractItemView::ExtendedSelection QAbstractItemView::SelectRows 0 Slot Volume Bytes Status Media Type Pool Last Written When expire? bacula-15.0.3/src/qt-console/bat.pro.mingw32.in0000644000175000017500000001327214771010173020724 0ustar bsbuildbsbuild###################################################################### # # !!!!!!! IMPORTANT !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # # Edit only bat.pro.mingw32.in -- bat.pro.mingw32 is built by the ./configure program # # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
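# NOTE: changes made to the generated bat.pro.mingw32 are lost the next time
# ./configure is run, so edit only this .in file.
#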
# CONFIG += qt cross-win32 QT += core gui widgets # CONFIG += debug bins.path = ./ bins.files = ./bat confs.path = ./ confs.commands = ./install_conf_file TEMPLATE = app TARGET = bat DEPENDPATH += . INCLUDEPATH += .. . ./console ./restore ./select cross-win32 { # LIBS += ../win32/dll/bacula.a LIBS += -mwindows -L../win32/release32 -lbacula -lwinpthread } !cross-win32 { LIBS += -L../lib -lbac -L../findlib -lbacfind @OPENSSL_LIBS@ } qwt { INCLUDEPATH += @QWT_INC@ LIBS += @QWT_LDFLAGS@ @QWT_LIB@ } RESOURCES = main.qrc MOC_DIR = moc32 OBJECTS_DIR = obj32 UI_DIR = ui32 QMAKE_CC = i686-w64-mingw32-gcc QMAKE_CXX = i686-w64-mingw32-g++ QMAKE_INCDIR = $(DEPKGS)/depkgs-mingw32/include/ ../win32/compat $(DEPKGS)/depkgs-mingw32/include/QtGui $(DEPKGS)/depkgs-mingw32/include/QtCore $(DEPKGS)/depkgs-mingw32/include/QtWidgets ui32/ QMAKE_INCDIR_QT = QMAKE_LIBDIR_QT = $(DEPKGS)/depkgs-mingw32/lib QMAKE_LINK = i686-w64-mingw32-g++ QMAKE_LFLAGS = -mthreads -Wl,-enable-stdcall-fixup -Wl,-enable-auto-import -m32 -fno-strict-aliasing -Wl,-enable-runtime-pseudo-reloc QMAKE_LIB = i686-w64-mingw32-ar -ru QMAKE_RC = i686-w64-mingw32-windres # Main window FORMS += main.ui FORMS += prefs.ui FORMS += label/label.ui FORMS += relabel/relabel.ui FORMS += mount/mount.ui FORMS += console/console.ui FORMS += restore/restore.ui restore/prerestore.ui restore/brestore.ui FORMS += restore/runrestore.ui restore/restoretree.ui FORMS += run/run.ui run/runcmd.ui run/estimate.ui run/prune.ui FORMS += select/select.ui select/textinput.ui FORMS += medialist/medialist.ui mediaedit/mediaedit.ui joblist/joblist.ui FORMS += medialist/mediaview.ui FORMS += clients/clients.ui storage/storage.ui fileset/fileset.ui FORMS += joblog/joblog.ui jobs/jobs.ui job/job.ui FORMS += help/help.ui mediainfo/mediainfo.ui FORMS += status/dirstat.ui storage/content.ui FORMS += status/clientstat.ui FORMS += status/storstat.ui qwt { FORMS += jobgraphs/jobplotcontrols.ui } # Main directory HEADERS += mainwin.h bat.h bat_conf.h qstd.h pages.h SOURCES += main.cpp bat_conf.cpp mainwin.cpp qstd.cpp pages.cpp # include authenticatebase.cpp that is already in libbac because # libbac is compiled with -no-rtti while qt-console use RTTI by # default. Inherited sub-class with virtual method in shared lib # using a diffferent RTTI model dont work well, see this error a # link time: "... DirCommAuthenticate ... 
undefined reference to # typeinfo for AuthenticateBase" HEADERS += ../lib/authenticatebase.h SOURCES += ../lib/authenticatebase.cc # bcomm HEADERS += bcomm/dircomm.h SOURCES += bcomm/dircomm.cpp bcomm/dircomm_auth.cpp # Console HEADERS += console/console.h SOURCES += console/console.cpp # Restore HEADERS += restore/restore.h SOURCES += restore/prerestore.cpp restore/restore.cpp restore/brestore.cpp # Label dialog HEADERS += label/label.h SOURCES += label/label.cpp # Relabel dialog HEADERS += relabel/relabel.h SOURCES += relabel/relabel.cpp # Mount dialog HEADERS += mount/mount.h SOURCES += mount/mount.cpp # Run dialog HEADERS += run/run.h SOURCES += run/run.cpp run/runcmd.cpp run/estimate.cpp run/prune.cpp # Select dialog HEADERS += select/select.h select/textinput.h SOURCES += select/select.cpp select/textinput.cpp ## MediaList HEADERS += medialist/medialist.h SOURCES += medialist/medialist.cpp # MediaView HEADERS += medialist/mediaview.h SOURCES += medialist/mediaview.cpp ## MediaEdit HEADERS += mediaedit/mediaedit.h SOURCES += mediaedit/mediaedit.cpp ## JobList HEADERS += joblist/joblist.h SOURCES += joblist/joblist.cpp ## Clients HEADERS += clients/clients.h SOURCES += clients/clients.cpp ## Storage HEADERS += storage/storage.h SOURCES += storage/storage.cpp ## Storage content HEADERS += storage/content.h SOURCES += storage/content.cpp ## Fileset HEADERS += fileset/fileset.h SOURCES += fileset/fileset.cpp ## Job log HEADERS += joblog/joblog.h SOURCES += joblog/joblog.cpp ## Job HEADERS += job/job.h SOURCES += job/job.cpp ## Jobs HEADERS += jobs/jobs.h SOURCES += jobs/jobs.cpp ## RestoreTree HEADERS += restore/restoretree.h SOURCES += restore/restoretree.cpp ## Job Step Graphs qwt { HEADERS += jobgraphs/jobplot.h SOURCES += jobgraphs/jobplot.cpp } # Help dialog HEADERS += help/help.h SOURCES += help/help.cpp # Media info dialog HEADERS += mediainfo/mediainfo.h SOURCES += mediainfo/mediainfo.cpp ## Status Dir HEADERS += status/dirstat.h SOURCES += status/dirstat.cpp ## Status Client HEADERS += status/clientstat.h SOURCES += status/clientstat.cpp ## Status Client HEADERS += status/storstat.h SOURCES += status/storstat.cpp # Utility sources HEADERS += util/fmtwidgetitem.h util/comboutil.h SOURCES += util/fmtwidgetitem.cpp util/comboutil.cpp INSTALLS += bins INSTALLS += confs QMAKE_EXTRA_TARGETS += depend TRANSLATIONS += ts/bat_fr.ts ts/bat_de.ts bacula-15.0.3/src/qt-console/main.qrc0000644000175000017500000000623114771010173017172 0ustar bsbuildbsbuild images/tray-monitor-logo.png images/ajax-loader-big.gif images/page-prev.gif images/page-next.gif images/0p.png images/16p.png images/32p.png images/48p.png images/64p.png images/80p.png images/96p.png images/A.png images/R.png images/T.png images/W.png images/client_icon_48dp.png images/director_icon_48dp.png images/storage_icon_48dp.png images/applications-graphics.png images/bat.png images/bat_icon.png images/browse.png images/cartridge-edit.png images/cartridge.png images/cartridge1.png images/check.png images/connected.png images/copy.png images/cut.png images/disconnected.png images/edit-cut.png images/edit-delete.png images/edit.png images/emblem-system.png images/estimate-job.png images/extern.png images/f.png images/folder.png images/folderbothchecked.png images/folderchecked.png images/folderunchecked.png images/go-down.png images/go-jump.png images/go-up.png images/graph1.png images/help-browser.png images/inflag0.png images/inflag1.png images/inflag2.png images/intern.png images/joblog.png images/label.png 
images/mail-message-new.png images/mail-message-pending.png images/mark.png images/network-server.png images/new.png images/open.png images/package-x-generic.png images/paste.png images/print.png images/purge.png images/restore.png images/run.png images/runit.png images/save.png images/status-console.png images/status.png images/system-file-manager.png images/unchecked.png images/undo.png images/unmark.png images/up.png images/utilities-terminal.png images/view-refresh.png images/weather-severe-alert.png images/zoom.png images/ss_main.jpg images/ss_dir.jpg images/ss_backup.jpg images/ss_restore.jpg images/ss_trayconf.jpg images/ss_resconf.jpg images/ss_qrcode.jpg images/ss_fdconf.jpg bacula-15.0.3/src/qt-console/label/0000755000175000017500000000000014771010173016614 5ustar bsbuildbsbuildbacula-15.0.3/src/qt-console/label/label.cpp0000644000175000017500000000657014771010173020407 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Label Page class * * Kern Sibbald, February MMVII * */ #include "bat.h" #include "label.h" #include labelPage::labelPage() : Pages() { QString deflt(""); m_closeable = false; showPage(deflt); } /* * An overload of the constructor to have a default storage show in the * combobox on start. Used from context sensitive in storage class. */ labelPage::labelPage(QString &defString) : Pages() { showPage(defString); } /* * moved the constructor code here for the overload. 
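 *
 * Illustrative usage, not part of the original source: the default
 * constructor opens the page with no particular storage pre-selected, while
 * the QString overload pre-selects a storage in the combo box, e.g.
 *
 *    new labelPage();            // no particular storage pre-selected
 *    QString stor("File1");      // hypothetical storage resource name
 *    new labelPage(stor);        // combo box starts on "File1"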
*/ void labelPage::showPage(QString &defString) { m_name = "Label"; pgInitialize(); setupUi(this); m_conn = m_console->notifyOff(); QTreeWidgetItem* thisitem = mainWin->getFromHash(this); thisitem->setIcon(0,QIcon(QString::fromUtf8(":images/label.png"))); storageCombo->addItems(m_console->storage_list); int index = storageCombo->findText(defString, Qt::MatchExactly); if (index != -1) { storageCombo->setCurrentIndex(index); } poolCombo->addItems(m_console->pool_list); connect(okButton, SIGNAL(pressed()), this, SLOT(okButtonPushed())); connect(cancelButton, SIGNAL(pressed()), this, SLOT(cancelButtonPushed())); connect(automountOnButton, SIGNAL(pressed()), this, SLOT(automountOnButtonPushed())); connect(automountOffButton, SIGNAL(pressed()), this, SLOT(automountOffButtonPushed())); dockPage(); setCurrent(); this->show(); } void labelPage::okButtonPushed() { QString scmd; if (volumeName->text().toUtf8().data()[0] == 0) { QMessageBox::warning(this, "No Volume name", "No Volume name given", QMessageBox::Ok, QMessageBox::Ok); return; } this->hide(); scmd = QString("label volume=\"%1\" pool=\"%2\" storage=\"%3\" slot=%4\n") .arg(volumeName->text()) .arg(poolCombo->currentText()) .arg(storageCombo->currentText()) .arg(slotSpin->value()); if (mainWin->m_commandDebug) { Pmsg1(000, "sending command : %s\n", scmd.toUtf8().data()); } if (m_console) { m_console->write_dir(scmd.toUtf8().data()); m_console->displayToPrompt(m_conn); m_console->notify(m_conn, true); } else { Pmsg0(000, "m_console==NULL !!!!!!\n"); } closeStackPage(); mainWin->resetFocus(); } void labelPage::cancelButtonPushed() { this->hide(); if (m_console) { m_console->notify(m_conn, true); } else { Pmsg0(000, "m_console==NULL !!!!!!\n"); } closeStackPage(); mainWin->resetFocus(); } /* turn automount on */ void labelPage::automountOnButtonPushed() { QString cmd("automount on"); consoleCommand(cmd); } /* turn automount off */ void labelPage::automountOffButtonPushed() { QString cmd("automount off"); consoleCommand(cmd); } bacula-15.0.3/src/qt-console/label/label.ui0000644000175000017500000002000514771010173020227 0ustar bsbuildbsbuild labelForm 0 0 560 357 Qt::NoFocus Form 9 6 Qt::Vertical QSizePolicy::Expanding 421 48 Qt::Vertical QSizePolicy::Expanding 431 48 0 6 6 0 Qt::Horizontal 40 20 OK true Cancel Volume Name: volumeName Storage: storageCombo 200 0 Qt::StrongFocus Enter Name of Volume to create 10000 Slot: slotSpin 6 0 Execute Automount On Off Qt::Vertical 20 61 Pool: poolCombo 6 0 Qt::Horizontal 71 21 16777215 30 <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:12pt; font-weight:600;">Label a Volume</span></p></body></html> Qt::Horizontal 81 20 Qt::Horizontal 40 131 Qt::Horizontal 40 121 bacula-15.0.3/src/qt-console/label/label.h0000644000175000017500000000227614771010173020053 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. 
You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Kern Sibbald, February MMVII */ #ifndef _LABEL_H_ #define _LABEL_H_ #if QT_VERSION >= 0x050000 #include #else #include #endif #include "ui_label.h" #include "console.h" #include "pages.h" class labelPage : public Pages, public Ui::labelForm { Q_OBJECT public: labelPage(); labelPage(QString &defString); void showPage(QString &defString); private slots: void okButtonPushed(); void cancelButtonPushed(); void automountOnButtonPushed(); void automountOffButtonPushed(); private: int m_conn; }; #endif /* _LABEL_H_ */ bacula-15.0.3/src/qt-console/bat.pro.in0000644000175000017500000001151414771010173017434 0ustar bsbuildbsbuild###################################################################### # # !!!!!!! IMPORTANT !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # # Edit only bat.pro.in -- bat.pro is built by the ./configure program # # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # CONFIG += qt debug @QWT@ greaterThan(QT_MAJOR_VERSION, 4): QT += widgets datarootdir = @datarootdir@ bins.path = /$(DESTDIR)@sbindir@ bins.files = bat confs.path = /$(DESTDIR)@sysconfdir@ confs.commands = ./install_conf_file help.path = /$(DESTDIR)@docdir@ help.files = help/*.html images/status.png images/mail-message-new.png TEMPLATE = app TARGET = bat DEPENDPATH += . INCLUDEPATH += .. . ./console ./restore ./select LIBS += -L../lib -lbaccfg -lbac -L../findlib -lbacfind @OPENSSL_LIBS@ LIBTOOL_LINK = @QMAKE_LIBTOOL@ --silent --tag=CXX --mode=link LIBTOOL_INSTALL = @QMAKE_LIBTOOL@ --silent --mode=install QMAKE_LINK = $${LIBTOOL_LINK} $(CXX) QMAKE_INSTALL_PROGRAM = $${LIBTOOL_INSTALL} install -m @SBINPERM@ -p QMAKE_CLEAN += .libs/* bat qwt { INCLUDEPATH += @QWT_INC@ LIBS += @QWT_LDFLAGS@ @QWT_LIB@ } macx { ICON = images/bat_icon.icns } RESOURCES = main.qrc MOC_DIR = moc OBJECTS_DIR = obj UI_DIR = ui # Main window FORMS += main.ui FORMS += prefs.ui FORMS += label/label.ui FORMS += relabel/relabel.ui FORMS += mount/mount.ui FORMS += console/console.ui FORMS += restore/restore.ui restore/prerestore.ui restore/brestore.ui FORMS += restore/runrestore.ui FORMS += restore/restoretree.ui FORMS += run/run.ui run/runcmd.ui run/estimate.ui run/prune.ui FORMS += select/select.ui select/textinput.ui FORMS += medialist/medialist.ui mediaedit/mediaedit.ui joblist/joblist.ui FORMS += medialist/mediaview.ui FORMS += clients/clients.ui storage/storage.ui fileset/fileset.ui FORMS += joblog/joblog.ui jobs/jobs.ui job/job.ui FORMS += help/help.ui mediainfo/mediainfo.ui FORMS += status/dirstat.ui storage/content.ui FORMS += status/clientstat.ui FORMS += status/storstat.ui qwt { FORMS += jobgraphs/jobplotcontrols.ui } # Main directory HEADERS += mainwin.h bat.h bat_conf.h qstd.h pages.h SOURCES += main.cpp bat_conf.cpp mainwin.cpp qstd.cpp pages.cpp # include authenticatebase.cpp that is already in libbac because # libbac is compiled with -no-rtti while qt-console use RTTI by # default. Inherited sub-class with virtual method in shared lib # using a diffferent RTTI model dont work well, see this error a # link time: "... DirCommAuthenticate ... 
undefined reference to # typeinfo for AuthenticateBase" HEADERS += ../lib/authenticatebase.h SOURCES += ../lib/authenticatebase.cc # bcomm HEADERS += bcomm/dircomm.h SOURCES += bcomm/dircomm.cpp bcomm/dircomm_auth.cpp # Console HEADERS += console/console.h SOURCES += console/console.cpp # Restore HEADERS += restore/restore.h SOURCES += restore/prerestore.cpp restore/restore.cpp restore/brestore.cpp # Label dialog HEADERS += label/label.h SOURCES += label/label.cpp # Relabel dialog HEADERS += relabel/relabel.h SOURCES += relabel/relabel.cpp # Mount dialog HEADERS += mount/mount.h SOURCES += mount/mount.cpp # Run dialog HEADERS += run/run.h SOURCES += run/run.cpp run/runcmd.cpp run/estimate.cpp run/prune.cpp # Select dialog HEADERS += select/select.h select/textinput.h SOURCES += select/select.cpp select/textinput.cpp ## MediaList HEADERS += medialist/medialist.h SOURCES += medialist/medialist.cpp # MediaView HEADERS += medialist/mediaview.h SOURCES += medialist/mediaview.cpp ## MediaEdit HEADERS += mediaedit/mediaedit.h SOURCES += mediaedit/mediaedit.cpp ## JobList HEADERS += joblist/joblist.h SOURCES += joblist/joblist.cpp ## Clients HEADERS += clients/clients.h SOURCES += clients/clients.cpp ## Storage HEADERS += storage/storage.h SOURCES += storage/storage.cpp ## Storage content HEADERS += storage/content.h SOURCES += storage/content.cpp ## Fileset HEADERS += fileset/fileset.h SOURCES += fileset/fileset.cpp ## Job log HEADERS += joblog/joblog.h SOURCES += joblog/joblog.cpp ## Job HEADERS += job/job.h SOURCES += job/job.cpp ## Jobs HEADERS += jobs/jobs.h SOURCES += jobs/jobs.cpp ## RestoreTree HEADERS += restore/restoretree.h SOURCES += restore/restoretree.cpp ## Job Step Graphs qwt { HEADERS += jobgraphs/jobplot.h SOURCES += jobgraphs/jobplot.cpp } # Help dialog HEADERS += help/help.h SOURCES += help/help.cpp # Media info dialog HEADERS += mediainfo/mediainfo.h SOURCES += mediainfo/mediainfo.cpp ## Status Dir HEADERS += status/dirstat.h SOURCES += status/dirstat.cpp ## Status Client HEADERS += status/clientstat.h SOURCES += status/clientstat.cpp ## Status Client HEADERS += status/storstat.h SOURCES += status/storstat.cpp # Utility sources HEADERS += util/fmtwidgetitem.h util/comboutil.h SOURCES += util/fmtwidgetitem.cpp util/comboutil.cpp INSTALLS = bins confs help QMAKE_EXTRA_TARGETS += depend TRANSLATIONS += ts/bat_fr.ts ts/bat_de.ts bacula-15.0.3/src/qt-console/qstd.cpp0000644000175000017500000000505314771010173017217 0ustar bsbuildbsbuild//start id=namespace #include "qstd.h" /* QTextStreams look a lot like iostreams, we just have to point them to the right place. */ //start id=streamdefs QTextStream qstd::cin(stdin, QIODevice::ReadOnly); QTextStream qstd::cout(stdout, QIODevice::WriteOnly); QTextStream qstd::cerr(stderr, QIODevice::WriteOnly); //end /* Namespace members are like static class members */ bool qstd::yes(QString question) { QString ans; cout << QString(" %1 [y/n]? ").arg(question); cout.flush(); ans = cin.readLine(); return (ans.toUpper().startsWith("Y", Qt::CaseInsensitive)); } //end bool qstd::more(QString s) { return yes(QString("Another %1").arg(s)); } int qstd::promptInt(int base /* =10 */) { /* Usage: int n = promptInt(); */ QString numstr; int result; bool ok; cout << ": " << flush; while (1) { numstr = cin.readLine(); result = numstr.toInt(&ok, base); if (!ok) { cout << "Invalid number. 
Try again: "; cout.flush(); } else return result; } } double qstd::promptDouble() { /* Usage: double d = promptDouble(); */ QString numstr; double result; bool ok; while (1) { numstr = cin.readLine(); result = numstr.toDouble(&ok); if (!ok) { cout << "Invalid number. Try again: "; cout.flush(); } else return result; } } void qstd::promptOutputFile(QFile& outfile) { QString filename; while (1) { cout << "Please enter the file name for saving this data: "; cout.flush(); filename = cin.readLine(); outfile.setFileName(filename); bool fileExists = outfile.open(QIODevice::ReadOnly); if (!fileExists) break; if (yes("File already exists ... Ok to overwrite")) break; outfile.close(); outfile.reset(); } outfile.close(); outfile.reset(); outfile.open(QIODevice::WriteOnly); cout << filename << " open for writing ...\n"; cout.flush(); } void qstd::promptInputFile(QFile& infile) { QString filename; while (1) { cout << "Name of the file to be read: "; cout.flush(); filename = cin.readLine(); infile.setFileName(filename); bool fileExists = infile.open(QIODevice::ReadOnly); if (fileExists) break; cout << "File does not exist ... Please try again. \n"; cout.flush(); infile.reset(); } cout << filename << " open for reading ...\n"; cout.flush(); } bacula-15.0.3/src/qt-console/mediaedit/0000755000175000017500000000000014771010173017462 5ustar bsbuildbsbuildbacula-15.0.3/src/qt-console/mediaedit/mediaedit.ui0000644000175000017500000003727714771010173021766 0ustar bsbuildbsbuild mediaEditForm 0 0 487 470 Form 9 6 0 6 Pool: poolCombo Volume Status: Max Volume Bytes: slotSpin Slot: slotSpin Max Volume Jobs: slotSpin Use Duration: slotSpin 0 6 Qt::Horizontal 40 20 OK Cancel 2147483647 Retention: slotSpin Recycle Pool: slotSpin 0 6 Qt::RightToLeft Enabled Qt::Horizontal 40 20 2147483647 Max Volume Files: slotSpin 0 6 Qt::Vertical 97 21 999 -1 Years Seconds 30 -1 Use Duration Qt::Vertical 97 31 Days 60 -1 Hours Months 60 -1 24 -1 0 Retention 12 -1 Minutes 2147483647 0 6 Qt::Horizontal 40 20 16777215 48 Volume : Qt::Horizontal 40 20 2147483647 0 6 Qt::RightToLeft Recycle Qt::Horizontal 40 20 0 6 Qt::Horizontal 71 21 16777215 30 <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal; text-decoration:none;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:12pt; font-weight:600;">Edit a Volume</span></p></body></html> Qt::Horizontal 81 20 10000 2147483647 Qt::Vertical 20 40 Qt::Horizontal 40 20 Qt::Vertical 20 40 Qt::Horizontal 40 20 bacula-15.0.3/src/qt-console/mediaedit/mediaedit.cpp0000644000175000017500000003346014771010173022121 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * Dirk Bartley, March 2007 */ #include "bat.h" #include #include #include #include "mediaedit.h" /* * A constructor */ MediaEdit::MediaEdit(QTreeWidgetItem *parentWidget, QString &mediaId) : Pages() { setupUi(this); pgInitialize(tr("Media Edit"), parentWidget); QTreeWidgetItem* thisitem = mainWin->getFromHash(this); thisitem->setIcon(0,QIcon(QString::fromUtf8(":images/cartridge-edit.png"))); dockPage(); setCurrent(); connect(okButton, SIGNAL(pressed()), this, SLOT(okButtonPushed())); connect(cancelButton, SIGNAL(pressed()), this, SLOT(cancelButtonPushed())); connectSpins(); connect(retentionSpin, SIGNAL(valueChanged(int)), this, SLOT(retentionChanged())); connect(useDurationSpin, SIGNAL(valueChanged(int)), this, SLOT(useDurationChanged())); connect(retentionRadio, SIGNAL(pressed()), this, SLOT(retentionRadioPressed())); connect(useDurationRadio, SIGNAL(pressed()), this, SLOT(useDurationRadioPressed())); m_pool = ""; m_recyclePool = ""; m_status = ""; m_slot = 0; /* The media's pool */ poolCombo->addItems(m_console->pool_list); /* The media's Status */ QStringList statusList = (QStringList() << "Full" << "Used" << "Append" << "Error" << "Purged" << "Recycle" << "Read-Only" << "Cleaning"); statusCombo->addItems(statusList); /* Set up the query for the default values */ QStringList FieldList = (QStringList() << "Media.VolumeName" << "Pool.Name" << "Media.VolStatus" << "Media.Slot" << "Media.VolRetention" << "Media.VolUseDuration" << "Media.MaxVolJobs" << "Media.MaxVolFiles" << "Media.MaxVolBytes" << "Media.Recycle" << "Media.Enabled" << "Pol.Name"); QStringList AsList = (QStringList() << "VolumeName" << "PoolName" << "Status" << "Slot" << "Retention" << "UseDuration" << "MaxJobs" << "MaxFiles" << "MaxBytes" << "Recycle" << "Enabled" << "RecyclePool"); int i = 0; QString query("SELECT "); foreach (QString field, FieldList) { if (i != 0) { query += ", "; } query += field + " AS " + AsList[i]; i += 1; } QString where = " WHERE Media.VolumeName = '" + mediaId + "' "; if (mediaId.contains(QRegExp("^[0-9]+$"))) { where = " WHERE Media.MediaId=" + mediaId; } query += " FROM Media" " JOIN Pool ON (Media.PoolId=Pool.PoolId)" " LEFT OUTER JOIN Pool AS Pol ON (Media.RecyclePoolId=Pol.PoolId)" + where; if (mainWin->m_sqlDebug) { Pmsg1(000, "MediaList query cmd : %s\n",query.toUtf8().data()); } QStringList results; if (m_console->sql_cmd(query, results)) { QString field; QStringList fieldlist; /* Iterate through the lines of results, there should only be one. 
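 * (Illustrative note: each line returned by sql_cmd() is one record with the
 *  selected columns joined by tab characters, e.g.
 *  "Vol0001\tDefault\tAppend\t0\t31536000\t...", which is why the loop below
 *  splits on "\t" and assigns the fields by position; the sample values here
 *  are made up.)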
*/ foreach (QString resultline, results) { fieldlist = resultline.split("\t"); i = 0; /* Iterate through fields in the record */ foreach (field, fieldlist) { field = field.trimmed(); /* strip leading & trailing spaces */ bool ok; if (i == 0) { m_mediaName = field; volumeLabel->setText(QString("Volume : %1").arg(m_mediaName)); } else if (i == 1) { m_pool = field; } else if (i == 2) { m_status = field; } else if (i == 3) { m_slot = field.toInt(&ok, 10); if (!ok){ m_slot = 0; } } else if (i == 4) { m_retention = field.toInt(&ok, 10); if (!ok){ m_retention = 0; } } else if (i == 5) { m_useDuration = field.toInt(&ok, 10); if (!ok){ m_useDuration = 0; } } else if (i == 6) { m_maxVolJobs = field.toInt(&ok, 10); if (!ok){ m_maxVolJobs = 0; } } else if (i == 7) { m_maxVolFiles = field.toInt(&ok, 10); if (!ok){ m_maxVolFiles = 0; } } else if (i == 8) { m_maxVolBytes = field.toInt(&ok, 10); if (!ok){ m_maxVolBytes = 0; } } else if (i == 9) { if (field == "1") m_recycle = true; else m_recycle = false; } else if (i == 10) { if (field == "1") m_enabled = true; else m_enabled = false; } else if (i == 11) { m_recyclePool = field; } i++; } /* foreach field */ } /* foreach resultline */ } /* if results from query */ if (m_mediaName != "") { int index; /* default value for pool */ index = poolCombo->findText(m_pool, Qt::MatchExactly); if (index != -1) { poolCombo->setCurrentIndex(index); } /* default value for status */ index = statusCombo->findText(m_status, Qt::MatchExactly); if (index != -1) { statusCombo->setCurrentIndex(index); } slotSpin->setValue(m_slot); retentionSpin->setValue(m_retention); useDurationSpin->setValue(m_useDuration); setSpins(retentionSpin->value()); retentionRadio->setChecked(true); maxJobsSpin->setValue(m_maxVolJobs); maxFilesSpin->setValue(m_maxVolFiles); maxBytesSpin->setValue(m_maxVolBytes); if (m_recycle) recycleCheck->setCheckState(Qt::Checked); else recycleCheck->setCheckState(Qt::Unchecked); if (m_enabled) enabledCheck->setCheckState(Qt::Checked); else enabledCheck->setCheckState(Qt::Unchecked); /* default for recycle pool */ recyclePoolCombo->addItems(m_console->pool_list); recyclePoolCombo->insertItem(0, "*None*"); index = recyclePoolCombo->findText(m_recyclePool, Qt::MatchExactly); if (index == -1) { index = 0; } recyclePoolCombo->setCurrentIndex(index); } else { QMessageBox::warning(this, tr("No Volume name"), tr("No Volume name given"), QMessageBox::Ok, QMessageBox::Ok); return; } } /* * Function to handle updating the record then closing the page */ void MediaEdit::okButtonPushed() { QString scmd; this->hide(); bool docmd = false; scmd = QString("update volume=\"%1\"") .arg(m_mediaName); if (m_pool != poolCombo->currentText()) { scmd += " pool=\"" + poolCombo->currentText() + "\""; docmd = true; } if (m_status != statusCombo->currentText()) { scmd += " volstatus=\"" + statusCombo->currentText() + "\""; docmd = true; } if (m_slot != slotSpin->value()) { scmd += " slot=" + QString().setNum(slotSpin->value()); docmd = true; } if (m_retention != retentionSpin->value()) { scmd += " VolRetention=" + QString().setNum(retentionSpin->value()); docmd = true; } if (m_useDuration != useDurationSpin->value()) { scmd += " VolUse=" + QString().setNum(useDurationSpin->value()); docmd = true; } if (m_maxVolJobs != maxJobsSpin->value()) { scmd += " MaxVolJobs=" + QString().setNum(maxJobsSpin->value()); docmd = true; } if (m_maxVolFiles != maxFilesSpin->value()) { scmd += " MaxVolFiles=" + QString().setNum(maxFilesSpin->value()); docmd = true; } if (m_maxVolBytes != maxBytesSpin->value()) 
{ scmd += " MaxVolBytes=" + QString().setNum(maxBytesSpin->value()); docmd = true; } if ((m_recycle) && (recycleCheck->checkState() == Qt::Unchecked)) { scmd += " Recycle=no"; docmd = true; } if ((!m_recycle) && (recycleCheck->checkState() == Qt::Checked)) { scmd += " Recycle=yes"; docmd = true; } if ((m_enabled) && (enabledCheck->checkState() == Qt::Unchecked)) { scmd += " enabled=no"; docmd = true; } if ((!m_enabled) && (enabledCheck->checkState() == Qt::Checked)) { scmd += " enabled=yes"; docmd = true; } if (m_recyclePool != recyclePoolCombo->currentText()) { scmd += " recyclepool=\""; if (recyclePoolCombo->currentText() != "*None*") { scmd += recyclePoolCombo->currentText(); } scmd += "\""; docmd = true; } if (docmd) { if (mainWin->m_commandDebug) { Pmsg1(000, "sending command : %s\n",scmd.toUtf8().data()); } consoleCommand(scmd); } closeStackPage(); } /* close if cancel */ void MediaEdit::cancelButtonPushed() { closeStackPage(); } /* * Slot for user changed retention */ void MediaEdit::retentionChanged() { retentionRadio->setChecked(true); setSpins(retentionSpin->value()); } /* * Slot for user changed the use duration */ void MediaEdit::useDurationChanged() { useDurationRadio->setChecked(true); setSpins(useDurationSpin->value()); } /* * Set the 5 duration spins from a known duration value */ void MediaEdit::setSpins(int value) { int years, months, days, hours, minutes, seconds, left; years = abs(value / 31536000); left = value - years * 31536000; months = abs(left / 2592000); left = left - months * 2592000; days = abs(left / 86400); left = left - days * 86400; hours = abs(left / 3600); left = left - hours * 3600; minutes = abs(left / 60); seconds = left - minutes * 60; disconnectSpins(); yearsSpin->setValue(years); monthsSpin->setValue(months); daysSpin->setValue(days); hoursSpin->setValue(hours); minutesSpin->setValue(minutes); secondsSpin->setValue(seconds); connectSpins(); } /* * This slot is called any time any one of the 5 duration spins a changed. 
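 *
 * Worked example (illustrative): stepping the seconds spin down from 0 to -1
 * while minutes reads 5 is rewritten below as 59 seconds / 4 minutes, and
 * stepping seconds up past 59 rolls it back to 0 and increments minutes.
 * The carries use the same fixed units as setSpins() above (60 s, 60 min,
 * 24 h, 30-day months, 12-month years), after which the checked retention
 * or use-duration spin is recomputed from the duration spins in seconds.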
*/ void MediaEdit::durationChanged() { disconnectSpins(); if (secondsSpin->value() == -1) { secondsSpin->setValue(59); minutesSpin->setValue(minutesSpin->value()-1); } if (minutesSpin->value() == -1) { minutesSpin->setValue(59); hoursSpin->setValue(hoursSpin->value()-1); } if (hoursSpin->value() == -1) { hoursSpin->setValue(23); daysSpin->setValue(daysSpin->value()-1); } if (daysSpin->value() == -1) { daysSpin->setValue(29); monthsSpin->setValue(monthsSpin->value()-1); } if (monthsSpin->value() == -1) { monthsSpin->setValue(11); yearsSpin->setValue(yearsSpin->value()-1); } if (yearsSpin->value() == -1) { yearsSpin->setValue(0); } if (secondsSpin->value() == 60) { secondsSpin->setValue(0); minutesSpin->setValue(minutesSpin->value()+1); } if (minutesSpin->value() == 60) { minutesSpin->setValue(0); hoursSpin->setValue(hoursSpin->value()+1); } if (hoursSpin->value() == 24) { hoursSpin->setValue(0); daysSpin->setValue(daysSpin->value()+1); } if (daysSpin->value() == 30) { daysSpin->setValue(0); monthsSpin->setValue(monthsSpin->value()+1); } if (monthsSpin->value() == 12) { monthsSpin->setValue(0); yearsSpin->setValue(yearsSpin->value()+1); } connectSpins(); if (retentionRadio->isChecked()) { int retention; retention = secondsSpin->value() + minutesSpin->value() * 60 + hoursSpin->value() * 3600 + daysSpin->value() * 86400 + monthsSpin->value() * 2592000 + yearsSpin->value() * 31536000; disconnect(retentionSpin, SIGNAL(valueChanged(int)), this, SLOT(retentionChanged())); retentionSpin->setValue(retention); connect(retentionSpin, SIGNAL(valueChanged(int)), this, SLOT(retentionChanged())); } if (useDurationRadio->isChecked()) { int useDuration; useDuration = secondsSpin->value() + minutesSpin->value() * 60 + hoursSpin->value() * 3600 + daysSpin->value() * 86400 + monthsSpin->value() * 2592000 + yearsSpin->value() * 31536000; disconnect(useDurationSpin, SIGNAL(valueChanged(int)), this, SLOT(useDurationChanged())); useDurationSpin->setValue(useDuration); connect(useDurationSpin, SIGNAL(valueChanged(int)), this, SLOT(useDurationChanged())); } } /* Connect the spins */ void MediaEdit::connectSpins() { connect(secondsSpin, SIGNAL(valueChanged(int)), this, SLOT(durationChanged())); connect(minutesSpin, SIGNAL(valueChanged(int)), this, SLOT(durationChanged())); connect(hoursSpin, SIGNAL(valueChanged(int)), this, SLOT(durationChanged())); connect(daysSpin, SIGNAL(valueChanged(int)), this, SLOT(durationChanged())); connect(monthsSpin, SIGNAL(valueChanged(int)), this, SLOT(durationChanged())); connect(yearsSpin, SIGNAL(valueChanged(int)), this, SLOT(durationChanged())); } /* disconnect spins so that we can set the value of other spin from changed duration spin */ void MediaEdit::disconnectSpins() { disconnect(secondsSpin, SIGNAL(valueChanged(int)), this, SLOT(durationChanged())); disconnect(minutesSpin, SIGNAL(valueChanged(int)), this, SLOT(durationChanged())); disconnect(hoursSpin, SIGNAL(valueChanged(int)), this, SLOT(durationChanged())); disconnect(daysSpin, SIGNAL(valueChanged(int)), this, SLOT(durationChanged())); disconnect(monthsSpin, SIGNAL(valueChanged(int)), this, SLOT(durationChanged())); disconnect(yearsSpin, SIGNAL(valueChanged(int)), this, SLOT(durationChanged())); } /* slot for setting spins when retention radio checked */ void MediaEdit::retentionRadioPressed() { setSpins(retentionSpin->value()); } /* slot for setting spins when duration radio checked */ void MediaEdit::useDurationRadioPressed() { setSpins(useDurationSpin->value()); } 
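A minimal, self-contained sketch of the duration breakdown performed by
MediaEdit::setSpins() above, assuming the same fixed units (365-day years,
30-day months); the function and variable names are illustrative and not part
of bat:

#include <cstdio>

/* Illustrative helper mirroring MediaEdit::setSpins(): split a duration given
 * in seconds into years/months/days/hours/minutes/seconds, largest unit
 * first, using 31536000-second years and 2592000-second months. */
static void split_duration(int value, int parts[6])
{
   static const int units[6] = { 31536000, 2592000, 86400, 3600, 60, 1 };
   for (int i = 0; i < 6; i++) {
      parts[i] = value / units[i];
      value   -= parts[i] * units[i];
   }
}

int main()
{
   int p[6];
   split_duration(31536000 + 86400 + 3661, p);   /* 1 year + 1 day + 1h 1m 1s */
   printf("%dy %dmo %dd %dh %dmin %ds\n", p[0], p[1], p[2], p[3], p[4], p[5]);
   return 0;
}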
bacula-15.0.3/src/qt-console/mediaedit/mediaedit.h0000644000175000017500000000310214771010173021554 0ustar bsbuildbsbuild#ifndef _MEDIAEDIT_H_ #define _MEDIAEDIT_H_ /* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Dirk Bartley, March 2007 */ #if QT_VERSION >= 0x050000 #include #else #include #endif #include "ui_mediaedit.h" #include "console.h" #include "pages.h" class MediaEdit : public Pages, public Ui::mediaEditForm { Q_OBJECT public: MediaEdit(QTreeWidgetItem *parentWidget, QString &mediaId); private slots: void okButtonPushed(); void cancelButtonPushed(); void retentionChanged(); void durationChanged(); void useDurationChanged(); void setSpins(int value); void retentionRadioPressed(); void useDurationRadioPressed(); private: void connectSpins(); void disconnectSpins(); QString m_mediaName; QString m_pool; QString m_status; int m_slot; int m_retention; int m_useDuration; int m_maxVolJobs; int m_maxVolFiles; int m_maxVolBytes; bool m_recycle; bool m_enabled; QString m_recyclePool; }; #endif /* _MEDIAEDIT_H_ */ bacula-15.0.3/src/qt-console/bcomm/0000755000175000017500000000000014771010173016632 5ustar bsbuildbsbuildbacula-15.0.3/src/qt-console/bcomm/dircomm.h0000644000175000017500000000402614771010173020437 0ustar bsbuildbsbuild#ifndef _DIRCOMM_H_ #define _DIRCOMM_H_ /* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * Kern Sibbald, January 2007 */ #if QT_VERSION >= 0x050000 #include #else #include #endif #include "pages.h" #include "ui_console.h" #include #ifndef MAX_NAME_LENGTH #define MAX_NAME_LENGTH 128 #endif class DIRRES; class BSOCK; class JCR; class CONRES; //class DirComm : public QObject class DirComm : public QObject { Q_OBJECT friend class Console; public: DirComm(Console *parent, int conn); ~DirComm(); Console *m_console; int sock_read(); bool authenticate_director(JCR *jcr, DIRRES *director, CONRES *cons, char *buf, int buflen); bool is_connected() { return m_sock != NULL; } bool is_ready() { return is_connected() && m_at_prompt && m_at_main_prompt; } char *msg(); bool notify(bool enable); // enables/disables socket notification - returns the previous state bool is_notify_enabled() const; bool is_in_command() const { return m_in_command > 0; } void terminate(); bool connect_dir(); int read(void); int write(const char *msg); int write(QString msg); public slots: void notify_read_dir(int fd); private: BSOCK *m_sock; bool m_at_prompt; bool m_at_main_prompt; bool m_sent_blank; bool m_notify; int m_in_command; QSocketNotifier *m_notifier; bool m_api_set; int m_conn; bool m_in_select; }; #endif /* _DIRCOMM_H_ */ bacula-15.0.3/src/qt-console/bcomm/dircomm.cpp0000644000175000017500000004326514771010173021002 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * DirComm, Director communications,class * * Kern Sibbald, January MMVII * */ #include "bat.h" #include "console.h" #include "restore.h" #include "select.h" #include "textinput.h" #include "run/run.h" static int tls_pem_callback(char *buf, int size, const void *userdata); DirComm::DirComm(Console *parent, int conn): m_notifier(NULL), m_api_set(false) { m_console = parent; m_sock = NULL; m_at_prompt = false; m_at_main_prompt = false; m_sent_blank = false; m_conn = conn; m_in_command = 0; m_in_select = false; m_notify = false; } DirComm::~DirComm() { } /* Terminate any open socket */ void DirComm::terminate() { if (m_sock) { if (m_notifier) { m_notifier->setEnabled(false); delete m_notifier; m_notifier = NULL; m_notify = false; } if (mainWin->m_connDebug) Pmsg2(000, "DirComm %i terminating connections %s\n", m_conn, m_console->m_dir->name()); free_bsock(m_sock); } } /* * Connect to Director. 
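 *
 * (Overview of the steps below: look up a matching CONRES, initialise the
 *  Console and Director TLS/PSK contexts when configured, open the socket
 *  with new_bsock()/connect(), run authenticate_director(), then install a
 *  QSocketNotifier for asynchronous reads and switch the Director into API
 *  mode with ".api 1".)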
*/ bool DirComm::connect_dir() { JCR *jcr = new JCR; utime_t heart_beat; char buf[1024]; CONRES *cons; int numcon = 0; int i = 0; buf[0] = 0; foreach_res(cons, R_CONSOLE) { numcon++; } if (m_sock && !is_bsock_open(m_sock)) { mainWin->set_status( tr("Already connected.")); m_console->display_textf(_("Already connected\"%s\".\n"), m_console->m_dir->name()); if (mainWin->m_connDebug) { Pmsg2(000, "DirComm %i BAILING already connected %s\n", m_conn, m_console->m_dir->name()); } goto bail_out; } if (mainWin->m_connDebug)Pmsg2(000, "DirComm %i connecting %s\n", m_conn, m_console->m_dir->name()); memset(jcr, 0, sizeof(JCR)); mainWin->set_statusf(_("Connecting to Director %s:%d"), m_console->m_dir->address, m_console->m_dir->DIRport); if (m_conn == 0) { m_console->display_textf(_("Connecting to Director %s:%d\n\n"), m_console->m_dir->address, m_console->m_dir->DIRport); } /* Give GUI a chance */ app->processEvents(); LockRes(); /* If cons==NULL, default console will be used */ for (i=0; idirector && strcasecmp(cons->director, m_console->m_dir->name()) == 0) { break; } if (i == (numcon - 1)) { cons = NULL; } } /* Look for the first non-linked console */ if (cons == NULL) { for (i=0; idirector == NULL) { break; } if (i == (numcon - 1)) { cons = NULL; } } } /* If no console, take first one */ if (!cons) { cons = (CONRES *)GetNextRes(R_CONSOLE, (RES *)NULL); } UnlockRes(); /* Initialize Console TLS context once */ if (cons && !cons->tls_ctx && (cons->tls_enable || cons->tls_require)) { /* Generate passphrase prompt */ bsnprintf(buf, sizeof(buf), "Passphrase for Console \"%s\" TLS private key: ", cons->name()); /* Initialize TLS context: * Args: CA certfile, CA certdir, Certfile, Keyfile, * Keyfile PEM Callback, Keyfile CB Userdata, DHfile, Verify Peer */ cons->tls_ctx = new_tls_context(cons->tls_ca_certfile, cons->tls_ca_certdir, cons->tls_certfile, cons->tls_keyfile, tls_pem_callback, &buf, NULL, true); if (!cons->tls_ctx) { m_console->display_textf(_("Failed to initialize TLS context for Console \"%s\".\n"), m_console->m_dir->name()); if (mainWin->m_connDebug) { Pmsg2(000, "DirComm %i BAILING Failed to initialize TLS context for Console %s\n", m_conn, m_console->m_dir->name()); } goto bail_out; } } if (cons && !cons->psk_ctx && cons->tls_psk_enable) { cons->psk_ctx = new_psk_context(NULL /* cons->password */); } /* Initialize Director TLS context once */ if (!m_console->m_dir->tls_ctx && (m_console->m_dir->tls_enable || m_console->m_dir->tls_require)) { /* Generate passphrase prompt */ bsnprintf(buf, sizeof(buf), "Passphrase for Director \"%s\" TLS private key: ", m_console->m_dir->name()); /* Initialize TLS context: * Args: CA certfile, CA certdir, Certfile, Keyfile, * Keyfile PEM Callback, Keyfile CB Userdata, DHfile, Verify Peer */ m_console->m_dir->tls_ctx = new_tls_context(m_console->m_dir->tls_ca_certfile, m_console->m_dir->tls_ca_certdir, m_console->m_dir->tls_certfile, m_console->m_dir->tls_keyfile, tls_pem_callback, &buf, NULL, true); if (!m_console->m_dir->tls_ctx) { m_console->display_textf(_("Failed to initialize TLS context for Director \"%s\".\n"), m_console->m_dir->name()); mainWin->set_status("Connection failed"); if (mainWin->m_connDebug) { Pmsg2(000, "DirComm %i BAILING Failed to initialize TLS context for Director %s\n", m_conn, m_console->m_dir->name()); } goto bail_out; } } if (!m_console->m_dir->psk_ctx && m_console->m_dir->tls_psk_enable) { m_console->m_dir->psk_ctx = new_psk_context(NULL /*m_console->m_dir->password*/); } if (m_console->m_dir->heartbeat_interval) { heart_beat 
= m_console->m_dir->heartbeat_interval; } else if (cons) { heart_beat = cons->heartbeat_interval; } else { heart_beat = 0; } if (!m_sock) { m_sock = new_bsock(); } if (!m_sock->connect(NULL, 5, 15, heart_beat, _("Director daemon"), m_console->m_dir->address, NULL, m_console->m_dir->DIRport, 0)) { m_sock->destroy(); m_sock = NULL; } if (m_sock == NULL) { mainWin->set_status("Connection failed"); if (mainWin->m_connDebug) { Pmsg2(000, "DirComm %i BAILING Connection failed %s\n", m_conn, m_console->m_dir->name()); } goto bail_out; } else { /* Update page selector to green to indicate that Console is connected */ mainWin->actionConnect->setIcon(QIcon(":images/connected.png")); QBrush greenBrush(Qt::green); QTreeWidgetItem *item = mainWin->getFromHash(m_console); if (item) { item->setForeground(0, greenBrush); } } jcr->dir_bsock = m_sock; if (!authenticate_director(jcr, m_console->m_dir, cons, buf, sizeof(buf))) { m_console->display_text(buf); if (mainWin->m_connDebug) { Pmsg2(000, "DirComm %i BAILING Connection failed %s\n", m_conn, m_console->m_dir->name()); } goto bail_out; } if (buf[0]) { m_console->display_text(buf); } /* Give GUI a chance */ app->processEvents(); mainWin->set_status(_("Initializing ...")); /* * Set up input notifier */ m_notifier = new QSocketNotifier(m_sock->m_fd, QSocketNotifier::Read, 0); QObject::connect(m_notifier, SIGNAL(activated(int)), this, SLOT(notify_read_dir(int))); m_notifier->setEnabled(true); m_notify = true; write(".api 1"); m_api_set = true; m_console->displayToPrompt(m_conn); m_console->beginNewCommand(m_conn); mainWin->set_status(_("Connected")); if (mainWin->m_connDebug) { Pmsg2(000, "Returning TRUE from DirComm->connect_dir : %i %s\n", m_conn, m_console->m_dir->name()); } return true; bail_out: if (mainWin->m_connDebug) { Pmsg2(000, "Returning FALSE from DirComm->connect_dir : %i %s\n", m_conn, m_console->m_dir->name()); } delete jcr; return false; } /* * This should be moved into a bSocket class */ char *DirComm::msg() { if (m_sock) { return m_sock->msg; } return NULL; } int DirComm::write(const QString msg) { return write(msg.toUtf8().data()); } int DirComm::write(const char *msg) { if (!m_sock) { return -1; } m_sock->msglen = pm_strcpy(m_sock->msg, msg); m_at_prompt = false; m_at_main_prompt = false; if (mainWin->m_commDebug) Pmsg2(000, "conn %i send: %s\n", m_conn, msg); /* * Ensure we send only one blank line. Multiple blank lines are * simply discarded, it keeps the console output looking nicer. 
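 *
 * For example: pressing Enter twice at an idle prompt sends one empty line
 * to the Director; the second call takes the "discard multiple blanks"
 * branch and returns -1 without sending anything.  Any non-blank message
 * clears m_sent_blank, so the next blank line is transmitted again.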
*/ if (m_sock->msglen == 0 || (m_sock->msglen == 1 && *m_sock->msg == '\n')) { if (!m_sent_blank) { m_sent_blank = true; return m_sock->send(); } else { return -1; /* discard multiple blanks */ } } m_sent_blank = false; /* clear flag */ return m_sock->send(); } int DirComm::sock_read() { int stat; #ifdef HAVE_WIN32 bool wasEnabled = notify(false); stat = m_sock->recv(); notify(wasEnabled); #else stat = m_sock->recv(); #endif return stat; } /* * Blocking read from director */ int DirComm::read() { int stat = -1; if (!m_sock) { return -1; } while (m_sock) { /* Poll DIR every 50 milliseconds */ for (;;) { if (!m_sock) break; stat = m_sock->wait_data_intr(0, 50); /* wait 50 milliseconds */ if (stat > 0) { break; } app->processEvents(); if (m_api_set && m_console->is_messagesPending() && is_notify_enabled() && m_console->hasFocus()) { if (mainWin->m_commDebug) Pmsg1(000, "conn %i process_events\n", m_conn); m_console->messagesPending(false); m_console->write_dir(m_conn, ".messages", false); } } if (!m_sock) { return -1; } m_sock->msg[0] = 0; stat = sock_read(); if (stat >= 0) { if (mainWin->m_commDebug) Pmsg2(000, "conn %i got: %s\n", m_conn, m_sock->msg); if (m_at_prompt) { m_console->display_text("\n"); m_at_prompt = false; m_at_main_prompt = false; } } switch (m_sock->msglen) { case BNET_MSGS_PENDING : if (is_notify_enabled() && m_console->hasFocus()) { m_console->messagesPending(false); if (mainWin->m_commDebug) Pmsg1(000, "conn %i MSGS PENDING\n", m_conn); m_console->write_dir(m_conn, ".messages", false); m_console->displayToPrompt(m_conn); continue; } m_console->messagesPending(true); continue; case BNET_CMD_OK: if (mainWin->m_commDebug) Pmsg1(000, "conn %i CMD OK\n", m_conn); m_at_prompt = false; m_at_main_prompt = false; if (--m_in_command < 0) { m_in_command = 0; } mainWin->set_status(_("Command completed ...")); continue; case BNET_CMD_BEGIN: if (mainWin->m_commDebug) Pmsg1(000, "conn %i CMD BEGIN\n", m_conn); m_at_prompt = false; m_at_main_prompt = false; m_in_command++; mainWin->set_status(_("Processing command ...")); continue; case BNET_MAIN_PROMPT: if (mainWin->m_commDebug) Pmsg1(000, "conn %i MAIN PROMPT\n", m_conn); if (!m_at_prompt && ! 
m_at_main_prompt) { m_at_prompt = true; m_at_main_prompt = true; mainWin->set_status(_("At main prompt waiting for input ...")); } break; case BNET_SUB_PROMPT: if (mainWin->m_commDebug) Pmsg2(000, "conn %i SUB_PROMPT m_in_select=%d\n", m_conn, m_in_select); m_at_prompt = true; m_at_main_prompt = false; mainWin->set_status(_("At prompt waiting for input ...")); break; case BNET_TEXT_INPUT: if (mainWin->m_commDebug) Pmsg4(000, "conn %i TEXT_INPUT at_prompt=%d m_in_select=%d notify=%d\n", m_conn, m_at_prompt, m_in_select, is_notify_enabled()); if (!m_in_select && is_notify_enabled()) { new textInputDialog(m_console, m_conn); if (mainWin->m_commDebug) Pmsg0(000, "!m_in_select && is_notify_enabled\n"); m_at_prompt = true; m_at_main_prompt = false; mainWin->set_status(_("At prompt waiting for input ...")); } break; case BNET_CMD_FAILED: if (mainWin->m_commDebug) Pmsg1(000, "CMD FAILED\n", m_conn); if (--m_in_command < 0) { m_in_command = 0; } mainWin->set_status(_("Command failed.")); break; /* We should not get this one */ case BNET_EOD: if (mainWin->m_commDebug) Pmsg1(000, "conn %i EOD\n", m_conn); mainWin->set_status_ready(); if (!m_api_set) { break; } continue; case BNET_START_SELECT: if (mainWin->m_commDebug) Pmsg1(000, "conn %i START SELECT\n", m_conn); m_in_select = true; new selectDialog(m_console, m_conn); m_in_select = false; break; case BNET_YESNO: if (mainWin->m_commDebug) Pmsg1(000, "conn %i YESNO\n", m_conn); new yesnoPopUp(m_console, m_conn); break; case BNET_RUN_CMD: if (mainWin->m_commDebug) Pmsg1(000, "conn %i RUN CMD\n", m_conn); new runCmdPage(m_conn); break; case BNET_START_RTREE: if (mainWin->m_commDebug) Pmsg1(000, "conn %i START RTREE CMD\n", m_conn); new restorePage(m_conn); break; case BNET_END_RTREE: if (mainWin->m_commDebug) Pmsg1(000, "conn %i END RTREE CMD\n", m_conn); break; case BNET_ERROR_MSG: if (mainWin->m_commDebug) Pmsg1(000, "conn %i ERROR MSG\n", m_conn); stat = sock_read(); /* get the message */ m_console->display_text(msg()); QMessageBox::critical(m_console, "Error", msg(), QMessageBox::Ok); m_console->beginNewCommand(m_conn); mainWin->waitExit(); break; case BNET_WARNING_MSG: if (mainWin->m_commDebug) Pmsg1(000, "conn %i WARNING MSG\n", m_conn); stat = sock_read(); /* get the message */ if (!m_console->m_warningPrevent) { QMessageBox::critical(m_console, "Warning", msg(), QMessageBox::Ok); } break; case BNET_INFO_MSG: if (mainWin->m_commDebug) Pmsg1(000, "conn %i INFO MSG\n", m_conn); stat = sock_read(); /* get the message */ m_console->display_text(msg()); mainWin->set_status(msg()); break; } if (!m_sock) { stat = BNET_HARDEOF; return stat; } if (m_sock->is_stop()) { /* error or term request */ if (mainWin->m_commDebug) Pmsg1(000, "conn %i BNET STOP\n", m_conn); m_console->stopTimer(); free_bsock(m_sock); mainWin->actionConnect->setIcon(QIcon(":images/disconnected.png")); QBrush redBrush(Qt::red); QTreeWidgetItem *item = mainWin->getFromHash(m_console); item->setForeground(0, redBrush); if (m_notifier) { m_notifier->setEnabled(false); delete m_notifier; m_notifier = NULL; m_notify = false; } mainWin->set_status(_("Director disconnected.")); stat = BNET_HARDEOF; } break; } return stat; } /* Called by signal when the Director has output for us */ void DirComm::notify_read_dir(int /* fd */) { int stat; if (!mainWin->m_notify) { return; } if (mainWin->m_commDebug) Pmsg1(000, "enter read_dir conn %i read_dir\n", m_conn); stat = m_sock->wait_data(0, 5000); if (stat > 0) { if (mainWin->m_commDebug) Pmsg2(000, "read_dir conn %i stat=%d\n", m_conn, stat); while 
(read() >= 0) { m_console->display_text(msg()); } } if (mainWin->m_commDebug) Pmsg2(000, "exit read_dir conn %i stat=%d\n", m_conn, stat); } /* * When the notifier is enabled, read_dir() will automatically be * called by the Qt event loop when ever there is any output * from the Director, and read_dir() will then display it on * the console. * * When we are in a bat dialog, we want to control *all* output * from the Directory, so we set notify to off. * m_console->notify(false); */ bool DirComm::notify(bool enable) { bool prev_enabled = false; /* Set global flag */ mainWin->m_notify = enable; if (m_notifier) { prev_enabled = m_notifier->isEnabled(); m_notifier->setEnabled(enable); m_notify = enable; if (mainWin->m_connDebug) Pmsg3(000, "conn=%i set_notify=%d prev=%d\n", m_conn, enable, prev_enabled); } else if (mainWin->m_connDebug) { Pmsg2(000, "m_notifier does not exist: %i %s\n", m_conn, m_console->m_dir->name()); } return prev_enabled; } bool DirComm::is_notify_enabled() const { return m_notify; } /* * Call-back for reading a passphrase for an encrypted PEM file * This function uses getpass(), * which uses a static buffer and is NOT thread-safe. */ static int tls_pem_callback(char *buf, int size, const void *userdata) { (void)size; (void)userdata; #ifdef HAVE_TLS # if defined(HAVE_WIN32) //sendit(prompt); if (win32_cgets(buf, size) == NULL) { buf[0] = 0; return 0; } else { return strlen(buf); } # else const char *prompt = (const char *)userdata; char *passwd; passwd = getpass(prompt); bstrncpy(buf, passwd, size); return strlen(buf); # endif #else buf[0] = 0; return 0; #endif } bacula-15.0.3/src/qt-console/bcomm/dircomm_auth.cpp0000644000175000017500000001314314771010173022013 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * * Bacula UA authentication. Provides authentication with * the Director. 
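 *
 * (Summary of the exchange implemented below: bat sends
 *  "Hello <console-name> calling <BAT_VERSION> tlspsk=<need>", answers the
 *  Director's CRAM-MD5 challenge using the Console or Director password,
 *  optionally negotiates TLS, and then expects a "1000 OK" reply (or
 *  "2000 OK Hello" when talking to a File Daemon); for Directors reporting
 *  version >= 1, comm line compression is enabled when configured.)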
* * Kern Sibbald, June MMI adapted to bat, Jan MMVI * */ #include "bat.h" /* * Version at end of Hello * prior to 06Aug13 no version * 1 21Oct13 - added comm line compression */ #define BAT_VERSION 1 /* Commands sent to Director */ static char hello[] = "Hello %s calling %d tlspsk=%d\n"; /* Response from Director */ static char oldOKhello[] = "1000 OK:"; static char newOKhello[] = "1000 OK: %d"; static char FDOKhello[] = "2000 OK Hello %d"; class AuthenticateBase; class DirCommAuthenticate: public AuthenticateBase { char *errmsg; int errmsg_len; public: DirCommAuthenticate(BSOCK *bsock, char *a_errmsg, int a_errmsg_len); ~DirCommAuthenticate(); virtual void TLSFailure(); virtual bool CheckTLSRequirement(); bool authenticate_director(DIRRES *director, CONRES *cons, bool dump_ok); }; DirCommAuthenticate::DirCommAuthenticate(BSOCK *bsock, char *a_errmsg, int a_errmsg_len): AuthenticateBase(NULL, bsock, dtCli, dcCON, dcDIR), errmsg(a_errmsg), errmsg_len(a_errmsg_len) { } DirCommAuthenticate::~DirCommAuthenticate() { }; void DirCommAuthenticate::TLSFailure() { bsnprintf(errmsg, errmsg_len, _("TLS negotiation failed with Director at \"%s:%d\"\n"), bsock->host(), bsock->port()); } bool DirCommAuthenticate::CheckTLSRequirement() { /* Verify that the connection is willing to meet our TLS requirements */ switch (TestTLSRequirement()) { case TLS_REQ_ERR_LOCAL: bsnprintf(errmsg, errmsg_len, _("Authorization problem with Director at \"%s:%d\":" " Remote server requires TLS.\n"), bsock->host(), bsock->port()); return false; case TLS_REQ_ERR_REMOTE: bsnprintf(errmsg, errmsg_len, _("Authorization problem:" " Remote server at \"%s:%d\" did not advertise required TLS support.\n"), bsock->host(), bsock->port()); return false; case TLS_REQ_OK: break; } return true; } bool DirCommAuthenticate::authenticate_director(DIRRES *director, CONRES *cons, bool dump_ok) { BSOCK *dir = bsock; int dir_version = 0; char bashed_name[MAX_NAME_LENGTH]; errmsg[0] = 0; /* * Send my name to the Director then do authentication */ if (cons) { bstrncpy(bashed_name, cons->hdr.name, sizeof(bashed_name)); bash_spaces(bashed_name); CalcLocalTLSNeedFromRes(cons->tls_enable, cons->tls_require, cons->tls_authenticate, false, NULL, cons->tls_ctx, cons->tls_psk_enable, cons->psk_ctx, cons->password); } else { bstrncpy(bashed_name, "*UserAgent*", sizeof(bashed_name)); CalcLocalTLSNeedFromRes(director->tls_enable, director->tls_require, director->tls_authenticate, false, NULL, director->tls_ctx, director->tls_psk_enable, director->psk_ctx, director->password); } /* Timeout Hello after 15 secs */ StartAuthTimeout(15); dir->fsend(hello, bashed_name, BAT_VERSION, tlspsk_local_need); if (!ClientCramMD5Authenticate(password)) { goto bail_out; } if (!HandleTLS()) { return false; } Dmsg1(6, ">dird: %s", dir->msg); if (dir->recv() <= 0) { dir->stop_timer(); bsnprintf(errmsg, errmsg_len, _("Bad response to Hello command: ERR=%s\n" "The Director at \"%s:%d\" is probably not running.\n"), dir->bstrerror(), dir->host(), dir->port()); return false; } Dmsg1(10, "msg); if (strncmp(dir->msg, oldOKhello, sizeof(oldOKhello)-1) == 0) { /* If Dir version exists, get it */ sscanf(dir->msg, newOKhello, &dir_version); /* We do not check the last %d */ } else if (strncmp(dir->msg, FDOKhello, sizeof(FDOKhello)-3) == 0) { sscanf(dir->msg, FDOKhello, &dir_version); // TODO: Keep somewhere that we need a proxy command, or run it directly? 
} else { bsnprintf(errmsg, errmsg_len, _("Director at \"%s:%d\" rejected Hello command\n"), dir->host(), dir->port()); return false; } /* Turn on compression for newer Directors */ if (dir_version >= 1 && (!cons || cons->comm_compression)) { dir->set_compress(); } if (dump_ok) { bsnprintf(errmsg, errmsg_len, "%s", dir->msg); } return true; bail_out: bsnprintf(errmsg, errmsg_len, _("Authorization problem with Director at \"%s:%d\"\n" "Most likely the passwords do not agree.\n" "If you are using TLS, there may have been a certificate validation error during the TLS handshake.\n" "For help, please see " MANUAL_AUTH_URL "\n"), dir->host(), dir->port()); return false; } /* * Authenticate Director */ bool DirComm::authenticate_director(JCR *jcr, DIRRES *director, CONRES *cons, char *errmsg, int errmsg_len) { DirCommAuthenticate auth(jcr->dir_bsock, errmsg, errmsg_len); return auth.authenticate_director(director, cons, m_conn == 0); } bacula-15.0.3/src/qt-console/mediainfo/0000755000175000017500000000000014771010173017470 5ustar bsbuildbsbuildbacula-15.0.3/src/qt-console/mediainfo/mediainfo.h0000644000175000017500000000232314771010173021574 0ustar bsbuildbsbuild#ifndef _MEDIAINFO_H_ #define _MEDIAINFO_H_ /* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * */ #if QT_VERSION >= 0x050000 #include #else #include #endif #include "ui_mediainfo.h" #include "console.h" #include "pages.h" class MediaInfo : public Pages, public Ui::mediaInfoForm { Q_OBJECT public: MediaInfo(QTreeWidgetItem *parentWidget, QString &mediaId); private slots: void pruneVol(); void purgeVol(); void deleteVol(); void editVol(); void showInfoForJob(QTableWidgetItem * item); private: void populateForm(); QString m_mediaName; QString m_mediaId; }; #endif /* _MEDIAINFO_H_ */ bacula-15.0.3/src/qt-console/mediainfo/mediainfo.cpp0000644000175000017500000002131714771010173022133 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ #include "bat.h" #include #include #include #include "mediaedit/mediaedit.h" #include "relabel/relabel.h" #include "run/run.h" #include "mediainfo.h" #include "util/fmtwidgetitem.h" #include "job/job.h" /* * A constructor */ MediaInfo::MediaInfo(QTreeWidgetItem *parentWidget, QString &mediaName) : Pages() { setupUi(this); pgInitialize(tr("Media Info"), parentWidget); QTreeWidgetItem* thisitem = mainWin->getFromHash(this); thisitem->setIcon(0,QIcon(QString::fromUtf8(":images/cartridge-edit.png"))); m_mediaName = mediaName; connect(pbPrune, SIGNAL(clicked()), this, SLOT(pruneVol())); connect(pbPurge, SIGNAL(clicked()), this, SLOT(purgeVol())); connect(pbDelete, SIGNAL(clicked()), this, SLOT(deleteVol())); connect(pbEdit, SIGNAL(clicked()), this, SLOT(editVol())); connect(tableJob, SIGNAL(itemDoubleClicked(QTableWidgetItem*)), this, SLOT(showInfoForJob(QTableWidgetItem *))); dockPage(); setCurrent(); populateForm(); } /* * Subroutine to call class to show the log in the database from that job */ void MediaInfo::showInfoForJob(QTableWidgetItem * item) { QTreeWidgetItem* pageSelectorTreeWidgetItem = mainWin->getFromHash(this); int row = item->row(); QString jobid = tableJob->item(row, 0)->text(); new Job(jobid, pageSelectorTreeWidgetItem); // connect(j, SIGNAL(destroyed()), this, SLOT(populateTree())); } void MediaInfo::pruneVol() { new prunePage(m_mediaName, ""); // connect(prune, SIGNAL(destroyed()), this, SLOT(populateTree())); } // TODO: use same functions as in medialist.cpp void MediaInfo::purgeVol() { if (QMessageBox::warning(this, "Bat", tr("Are you sure you want to purge ?? !!!.\n" "The Purge command will delete associated Catalog database records from Jobs and" " Volumes without considering the retention period. Purge works only on the" " Catalog database and does not affect data written to Volumes. This command can" " be dangerous because you can delete catalog records associated with current" " backups of files, and we recommend that you do not use it unless you know what" " you are doing.\n" "Press OK to proceed with the purge operation?"), QMessageBox::Ok | QMessageBox::Cancel) == QMessageBox::Cancel) { return; } QString cmd("purge volume="); cmd += m_mediaName; consoleCommand(cmd); } void MediaInfo::deleteVol() { if (QMessageBox::warning(this, "Bat", tr("Are you sure you want to delete?? !!!.\n" "This delete command is used to delete a Volume record and all associated catalog" " records that were created. This command operates only on the Catalog" " database and has no effect on the actual data written to a Volume. This" " command can be dangerous and we strongly recommend that you do not use" " it unless you know what you are doing. All Jobs and all associated" " records (File and JobMedia) will be deleted from the catalog." 
"Press OK to proceed with delete operation.?"), QMessageBox::Ok | QMessageBox::Cancel) == QMessageBox::Cancel) { return; } QString cmd("delete volume="); cmd += m_mediaName; consoleCommand(cmd); } void MediaInfo::editVol() { new MediaEdit(mainWin->getFromHash(this), m_mediaId); // connect(edit, SIGNAL(destroyed()), this, SLOT(populateTree())); } /* * Populate the text in the window */ void MediaInfo::populateForm() { utime_t t; time_t ttime; QString stat, LastWritten; struct tm tm; char buf[256]; QString query = "SELECT MediaId, VolumeName, Pool.Name, MediaType, FirstWritten," "LastWritten, VolMounts, VolBytes, Media.Enabled," "Location.Location, VolStatus, RecyclePool.Name, Media.Recycle, " "VolReadTime/1000000, VolWriteTime/1000000, Media.VolUseDuration, " "Media.MaxVolJobs, Media.MaxVolFiles, Media.MaxVolBytes, " "Media.VolRetention,InChanger,Slot " "FROM Media JOIN Pool USING (PoolId) LEFT JOIN Pool AS RecyclePool " "ON (Media.RecyclePoolId=RecyclePool.PoolId) " "LEFT JOIN Location ON (Media.LocationId=Location.LocationId) " "WHERE Media.VolumeName='" + m_mediaName + "'"; if (mainWin->m_sqlDebug) { Pmsg1(000, "MediaInfo query cmd : %s\n",query.toUtf8().data()); } QStringList results; if (m_console->sql_cmd(query, results)) { QString resultline; QStringList fieldlist; foreach (resultline, results) { // should have only one result fieldlist = resultline.split("\t"); QStringListIterator fld(fieldlist); m_mediaId = fld.next(); label_VolumeName->setText(fld.next()); label_Pool->setText(fld.next()); label_MediaType->setText(fld.next()); label_FirstWritten->setText(fld.next()); LastWritten = fld.next(); label_LastWritten->setText(LastWritten); // label_VolFiles->setText(fld.next()); label_VolMounts->setText(fld.next()); label_VolBytes->setText(convertBytesSI(fld.next().toULongLong())); label_Enabled->setPixmap(QPixmap(":/images/inflag" + fld.next() + ".png")); label_Location->setText(fld.next()); label_VolStatus->setText(fld.next()); label_RecyclePool->setText(fld.next()); chkbox_Recycle->setCheckState(fld.next().toInt()?Qt::Checked:Qt::Unchecked); edit_utime(fld.next().toULongLong(), buf, sizeof(buf)); label_VolReadTime->setText(QString(buf)); edit_utime(fld.next().toULongLong(), buf, sizeof(buf)); label_VolWriteTime->setText(QString(buf)); edit_utime(fld.next().toULongLong(), buf, sizeof(buf)); label_VolUseDuration->setText(QString(buf)); label_MaxVolJobs->setText(fld.next()); label_MaxVolFiles->setText(fld.next()); label_MaxVolBytes->setText(fld.next()); stat = fld.next(); edit_utime(stat.toULongLong(), buf, sizeof(buf)); label_VolRetention->setText(QString(buf)); if (LastWritten != "") { t = str_to_utime(LastWritten.toLatin1().data()); t = t + stat.toULongLong(); ttime = t; localtime_r(&ttime, &tm); strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", &tm); label_Expire->setText(QString(buf)); } label_Online->setPixmap(QPixmap(":/images/inflag"+fld.next()+".png")); // label_VolFiles->setText(fld.next()); // label_VolErrors->setText(fld.next()); // stat=fld.next(); // jobstatus_to_ascii_gui(stat[0].toLatin1(), buf, sizeof(buf)); // stat = buf; // } } query = "SELECT DISTINCT JobId, Name, StartTime, Type, Level, JobFiles," "JobBytes,JobStatus " "FROM Job JOIN JobMedia USING (JobId) JOIN Media USING (MediaId) " "WHERE Media.VolumeName = '" + m_mediaName + "'"; if (mainWin->m_sqlDebug) { Pmsg1(000, "MediaInfo query cmd : %s\n",query.toUtf8().data()); } results.clear(); if (m_console->sql_cmd(query, results)) { QString resultline; QStringList fieldlist; int row = 0; 
tableJob->setRowCount(results.size()); foreach (resultline, results) { fieldlist = resultline.split("\t"); QStringListIterator fld(fieldlist); int index=0; TableItemFormatter jobitem(*tableJob, row); /* JobId */ jobitem.setNumericFld(index++, fld.next()); /* job name */ jobitem.setTextFld(index++, fld.next()); /* job starttime */ jobitem.setTextFld(index++, fld.next(), true); /* job type */ jobitem.setJobTypeFld(index++, fld.next()); /* job level */ jobitem.setJobLevelFld(index++, fld.next()); /* job files */ jobitem.setNumericFld(index++, fld.next()); /* job bytes */ jobitem.setBytesFld(index++, fld.next()); /* job status */ jobitem.setJobStatusFld(index++, fld.next()); row++; } } tableJob->resizeColumnsToContents(); tableJob->resizeRowsToContents(); tableJob->verticalHeader()->hide(); /* make read only */ tableJob->setEditTriggers(QAbstractItemView::NoEditTriggers); } bacula-15.0.3/src/qt-console/mediainfo/mediainfo.ui0000644000175000017500000005225714771010173021775 0ustar bsbuildbsbuild mediaInfoForm 0 0 828 814 Form Edit :/images/edit.png:/images/edit.png true Purge :/images/purge.png:/images/purge.png true Delete :/images/purge.png:/images/purge.png true Prune :/images/edit-cut.png:/images/edit-cut.png true false Load :/images/intern.png:/images/intern.png true false Unload :/images/extern.png:/images/extern.png true Qt::Horizontal 40 20 QLayout::SetFixedSize 0 0 251 241 251 243 Information Name: Vol0001 Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse Pool: Default Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse Online: :/images/inflag0.png Enabled: yes Location: Vault Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse Status: Append Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse Media Type: File Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse Recycle Pool: Scratch Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse 0 0 261 241 261 241 Statistics Vol Bytes: 19.8 MB Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse Vol Mounts: 10 Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse Recycle count: 5 Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse Read time: 10 mins Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse Write time: 20 mins Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse Errors: 0 Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse Last Written: 2009-07-05 12:23:00 Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse First Written: 2009-06-05 10:00:00 Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse 0 0 200 241 200 241 Limits Use duration: 0 Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse Max jobs: 0 Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse Max files: 0 Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse Max bytes: 0 Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse Recycle: false Retention: 365 days Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse Expire: 2010-08-03 23:10:03 Qt::LinksAccessibleByMouse|Qt::TextSelectableByMouse Qt::Horizontal 40 20 0 0 Jobs true JobId true Name true Start Time true Type true Level true Files true Bytes true Status bacula-15.0.3/src/qt-console/COMMANDS0000644000175000017500000000604114771010173016662 0ustar bsbuildbsbuildcancel ujobid= delete pool= should ask if this would have bad effects if say media existed in the pool the pool existed in the configuration file. 
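   (for reference, the text-console form is "delete pool=<pool-name>";
    the pool name here is only a placeholder)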
list list ujobid (list job with unique name) list nextvol job= days=nnn unmount [ jobid= | job= ] Mmmmm update /* Attempted, having problems */ Volume from Pool All Volumes from Pool CONFIG FILE =========================== add [pool= storage= jobid=] These are done from config files create [pool=] Done in a config file CHOOSE NOT TO (for now at least) =========================== autodisplay on/off could be done in the context of the console This may interfere with our connection issues Also, there is a configurable qt timer to automatically ask for messages python not needed from bat?? setdebug I'd say we could choose not to implement this in bat quit I'd like to have a disconnect graphical option. exit Not really needed use still need to make decisions about how to handle multiple catalogs version could be done in console as long as it is explicite about that it is the version of the director var Mmmmmm wait Mmmmmmm DONE =========================== automount on/off Added buttons to the label dialog box to execute automount command cancel jobid= job= delete [volume= job jobid=] disable job and enable job Could be done in the context of a jobs resource window which is not yet created estimate Could be a dialog in the context of a jobs window. list list jobid= (list jobid id) list files jobid= list jobmedia jobid= list volumes jobid= list jobtotals (from page selector on joblist widget) The next few are accomplishable graphically with joblist class list jobs list job= (list all jobs with "job-name") list jobname= (same as above) list jobmedia list jobmedia job= The next few are accomplishable graphically with the medialist class list volumes list volumes pool= list volume= list pools Accomplishable with the clients class graphically list clients list files job= do this as a context option on page selector on joblist list volumes job= list nextvolume job= list nextvol job= help context sensitive from page selector console label Done by kern before I started prune files|jobs|volume client= volume= could add as a dialog box from both client and medialist purge files jobid=|job=|client= purge volume|volume= (of all jobs) purge jobs client= (of all jobs) relabel release storage= Would need to explain what this does in bat with a dialog run reload status unmount storage= [ drive=\lt{}num\gt{} ] update volume (via mediaedit) "update slots" "update slots scan" (context of storage) bacula-15.0.3/src/qt-console/PREFS0000644000175000017500000000035314771010173016340 0ustar bsbuildbsbuildTo create a preference variable Create member in mainwin.h use designer to add the widget to modify in prefs.ui modify the following functions in mainwin.cpp MainWin::setPreferences() prefsDialog::accept() MainWin::readPreferences() bacula-15.0.3/src/qt-console/clients/0000755000175000017500000000000014771010173017176 5ustar bsbuildbsbuildbacula-15.0.3/src/qt-console/clients/clients.cpp0000644000175000017500000002566514771010173021361 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. 
Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Clients Class * * Dirk Bartley, March 2007 * */ #include "bat.h" #include #include #include "clients/clients.h" #include "run/run.h" #include "status/clientstat.h" #include "util/fmtwidgetitem.h" Clients::Clients() : Pages() { setupUi(this); m_name = tr("Clients"); pgInitialize(); QTreeWidgetItem* thisitem = mainWin->getFromHash(this); thisitem->setIcon(0,QIcon(QString::fromUtf8(":images/network-server.png"))); /* tableWidget, Storage Tree Tree Widget inherited from ui_client.h */ m_populated = false; m_checkcurwidget = true; m_closeable = false; m_firstpopulation = true; /* add context sensitive menu items specific to this classto the page * selector tree. m_contextActions is QList of QActions */ m_contextActions.append(actionRefreshClients); createContextMenu(); } Clients::~Clients() { } /* * The main meat of the class!! The function that queries the director and * creates the widgets with appropriate values. */ void Clients::populateTable() { m_populated = true; Freeze frz(*tableWidget); /* disable updating*/ QStringList headerlist = (QStringList() << tr("Client Name") << tr("File Retention") << tr("Job Retention") << tr("AutoPrune") << tr("ClientId") << tr("Uname") ); int sortcol = headerlist.indexOf(tr("Client Name")); Qt::SortOrder sortord = Qt::AscendingOrder; if (tableWidget->rowCount()) { sortcol = tableWidget->horizontalHeader()->sortIndicatorSection(); sortord = tableWidget->horizontalHeader()->sortIndicatorOrder(); } m_checkcurwidget = false; tableWidget->clear(); m_checkcurwidget = true; tableWidget->setColumnCount(headerlist.count()); tableWidget->setHorizontalHeaderLabels(headerlist); tableWidget->horizontalHeader()->setHighlightSections(false); tableWidget->setRowCount(m_console->client_list.count()); tableWidget->verticalHeader()->hide(); tableWidget->setSelectionBehavior(QAbstractItemView::SelectRows); tableWidget->setSelectionMode(QAbstractItemView::SingleSelection); tableWidget->setSortingEnabled(false); /* rows move on insert if sorting enabled */ bool first = true; QString client_comsep(""); foreach (QString clientName, m_console->client_list){ if (first) { client_comsep += "'" + clientName + "'"; first = false; } else client_comsep += ",'" + clientName + "'"; } if (client_comsep != "") { QString query(""); query += "SELECT Name, FileRetention, JobRetention, AutoPrune, ClientId, Uname" " FROM Client" " WHERE ClientId IN (SELECT MAX(ClientId) FROM Client WHERE"; query += " Name IN (" + client_comsep + ")"; query += " GROUP BY Name) ORDER BY Name"; QStringList results; if (mainWin->m_sqlDebug) Pmsg1(000, "Clients query cmd : %s\n",query.toUtf8().data()); if (m_console->sql_cmd(query, results)) { int row = 0; /* Iterate through the record returned from the query */ foreach (QString resultline, results) { QStringList fieldlist = resultline.split("\t"); if (fieldlist.size() < 5) { // Uname is checked after Pmsg1(0, "Unexpected line %s\n", resultline.toUtf8().data()); continue; } if (m_firstpopulation) { settingsOpenStatus(fieldlist[0]); } TableItemFormatter item(*tableWidget, row); /* Iterate through fields in the record */ QStringListIterator fld(fieldlist); int col = 0; /* name */ item.setTextFld(col++, fld.next()); /* file retention */ item.setDurationFld(col++, fld.next()); /* job retention */ item.setDurationFld(col++, fld.next()); /* autoprune */ item.setBoolFld(col++, fld.next()); /* client id */ item.setNumericFld(col++, fld.next()); /* uname */ if (fld.hasNext()) { item.setTextFld(col++, fld.next()); } 
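               /* Older catalog entries may not return a Uname value (only the
                * first five fields are guaranteed by the length check above),
                * so leave the column blank in that case. */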
else { item.setTextFld(col++, ""); } row++; } } } /* set default sorting */ tableWidget->sortByColumn(sortcol, sortord); tableWidget->setSortingEnabled(true); /* Resize rows and columns */ tableWidget->resizeColumnsToContents(); tableWidget->resizeRowsToContents(); /* make read only */ int rcnt = tableWidget->rowCount(); int ccnt = tableWidget->columnCount(); for(int r=0; r < rcnt; r++) { for(int c=0; c < ccnt; c++) { QTableWidgetItem* item = tableWidget->item(r, c); if (item) { item->setFlags(Qt::ItemFlags(item->flags() & (~Qt::ItemIsEditable))); } } } m_firstpopulation = false; } /* * When the treeWidgetItem in the page selector tree is singleclicked, Make sure * The tree has been populated. */ void Clients::PgSeltreeWidgetClicked() { if(!m_populated) { populateTable(); } if (!isOnceDocked()) { dockPage(); } } /* * Added to set the context menu policy based on currently active treeWidgetItem * signaled by currentItemChanged */ void Clients::tableItemChanged(QTableWidgetItem *currentwidgetitem, QTableWidgetItem *previouswidgetitem ) { /* m_checkcurwidget checks to see if this is during a refresh, which will segfault */ if (m_checkcurwidget) { int currentRow = currentwidgetitem->row(); QTableWidgetItem *currentrowzeroitem = tableWidget->item(currentRow, 0); m_currentlyselected = currentrowzeroitem->text(); /* The Previous item */ if (previouswidgetitem) { /* avoid a segfault if first time */ tableWidget->removeAction(actionListJobsofClient); tableWidget->removeAction(actionStatusClientWindow); tableWidget->removeAction(actionPurgeJobs); tableWidget->removeAction(actionPrune); } if (m_currentlyselected.length() != 0) { /* set a hold variable to the client name in case the context sensitive * menu is used */ tableWidget->addAction(actionListJobsofClient); tableWidget->addAction(actionStatusClientWindow); tableWidget->addAction(actionPurgeJobs); tableWidget->addAction(actionPrune); } } } /* * Setup a context menu * Made separate from populate so that it would not create context menu over and * over as the tree is repopulated. */ void Clients::createContextMenu() { tableWidget->setContextMenuPolicy(Qt::ActionsContextMenu); tableWidget->addAction(actionRefreshClients); /* for the tableItemChanged to maintain m_currentJob */ connect(tableWidget, SIGNAL( currentItemChanged(QTableWidgetItem *, QTableWidgetItem *)), this, SLOT(tableItemChanged(QTableWidgetItem *, QTableWidgetItem *))); /* connect to the action specific to this pages class */ connect(actionRefreshClients, SIGNAL(triggered()), this, SLOT(populateTable())); connect(actionListJobsofClient, SIGNAL(triggered()), this, SLOT(showJobs())); connect(actionStatusClientWindow, SIGNAL(triggered()), this, SLOT(statusClientWindow())); connect(actionPurgeJobs, SIGNAL(triggered()), this, SLOT(consolePurgeJobs())); connect(actionPrune, SIGNAL(triggered()), this, SLOT(prune())); } /* * Function responding to actionListJobsofClient which calls mainwin function * to create a window of a list of jobs of this client. */ void Clients::showJobs() { QTreeWidgetItem *parentItem = mainWin->getFromHash(this); mainWin->createPageJobList("", m_currentlyselected, "", "", parentItem); } /* * Function responding to actionListJobsofClient which calls mainwin function * to create a window of a list of jobs of this client. 
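 * Note: the wording above is copied from showJobs(); this slot actually issues
 * the text-console command "status client=<name>" for the currently selected
 * client.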
*/ void Clients::consoleStatusClient() { QString cmd("status client="); cmd += m_currentlyselected; consoleCommand(cmd); } /* * Virtual function which is called when this page is visible on the stack */ void Clients::currentStackItem() { if(!m_populated) { populateTable(); /* Create the context menu for the client table */ } } /* * Function responding to actionPurgeJobs */ void Clients::consolePurgeJobs() { if (QMessageBox::warning(this, "Bat", tr("Are you sure you want to purge all jobs of client \"%1\" ?\n" "The Purge command will delete associated Catalog database records from Jobs and" " Volumes without considering the retention period. Purge works only on the" " Catalog database and does not affect data written to Volumes. This command can" " be dangerous because you can delete catalog records associated with current" " backups of files, and we recommend that you do not use it unless you know what" " you are doing.\n\n" " Is there any way I can get you to click Cancel here? You really don't want to do" " this\n\n" "Press OK to proceed with the purge operation?").arg(m_currentlyselected), QMessageBox::Ok | QMessageBox::Cancel, QMessageBox::Cancel) == QMessageBox::Cancel) { return; } QString cmd("purge jobs client="); cmd += m_currentlyselected; consoleCommand(cmd); } /* * Function responding to actionPrune */ void Clients::prune() { new prunePage("", m_currentlyselected); } /* * Function responding to action to create new client status window */ void Clients::statusClientWindow() { /* if one exists, then just set it current */ bool found = false; foreach(Pages *page, mainWin->m_pagehash) { if (mainWin->currentConsole() == page->console()) { if (page->name() == tr("Client Status %1").arg(m_currentlyselected)) { found = true; page->setCurrent(); } } } if (!found) { QTreeWidgetItem *parentItem = mainWin->getFromHash(this); new ClientStat(m_currentlyselected, parentItem); } } /* * If first time, then check to see if there were status pages open the last time closed * if so open */ void Clients::settingsOpenStatus(QString &client) { QSettings settings(m_console->m_dir->name(), "bat"); settings.beginGroup("OpenOnExit"); QString toRead = "ClientStatus_" + client; if (settings.value(toRead) == 1) { new ClientStat(client, mainWin->getFromHash(this)); setCurrent(); mainWin->getFromHash(this)->setExpanded(true); } settings.endGroup(); } bacula-15.0.3/src/qt-console/clients/clients.ui0000644000175000017500000000465314771010173021206 0ustar bsbuildbsbuild ClientForm 0 0 492 428 Client Tree :/images/view-refresh.png:/images/view-refresh.png Refresh Client List Requery the director for the list of clients. :/images/emblem-system.png:/images/emblem-system.png List Jobs of Client Open a joblist page selecting this client. :/images/weather-severe-alert.png:/images/weather-severe-alert.png Purge Jobs Purge jobs peformed from this client. :/images/edit-cut.png:/images/edit-cut.png Prune Jobs Open the diaolog to prune for this client. :/images/status.png:/images/status.png Status Client bacula-15.0.3/src/qt-console/clients/clients.h0000644000175000017500000000274214771010173021015 0ustar bsbuildbsbuild#ifndef _CLIENTS_H_ #define _CLIENTS_H_ /* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. 
You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Dirk Bartley, March 2007 */ #if QT_VERSION >= 0x050000 #include #else #include #endif #include "ui_clients.h" #include "console.h" #include "pages.h" class Clients : public Pages, public Ui::ClientForm { Q_OBJECT public: Clients(); ~Clients(); virtual void PgSeltreeWidgetClicked(); virtual void currentStackItem(); public slots: void tableItemChanged(QTableWidgetItem *, QTableWidgetItem *); private slots: void populateTable(); void showJobs(); void consoleStatusClient(); void statusClientWindow(); void consolePurgeJobs(); void prune(); private: void createContextMenu(); void settingsOpenStatus(QString& client); QString m_currentlyselected; bool m_populated; bool m_firstpopulation; bool m_checkcurwidget; }; #endif /* _CLIENTS_H_ */ bacula-15.0.3/src/qt-console/bat.pro.mingw64.in0000644000175000017500000001332114771010173020724 0ustar bsbuildbsbuild###################################################################### # # !!!!!!! IMPORTANT !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # # Edit only bat.pro.mingw64.in -- bat.pro.mingw64 is built by the ./configure program # # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # CONFIG += qt cross-win32 QT += core gui widgets # CONFIG += debug bins.path = ./ bins.files = ./bat confs.path = ./ confs.commands = ./install_conf_file TEMPLATE = app TARGET = bat DEPENDPATH += . INCLUDEPATH += .. . ./console ./restore ./select cross-win32 { # LIBS += ../win32/dll/bacula.a LIBS += -mwindows -L../win32/release64 -lbacula -lwinpthread } !cross-win32 { LIBS += -L../lib -lbac -L../findlib -lbacfind @OPENSSL_LIBS@ } qwt { INCLUDEPATH += @QWT_INC@ LIBS += @QWT_LDFLAGS@ @QWT_LIB@ } RESOURCES = main.qrc MOC_DIR = moc64 OBJECTS_DIR = obj64 UI_DIR = ui64 QMAKE_CC = x86_64-w64-mingw32-gcc QMAKE_CXX = x86_64-w64-mingw32-g++ QMAKE_INCDIR = $(DEPKGS)/depkgs-mingw-w64/include/ ../win32/compat ui/ $(DEPKGS)/depkgs-mingw-w64/include/QtGui $(DEPKGS)/depkgs-mingw-w64/include/QtCore $(DEPKGS)/depkgs-mingw-w64/include/QtWidgets ui64/ QMAKE_INCDIR_QT = QMAKE_LIBDIR_QT = $(DEPKGS)/depkgs-mingw-w64/lib QMAKE_LINK = x86_64-w64-mingw32-g++ QMAKE_LFLAGS = -mthreads -Wl,-enable-stdcall-fixup -Wl,-enable-auto-import -m64 -fno-strict-aliasing -Wl,-enable-runtime-pseudo-reloc QMAKE_LIB = x86_64-w64-mingw32-ar -ru QMAKE_RC = x86_64-w64-mingw32-windres # Main window FORMS += main.ui FORMS += prefs.ui FORMS += label/label.ui FORMS += relabel/relabel.ui FORMS += mount/mount.ui FORMS += console/console.ui FORMS += restore/restore.ui restore/prerestore.ui restore/brestore.ui FORMS += restore/runrestore.ui restore/restoretree.ui FORMS += run/run.ui run/runcmd.ui run/estimate.ui run/prune.ui FORMS += select/select.ui select/textinput.ui FORMS += medialist/medialist.ui mediaedit/mediaedit.ui joblist/joblist.ui FORMS += medialist/mediaview.ui FORMS += clients/clients.ui storage/storage.ui fileset/fileset.ui FORMS += joblog/joblog.ui jobs/jobs.ui job/job.ui FORMS += help/help.ui mediainfo/mediainfo.ui FORMS += status/dirstat.ui storage/content.ui FORMS += status/clientstat.ui FORMS += status/storstat.ui qwt { FORMS += jobgraphs/jobplotcontrols.ui } # Main directory HEADERS += 
mainwin.h bat.h bat_conf.h qstd.h pages.h SOURCES += main.cpp bat_conf.cpp mainwin.cpp qstd.cpp pages.cpp # include authenticatebase.cpp that is already in libbac because # libbac is compiled with -no-rtti while qt-console use RTTI by # default. Inherited sub-class with virtual method in shared lib # using a diffferent RTTI model dont work well, see this error a # link time: "... DirCommAuthenticate ... undefined reference to # typeinfo for AuthenticateBase" HEADERS += ../lib/authenticatebase.h SOURCES += ../lib/authenticatebase.cc # bcomm HEADERS += bcomm/dircomm.h SOURCES += bcomm/dircomm.cpp bcomm/dircomm_auth.cpp # Console HEADERS += console/console.h SOURCES += console/console.cpp # Restore HEADERS += restore/restore.h SOURCES += restore/prerestore.cpp restore/restore.cpp restore/brestore.cpp # Label dialog HEADERS += label/label.h SOURCES += label/label.cpp # Relabel dialog HEADERS += relabel/relabel.h SOURCES += relabel/relabel.cpp # Mount dialog HEADERS += mount/mount.h SOURCES += mount/mount.cpp # Run dialog HEADERS += run/run.h SOURCES += run/run.cpp run/runcmd.cpp run/estimate.cpp run/prune.cpp # Select dialog HEADERS += select/select.h select/textinput.h SOURCES += select/select.cpp select/textinput.cpp ## MediaList HEADERS += medialist/medialist.h SOURCES += medialist/medialist.cpp # MediaView HEADERS += medialist/mediaview.h SOURCES += medialist/mediaview.cpp ## MediaEdit HEADERS += mediaedit/mediaedit.h SOURCES += mediaedit/mediaedit.cpp ## JobList HEADERS += joblist/joblist.h SOURCES += joblist/joblist.cpp ## Clients HEADERS += clients/clients.h SOURCES += clients/clients.cpp ## Storage HEADERS += storage/storage.h SOURCES += storage/storage.cpp ## Storage content HEADERS += storage/content.h SOURCES += storage/content.cpp ## Fileset HEADERS += fileset/fileset.h SOURCES += fileset/fileset.cpp ## Job log HEADERS += joblog/joblog.h SOURCES += joblog/joblog.cpp ## Job HEADERS += job/job.h SOURCES += job/job.cpp ## Jobs HEADERS += jobs/jobs.h SOURCES += jobs/jobs.cpp ## RestoreTree HEADERS += restore/restoretree.h SOURCES += restore/restoretree.cpp ## Job Step Graphs qwt { HEADERS += jobgraphs/jobplot.h SOURCES += jobgraphs/jobplot.cpp } # Help dialog HEADERS += help/help.h SOURCES += help/help.cpp # Media info dialog HEADERS += mediainfo/mediainfo.h SOURCES += mediainfo/mediainfo.cpp ## Status Dir HEADERS += status/dirstat.h SOURCES += status/dirstat.cpp ## Status Client HEADERS += status/clientstat.h SOURCES += status/clientstat.cpp ## Status Client HEADERS += status/storstat.h SOURCES += status/storstat.cpp # Utility sources HEADERS += util/fmtwidgetitem.h util/comboutil.h SOURCES += util/fmtwidgetitem.cpp util/comboutil.cpp INSTALLS += bins INSTALLS += confs QMAKE_EXTRA_TARGETS += depend TRANSLATIONS += ts/bat_fr.ts ts/bat_de.ts bacula-15.0.3/src/qt-console/win32/0000755000175000017500000000000014771010173016477 5ustar bsbuildbsbuildbacula-15.0.3/src/qt-console/win32/qmake.conf0000644000175000017500000000724614771010173020455 0ustar bsbuildbsbuild# # qmake configuration for win32-g++ # # Written for MinGW # MAKEFILE_GENERATOR = MINGW TEMPLATE = app CONFIG += qt warn_on release link_prl copy_dir_files debug_and_release debug_and_release_target precompile_header cross-win32 QT += core gui DEFINES += UNICODE QMAKE_COMPILER_DEFINES += __GNUC__ WIN32 QMAKE_COMPILER = gcc QMAKE_COMPILER = gcc QMAKE_EXT_OBJ = .o QMAKE_EXT_RES = _res.o QMAKE_LEX = flex QMAKE_LEXFLAGS = QMAKE_YACC = byacc QMAKE_YACCFLAGS = -d QMAKE_CFLAGS = -DHAVE_MINGW -DHAVE_WIN32 -DHAVE_MINGW_W64 
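# Note: HAVE_MINGW, HAVE_WIN32 and HAVE_MINGW_W64 above are Bacula platform
# defines consumed by its Win32 compatibility sources, not qmake built-ins.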
QMAKE_CFLAGS_DEPS = -M QMAKE_CFLAGS_WARN_ON = -Wall QMAKE_CFLAGS_WARN_OFF = -w QMAKE_CFLAGS_RELEASE = -O2 QMAKE_CFLAGS_DEBUG = -g -O2 QMAKE_CFLAGS_YACC = -Wno-unused -Wno-parentheses QMAKE_CXXFLAGS = $$QMAKE_CFLAGS -DHAVE_MINGW -DHAVE_WIN32 -DHAVE_ZLIB_H -DHAVE_LIBZ -DHAVE_CRYPTO -DHAVE_OPENSSL -DHAVE_TLS -DHAVE_MINGW_W64 QMAKE_CXXFLAGS_DEPS = $$QMAKE_CFLAGS_DEPS QMAKE_CXXFLAGS_WARN_ON = $$QMAKE_CFLAGS_WARN_ON QMAKE_CXXFLAGS_WARN_OFF = $$QMAKE_CFLAGS_WARN_OFF QMAKE_CXXFLAGS_RELEASE = $$QMAKE_CFLAGS_RELEASE QMAKE_CXXFLAGS_DEBUG = $$QMAKE_CFLAGS_DEBUG QMAKE_CXXFLAGS_YACC = $$QMAKE_CFLAGS_YACC QMAKE_CXXFLAGS_THREAD = $$QMAKE_CFLAGS_THREAD QMAKE_CXXFLAGS_RTTI_ON = -frtti QMAKE_CXXFLAGS_RTTI_OFF = -fno-rtti QMAKE_CXXFLAGS_EXCEPTIONS_ON = -fexceptions -mthreads QMAKE_CXXFLAGS_EXCEPTIONS_OFF = -fno-exceptions QMAKE_INCDIR_QT = $(DEPKGS)/depkgs-mingw32/include/qt/QtGui $(DEPKGS)/depkgs-mingw32/include/qt/QtCore $(DEPKGS)/depkgs-mingw32/include/qt/QtWidgets QMAKE_LIBDIR_QT = $(DEPKGS)/depkgs-mingw32/lib/qt QMAKE_RUN_CC = $(CXX) -c $(CFLAGS) $(INCPATH) -o $obj $src QMAKE_RUN_CC_IMP = $(CXX) -c $(CFLAGS) $(INCPATH) -o $@ $< QMAKE_RUN_CXX = $(CXX) -c $(CXXFLAGS) $(INCPATH) -o $obj $src QMAKE_RUN_CXX_IMP = $(CXX) -c $(CXXFLAGS) $(INCPATH) -o $@ $< QMAKE_LINK = i686-w64-mingw32-g++ QMAKE_LFLAGS = -mthreads -Wl,-enable-stdcall-fixup -Wl,-enable-auto-import -m32 -fno-strict-aliasing -Wl,-enable-runtime-pseudo-reloc QMAKE_LFLAGS_EXCEPTIONS_ON = -mthreads -Wl QMAKE_LFLAGS_EXCEPTIONS_OFF = QMAKE_LFLAGS_RELEASE = -Wl,-s QMAKE_LFLAGS_DEBUG = QMAKE_LFLAGS_CONSOLE = -Wl,-subsystem,console QMAKE_LFLAGS_WINDOWS = -Wl,-subsystem,windows QMAKE_LFLAGS_DLL = -shared QMAKE_LINK_OBJECT_MAX = 10 QMAKE_LINK_OBJECT_SCRIPT= object_script QMAKE_LIBS = -lwsock32 -lstdc++ QMAKE_LIBS_CORE = -lkernel32 -luser32 -lshell32 -luuid -lole32 -ladvapi32 -lws2_32 QMAKE_LIBS_GUI = -lgdi32 -lcomdlg32 -loleaut32 -limm32 -lwinmm -lwinspool -lws2_32 -lole32 -luuid -luser32 -ladvapi32 QMAKE_LIBS_NETWORK = -lws2_32 QMAKE_LIBS_OPENGL = -lopengl32 -lglu32 -lgdi32 -luser32 QMAKE_LIBS_COMPAT = -ladvapi32 -lshell32 -lcomdlg32 -luser32 -lgdi32 -lws2_32 QMAKE_LIBS_QT_ENTRY = -lmingw32 -lqtmain MINGW_IN_SHELL = 1 QMAKE_DIR_SEP = / QMAKE_COPY = cp QMAKE_COPY_DIR = cp -r QMAKE_MOVE = mv QMAKE_DEL_FILE = rm -f QMAKE_MKDIR = mkdir -p QMAKE_DEL_DIR = rm -rf QMAKE_RCC = rcc QMAKE_CHK_DIR_EXISTS = test -d QMAKE_MOC = $$[QT_INSTALL_BINS]$${DIR_SEPARATOR}moc QMAKE_UIC = $$[QT_INSTALL_BINS]$${DIR_SEPARATOR}uic QMAKE_IDC = $$[QT_INSTALL_BINS]$${DIR_SEPARATOR}idc QMAKE_IDL = midl QMAKE_ZIP = zip -r -9 QMAKE_STRIP = i686-w64-mingw32-strip QMAKE_STRIPFLAGS_LIB += --strip-unneeded load(qt_config) bacula-15.0.3/src/qt-console/win32/qplatformdefs.h0000644000175000017500000001202214771010173021514 0ustar bsbuildbsbuild/**************************************************************************** ** ** Copyright (C) 1992-2007 Trolltech ASA. All rights reserved. ** ** This file is part of the qmake spec of the Qt Toolkit. ** ** This file may be used under the terms of the GNU General Public ** License version 2.0 as published by the Free Software Foundation ** and appearing in the file LICENSE.GPL included in the packaging of ** this file. 
Please review the following information to ensure GNU ** General Public Licensing requirements will be met: ** http://trolltech.com/products/qt/licenses/licensing/opensource/ ** ** If you are unsure which license is appropriate for your use, please ** review the following information: ** http://trolltech.com/products/qt/licenses/licensing/licensingoverview ** or contact the sales department at sales@trolltech.com. ** ** In addition, as a special exception, Trolltech gives you certain ** additional rights. These rights are described in the Trolltech GPL ** Exception version 1.0, which can be found at ** http://www.trolltech.com/products/qt/gplexception/ and in the file ** GPL_EXCEPTION.txt in this package. ** ** In addition, as a special exception, Trolltech, as the sole copyright ** holder for Qt Designer, grants users of the Qt/Eclipse Integration ** plug-in the right for the Qt/Eclipse Integration to link to ** functionality provided by Qt Designer and its related libraries. ** ** Trolltech reserves all rights not expressly granted herein. ** ** Trolltech ASA (c) 2007 ** ** This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE ** WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. ** ****************************************************************************/ #ifndef QPLATFORMDEFS_H #define QPLATFORMDEFS_H #ifdef UNICODE #ifndef _UNICODE #define _UNICODE #endif #endif // Get Qt defines/settings #include "qglobal.h" #include "winhdrs.h" #include #include #include #include #include #include #include #include #include #if !defined(_WIN32_WINNT) || (_WIN32_WINNT-0 < 0x0500) typedef enum { NameUnknown = 0, NameFullyQualifiedDN = 1, NameSamCompatible = 2, NameDisplay = 3, NameUniqueId = 6, NameCanonical = 7, NameUserPrincipal = 8, NameCanonicalEx = 9, NameServicePrincipal = 10, NameDnsDomain = 12 } EXTENDED_NAME_FORMAT, *PEXTENDED_NAME_FORMAT; #endif #define Q_FS_FAT #ifdef QT_LARGEFILE_SUPPORT #define QT_STATBUF struct _stati64 // non-ANSI defs #define QT_STATBUF4TSTAT struct _stati64 // non-ANSI defs #define QT_STAT ::_stati64 #define QT_FSTAT ::_fstati64 #else #define QT_STATBUF struct _stat // non-ANSI defs #define QT_STATBUF4TSTAT struct _stat // non-ANSI defs #define QT_STAT ::_stat #define QT_FSTAT ::_fstat #endif #define QT_STAT_REG _S_IFREG #define QT_STAT_DIR _S_IFDIR #define QT_STAT_MASK _S_IFMT #if defined(_S_IFLNK) # define QT_STAT_LNK _S_IFLNK #endif #define QT_FILENO _fileno #define QT_OPEN ::_open #define QT_CLOSE ::_close #ifdef QT_LARGEFILE_SUPPORT #define QT_LSEEK ::_lseeki64 #ifndef UNICODE #define QT_TSTAT ::_stati64 #else #define QT_TSTAT ::_wstati64 #endif #else #define QT_LSEEK ::_lseek #ifndef UNICODE #define QT_TSTAT ::_stat #else #define QT_TSTAT ::_wstat #endif #endif #define QT_READ ::_read #define QT_WRITE ::_write #define QT_ACCESS ::_access #define QT_GETCWD ::_getcwd #define QT_CHDIR ::_chdir #define QT_MKDIR ::_mkdir #define QT_RMDIR ::_rmdir #define QT_OPEN_LARGEFILE 0 #define QT_OPEN_RDONLY _O_RDONLY #define QT_OPEN_WRONLY _O_WRONLY #define QT_OPEN_RDWR _O_RDWR #define QT_OPEN_CREAT _O_CREAT #define QT_OPEN_TRUNC _O_TRUNC #define QT_OPEN_APPEND _O_APPEND #if defined(O_TEXT) # define QT_OPEN_TEXT _O_TEXT # define QT_OPEN_BINARY _O_BINARY #endif #define QT_FOPEN ::fopen #ifdef QT_LARGEFILE_SUPPORT #define QT_FSEEK ::fseeko64 #define QT_FTELL ::ftello64 #else #define QT_FSEEK ::fseek #define QT_FTELL ::ftell #endif #define QT_FGETPOS ::fgetpos #define QT_FSETPOS ::fsetpos #define QT_FPOS_T fpos_t #ifdef 
QT_LARGEFILE_SUPPORT #define QT_OFF_T off64_t #else #define QT_OFF_T long #endif #define QT_SIGNAL_ARGS int #define QT_VSNPRINTF ::_vsnprintf #define QT_SNPRINTF ::_snprintf # define F_OK 0 # define X_OK 1 # define W_OK 2 # define R_OK 4 #endif // QPLATFORMDEFS_H bacula-15.0.3/src/qt-console/README0000644000175000017500000001076614771010173016427 0ustar bsbuildbsbuild This directory contains the Bacula Admin Tool (bat). At the current time, the contents of this directory are under development. If you want to help, please contact Kern directly. If you want to build it, you need Qt4 loaded and setup as your default Qt or with the appropriate Qt Environment variables set. 6/24/07 There is now one dependency, it is qwt. It compiles just fine with either qwt-5.0.2 or qwt-5.0.1. You can either install the qwt package yourself or if your distro does not have it, we have included the source in depkgs-qt, which you can download from the Bacula Source Forge download area. Building and running bat is done much like bconsole, the gnome console, or the wxWidgets console. You add the appropriate options to your ./configure, then simply do a make. Please see the Installation chapter of the manual for more details. Win32 mingw infos for QT4 : - http://silmor.de/29 - http://doc.qtfr.org/post/2007/04/10/Cross-Compilation-Native-dapplication-Qt-depuis-Linux Development status as of 05/06/07 Items not implemented: - Nothing on the brestore page Translations: - All translatable strings should be written as tr("string") ... - To extract the strings for translation run: lupdate bat.pro - To translate the strings, do: linguist ts/bat_xx.ts where xx is the country code (e.g. fr or de) - To "compile" the translated strings do: lrelease bat.pro The necessary binary files will be in ts/bat_xx.qm As far as I can tell, these files must be on your path or in the same directory as bat for them to be used, otherwise it reverts to English. Selecting the translation is based on how your system is setup or the LANG environment variable. Design decisions: - If possible all code for a particular component will be kept in an appropriate subdirectory. - All private class variables are named "m_xxx" this makes it very clear if one is referencing a class variable or a local. - All signal/slots are connected by explicit code (most all are done in the MainWin constructor), rather than using designer. - Each page has a separate designer .ui file in a subdirectory. - All windows are created with designer and have a name such as xxxForm i.e. the main window is MainForm and kept in main.ui. Major projects: - Implement other restore interfaces such as brestore ... - Implement a database browser - Implement a resource (conf file) browser - Implement a reports page -- e.g. something similar to bweb - Implement Qt plugins to add new functionality to bat - Implement a GUI configuration file editor (something like JBacula). ... Partially Done: =========================== - Implement graphical commands that allow updating most aspects of the database (i.e. commands for label, update Volume, ...) still need to be able to edit a pool object - None of the menu items except About, Select Font, and Quit. Print and save don't do anything, does save need to?? Done: ============================ Design/implementation considerations: - Need icons in front of the Director. - The console page should be in a DockWidget so it can be removed from the main window. It is currently in a dock window, but it does not remove properly -- more research needed. 
- Need to figure out a good implementation of adding pages and even having plugins that load as pages. Currently the page mechanism is a bit kludged. - We need to have multiple Directors - Each Director should have its own console - The Console class needs to be a list or be attached to the currently active Director. - Will automatically connect to the first Director in the conf file. Doesn't know about multiple Directors. - The Label menu bar item, prints on the shell window what you entered. - The Run menu bar item, prints on the console window what you entered. - The Restore menu bar item, brings up dialog, then when OK is clicked, it goes on to the next dialog, which is meant to be a tree view, but for the moment does nothing ... It is a bit ugly. Canceling it should get you back to the normal command prompt. - Implement a restore page that does a directory tree restore selection much like wx-console does. Not working: - The left selection window and the right window (where the console is) are dockable windows so should be movable once they are properly clicked. Well, they sort of move, but then get stuck. I haven't figured out what is going on, so for the current time, I am implementing most stuff through dialogs. Items implemented: See RELEASEFEATURES bacula-15.0.3/src/qt-console/RELEASEFEATURES0000644000175000017500000000645414771010173017710 0ustar bsbuildbsbuildOriginal release, August 2007. Graphical features: Undockable (floating) windows of all interfaces. Page selector for deciding and finding interfaces to view Console command window for entering standard text console commands. A preferences interface for listing limits and debugging. Graphical console A console to perform all the commands available from bconsole. Graphical restoring High performance restore of backup with GUI tree browsing and file and directory selecting. Pre-restore Interface to assist in selecting jobs for this restore method. Restore by browsing and selecting from all cataloged versions of files. This method allows for Multiple simultaneous views of catalog data. The only selection limitation for this browsing method is one client at a time. Graphical listing Interfaces: List the backup jobs that have run. ** see details below List the clients and perform 4 commands on the client within context. List the Filesets List the Job resources and perform 8 commands on the job within context. List the Pools and the volumes in each pool. Pefrom 7 possible commands on each media. List the storage resources and perform any of 5 commands on the storage resource. Graphical Media Management Modify Volume parameters Label a new volume Relabel an existing volume Select jobs on a volume in Job List directly from media interface Delete a Volume Purge Jobs on volume Update Volume Parameters from Pool Parameters Graphical Graphing Interface to plot the files and bytes of backup jobs. Other Graphical interfaces: Open the cataloged Log messages of a job that has run Run a job manually and modify job defaults before running. Estimate a job. Determine the files and bytes that would be backed up if the job were run now. ** JobList features As many joblist windows at a time as desired. 9 different selection criterion: client, volume, job, Fileset, level, status, purged or not, record limit and days limit Lists the 11 most commonly desired job attributes. 
Run the following console commands by issuing the command within the context of the know job number: List job, list files on job, list jobmedia, list volumes, delete job and purge files. Open other interfaces knowing the jobid: show the cataloged log of the running of the job, restore from that job only browsing the filestructure, restore from the job end time to get the most recent version of a restore after the job completed running and browse the combined filestucture, jump directly to view a plot of the selected jobs files and bytes backed up. There is even a button to perform status dir at any time. What more can you ask for. ;-) An are you sure dialog box will even attempt to prevent the user from deleting important stuff. Almost all console commands can be run graphically without touching a keyboard. Help me out by letting me know what still cannot be done. I know of the following: show, run a migration job, and editing resources in configuration files. The last is not a console command, so it does not count. See the TODO file for future plans. The bacula develompment team is interested in your feedback and your assistance. If you would like to get involved, join the users and/or the developers mailing list. bacula-15.0.3/src/qt-console/testprogs/0000755000175000017500000000000014771010173017567 5ustar bsbuildbsbuildbacula-15.0.3/src/qt-console/testprogs/examp/0000755000175000017500000000000014771010173020701 5ustar bsbuildbsbuildbacula-15.0.3/src/qt-console/testprogs/examp/dock.pro0000644000175000017500000000034714771010173022347 0ustar bsbuildbsbuildCONFIG += qt debug TEMPLATE = app TARGET = main DEPENDPATH += . INCLUDEPATH += .. MOC_DIR = moc OBJECTS_DIR = obj UI_DIR = ui # Main window #RESOURCES = dockwidgets.qrc HEADERS += mainwindow.h SOURCES += mainwindow.cpp main.cpp bacula-15.0.3/src/qt-console/testprogs/examp/main.cpp0000644000175000017500000000035114771010173022330 0ustar bsbuildbsbuild#include #include "mainwindow.h" int main(int argc, char *argv[]) { QApplication app(argc, argv); // Q_INIT_RESOURCE(dockwidgets); MainWindow mainWin; mainWin.show(); return app.exec(); } bacula-15.0.3/src/qt-console/testprogs/examp/mainwindow.cpp0000644000175000017500000002602714771010173023570 0ustar bsbuildbsbuild/**************************************************************************** * ** * ** Copyright (C) 2005-2007 Trolltech ASA. All rights reserved. * ** * ** This file is part of the example classes of the Qt Toolkit. * ** * ** This file may be used under the terms of the GNU General Public * ** License version 2.0 as published by the Free Software Foundation * ** and appearing in the file LICENSE.GPL included in the packaging of * ** this file. Please review the following information to ensure GNU * ** General Public Licensing requirements will be met: * ** http://www.trolltech.com/products/qt/opensource.html * ** * ** If you are unsure which license is appropriate for your use, please * ** review the following information: * ** http://www.trolltech.com/products/qt/licensing.html or contact the * ** sales department at sales@trolltech.com. * ** * ** This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE * ** WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
* ** * ****************************************************************************/ #include #include "mainwindow.h" MainWindow::MainWindow() { textEdit = new QTextEdit; setCentralWidget(textEdit); createActions(); createMenus(); createToolBars(); createStatusBar(); createDockWindows(); setWindowTitle(tr("Dock Widgets")); newLetter(); } void MainWindow::newLetter() { textEdit->clear(); QTextCursor cursor(textEdit->textCursor()); cursor.movePosition(QTextCursor::Start); QTextFrame *topFrame = cursor.currentFrame(); QTextFrameFormat topFrameFormat = topFrame->frameFormat(); topFrameFormat.setPadding(16); topFrame->setFrameFormat(topFrameFormat); QTextCharFormat textFormat; QTextCharFormat boldFormat; boldFormat.setFontWeight(QFont::Bold); QTextCharFormat italicFormat; italicFormat.setFontItalic(true); QTextTableFormat tableFormat; tableFormat.setBorder(1); tableFormat.setCellPadding(16); tableFormat.setAlignment(Qt::AlignRight); cursor.insertTable(1, 1, tableFormat); cursor.insertText("The Firm", boldFormat); cursor.insertBlock(); cursor.insertText("321 City Street", textFormat); cursor.insertBlock(); cursor.insertText("Industry Park"); cursor.insertBlock(); cursor.insertText("Some Country"); cursor.setPosition(topFrame->lastPosition()); cursor.insertText(QDate::currentDate().toString("d MMMM yyyy"), textFormat); cursor.insertBlock(); cursor.insertBlock(); cursor.insertText("Dear ", textFormat); cursor.insertText("NAME", italicFormat); cursor.insertText(",", textFormat); for (int i = 0; i < 3; ++i) cursor.insertBlock(); cursor.insertText(tr("Yours sincerely,"), textFormat); for (int i = 0; i < 3; ++i) cursor.insertBlock(); cursor.insertText("The Boss", textFormat); cursor.insertBlock(); cursor.insertText("ADDRESS", italicFormat); } void MainWindow::print() { QTextDocument *document = textEdit->document(); QPrinter printer; QPrintDialog *dlg = new QPrintDialog(&printer, this); if (dlg->exec() != QDialog::Accepted) return; document->print(&printer); statusBar()->showMessage(tr("Ready"), 2000); } void MainWindow::save() { QString fileName = QFileDialog::getSaveFileName(this, tr("Choose a file name"), ".", tr("HTML (*.html *.htm)")); if (fileName.isEmpty()) return; QFile file(fileName); if (!file.open(QFile::WriteOnly | QFile::Text)) { QMessageBox::warning(this, tr("Dock Widgets"), tr("Cannot write file %1:\n%2.") .arg(fileName) .arg(file.errorString())); return; } QTextStream out(&file); QApplication::setOverrideCursor(Qt::WaitCursor); out << textEdit->toHtml(); QApplication::restoreOverrideCursor(); statusBar()->showMessage(tr("Saved '%1'").arg(fileName), 2000); } void MainWindow::undo() { QTextDocument *document = textEdit->document(); document->undo(); } void MainWindow::insertCustomer(const QString &customer) { if (customer.isEmpty()) return; QStringList customerList = customer.split(", "); QTextDocument *document = textEdit->document(); QTextCursor cursor = document->find("NAME"); if (!cursor.isNull()) { cursor.beginEditBlock(); cursor.insertText(customerList.at(0)); QTextCursor oldcursor = cursor; cursor = document->find("ADDRESS"); if (!cursor.isNull()) { for (int i = 1; i < customerList.size(); ++i) { cursor.insertBlock(); cursor.insertText(customerList.at(i)); } cursor.endEditBlock(); } else oldcursor.endEditBlock(); } } void MainWindow::addParagraph(const QString ¶graph) { if (paragraph.isEmpty()) return; QTextDocument *document = textEdit->document(); QTextCursor cursor = document->find(tr("Yours sincerely,")); if (cursor.isNull()) return; cursor.beginEditBlock(); 
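    // The edit block opened above groups these insertions into a single undo
    // step; back the cursor up two blocks from the "Yours sincerely," match,
    // then insert the new paragraph wrapped in empty blocks.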
cursor.movePosition(QTextCursor::PreviousBlock, QTextCursor::MoveAnchor, 2); cursor.insertBlock(); cursor.insertText(paragraph); cursor.insertBlock(); cursor.endEditBlock(); } void MainWindow::about() { QMessageBox::about(this, tr("About Dock Widgets"), tr("The Dock Widgets example demonstrates how to " "use Qt's dock widgets. You can enter your own text, " "click a customer to add a customer name and " "address, and click standard paragraphs to add them.")); } void MainWindow::createActions() { newLetterAct = new QAction(QIcon(":/images/new.png"), tr("&New Letter"), this); newLetterAct->setShortcut(tr("Ctrl+N")); newLetterAct->setStatusTip(tr("Create a new form letter")); connect(newLetterAct, SIGNAL(triggered()), this, SLOT(newLetter())); saveAct = new QAction(QIcon(":/images/save.png"), tr("&Save..."), this); saveAct->setShortcut(tr("Ctrl+S")); saveAct->setStatusTip(tr("Save the current form letter")); connect(saveAct, SIGNAL(triggered()), this, SLOT(save())); printAct = new QAction(QIcon(":/images/print.png"), tr("&Print..."), this); printAct->setShortcut(tr("Ctrl+P")); printAct->setStatusTip(tr("Print the current form letter")); connect(printAct, SIGNAL(triggered()), this, SLOT(print())); undoAct = new QAction(QIcon(":/images/undo.png"), tr("&Undo"), this); undoAct->setShortcut(tr("Ctrl+Z")); undoAct->setStatusTip(tr("Undo the last editing action")); connect(undoAct, SIGNAL(triggered()), this, SLOT(undo())); quitAct = new QAction(tr("&Quit"), this); quitAct->setShortcut(tr("Ctrl+Q")); quitAct->setStatusTip(tr("Quit the application")); connect(quitAct, SIGNAL(triggered()), this, SLOT(close())); aboutAct = new QAction(tr("&About"), this); aboutAct->setStatusTip(tr("Show the application's About box")); connect(aboutAct, SIGNAL(triggered()), this, SLOT(about())); aboutQtAct = new QAction(tr("About &Qt"), this); aboutQtAct->setStatusTip(tr("Show the Qt library's About box")); connect(aboutQtAct, SIGNAL(triggered()), qApp, SLOT(aboutQt())); } void MainWindow::createMenus() { fileMenu = menuBar()->addMenu(tr("&File")); fileMenu->addAction(newLetterAct); fileMenu->addAction(saveAct); fileMenu->addAction(printAct); fileMenu->addSeparator(); fileMenu->addAction(quitAct); editMenu = menuBar()->addMenu(tr("&Edit")); editMenu->addAction(undoAct); viewMenu = menuBar()->addMenu(tr("&View")); menuBar()->addSeparator(); helpMenu = menuBar()->addMenu(tr("&Help")); helpMenu->addAction(aboutAct); helpMenu->addAction(aboutQtAct); } void MainWindow::createToolBars() { fileToolBar = addToolBar(tr("File")); fileToolBar->addAction(newLetterAct); fileToolBar->addAction(saveAct); fileToolBar->addAction(printAct); editToolBar = addToolBar(tr("Edit")); editToolBar->addAction(undoAct); } void MainWindow::createStatusBar() { statusBar()->showMessage(tr("Ready")); } void MainWindow::createDockWindows() { QDockWidget *dock = new QDockWidget(tr("Customers"), this); dock->setAllowedAreas(Qt::LeftDockWidgetArea | Qt::RightDockWidgetArea); customerList = new QListWidget(dock); customerList->addItems(QStringList() << "John Doe, Harmony Enterprises, 12 Lakeside, Ambleton" << "Jane Doe, Memorabilia, 23 Watersedge, Beaton" << "Tammy Shea, Tiblanka, 38 Sea Views, Carlton" << "Tim Sheen, Caraba Gifts, 48 Ocean Way, Deal" << "Sol Harvey, Chicos Coffee, 53 New Springs, Eccleston" << "Sally Hobart, Tiroli Tea, 67 Long River, Fedula"); dock->setWidget(customerList); addDockWidget(Qt::RightDockWidgetArea, dock); viewMenu->addAction(dock->toggleViewAction()); dock = new QDockWidget(tr("Paragraphs"), this); paragraphsList = new 
QListWidget(dock); paragraphsList->addItems(QStringList() << "Thank you for your payment which we have received today." << "Your order has been dispatched and should be with you " "within 28 days." << "We have dispatched those items that were in stock. The " "rest of your order will be dispatched once all the " "remaining items have arrived at our warehouse. No " "additional shipping charges will be made." << "You made a small overpayment (less than $5) which we " "will keep on account for you, or return at your request." << "You made a small underpayment (less than $1), but we have " "sent your order anyway. We'll add this underpayment to " "your next bill." << "Unfortunately you did not send enough money. Please remit " "an additional $. Your order will be dispatched as soon as " "the complete amount has been received." << "You made an overpayment (more than $5). Do you wish to " "buy more items, or should we return the excess to you?"); dock->setWidget(paragraphsList); addDockWidget(Qt::RightDockWidgetArea, dock); viewMenu->addAction(dock->toggleViewAction()); connect(customerList, SIGNAL(currentTextChanged(const QString &)), this, SLOT(insertCustomer(const QString &))); connect(paragraphsList, SIGNAL(currentTextChanged(const QString &)), this, SLOT(addParagraph(const QString &))); } bacula-15.0.3/src/qt-console/testprogs/examp/dockwidgets.qrc0000644000175000017500000000031514771010173023716 0ustar bsbuildbsbuild images/new.png images/print.png images/save.png images/undo.png bacula-15.0.3/src/qt-console/testprogs/examp/mainwindow.h0000644000175000017500000000452314771010173023232 0ustar bsbuildbsbuild /**************************************************************************** * ** * ** Copyright (C) 2005-2007 Trolltech ASA. All rights reserved. * ** * ** This file is part of the example classes of the Qt Toolkit. * ** * ** This file may be used under the terms of the GNU General Public * ** License version 2.0 as published by the Free Software Foundation * ** and appearing in the file LICENSE.GPL included in the packaging of * ** this file. Please review the following information to ensure GNU * ** General Public Licensing requirements will be met: * ** http://www.trolltech.com/products/qt/opensource.html * ** * ** If you are unsure which license is appropriate for your use, please * ** review the following information: * ** http://www.trolltech.com/products/qt/licensing.html or contact the * ** sales department at sales@trolltech.com. * ** * ** This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE * ** WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
* ** * ****************************************************************************/ #ifndef MAINWINDOW_H #define MAINWINDOW_H #include class QAction; class QListWidget; class QMenu; class QTextEdit; class MainWindow : public QMainWindow { Q_OBJECT public: MainWindow(); private slots: void newLetter(); void save(); void print(); void undo(); void about(); void insertCustomer(const QString &customer); void addParagraph(const QString ¶graph); private: void createActions(); void createMenus(); void createToolBars(); void createStatusBar(); void createDockWindows(); QTextEdit *textEdit; QListWidget *customerList; QListWidget *paragraphsList; QMenu *fileMenu; QMenu *editMenu; QMenu *viewMenu; QMenu *helpMenu; QToolBar *fileToolBar; QToolBar *editToolBar; QAction *newLetterAct; QAction *saveAct; QAction *printAct; QAction *undoAct; QAction *aboutAct; QAction *aboutQtAct; QAction *quitAct; }; #endif bacula-15.0.3/src/qt-console/testprogs/putz/0000755000175000017500000000000014771010173020571 5ustar bsbuildbsbuildbacula-15.0.3/src/qt-console/testprogs/putz/main.cpp0000644000175000017500000000036414771010173022224 0ustar bsbuildbsbuild#include #include "putz.h" Putz *putz; QApplication *app; int main(int argc, char *argv[]) { app = new QApplication(argc, argv); app->setQuitOnLastWindowClosed(true); putz = new Putz; putz->show(); return app->exec(); } bacula-15.0.3/src/qt-console/testprogs/putz/putz.pro0000644000175000017500000000031714771010173022316 0ustar bsbuildbsbuildCONFIG += qt debug TEMPLATE = app TARGET = putz DEPENDPATH += . INCLUDEPATH += .. MOC_DIR = moc OBJECTS_DIR = obj UI_DIR = ui # Main window FORMS += putz.ui HEADERS += putz.h SOURCES += putz.cpp main.cpp bacula-15.0.3/src/qt-console/testprogs/putz/putz.ui0000644000175000017500000000407714771010173022142 0ustar bsbuildbsbuild MainWindow 0 0 557 534 MainWindow true 0 0 557 28 7 7 12 7 Whoochie 1 9 6 coochie 2 9 6 bacula-15.0.3/src/qt-console/testprogs/putz/putz.cpp0000644000175000017500000000023514771010173022277 0ustar bsbuildbsbuild #include #include #include "putz.h" Putz::Putz() { printf("got to Putz Constructor\n"); setupUi(this); } bacula-15.0.3/src/qt-console/testprogs/putz/putz.h0000644000175000017500000000042414771010173021744 0ustar bsbuildbsbuild#ifndef _PUTZ_H_ #define _PUTZ_H_ #if QT_VERSION >= 0x050000 #include #else #include #endif #include "ui_putz.h" class Putz : public QMainWindow, public Ui::MainWindow { Q_OBJECT public: Putz(); public slots: private: }; #endif /* _PUTZ_H_ */ bacula-15.0.3/src/qt-console/bat.pro.android0000644000175000017500000001076514771010173020455 0ustar bsbuildbsbuild###################################################################### # # !!!!!!! IMPORTANT !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # # Edit only bat.pro.in -- bat.pro is built by the ./configure program # # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # CONFIG += qt debug greaterThan(QT_MAJOR_VERSION, 4): QT += widgets bins.path = /$(DESTDIR)/opt/b810/bin bins.files = bat confs.path = /$(DESTDIR)/opt/b810/etc confs.commands = ./install_conf_file help.path = /$(DESTDIR)/opt/b810/doc help.files = help/*.html images/status.png images/mail-message-new.png TEMPLATE = app TARGET = bat DEPENDPATH += . INCLUDEPATH += .. . 
./console ./restore ./select LIBS += -L../lib -lbaccfg -lbac -L../findlib -lbacfind -lssl -lcrypto LIBTOOL_LINK = /home/bdev/b810/libtool --silent --tag=CXX --mode=link LIBTOOL_INSTALL = /home/bdev/b810/libtool --silent --mode=install QMAKE_LINK = $${LIBTOOL_LINK} $(CXX) QMAKE_INSTALL_PROGRAM = $${LIBTOOL_INSTALL} install -m 0750 -p QMAKE_CLEAN += .libs/* bat qwt { INCLUDEPATH += LIBS += } macx { ICON = images/bat_icon.icns } RESOURCES = main.qrc MOC_DIR = moc OBJECTS_DIR = obj UI_DIR = ui # Main window FORMS += main.ui FORMS += prefs.ui FORMS += label/label.ui FORMS += relabel/relabel.ui FORMS += mount/mount.ui FORMS += console/console.ui FORMS += restore/restore.ui restore/prerestore.ui restore/brestore.ui FORMS += restore/runrestore.ui FORMS += restore/restoretree.ui FORMS += run/run.ui run/runcmd.ui run/estimate.ui run/prune.ui FORMS += select/select.ui select/textinput.ui FORMS += medialist/medialist.ui mediaedit/mediaedit.ui joblist/joblist.ui FORMS += medialist/mediaview.ui FORMS += clients/clients.ui storage/storage.ui fileset/fileset.ui FORMS += joblog/joblog.ui jobs/jobs.ui job/job.ui FORMS += help/help.ui mediainfo/mediainfo.ui # FORMS += status/dirstat.ui storage/content.ui #FORMS += status/clientstat.ui # FORMS += status/storstat.ui # qwt { # FORMS += jobgraphs/jobplotcontrols.ui # } # Main directory HEADERS += mainwin.h bat.h bat_conf.h qstd.h pages.h SOURCES += main.cpp bat_conf.cpp mainwin.cpp qstd.cpp pages.cpp # bcomm HEADERS += bcomm/dircomm.h SOURCES += bcomm/dircomm.cpp bcomm/dircomm_auth.cpp # Console HEADERS += console/console.h SOURCES += console/console.cpp # Restore # HEADERS += restore/restore.h # SOURCES += restore/prerestore.cpp restore/restore.cpp restore/brestore.cpp # Label dialog # HEADERS += label/label.h # SOURCES += label/label.cpp # # # Relabel dialog # HEADERS += relabel/relabel.h # SOURCES += relabel/relabel.cpp # # # Mount dialog # HEADERS += mount/mount.h # SOURCES += mount/mount.cpp # Run dialog # HEADERS += run/run.h # SOURCES += run/run.cpp run/runcmd.cpp run/estimate.cpp run/prune.cpp # Select dialog HEADERS += select/select.h select/textinput.h SOURCES += select/select.cpp select/textinput.cpp ## MediaList # HEADERS += medialist/medialist.h # SOURCES += medialist/medialist.cpp # # # MediaView # HEADERS += medialist/mediaview.h # SOURCES += medialist/mediaview.cpp # # ## MediaEdit # HEADERS += mediaedit/mediaedit.h # SOURCES += mediaedit/mediaedit.cpp ## JobList # HEADERS += joblist/joblist.h # SOURCES += joblist/joblist.cpp ## Clients #HEADERS += clients/clients.h #SOURCES += clients/clients.cpp ## Storage #HEADERS += storage/storage.h #SOURCES += storage/storage.cpp ## Storage content # HEADERS += storage/content.h #SOURCES += storage/content.cpp ## Fileset # HEADERS += fileset/fileset.h # SOURCES += fileset/fileset.cpp # # ## Job log # HEADERS += joblog/joblog.h # SOURCES += joblog/joblog.cpp # # ## Job # HEADERS += job/job.h # SOURCES += job/job.cpp # # ## Jobs # HEADERS += jobs/jobs.h # SOURCES += jobs/jobs.cpp # # ## RestoreTree # HEADERS += restore/restoretree.h # SOURCES += restore/restoretree.cpp # # ## Job Step Graphs # qwt { # HEADERS += jobgraphs/jobplot.h # SOURCES += jobgraphs/jobplot.cpp # } # Help dialog #HEADERS += help/help.h #SOURCES += help/help.cpp # Media info dialog # HEADERS += mediainfo/mediainfo.h # SOURCES += mediainfo/mediainfo.cpp ## Status Dir # HEADERS += status/dirstat.h # SOURCES += status/dirstat.cpp ## Status Client # HEADERS += status/clientstat.h # SOURCES += status/clientstat.cpp ## Status Client # HEADERS 
+= status/storstat.h # SOURCES += status/storstat.cpp # Utility sources HEADERS += util/fmtwidgetitem.h util/comboutil.h SOURCES += util/fmtwidgetitem.cpp util/comboutil.cpp INSTALLS = bins confs help QMAKE_EXTRA_TARGETS += depend TRANSLATIONS += ts/bat_fr.ts ts/bat_de.ts bacula-15.0.3/src/qt-console/tray-monitor/0000755000175000017500000000000014771010173020201 5ustar bsbuildbsbuildbacula-15.0.3/src/qt-console/tray-monitor/FileDaemonPage.qml0000644000175000017500000001423514771010173023521 0ustar bsbuildbsbuildimport QtQuick 2.10 import QtQuick.Window 2.10 import QtQuick.Layouts 1.3 import QtQuick.Controls 2.3 import QtQuick.Controls.Material 2.1 import QtQuick.Dialogs 1.2 import io.qt.bmob.traycontroller 1.0 Page { visible: true width: parent.width height: parent.height // Our C++ component TrayUiController { id: controller // Events triggered by our c++ code onClientsChanged: { // } onDirectorsChanged: { // } onStoragesChanged: { // } onInfoDialogTextChanged: { infoDialog.text = controller.infoDialogText infoDialog.open() } onFdTraceChanged: { scrollableBox.text = controller.fetchFile(controller.fdTracePath); } Component.onCompleted: { scrollableBox.text = controller.fetchFile(controller.fdTracePath) scrollableBox.scrollToBottom() } } header: ToolBar { id: toolbar height: 48 background: Rectangle { color: "#d32f2f" } RowLayout { anchors.fill: parent anchors.leftMargin: 8 anchors.rightMargin: 8 ToolButton { id: backButton onClicked: stackView.pop() anchors.left: parent.left contentItem: Text { text: qsTr("<") font.pixelSize: 24 opacity: enabled ? 1.0 : 0.3 color: "white" } background: Rectangle { color: backButton.down ? "#b71c1c" : "#d32f2f" } } Label { id: titleLabel text: "File Daemon" color: "white" font.pixelSize: 18 anchors.centerIn: parent } } } Text { id: labelLogLevel text: "Log Level:" color: "black" anchors.left: parent.left anchors.leftMargin: 16 anchors.verticalCenter: fdLogLevel.verticalCenter } TextField { id: fdLogLevel height: 44 anchors.top: parent.top anchors.right: parent.right anchors.left: labelLogLevel.right placeholderText: qsTr("value") text: controller.fdLogLevel inputMethodHints: Qt.ImhDigitsOnly maximumLength: 3 onTextChanged: controller.fdLogLevel = text background: Rectangle { implicitHeight: 48 implicitWidth: 100 color: "transparent" border.color: "transparent" } } Button { id: reportButton text: "Report" anchors.verticalCenter: fdLogLevel.verticalCenter anchors.right: parent.right anchors.rightMargin: 12 onClicked: controller.composeTraceMail() contentItem: Text { text: reportButton.text font.pixelSize: 18 color: reportButton.down ? "#ef9a9a" : "#d32f2f" } background: Rectangle { color: "white" } } Rectangle { id: divider2 width: parent.width height: 1 color: "#d32f2f" anchors.bottom: fdLogLevel.bottom } Rectangle { id: divider width: parent.width height: 1 color: "#d32f2f" anchors.bottom: startButton.top } Button { id: startButton text: "Start" onClicked: controller.startFileDaemon() anchors.rightMargin: 12 anchors.right: parent.right anchors.bottom: parent.bottom contentItem: Text { text: startButton.text font.pixelSize: 18 color: startButton.down ? "#ef9a9a" : "#d32f2f" } background: Rectangle { color: "white" } } Button { id: stopButton text: "Stop" onClicked: controller.stopFileDaemon() anchors.rightMargin: 12 anchors.right: startButton.left anchors.bottom: parent.bottom contentItem: Text { text: stopButton.text font.pixelSize: 18 color: stopButton.down ? 
"#ef9a9a" : "#d32f2f" } background: Rectangle { color: "white" } } Button { id: configButton text: "Config" onClicked: stackView.push(Qt.resolvedUrl("FileDaemonConfigPage.qml")) anchors.rightMargin: 12 anchors.right: stopButton.left anchors.bottom: parent.bottom contentItem: Text { text: configButton.text font.pixelSize: 18 color: configButton.down ? "#ef9a9a" : "#d32f2f" } background: Rectangle { color: "white" } } Button { visible: IS_ENTERPRISE id: qrcodeButton text: "QR Code" onClicked: controller.startQRCodeReader() anchors.leftMargin: 12 anchors.left: parent.left anchors.bottom: parent.bottom contentItem: Text { text: qrcodeButton.text font.pixelSize: 18 color: qrcodeButton.down ? "#ef9a9a" : "#d32f2f" } background: Rectangle { color: "white" } } ScrollView { id: scrollableBox anchors.top: divider2.bottom anchors.bottom: divider.top width: parent.width contentHeight: content.height contentWidth: parent.width ScrollBar.horizontal.policy: ScrollBar.AlwaysOff ScrollBar.horizontal.interactive: false clip: true property alias text: content.text Text { id: content width: scrollableBox.width leftPadding: 12 rightPadding: 12 topPadding: 8 bottomPadding: 12 wrapMode: Text.WrapAnywhere } onVisibleChanged: { if (visible) { scrollableBox.text = controller.fetchFile(controller.fdTracePath); } } function scrollToBottom() { if (content.height > contentItem.height) { contentItem.contentY = content.height - contentItem.height } } } PulseLoader { useDouble: true visible: controller.isConnecting radius: 28 color: "#d32f2f" anchors.horizontalCenter: parent.horizontalCenter anchors.bottom: divider.top anchors.bottomMargin: 24 } MessageDialog { id: infoDialog modality: Qt.ApplicationModal standardButtons: StandardButton.Ok visible: false } } bacula-15.0.3/src/qt-console/tray-monitor/fdstatus.h0000644000175000017500000000206214771010173022207 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #include "common.h" #include "ui_fd-monitor.h" #include "task.h" #include "status.h" class FDStatus: public ResStatus { Q_OBJECT public: Ui::fdStatus status; FDStatus(RESMON *c): ResStatus(c) { status.setupUi(this); QObject::connect(status.pushButton, SIGNAL(clicked()), this, SLOT(doUpdate()), Qt::QueuedConnection); }; ~FDStatus() { }; public slots: void doUpdate(); void taskDone(task *); }; bacula-15.0.3/src/qt-console/tray-monitor/restoreoptionswizardpage.ui0000644000175000017500000000374714771010173025730 0ustar bsbuildbsbuild RestoreOptionsWizardPage 0 0 419 304 WizardPage Restore Client: Where : Replace : Never Always IfNewer IfOlder Comment : bacula-15.0.3/src/qt-console/tray-monitor/config-storage.cpp0000644000175000017500000002320514771010173023616 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. 
You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #include "config-storage.h" #include #include #include #include "resmodel.h" extern URES res_all; extern char *configfile; // defined in tray-monitor.cpp /* Check for \ at the end */ static char *is_str_valid(POOLMEM **buf, const char *p) { char *p1; if (!p || !*p) { return NULL; } p1 = *buf = check_pool_memory_size(*buf, (strlen(p) + 1)); for (; *p ; p++) { if (*p == '\\') { *p1++ = '/'; } else if (*p == '"') { return NULL; } else { *p1++ = *p; } } *p1 = 0; return *buf; } bool ConfigStorage::reloadResources() { reloadResources(false); } bool ConfigStorage::reloadResources(bool encodePwd) { bool ret; config = New(CONFIG()); config->encode_password(encodePwd); config->init(configfile, NULL, M_ERROR, (void *)&res_all, res_all_size, r_first, r_last, resources, &rhead); ret = config->parse_config(); return ret; } MONITOR *ConfigStorage::getMonitor() { MONITOR *mon = NULL; mon = (MONITOR *) GetNextRes(rhead, R_MONITOR, NULL); return mon; } const char *ConfigStorage::saveMonitor(const char *deviceId) { MONITOR mon; POOLMEM *monName = get_pool_memory(PM_FNAME); Mmsg(monName, "%s-mon", deviceId); mon.hdr.name = monName; const char *err_msg = saveMonitor(&mon); free_and_null_pool_memory(monName); return err_msg; } const char *ConfigStorage::saveMonitor(MONITOR *mon) { const char *error_msg = NULL; POOL_MEM tmp_fname; Mmsg(tmp_fname, "%s.temp", configfile); FILE *fp = fopen(tmp_fname.c_str(), "w"); if(!fp) { return "Error - unable to open configuration file"; } writeMonitor(fp, mon); QList *resources = getAllResources(); for(RESMON *res : *resources) { writeResource(fp, res, res->code); } fclose(fp); unlink(configfile); int result = rename(tmp_fname.c_str(), configfile); if (result != 0) { error_msg = "Error - unable to write to the configuration file"; } if(error_msg == NULL) { reloadResources(); dump_storage(); } return error_msg; } RESMON *ConfigStorage::getResourceByName(rescode resCode, const char* resName) { RESMON *res = NULL; QList *resources = getResources(resCode); for(RESMON *r : *resources) { if(strcmp(r->hdr.name, resName) == 0) { res = r; } } return res; } QList *ConfigStorage::getResources(rescode resCode) { QList *resources = new QList(); RESMON *res; for(res=NULL; (res=(RESMON *)GetNextRes(rhead, resCode, (RES*)res));) { res->code = resCode; resources->push_back(res); } return resources; } QList *ConfigStorage::getAllResources() { QList *resources = new QList(); RESMON *res; for(res=NULL; (res=(RESMON *)GetNextRes(rhead, R_CLIENT, (RES*)res));) { res->code = R_CLIENT; resources->push_back(res); } for(res=NULL; (res=(RESMON *)GetNextRes(rhead, R_DIRECTOR, (RES*)res));) { res->code = R_DIRECTOR; resources->push_back(res); } for(res=NULL; (res=(RESMON *)GetNextRes(rhead, R_STORAGE, (RES*)res));) { res->code = R_STORAGE; resources->push_back(res); } return resources; } const char *ConfigStorage::saveResources( QList *clients, QList *directors, QList *storages ) { const char *error_msg = NULL; POOL_MEM tmp_fname; Mmsg(tmp_fname, "%s.temp", configfile); FILE *fp = fopen(tmp_fname.c_str(), "w"); if(!fp) { return "Unable to open configuration file"; } MONITOR *mon = getMonitor(); writeMonitor(fp, mon); for(QObject 
*obj : *clients) { ResourceModel *model = (ResourceModel *) obj; RESMON *res = model->resource(); writeResource(fp, res, R_CLIENT); } for(QObject *obj : *directors) { ResourceModel *model = (ResourceModel *) obj; RESMON *res = model->resource(); writeResource(fp, res, R_DIRECTOR); } for(QObject *obj : *storages) { ResourceModel *model = (ResourceModel *) obj; RESMON *res = model->resource(); writeResource(fp, res, R_STORAGE); } fclose(fp); unlink(configfile); int result = rename(tmp_fname.c_str(), configfile); if (result != 0) { error_msg = "Unable to write to the configuration file"; } reloadResources(); return error_msg; } void ConfigStorage::writeMonitor(FILE *fp, MONITOR *mon) { fprintf(fp, "Monitor {\n Name = \"%s\"\n", mon->hdr.name); fprintf(fp, " Refresh Interval = %d\n", 120); fprintf(fp, "}\n"); } const char *ConfigStorage::addResource(const char *resName, rescode code) { return addResource(resName, code, false); } const char *ConfigStorage::addResource(const char *resName, rescode code, bool managed) { RESMON res; res.address = "localhost"; res.password = (char *) AndroidFD::devicePwd().toUtf8().constData(); res.managed = managed; POOLMEM *pm_resName = get_pool_memory(PM_FNAME); switch (code) { case R_CLIENT: Mmsg(pm_resName, "%s-fd", resName); res.port = 9102; res.use_remote = false; break; case R_DIRECTOR: Mmsg(pm_resName, "%s-dir", resName); res.port = 9101; break; case R_STORAGE: Mmsg(pm_resName, "%s-sd", resName); res.port = 9103; break; default: return NULL; } res.hdr.name = pm_resName; const char *err_msg = addResource(&res, code); free_and_null_pool_memory(pm_resName); return err_msg; } const char *ConfigStorage::addResource(RESMON *newRes, rescode code) { const char *error_msg = validateResource(newRes); if(error_msg != NULL) { return error_msg; } FILE *fp = fopen(configfile, "a"); if(!fp) { return "Unable to open configuration file"; } writeResource(fp, newRes, code); fclose(fp); reloadResources(); return error_msg; } const char *ConfigStorage::editResource(RESMON *oldRes, RESMON *newRes, rescode resCode) { const char *error_msg = validateResource(newRes); if(error_msg != NULL) { return error_msg; } POOL_MEM tmp_fname; Mmsg(tmp_fname, "%s.temp", configfile); FILE *fp = fopen(tmp_fname.c_str(), "w"); if(!fp) { return "Unable to open configuration file"; } MONITOR *mon = getMonitor(); writeMonitor(fp, mon); QList *resources = getAllResources(); for(RESMON *res : *resources) { if(strcmp(res->hdr.name, oldRes->hdr.name) == 0) { writeResource(fp, newRes, resCode); } else { writeResource(fp, res, res->code); } } fclose(fp); unlink(configfile); int result = rename(tmp_fname.c_str(), configfile); if (result != 0) { error_msg = "Unable to write to the configuration file"; } if(error_msg == NULL) { reloadResources(); dump_storage(); } return error_msg; } const char *ConfigStorage::validateResource(RESMON *res) { POOLMEM *buf1 = get_pool_memory(PM_FNAME); POOLMEM *buf2 = get_pool_memory(PM_FNAME); POOLMEM *buf3 = get_pool_memory(PM_FNAME); res->hdr.name = is_str_valid(&buf1, res->hdr.name); if(!res->hdr.name) { return "The name of the Resource should be set"; } res->address = is_str_valid(&buf2, res->address); if(!res->address) { return "The address of the Resource should be set"; } res->password = is_str_valid(&buf3, res->password); if(!res->password) { return "The password of the Resource should be set"; } return NULL; } void ConfigStorage::writeResource(FILE *fp, RESMON *res, rescode code) { const char *resType; switch (code) { case R_CLIENT: resType = "Client"; break; case 
R_DIRECTOR: resType = "Director"; break; case R_STORAGE: resType = "Storage"; break; default: return; } fprintf(fp, "%s {\n Name = \"%s\"\n Address = \"%s\"\n", resType, res->hdr.name, res->address); fprintf(fp, " Password = \"%s\"\n", res->password); fprintf(fp, " Port = %d\n", res->port); fprintf(fp, " Connect Timeout = %d\n", 10); if (res->use_remote) { fprintf(fp, " Remote = yes\n"); } if (res->managed) { fprintf(fp, " Managed = yes\n"); } fprintf(fp, "}\n"); } void ConfigStorage::dump_storage() { __android_log_write(ANDROID_LOG_DEBUG, "Configuration File:", configfile); std::string line; std::ifstream myfile(configfile); if (myfile.is_open()) { __android_log_write(ANDROID_LOG_DEBUG, "Success!", "File was opened"); while ( getline (myfile, line) ) { __android_log_write(ANDROID_LOG_DEBUG, "Line:", line.c_str()); } myfile.close(); } else { __android_log_write(ANDROID_LOG_DEBUG, "Error!", "Unable to open file"); } } bacula-15.0.3/src/qt-console/tray-monitor/resdetails-ui-controller.h0000644000175000017500000001762514771010173025320 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #ifndef RESCONFIGUICONTROLLER_H #define RESCONFIGUICONTROLLER_H #include #include #include "tray_conf.h" #include "config-storage.h" #include "resmodel.h" #include #include "jcr.h" #include "task.h" #include "common.h" #include "jobmodel.h" #include "runjobmodel.h" #include "restorejobmodel.h" /* TrayUiController - Controls the screen displayed when a user selects one resource (Director, File Daemon or Storage Daemon). 
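(The QML page binds to this controller through the Q_PROPERTY values declared below and drives it through the public slots; for instance a version label can be bound as Text { text: controller.resourceVersion }. That binding is only an illustration, not a line copied from the shipped .qml files.)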
It allows the user to: 1 - Change the data that's required to connect with the resource (name, password, address and port) 2 - Connect to the resource, and therefore see it's terminated jobs and running jobs Also, if the resource is a director, it allows the user to start new backup jobs or restore jobs */ class ResDetailsUiController : public QObject { Q_OBJECT // Data related to the resource that the user selected Q_PROPERTY(ResourceModel *resModel WRITE setResModel) Q_PROPERTY(QString resourceName READ resourceName WRITE setResourceName NOTIFY resourceNameChanged) //These are loaded after successfully connecting to the resource Q_PROPERTY(QString startedDate READ startedDate WRITE setStartedDate NOTIFY startedDateChanged) Q_PROPERTY(QString resourceVersion READ resourceVersion WRITE setResourceVersion NOTIFY resourceVersionChanged) Q_PROPERTY(QString resourcePlugins READ resourcePlugins WRITE setResourcePlugins NOTIFY resourcePluginsChanged) Q_PROPERTY(QString bandwidthLimit READ bandwidthLimit WRITE setBandwidthLimit NOTIFY bandwidthLimitChanged) Q_PROPERTY(QList terminatedJobs READ getTerminatedJobs NOTIFY terminatedJobsChanged()) Q_PROPERTY(QList runningJobs READ getRunningJobs NOTIFY runningJobsChanged()) //Loaded if the connection was not successful Q_PROPERTY(QString connectionError READ getConnectionError WRITE setConnectionError NOTIFY connectionError) //TODO merge both into a variable 'dialogMsg' // Message displayed on the GUI dialogs Q_PROPERTY(QString successMsg READ successMsg WRITE setSuccessMessage NOTIFY successMessageChanged) Q_PROPERTY(QString errorMsg READ errorMsg WRITE setErrorMessage NOTIFY errorMessageChanged) Q_PROPERTY(bool isConnecting READ isConnecting WRITE setIsConnecting NOTIFY isConnectingChanged) private: QString m_resourceName; QString m_startedDate; QString m_resourceVersion; QString m_resourcePlugins; QString m_bandwidthLimit; QList *m_terminatedJobs = new QList(); QList *m_runningJobs = new QList(); QString m_successMsg; QString m_errorMsg; QString m_connError; ConfigStorage *m_storage = NULL; rescode m_resCode; RESMON *m_res = NULL; bool m_connecting = false; public: explicit ResDetailsUiController(QObject *parent = nullptr); ~ResDetailsUiController(); // Getters / Setters for data that is used by our GUI bool isConnecting() { return m_connecting; } // Resource Name QString resourceName() { return m_resourceName; } void setResourceName(const QString &resourceName) { if (resourceName == m_resourceName) return; m_resourceName = resourceName; emit resourceNameChanged(); } // Resource Started Date QString startedDate() { return m_startedDate; } void setStartedDate(const QString &startedDate) { if (startedDate == m_startedDate) return; m_startedDate = startedDate; emit startedDateChanged(); } // Resource Version QString resourceVersion() { return m_resourceVersion; } void setResourceVersion(const QString &resourceVersion) { if (resourceVersion == m_resourceVersion) return; m_resourceVersion = resourceVersion; emit resourceVersionChanged(); } // Resource Plugins QString resourcePlugins() { return m_resourcePlugins; } void setResourcePlugins(const QString &resourcePlugins) { if (resourcePlugins == m_resourcePlugins) return; m_resourcePlugins = resourcePlugins; emit resourcePluginsChanged(); } // Resource Bandwidth Limit QString bandwidthLimit() { return m_bandwidthLimit; } void setBandwidthLimit(const QString &bandwidthLimit) { if (bandwidthLimit == m_bandwidthLimit) return; m_bandwidthLimit = bandwidthLimit; emit bandwidthLimitChanged(); } // Terminated 
Jobs QList getTerminatedJobs() { return *m_terminatedJobs; } void setTerminatedJobs(QList *jobs) { if(m_terminatedJobs != NULL) { delete m_terminatedJobs; } m_terminatedJobs = jobs; emit terminatedJobsChanged(); } // Running Jobs QList getRunningJobs() { return *m_runningJobs; } void setRunningJobs(QList *jobs) { if(m_runningJobs != NULL) { delete m_runningJobs; } m_runningJobs = jobs; emit runningJobsChanged(); } // Dialog Success Message QString successMsg() { return m_successMsg; } void setSuccessMessage(const QString &successMsg) { m_successMsg = successMsg; emit successMessageChanged(); } // Dialog Error Message QString errorMsg() { return m_errorMsg; } void setErrorMessage(const QString &errorMsg) { m_errorMsg = errorMsg; emit errorMessageChanged(); } // Connection Error Message QString getConnectionError() { return m_connError; } void setConnectionError(const QString &connError) { m_connError = connError; emit connectionError(); } void setIsConnecting(bool isConnecting) { if (m_connecting != isConnecting) { m_connecting = isConnecting; emit isConnectingChanged(); } } // Model (If Edition Mode) void setResModel(ResourceModel *resModel) { m_res = resModel->resource(); m_resCode = resModel->resCode(); setResourceName(m_res->hdr.name); } signals: // Events that are emitted to our GUI code to inform changes in our data void resourceNameChanged(); void startedDateChanged(); void resourceVersionChanged(); void resourcePluginsChanged(); void bandwidthLimitChanged(); void terminatedJobsChanged(); void runningJobsChanged(); void successMessageChanged(); void errorMessageChanged(); void isConnectingChanged(); // Events emitted to our GUI about the connection status with the resource void connectionStarted(); void connectionError(); void connectionSuccess(); // Tells the GUI to start the screen related to running a job void runJobModelCreated(RunJobModel *model); // Tells the GUI to start the screen related to restoring a job void restoreModelCreated(RestoreJobModel *model); public slots: // Called when the user wants to connect to a resource void connectToResource(); void connectCallback(task *t); // Called when the user wants to run a job void createRunJobModel(); void createBackupModelCallback(task *t); // Called when the user wants to restore a job void createRestoreJobModel(); void createRestoreModelCallback(task *t); bool canRunJobs() { if (m_res == NULL) { return false; } return m_resCode == R_DIRECTOR || m_res->use_remote; } }; #endif // RESCONFIGUICONTROLLER_H bacula-15.0.3/src/qt-console/tray-monitor/enterprise-tray-ui-controller.h0000644000175000017500000000426714771010173026314 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
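// Control-flow sketch of the QR-code registration handled by the class below.
// It is reconstructed from the declarations in this header and the QML pages;
// the exact Android intent plumbing lives behind AndroidFD (android-fd-service.h):
//
//   startQRCodeReader()                     called from the QML "QR Code" / "Scan QRCode" buttons
//     -> AndroidFD::startQRCodeReader(this)
//   handleActivityResult(...)               receives the scanned QR payload
//     -> handleQRCodeData(bwebUrl)
//        -> m_bweb                          link-registration HTTP request to BWeb
//   onBWebSuccess(msg) / onBWebError(msg)   result reported back to the GUI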
*/ #ifndef ENTR_TRAYUICONTROLLER_H #define ENTR_TRAYUICONTROLLER_H #include #include "tray-ui-controller.h" #include "bweb-service.h" /* EnterpriseTrayUiController - A subclass of TrayUiController that adds the QR Code configuration feature for our Android bacula-fd.conf file */ class EnterpriseTrayUiController : public TrayUiController, QAndroidActivityResultReceiver, BWebCallback { Q_OBJECT Q_PROPERTY(bool isConnecting READ isConnecting WRITE setIsConnecting NOTIFY isConnectingChanged) private: BWebService *m_bweb; bool m_connecting = false; public: explicit EnterpriseTrayUiController(QObject *parent = nullptr); ~EnterpriseTrayUiController(); // Callback containing the scanned QR Code data virtual void handleActivityResult(int receiverRequestCode, int resultCode, const QAndroidJniObject &data); // Callbacks for the Link Registration HTTP request sent to BWeb virtual void onBWebSuccess(QString msg); virtual void onBWebError(QString msg); // Getters / Setters for data that is used by our GUI bool isConnecting() { return m_connecting; } void setIsConnecting(bool isConnecting) { if (m_connecting != isConnecting) { m_connecting = isConnecting; emit isConnectingChanged(); } } public slots: void handleQRCodeData(QString bwebUrl); void startQRCodeReader() { AndroidFD::startQRCodeReader(this); } signals: void onQRCodeRead(QString bwebUrl); void isConnectingChanged(); }; #endif // ENTR_TRAYUICONTROLLER_H bacula-15.0.3/src/qt-console/tray-monitor/run.ui0000644000175000017500000002657014771010173021356 0ustar bsbuildbsbuild runForm 0 0 568 407 0 0 Run job 16777215 30 11 <h3>Run a Job</h3> 0 0 0 5 Qt::Horizontal :/images/runit.png 0 Properties QFormLayout::AllNonFixedFieldsGrow Job: jobCombo 0 0 QComboBox::AdjustToContents When: dateTimeEdit QDateTimeEdit::YearSection yyyy-MM-dd hh:mm:ss true <html><head/><body><p>Job statistics computed from the Catalog with previous jobs.</p><p>For accurate information, it is possible to use the bconsole &quot;estimate&quot; command.</p></body></html> Estimate: Job Bytes: Job Files: Level: Advanced Level: levelCombo Client: clientCombo FileSet: filesetCombo Pool: poolCombo Storage: storageCombo Catalog: Priority: prioritySpin 60 0 60 16777215 60 0 1 10000 10 0 0 0 5 Qt::Horizontal Qt::Horizontal 40 20 OK Cancel bacula-15.0.3/src/qt-console/tray-monitor/tray-monitor.pro.mingw32.in0000644000175000017500000000707414771010173025271 0ustar bsbuildbsbuild###################################################################### # # !!!!!!! IMPORTANT !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # # Edit only tray-monitor.pro.mingw32.in -- tray-monitor.pro.mingw32 is built by the ./configure program # # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # # CONFIG options for Windows are pulled from win32/qmake.conf # # Copyright (C) 2000-2022 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # CONFIG += qt cross-win32 #CONFIG += qt debug QT += widgets core gui cross-win32 { LIBS += ../../win32/lib/obj32/ini.o -mwindows -L../../win32/release32 -lbacula -lwinpthread INCLUDEPATH += ../../win32/compat } !cross-win32 { LIBS += -L../../lib -lbaccfg -lbac -L../../findlib -lbacfind @OPENSSL_LIBS@ } bins.path = /$(DESTDIR)@sbindir@ bins.files = bacula-tray-monitor confs.path = /$(DESTDIR)@sysconfdir@ confs.commands = ./install_conf_file TEMPLATE = app TARGET = bacula-tray-monitor QMAKE_EXTRA_TARGETS += depend DEPENDPATH += . INCLUDEPATH += ../.. . 
LIBTOOL_LINK = @QMAKE_LIBTOOL@ --silent --tag=CXX --mode=link LIBTOOL_INSTALL = @QMAKE_LIBTOOL@ --silent --mode=install QMAKE_LINK = $${LIBTOOL_LINK} $(CXX) QMAKE_INSTALL_PROGRAM = $${LIBTOOL_INSTALL} install -m @SBINPERM@ -p QMAKE_CLEAN += .libs/* bacula-tray-monitor release/bacula-tray-monitor QMAKE_CXXFLAGS += -DTRAY_MONITOR QMAKE_CFLAGS += -DTRAY_MONITOR RESOURCES = ../main.qrc MOC_DIR = moc32 OBJECTS_DIR = obj32 UI_DIR = ui32 QMAKE_CC = $(CXX) QMAKE_CXX = i686-w64-mingw32-g++ QMAKE_INCDIR = $(DEPKGS)/depkgs-mingw32/include/ ../win32/compat $(DEPKGS)/depkgs-mingw32/include/QtGui $(DEPKGS)/depkgs-mingw32/include/QtCore $(DEPKGS)/depkgs-mingw32/include/QtWidgets ui32/ QMAKE_INCDIR_QT = $(DEPKGS)/depkgs-mingw32/include/qt QMAKE_LIBDIR_QT = $(DEPKGS)/depkgs-mingw32/lib/qt QMAKE_LINK = i686-w64-mingw32-g++ QMAKE_LFLAGS = -mthreads -Wl,-enable-stdcall-fixup -Wl,-enable-auto-import -m32 -fno-strict-aliasing -Wl,-enable-runtime-pseudo-reloc QMAKE_LIB = i686-w64-mingw32-ar -ru QMAKE_RC = i686-w64-mingw32-windres # Main directory HEADERS += tray-monitor.h tray_conf.h tray-ui.h fdstatus.h task.h ../util/fmtwidgetitem.h dirstatus.h conf.h sdstatus.h runjob.h status.h restorewizard.h filesmodel.h clientselectwizardpage.h jobselectwizardpage.h fileselectwizardpage.h restoreoptionswizardpage.h pluginwizardpage.h SOURCES += tray-monitor.cpp tray_conf.cpp fdstatus.cpp task.cpp authenticate.cpp ../util/fmtwidgetitem.cpp dirstatus.cpp sdstatus.cpp conf.cpp runjob.cpp status.cpp restorewizard.cpp clientselectwizardpage.cpp jobselectwizardpage.cpp fileselectwizardpage.cpp restoreoptionswizardpage.cpp pluginwizardpage.cpp FORMS += fd-monitor.ui dir-monitor.ui sd-monitor.ui main-conf.ui res-conf.ui run.ui restorewizard.ui clientselectwizardpage.ui jobselectwizardpage.ui fileselectwizardpage.ui restoreoptionswizardpage.ui pluginwizardpage.ui # CDP Client CDP_DIR = ../../tools/cdp-client HEADERS += $$CDP_DIR/backupservice.h $$CDP_DIR/folderwatcher.h desktop-gui/cdp-main-ui.h $$CDP_DIR/cdp.h SOURCES += $$CDP_DIR/backupservice.cpp $$CDP_DIR/folderwatcher.cpp INCLUDEPATH += $$CDP_DIR # Journal JOURNAL_DIR = ../../plugins/fd HEADERS += $$JOURNAL_DIR/journal.h $$JOURNAL_DIR/file-record.h $$JOURNAL_DIR/folder-record.h $$JOURNAL_DIR/settings-record.h SOURCES += $$JOURNAL_DIR/journal.c INCLUDEPATH += $$JOURNAL_DIR TRANSLATIONS += ts/tm_fr.ts ts/tm_de.ts ts/tm_ja.ts bacula-15.0.3/src/qt-console/tray-monitor/dirstatus.cpp0000644000175000017500000001242114771010173022727 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
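// Update cycle implemented by the two slots below:
//
//   doUpdate()
//     t = new task();
//     connect(t, SIGNAL(done(task*)), this, SLOT(taskDone(task*)), Qt::QueuedConnection);
//     t->init(res, TASK_STATUS);   // ask the Director for its status
//     res->wrk->queue(t);          // executed on the resource's worker thread
//
//   taskDone(t)                    // delivered back on the GUI thread (queued connection)
//     locks res->mutex, refills the running/terminated job tables,
//     then re-enables the refresh button.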
*/ #include "dirstatus.h" #include "../util/fmtwidgetitem.h" #include "jcr.h" void DIRStatus::doUpdate() { if (count == 0) { count++; task *t = new task(); status.pushButton->setEnabled(false); connect(t, SIGNAL(done(task *)), this, SLOT(taskDone(task *)), Qt::QueuedConnection); t->init(res, TASK_STATUS); res->wrk->queue(t); status.statusBar->setText(QString("Trying to connect to Director...")); Dmsg1(50, "doUpdate(%p)\n", res); } } void DIRStatus::taskDone(task *t) { count--; if (!t->status) { status.statusBar->setText(QString(t->errmsg)); } else { status.statusBar->clear(); if (t->type == TASK_STATUS) { char ed1[50]; struct s_last_job *ljob; struct s_running_job *rjob; res->mutex->lock(); status.labelName->setText(QString(res->name)); status.labelVersion->setText(QString(res->version)); status.labelStarted->setText(QString(res->started)); status.labelReloaded->setText(QString(res->reloaded)); status.labelPlugins->setText(QString(res->plugins)); /* Clear the table first */ Freeze(*status.tableRunning); Freeze(*status.tableTerminated); QStringList headerlistR = (QStringList() << tr("JobId") << tr("Job") << tr("Level") << tr("Client") << tr("Status") << tr("Storage") << tr("Files") << tr("Bytes") << tr("Errors")); status.tableRunning->clear(); status.tableRunning->setRowCount(0); status.tableRunning->setColumnCount(headerlistR.count()); status.tableRunning->setHorizontalHeaderLabels(headerlistR); status.tableRunning->setEditTriggers(QAbstractItemView::NoEditTriggers); status.tableRunning->verticalHeader()->hide(); status.tableRunning->setSortingEnabled(true); if (res->running_jobs) { status.tableRunning->setRowCount(res->running_jobs->size()); int row=0; foreach_alist(rjob, res->running_jobs) { int col=0; TableItemFormatter item(*status.tableRunning, row++); item.setNumericFld(col++, QString(edit_uint64(rjob->JobId, ed1))); item.setTextFld(col++, QString(rjob->Job)); item.setJobLevelFld(col++, QString(rjob->JobLevel)); item.setTextFld(col++, QString(rjob->Client)); item.setJobStatusFld(col++, QString(rjob->JobStatus)); item.setTextFld(col++, QString(rjob->Storage)); item.setNumericFld(col++, QString(edit_uint64(rjob->JobFiles, ed1))); item.setBytesFld(col++, QString(edit_uint64(rjob->JobBytes, ed1))); item.setNumericFld(col++, QString(edit_uint64(rjob->Errors, ed1))); } } else { Dmsg0(0, "Strange, the list is NULL\n"); } QStringList headerlistT = (QStringList() << tr("JobId") << tr("Job") << tr("Level") << tr("Status") << tr("Files") << tr("Bytes") << tr("Errors")); status.tableTerminated->clear(); status.tableTerminated->setRowCount(0); status.tableTerminated->setColumnCount(headerlistT.count()); status.tableTerminated->setHorizontalHeaderLabels(headerlistT); status.tableTerminated->setEditTriggers(QAbstractItemView::NoEditTriggers); status.tableTerminated->verticalHeader()->hide(); status.tableTerminated->setSortingEnabled(true); if (res->terminated_jobs) { status.tableTerminated->setRowCount(res->terminated_jobs->size()); int row=0; foreach_dlist(ljob, res->terminated_jobs) { int col=0; TableItemFormatter item(*status.tableTerminated, row++); item.setNumericFld(col++, QString(edit_uint64(ljob->JobId, ed1))); item.setTextFld(col++, QString(ljob->Job)); item.setJobLevelFld(col++, QString(ljob->JobLevel)); item.setJobStatusFld(col++, QString(ljob->JobStatus)); item.setNumericFld(col++, QString(edit_uint64(ljob->JobFiles, ed1))); item.setBytesFld(col++, QString(edit_uint64(ljob->JobBytes, ed1))); item.setNumericFld(col++, QString(edit_uint64(ljob->Errors, ed1))); } } else { Dmsg0(0, 
"Strange, the list is NULL\n"); } res->mutex->unlock(); } Dmsg1(50, " Task %p OK\n", t); } t->deleteLater(); status.pushButton->setEnabled(true); } bacula-15.0.3/src/qt-console/tray-monitor/bacula-tray-monitor.conf.in0000644000175000017500000000123314771010173025345 0ustar bsbuildbsbuild# # Bacula Tray Monitor Configuration File # # Copyright (C) 2000-2023 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # Monitor { Name = @basename@-mon RefreshInterval = 120 seconds } Client { Name = @basename@-fd Password = "@mon_fd_password@" # password for FileDaemon Address = @hostname@ Port = @fd_port@ } #Storage { # Name = @basename@-sd # Address = @hostname@ # Port = @sd_port@ # Password = "@mon_sd_password@" # password for StorageDaemon #} # #Director { # Name = @basename@-dir # Address = @hostname@ # Port = @dir_port@ # Password = "@mon_dir_password@" # password for the Directors #} bacula-15.0.3/src/qt-console/tray-monitor/android.qrc0000644000175000017500000000157614771010173022341 0ustar bsbuildbsbuild main.qml TrayUiPage.qml ResourceListPage.qml ResourceListItem.qml ResourceDetailsPage.qml ResourceStatusPage.qml JobListPage.qml JobListItem.qml RunJobPage.qml RestoreJobPage.qml JobSelectTab.qml FileSelectTab.qml RestoreConfirmTab.qml FileDaemonConfigPage.qml MainMenuPage.qml FileDaemonPage.qml ResourcePanel.qml FeaturesTutorialPage.qml ConfigTutorialPage.qml TutorialPage.qml PulseLoader.qml bacula-15.0.3/src/qt-console/tray-monitor/main.qml0000644000175000017500000000365114771010173021645 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2020 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ import QtQuick 2.10 import QtQuick.Window 2.10 import QtQuick.Layouts 1.3 import QtQuick.Controls 2.3 import QtQuick.Controls.Material 2.1 import QtQuick.Dialogs 1.2 import io.qt.bmob.bootuicontroller 1.0 ApplicationWindow { visible: true title: qsTr("Tray Monitor") BootUiController { id: bootUiController } StackView { id: stackView anchors.fill: parent focus: true // Implements back key navigation for the App toolbar button Keys.onReleased: if (event.key === Qt.Key_Back && stackView.depth > 1) { if (stackView.currentItem.main === true) { Qt.quit() } else { stackView.pop(); } event.accepted = true; } initialItem: bootUiController.shouldShowTutorial() ? 
"TutorialPage.qml" : "MainMenuPage.qml" } // Implements back key navigation for global android back button onClosing: { if (Qt.platform.os == "android") { if (stackView.depth > 1) { if (stackView.currentItem.main === true) { Qt.quit() } else { stackView.pop(); } close.accepted = false; } } } } bacula-15.0.3/src/qt-console/tray-monitor/make_release_apk.sh.in0000644000175000017500000000322414771010173024413 0ustar bsbuildbsbuild#!/bin/sh MAKE=make QMAKE=@ANDROID_QT_QMAKE@ APK_MAKE=@ANDROID_QT_APK_MAKE@ TRAYMON_DIR=@TOP_DIR@/bacula/src/qt-console/tray-monitor BUILD_DIR=${TRAYMON_DIR}/android-build BUILD_JSON_FILE=android-libbacula-tray-monitor.so-deployment-settings.json BUILD_TARGET=android-@ANDROID_API@ rm -rf ${BUILD_DIR} mkdir ${BUILD_DIR} cp ${KEYSTORE_FILE} ${BUILD_DIR}/android_release.keystore if [ -z "${ENTERPRISE}" ]; then ${QMAKE} ${TRAYMON_DIR}/tray-monitor.android.pro -spec android-g++ INCLUDEPATH+=${ANDROID_OPENSSL_DIR}/include else ${QMAKE} ${TRAYMON_DIR}/tray-monitor.android.pro -spec android-g++ INCLUDEPATH+=${ANDROID_OPENSSL_DIR}/include DEFINES+="ENTERPRISE" fi #libfix.sh sed -i -e 's/\.so/\.la/g' Makefile if [ -z "${SKIP_CLEAN}" ]; then ${MAKE} clean fi ${MAKE} ${MAKE} install INSTALL_ROOT=${BUILD_DIR} #mvdyn.sh.in cp ${TRAYMON_DIR}/.libs/libbacula-tray-monitor.so ${TRAYMON_DIR} cp ${TRAYMON_DIR}/.libs/libbacula-tray-monitor.so ${BUILD_DIR}/@ANDROID_QT_BUILD_LIBS@ cp @TOP_DIR@/bacula/src/lib/.libs/libbaccfg-@VERSION@.so ${BUILD_DIR}/@ANDROID_QT_BUILD_LIBS@ cp @TOP_DIR@/bacula/src/lib/.libs/libbac-@VERSION@.so ${BUILD_DIR}/@ANDROID_QT_BUILD_LIBS@ cp @TOP_DIR@/bacula/src/findlib/.libs/libbacfind-@VERSION@.so ${BUILD_DIR}/@ANDROID_QT_BUILD_LIBS@ #cp ${DEPKGS_DIR}/src/openssl/libssl.so ${BUILD_DIR}/@ANDROID_QT_BUILD_LIBS@ #cp ${DEPKGS_DIR}/src/openssl/libcrypto.so ${BUILD_DIR}/@ANDROID_QT_BUILD_LIBS@ #build android apk ${APK_MAKE} --output ${BUILD_DIR} \ --verbose \ --input ${BUILD_JSON_FILE} \ --android-platform ${BUILD_TARGET} \ --sign ${BUILD_DIR}/android_release.keystore \ --storepass $KEYSTORE_PASSWORD \ --gradle bacula-15.0.3/src/qt-console/tray-monitor/sdstatus.cpp0000644000175000017500000001216714771010173022566 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ #include "sdstatus.h" #include "../util/fmtwidgetitem.h" #include "jcr.h" void SDStatus::doUpdate() { if (count == 0) { count++; task *t = new task(); status.pushButton->setEnabled(false); connect(t, SIGNAL(done(task *)), this, SLOT(taskDone(task *)), Qt::QueuedConnection); t->init(res, TASK_STATUS); res->wrk->queue(t); status.statusBar->setText(QString("Trying to connect to Storage...")); Dmsg1(50, "doUpdate(%p)\n", res); } } void SDStatus::taskDone(task *t) { count--; if (!t->status) { status.statusBar->setText(QString(t->errmsg)); } else { status.statusBar->clear(); if (t->type == TASK_STATUS) { char ed1[50]; struct s_last_job *ljob; struct s_running_job *rjob; res->mutex->lock(); status.labelName->setText(QString(res->name)); status.labelVersion->setText(QString(res->version)); status.labelStarted->setText(QString(res->started)); status.labelPlugins->setText(QString(res->plugins)); /* Clear the table first */ Freeze(*status.tableRunning); Freeze(*status.tableTerminated); QStringList headerlistR = (QStringList() << tr("JobId") << tr("Job") << tr("Level") << tr("Client") << tr("Storage") << tr("Files") << tr("Bytes") << tr("Errors")); status.tableRunning->clear(); status.tableRunning->setRowCount(0); status.tableRunning->setColumnCount(headerlistR.count()); status.tableRunning->setHorizontalHeaderLabels(headerlistR); status.tableRunning->setEditTriggers(QAbstractItemView::NoEditTriggers); status.tableRunning->verticalHeader()->hide(); status.tableRunning->setSortingEnabled(true); if (res->running_jobs) { status.tableRunning->setRowCount(res->running_jobs->size()); int row=0; foreach_alist(rjob, res->running_jobs) { int col=0; TableItemFormatter item(*status.tableRunning, row++); item.setNumericFld(col++, QString(edit_uint64(rjob->JobId, ed1))); item.setTextFld(col++, QString(rjob->Job)); item.setJobLevelFld(col++, QString(rjob->JobLevel)); item.setTextFld(col++, QString(rjob->Client)); item.setTextFld(col++, QString(rjob->Storage)); item.setNumericFld(col++, QString(edit_uint64(rjob->JobFiles, ed1))); item.setBytesFld(col++, QString(edit_uint64(rjob->JobBytes, ed1))); item.setNumericFld(col++, QString(edit_uint64(rjob->Errors, ed1))); } } else { Dmsg0(0, "Strange, the list is NULL\n"); } QStringList headerlistT = (QStringList() << tr("JobId") << tr("Job") << tr("Level") << tr("Status") << tr("Files") << tr("Bytes") << tr("Errors")); status.tableTerminated->clear(); status.tableTerminated->setRowCount(0); status.tableTerminated->setColumnCount(headerlistT.count()); status.tableTerminated->setHorizontalHeaderLabels(headerlistT); status.tableTerminated->setEditTriggers(QAbstractItemView::NoEditTriggers); status.tableTerminated->verticalHeader()->hide(); status.tableTerminated->setSortingEnabled(true); if (res->terminated_jobs) { status.tableTerminated->setRowCount(res->terminated_jobs->size()); int row=0; foreach_dlist(ljob, res->terminated_jobs) { int col=0; TableItemFormatter item(*status.tableTerminated, row++); item.setNumericFld(col++, QString(edit_uint64(ljob->JobId, ed1))); item.setTextFld(col++, QString(ljob->Job)); item.setJobLevelFld(col++, QString(ljob->JobLevel)); item.setJobStatusFld(col++, QString(ljob->JobStatus)); item.setNumericFld(col++, QString(edit_uint64(ljob->JobFiles, ed1))); item.setBytesFld(col++, QString(edit_uint64(ljob->JobBytes, ed1))); item.setNumericFld(col++, QString(edit_uint64(ljob->Errors, ed1))); } } else { Dmsg0(0, "Strange, the list is NULL\n"); } res->mutex->unlock(); } Dmsg1(50, " Task %p OK\n", t); } t->deleteLater(); 
status.pushButton->setEnabled(true); } bacula-15.0.3/src/qt-console/tray-monitor/dir-monitor.ui0000644000175000017500000001141414771010173023004 0ustar bsbuildbsbuild dirStatus 0 0 518 642 Form Director Status Name: Started: Version: Plugins: Reloaded: Running Jobs QAbstractItemView::SingleSelection false Terminated Jobs QAbstractItemView::SingleSelection Qt::Horizontal 40 20 :/images/view-refresh.png:/images/view-refresh.png bacula-15.0.3/src/qt-console/tray-monitor/tray-monitor.pro.in0000644000175000017500000000561314771010173024001 0ustar bsbuildbsbuild# # Copyright (C) 2000-2023 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # ###################################################################### # # !!!!!!! IMPORTANT !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # # Edit only tray-monitor.pro.in -- tray-monitor.pro is built by the ./configure program # # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # # CONFIG options for Windows are pulled from win32/qmake.conf CONFIG += qt #CONFIG += qt debug greaterThan(QT_MAJOR_VERSION, 4): QT += widgets cross-win32 { LIBS += -mwindows -L../../win32/release32 -lbacula INCLUDEPATH += ../../win32/compat } !cross-win32 { LIBS += -L../../lib -lbaccfg -lbac -L../../findlib -lbacfind @OPENSSL_LIBS@ } bins.path = /$(DESTDIR)@sbindir@ bins.files = bacula-tray-monitor confs.path = /$(DESTDIR)@sysconfdir@ confs.commands = ./install_conf_file TEMPLATE = app TARGET = bacula-tray-monitor QMAKE_EXTRA_TARGETS += depend DEPENDPATH += . INCLUDEPATH += ../.. . QMAKE_CC = $(CXX) LIBTOOL_LINK = @QMAKE_LIBTOOL@ --silent --tag=CXX --mode=link LIBTOOL_INSTALL = @QMAKE_LIBTOOL@ --silent --mode=install QMAKE_LINK = $${LIBTOOL_LINK} $(CXX) QMAKE_INSTALL_PROGRAM = $${LIBTOOL_INSTALL} install -m @SBINPERM@ -p QMAKE_CLEAN += obj/* .libs/* bacula-tray-monitor release/bacula-tray-monitor QMAKE_CXXFLAGS += -DTRAY_MONITOR QMAKE_CFLAGS += -DTRAY_MONITOR INSTALLS = bins confs RESOURCES = ../main.qrc MOC_DIR = moc OBJECTS_DIR = obj UI_DIR = ui # Main directory HEADERS += tray-monitor.h tray_conf.h ../../lib/authenticatebase.h tray-ui.h fdstatus.h task.h ../util/fmtwidgetitem.h dirstatus.h conf.h sdstatus.h runjob.h status.h restorewizard.h filesmodel.h clientselectwizardpage.h jobselectwizardpage.h fileselectwizardpage.h restoreoptionswizardpage.h pluginwizardpage.h SOURCES += tray-monitor.cpp tray_conf.cpp ../../lib/authenticatebase.cc fdstatus.cpp task.cpp authenticate.cpp ../util/fmtwidgetitem.cpp dirstatus.cpp sdstatus.cpp conf.cpp runjob.cpp status.cpp restorewizard.cpp clientselectwizardpage.cpp jobselectwizardpage.cpp fileselectwizardpage.cpp restoreoptionswizardpage.cpp pluginwizardpage.cpp FORMS += fd-monitor.ui dir-monitor.ui sd-monitor.ui main-conf.ui res-conf.ui run.ui restorewizard.ui clientselectwizardpage.ui jobselectwizardpage.ui fileselectwizardpage.ui restoreoptionswizardpage.ui pluginwizardpage.ui # CDP Client CDP_DIR = ../../tools/cdp-client HEADERS += $$CDP_DIR/backupservice.h $$CDP_DIR/folderwatcher.h desktop-gui/cdp-main-ui.h $$CDP_DIR/cdp.h SOURCES += $$CDP_DIR/backupservice.cpp $$CDP_DIR/folderwatcher.cpp INCLUDEPATH += $$CDP_DIR # Journal JOURNAL_DIR = ../../plugins/fd HEADERS += $$JOURNAL_DIR/journal.h $$JOURNAL_DIR/file-record.h $$JOURNAL_DIR/folder-record.h $$JOURNAL_DIR/settings-record.h SOURCES += $$JOURNAL_DIR/journal.c INCLUDEPATH += $$JOURNAL_DIR TRANSLATIONS += ts/tm_fr.ts ts/tm_de.ts ts/tm_ja.ts 
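# Illustrative build/run sequence for this project file. It assumes the Bacula
# libraries referenced above (../../lib, ../../findlib) have already been built
# by the top-level ./configure && make, and that a Qt qmake matching
# QT_MAJOR_VERSION is on the PATH:
#
#   cd src/qt-console/tray-monitor
#   qmake tray-monitor.pro
#   make
#   ./bacula-tray-monitor -c bacula-tray-monitor.conf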
bacula-15.0.3/src/qt-console/tray-monitor/ConfigTutorialPage.qml0000644000175000017500000001312514771010173024444 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2020 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ import QtQuick 2.10 import QtQuick.Window 2.10 import QtQuick.Layouts 1.3 import QtQuick.Controls 2.3 import QtQuick.Controls.Material 2.1 import QtQuick.Dialogs 1.2 import io.qt.bmob.traycontroller 1.0 Page { id: configTutorialPage visible: true // Our C++ component TrayUiController { id: controller // Events triggered by our c++ code onClientsChanged: { // } onDirectorsChanged: { // } onStoragesChanged: { // } onInfoDialogTextChanged: { infoDialog.text = controller.infoDialogText infoDialog.open() } onFdTraceChanged: { // } Component.onCompleted: { // } } background: Rectangle { color: "#d32f2f" } GridLayout { id: configGrid columns: 2 anchors.left: parent.left anchors.right: parent.right anchors.top: parent.top anchors.bottom: configText.top anchors.topMargin: 24 anchors.bottomMargin: 24 anchors.leftMargin: 16 anchors.rightMargin: 16 Image { source: "images/ss_trayconf.jpg" Layout.preferredWidth: configGrid.width / 2 Layout.preferredHeight: configGrid.height / 2 Rectangle { anchors.fill: parent anchors.margins: -border.width z: -1 border.width: 1 color: "#000" } } Image { source: "images/ss_resconf.jpg" Layout.preferredWidth: configGrid.width / 2 Layout.preferredHeight: configGrid.height / 2 Rectangle { anchors.fill: parent anchors.margins: -border.width z: -1 border.width: 1 color: "#000" } } Image { source: "images/ss_fdconf.jpg" Layout.preferredWidth: configGrid.width / 2 Layout.preferredHeight: configGrid.height / 2 Rectangle { anchors.fill: parent anchors.margins: -border.width z: -1 border.width: 1 color: "#000" } } Image { source: "images/ss_qrcode.jpg" Layout.preferredWidth: configGrid.width / 2 Layout.preferredHeight: configGrid.height / 2 Rectangle { anchors.fill: parent anchors.margins: -border.width z: -1 border.width: 1 color: "#000" } } } Text { id: configText text: "It's possible to automatically configure Bacula resources by scanning a QRCode generated by BWeb. Alternatively, those resources can be added manually." wrapMode: TextEdit.WordWrap font.pixelSize: 18 color: "white" anchors.bottom: nextButton.top anchors.left: parent.left anchors.right: parent.right anchors.bottomMargin: 16 anchors.leftMargin: 16 anchors.rightMargin: 16 } ToolButton { id: prevButton onClicked: { tutorialSwipe.currentIndex = 0 } anchors.left: parent.left anchors.bottom: parent.bottom anchors.leftMargin: 16 anchors.bottomMargin: 16 contentItem: Text { text: qsTr("<") font.pixelSize: 28 opacity: enabled ? 1.0 : 0.3 color: "white" } background: Rectangle { color: parent.down ? "#b71c1c" : "#d32f2f" } } ToolButton { id: nextButton onClicked: { stackView.push(Qt.resolvedUrl("MainMenuPage.qml")) } anchors.right: parent.right anchors.bottom: parent.bottom anchors.rightMargin: 16 anchors.bottomMargin: 16 contentItem: Text { text: qsTr(">") font.pixelSize: 28 opacity: enabled ? 
1.0 : 0.3 color: "white" } background: Rectangle { color: parent.down ? "#b71c1c" : "#d32f2f" } } Rectangle { visible: IS_ENTERPRISE id: scanButton height: childrenRect.height width: childrenRect.width anchors.horizontalCenter: parent.horizontalCenter anchors.bottom: parent.bottom anchors.bottomMargin: 17 color: clickArea.pressed ? "#6d6d6d" : "#000" Text { text: qsTr("Scan QRCode") padding: 12 anchors.horizontalCenter: parent.horizontalCenter verticalAlignment: Text.AlignVCenter font.pixelSize: 18 color: "white" } } MouseArea { visible: IS_ENTERPRISE id: clickArea anchors.left: scanButton.left anchors.right: scanButton.right anchors.top: scanButton.top anchors.bottom: scanButton.bottom onClicked: controller.startQRCodeReader() } MessageDialog { id: infoDialog modality: Qt.ApplicationModal standardButtons: StandardButton.Ok visible: false onAccepted: { stackView.push(Qt.resolvedUrl("MainMenuPage.qml")) } } } bacula-15.0.3/src/qt-console/tray-monitor/app-boot-ui-controller.cpp0000644000175000017500000000204414771010173025222 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #include "app-boot-ui-controller.h" BootUiController::BootUiController(QObject *parent): QObject(parent) {} bool BootUiController::shouldShowTutorial() { ConfigStorage *config = &ConfigStorage::getInstance(); QList *resources = config->getAllResources(); bool shouldShowTutorial = resources->size() <= 1; delete resources; return shouldShowTutorial; } BootUiController::~BootUiController() {} bacula-15.0.3/src/qt-console/tray-monitor/conf.h0000644000175000017500000000356414771010173021307 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
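// Summary of the two classes declared below:
//   Conf    - configuration dialog for the tray monitor; parse_config()
//             re-reads the file and addDir()/addStore()/addClient() create one
//             ConfTab per Director, Storage or Client resource.
//   ConfTab - a single resource tab built on Ui::ResConf; its bpDelete button
//             is wired to disable(), and the destructor frees the RESMON when
//             it holds a newly created resource (new_resource), so cancelling
//             the dialog does not leak it.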
*/ #ifndef CONF_H #define CONF_H #include "common.h" #include "ui_main-conf.h" #include "ui_res-conf.h" #include "tray_conf.h" class Conf: public QDialog { Q_OBJECT private: CONFIG *config; RES_HEAD **rhead; public: int items; QLineEdit::EchoMode passtype; Ui::Conf UIConf; Conf(); ~Conf(); bool parse_config(); void addResource(RESMON *res, const char *title); void addRes(int type, const char *title); /* create the resource */ public slots: void accept(); void selectCommandDir(); void addDir(); void addStore(); void addClient(); void togglePassword(); }; class ConfTab: public QWidget { Q_OBJECT public: Ui::ResConf ui; RESMON *res; int type; bool new_resource; ConfTab(RESMON *r): QWidget() { res = r; type = r->type; new_resource = r->new_resource; ui.setupUi(this); connect(ui.bpDelete, SIGNAL(clicked()), this, SLOT(disable())); }; ~ConfTab() { if (new_resource && res) { free_resource((RES*) res, res->type); res = NULL; } }; public slots: void disable() { setEnabled(false); }; void selectCaCertificateFile(); void selectCaCertificateDir(); void selectCertificate(); void selectKey(); }; #endif bacula-15.0.3/src/qt-console/tray-monitor/tray-monitor.android.pro.in0000644000175000017500000001246014771010173025416 0ustar bsbuildbsbuild###################################################################### # # !!!!!!! IMPORTANT !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # # Edit only tray-monitor.pro.in -- tray-monitor.pro is built by the ./configure program # # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # ##################################################################################### #### Project Configuration TEMPLATE = app TARGET = bacula-tray-monitor CONFIG += qt QMAKE_CC = $(CXX) QMAKE_CLEAN += obj/* .libs/* bacula-tray-monitor release/bacula-tray-monitor QMAKE_CXXFLAGS += -DTRAY_MONITOR QMAKE_CFLAGS += -DTRAY_MONITOR INSTALLS = targets MOC_DIR = moc OBJECTS_DIR = obj UI_DIR = ui baculafd.path = /assets baculafd.files = ../../filed/static-bacula-fd ./android/bacula-fd.conf INSTALLS += baculafd ##################################################################################### #### QT Libraries used by this project QT += widgets QT += qml QT += androidextras QT += network ##################################################################################### #### Bacula Libraries used by this project and the way they are linked LIBS += -L../../lib -lbaccfg -lbac -L../../findlib -lbacfind -lbac @OPENSSL_LIBS@ DEPENDPATH += . INCLUDEPATH += ../.. . 
# We make Libtool create a dynamically linked shared object called "libbacula-tray-monitor.so" LIBTOOL_LINK = @QMAKE_LIBTOOL@ --silent --tag=CXX --mode=link QMAKE_LINK = $${LIBTOOL_LINK} $(CXX) -export-dynamic -rpath $(DESTDIR)/obj -avoid-version ###################################################################################### #### Android Native files DISTFILES += \ android/AndroidManifest.xml \ android/gradle/wrapper/gradle-wrapper.jar \ android/gradlew \ android/res/values/libs.xml \ android/build.gradle \ android/gradle/wrapper/gradle-wrapper.properties \ android/gradlew.bat ANDROID_PACKAGE_SOURCE_DIR = $$PWD/android ###################################################################################### #### Android Project Sources ### Images, QML files, etc RESOURCES = ../main.qrc android.qrc ### Translation files TRANSLATIONS += ts/tm_fr.ts ts/tm_de.ts ts/tm_ja.ts ### Program Entry Point HEADERS += tray-monitor.h SOURCES += tray-monitor.cpp HEADERS += tray-ui-controller.h \ runjob-ui-controller.h \ resmodel.h \ jobmodel.h \ runjobmodel.h \ restorejobmodel.h \ restore-ui-controller.h \ respanel-ui-controller.h SOURCES += tray-ui-controller.cpp \ runjob-ui-controller.cpp \ resmodel.cpp \ jobmodel.cpp \ runjobmodel.cpp \ restorejobmodel.cpp \ restore-ui-controller.cpp \ respanel-ui-controller.cpp # App Boot Controller HEADERS += app-boot-ui-controller.h SOURCES += app-boot-ui-controller.cpp # Enterprise Start Screen HEADERS += enterprise-tray-ui-controller.h SOURCES += enterprise-tray-ui-controller.cpp SOURCES += authenticate.cpp HEADERS += filesmodel.h SOURCES += ../../lib/authenticatebase.cc HEADERS += ../../lib/authenticatebase.h # Data Structures HEADERS += tray_conf.h SOURCES += tray_conf.cpp # Configuration Storage HEADERS += config-storage.h SOURCES += config-storage.cpp # Resource Details Screen HEADERS += resdetails-ui-controller.h SOURCES += resdetails-ui-controller.cpp # FD Config Screen HEADERS += fd-config-ui-controller.h SOURCES += fd-config-ui-controller.cpp #Task HEADERS += task.h SOURCES += task.cpp # CDP Client CDP_DIR = ../../tools/cdp-client HEADERS += $$CDP_DIR/backupservice.h $$CDP_DIR/folderwatcher.h desktop-gui/cdp-main-ui.h $$CDP_DIR/cdp.h SOURCES += $$CDP_DIR/backupservice.cpp $$CDP_DIR/folderwatcher.cpp INCLUDEPATH += $$CDP_DIR # Journal JOURNAL_DIR = ../../plugins/fd HEADERS += $$JOURNAL_DIR/journal.h $$JOURNAL_DIR/file-record.h $$JOURNAL_DIR/folder-record.h $$JOURNAL_DIR/settings-record.h SOURCES += $$JOURNAL_DIR/journal.c INCLUDEPATH += $$JOURNAL_DIR # Registration Wizard REGWIZ_DIR = ../RegistrationWizard HEADERS += $$REGWIZ_DIR/bweb-service.h SOURCES += $$REGWIZ_DIR/bweb-service.cpp INCLUDEPATH += $$REGWIZ_DIR # Android File Daemon Service HEADERS += android-fd-service.h ###################################################################################### #### Desktop Project Sources to be removed in the future HEADERS += tray-ui.h # Util HEADERS += ../util/fmtwidgetitem.h SOURCES += ../util/fmtwidgetitem.cpp # DIR Status HEADERS += dirstatus.h SOURCES += dirstatus.cpp # SD Status HEADERS += sdstatus.h SOURCES += sdstatus.cpp # Conf HEADERS += conf.h SOURCES += conf.cpp # Run Job HEADERS += runjob.h SOURCES += runjob.cpp # Status HEADERS += status.h SOURCES += status.cpp #FD Status HEADERS += fdstatus.h SOURCES += fdstatus.cpp # Wizards HEADERS += restorewizard.h HEADERS += clientselectwizardpage.h HEADERS += jobselectwizardpage.h HEADERS += fileselectwizardpage.h HEADERS += restoreoptionswizardpage.h HEADERS += pluginwizardpage.h SOURCES += 
restorewizard.cpp SOURCES += clientselectwizardpage.cpp SOURCES += jobselectwizardpage.cpp SOURCES += fileselectwizardpage.cpp SOURCES += restoreoptionswizardpage.cpp SOURCES += pluginwizardpage.cpp FORMS += fd-monitor.ui dir-monitor.ui sd-monitor.ui main-conf.ui res-conf.ui run.ui restorewizard.ui clientselectwizardpage.ui jobselectwizardpage.ui fileselectwizardpage.ui restoreoptionswizardpage.ui pluginwizardpage.ui bacula-15.0.3/src/qt-console/tray-monitor/resmodel.h0000644000175000017500000000575014771010173022173 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #ifndef RESMODEL_H #define RESMODEL_H #include #include #include "tray_conf.h" /* ResourceModel - This model represents one resource that is present in the bacula-tray-monitor.conf file (either a Director, File Daemon or Storage Daemon). */ class ResourceModel : public QObject { Q_OBJECT Q_PROPERTY(QString resourceName READ resourceName WRITE setResourceName NOTIFY resourceNameChanged) Q_PROPERTY(QString resourceAddress READ resourceAddress WRITE setResourceAddress NOTIFY resourceAddressChanged) Q_PROPERTY(QString resourcePort READ resourcePort WRITE setResourcePort NOTIFY resourcePortChanged) Q_PROPERTY(bool deletableResource READ isResourceDeletable) private: QString m_resourceName; QString m_resourceAddress; QString m_resourcePort; RESMON *m_res; rescode m_resCode; public: explicit ResourceModel(QObject *parent = nullptr); ~ResourceModel(); //Resource Name QString resourceName() { return m_resourceName; } void setResourceName(const QString &resourceName) { if (resourceName == m_resourceName) return; m_resourceName = resourceName; emit resourceNameChanged(); } //Resource Address QString resourceAddress() { return m_resourceAddress; } void setResourceAddress(const QString &resourceAddress) { if (resourceAddress == m_resourceAddress) return; m_resourceAddress = resourceAddress; emit resourceAddressChanged(); } //Resource Port QString resourcePort() { return m_resourcePort; } void setResourcePort(const QString &resourcePort) { if (resourcePort == m_resourcePort) return; m_resourcePort = resourcePort; emit resourcePortChanged(); } bool isResourceDeletable() { return !m_res->managed; } // Resource Struct RESMON *resource() { return m_res; } rescode resCode() { return m_resCode; } void setResource(RESMON * res, rescode code) { m_resCode = code; m_res = res; setResourceName(res->hdr.name); setResourceAddress(res->address); setResourcePort(QString::number(res->port)); } signals: void resourceNameChanged(); void resourceAddressChanged(); void resourcePortChanged(); }; #endif // RESMODEL_H bacula-15.0.3/src/qt-console/tray-monitor/runjobmodel.cpp0000644000175000017500000000142214771010173023224 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. 
You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #include "runjobmodel.h" #include RunJobModel::RunJobModel(QObject *parent): QObject(parent) {} RunJobModel::~RunJobModel() {} bacula-15.0.3/src/qt-console/tray-monitor/status.h0000644000175000017500000000175714771010173021707 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #ifndef STATUS_H #define STATUS_H #include "common.h" #include #include "tray_conf.h" #include "task.h" class ResStatus: public QWidget { Q_OBJECT public: int count; RESMON *res; ResStatus(RESMON *c): count(0), res(c) { }; virtual ~ResStatus() { }; public slots: virtual void doUpdate(); virtual void taskDone(task *t); }; #endif bacula-15.0.3/src/qt-console/tray-monitor/android/0000755000175000017500000000000014771010173021621 5ustar bsbuildbsbuildbacula-15.0.3/src/qt-console/tray-monitor/android/src/0000755000175000017500000000000014771010173022410 5ustar bsbuildbsbuildbacula-15.0.3/src/qt-console/tray-monitor/android/src/org/0000755000175000017500000000000014771010173023177 5ustar bsbuildbsbuildbacula-15.0.3/src/qt-console/tray-monitor/android/src/org/baculasystems/0000755000175000017500000000000014771010173026056 5ustar bsbuildbsbuildbacula-15.0.3/src/qt-console/tray-monitor/android/src/org/baculasystems/bmob/0000755000175000017500000000000014771010173026775 5ustar bsbuildbsbuildbacula-15.0.3/src/qt-console/tray-monitor/android/src/org/baculasystems/bmob/alfa/0000755000175000017500000000000014771010173027700 5ustar bsbuildbsbuildbacula-15.0.3/src/qt-console/tray-monitor/android/src/org/baculasystems/bmob/alfa/BAndroidUtil.java0000644000175000017500000000640614771010173033071 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2020 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ package org.baculasystems.bmob.alfa; import android.content.Context; import android.content.SharedPreferences; import android.os.Build; import android.os.Environment; import android.util.Base64; import android.content.Intent; import android.net.Uri; import android.content.ComponentName; import android.content.Context; import android.content.pm.PackageManager; import android.util.Log; import java.security.SecureRandom; import java.util.Random; public final class BAndroidUtil { public static final String PREFS_NAME = "prefs"; public static final String DEVICE_ID = "prefs.device_id"; public static final String DEVICE_PWD = "prefs.device_pwd"; static String getSDcardPath() { return Environment.getExternalStorageDirectory().getPath(); } static String getDeviceId(Context ctx) { SharedPreferences prefs = ctx.getSharedPreferences(PREFS_NAME, Context.MODE_PRIVATE); String deviceId = prefs.getString(DEVICE_ID, null); if (deviceId == null) { Random rand = new Random(); StringBuilder builder = new StringBuilder(); builder.append(Build.MODEL); builder.append("-"); for (int i = 0; i < 5; i++) { builder.append(rand.nextInt(10)); } deviceId = builder.toString(); SharedPreferences.Editor editor = prefs.edit(); editor.putString(DEVICE_ID, deviceId); editor.commit(); } return deviceId; } static String getDevicePwd(Context ctx) { SharedPreferences prefs = ctx.getSharedPreferences(PREFS_NAME, Context.MODE_PRIVATE); String devicePwd = prefs.getString(DEVICE_PWD, null); if (devicePwd == null) { SecureRandom random = new SecureRandom(); byte[] bytes = new byte[8]; random.nextBytes(bytes); devicePwd = Base64.encodeToString(bytes, Base64.NO_PADDING); devicePwd = devicePwd.substring(0, devicePwd.length() - 2); SharedPreferences.Editor editor = prefs.edit(); editor.putString(DEVICE_PWD, devicePwd); editor.commit(); } return devicePwd; } static public void startMailApp(Context ctx, String subject, String message) { String[] adresses = new String[0]; Intent intent = new Intent(Intent.ACTION_SENDTO); // Only email apps will handle the Intent intent.setData(Uri.parse("mailto:")); intent.putExtra(Intent.EXTRA_EMAIL, adresses); intent.putExtra(Intent.EXTRA_SUBJECT, subject); intent.putExtra(Intent.EXTRA_TEXT, message); ctx.startActivity(intent); } } ././@LongLink0000644000000000000000000000015400000000000011603 Lustar rootrootbacula-15.0.3/src/qt-console/tray-monitor/android/src/org/baculasystems/bmob/alfa/QRCodeReaderActivity.javabacula-15.0.3/src/qt-console/tray-monitor/android/src/org/baculasystems/bmob/alfa/QRCodeReaderActivi0000644000175000017500000000462414771010173033231 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2020 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ package org.baculasystems.bmob.alfa; import android.app.Activity; import android.content.Context; import android.content.Intent; import android.os.Bundle; import android.util.SparseArray; import androidx.appcompat.app.AppCompatActivity; import com.google.android.gms.vision.barcode.Barcode; import java.util.List; import info.androidhive.barcode.BarcodeReader; /* QRCodeReaderActivity - Screen created to scan QR Codes. (Requires camera permission to work) */ public class QRCodeReaderActivity extends AppCompatActivity implements BarcodeReader.BarcodeReaderListener { // Called by our c++ code. static void start(Context ctx) { Intent it = new Intent(ctx, QRCodeReaderActivity.class); ctx.startActivity(it); } private BarcodeReader barcodeReader; @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.layout_barcode); barcodeReader = (BarcodeReader) getSupportFragmentManager().findFragmentById(R.id.barcode_fragment); } @Override public void onScanned(final Barcode barcode) { barcodeReader.playBeep(); Intent returnIntent = new Intent(); // Returns the scanned data to our c++ code. returnIntent.putExtra("barcode.data", barcode.displayValue); setResult(Activity.RESULT_OK, returnIntent); finish(); } @Override public void onScannedMultiple(List list) {} @Override public void onBitmapScanned(SparseArray sparseArray) {} @Override public void onScanError(String s) {} @Override public void onCameraPermissionDenied() { Intent returnIntent = new Intent(); setResult(Activity.RESULT_CANCELED, returnIntent); finish(); } }././@LongLink0000644000000000000000000000015400000000000011603 Lustar rootrootbacula-15.0.3/src/qt-console/tray-monitor/android/src/org/baculasystems/bmob/alfa/SplashScreenActivity.javabacula-15.0.3/src/qt-console/tray-monitor/android/src/org/baculasystems/bmob/alfa/SplashScreenActivi0000644000175000017500000000243514771010173033361 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2020 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ package org.baculasystems.bmob.alfa; import android.content.Intent; import android.os.Bundle; import androidx.appcompat.app.AppCompatActivity; import org.qtproject.qt5.android.bindings.QtActivity; /* SplashScreenActivity - Entry point of our App The Splash Screen GUI code is defined in @drawable/splash_screen.xml */ public class SplashScreenActivity extends AppCompatActivity { @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); //Starts QT Project Intent intent = new Intent(SplashScreenActivity.this, QtActivity.class); startActivity(intent); finish(); } }././@LongLink0000644000000000000000000000014700000000000011605 Lustar rootrootbacula-15.0.3/src/qt-console/tray-monitor/android/src/org/baculasystems/bmob/alfa/BaculaFDService.javabacula-15.0.3/src/qt-console/tray-monitor/android/src/org/baculasystems/bmob/alfa/BaculaFDService.ja0000644000175000017500000002262414771010173033144 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2020 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ package org.baculasystems.bmob.alfa; import android.app.Notification; import android.app.NotificationChannel; import android.app.NotificationManager; import android.app.PendingIntent; import android.app.Service; import android.content.Context; import android.content.Intent; import android.content.SharedPreferences; import android.os.Build; import android.os.IBinder; import androidx.annotation.RequiresApi; import androidx.core.app.NotificationCompat; import android.util.Log; import java.io.File; import java.io.FileOutputStream; import java.io.InputStream; import java.io.OutputStream; /* BaculaFDService - Responsible for managing the Android File Daemon This Service was designed to not be killed by Android unless under heavy memory or battery pressure. */ public class BaculaFDService extends Service { public static final String LOG_TAG = "BACULA_FD_SERVICE"; public static final String COMMAND = "prefs.fdcommand"; public static final String TRACE_PATH = "prefs.tracepath"; public static final String LOG_LEVEL = "prefs.loglevel"; public static final String FD_RUNNING = "prefs.fdrunning"; public static final String CHANNEL_ID = "bsys.bmob.fd"; public static final String ACTION_STOP_SERVICE = "bsys.stop.service"; public static final int NOTIFICATION_ID = 42; // The Android File Deamon Process Process fdProcess = null; Notification notification = null; Runnable traceRunnable = null; // This thread reads the FD Process stdout and fills the trace file Thread traceThread = null; Runnable watcherRunnable = null; // This thread watches the FD Process to see if it's still alive Thread watcherThread = null; boolean readTrace = true; // Called by our c++ code. 
public static void start(Context ctx, String command, String tracePath, String logLevel) { Intent it = new Intent(ctx, BaculaFDService.class); if (command != null) { SharedPreferences prefs = ctx.getSharedPreferences(COMMAND, Context.MODE_MULTI_PROCESS); SharedPreferences.Editor editor = prefs.edit(); editor.putString(COMMAND, command); editor.putString(TRACE_PATH, tracePath); editor.putString(LOG_LEVEL, logLevel); editor.putBoolean(FD_RUNNING, true); editor.commit(); } if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) { ctx.startForegroundService(it); } else { ctx.startService(it); } } // Called by our c++ code. public static void stop(Context ctx) { SharedPreferences prefs = ctx.getSharedPreferences(COMMAND, Context.MODE_MULTI_PROCESS); SharedPreferences.Editor editor = prefs.edit(); editor.putBoolean(FD_RUNNING, false); editor.commit(); Intent it = new Intent(ctx, BaculaFDService.class); ctx.stopService(it); } private void logException(String tag, Exception e) { String msg = e.getMessage(); if(msg != null) { Log.e(tag, msg); } else { Log.e(tag,"Null message"); } } private void sendNotification(String text) { NotificationCompat.Builder builder = new NotificationCompat.Builder(getBaseContext(), CHANNEL_ID) .setSmallIcon(R.drawable.logo) .setContentTitle("Bacula") .setContentText(text) .setPriority(NotificationCompat.PRIORITY_DEFAULT); Intent stopSelf = new Intent(this, BaculaFDService.class); stopSelf.setAction(ACTION_STOP_SERVICE); PendingIntent pit = PendingIntent.getService(this, 0, stopSelf, PendingIntent.FLAG_CANCEL_CURRENT); builder.addAction(R.mipmap.ic_launcher, "Stop", pit); startForeground(NOTIFICATION_ID, builder.build()); } private void saveTrace(final InputStream pOutput, final String tracePath) { traceRunnable = new Runnable() { public void run() { try { File targetFile = new File(tracePath); OutputStream traceStream = new FileOutputStream(targetFile, true); byte[] buffer = new byte[2048]; int bytesRead; while (readTrace) { while ((bytesRead = pOutput.read(buffer, 0, pOutput.available())) != -1) { traceStream.write(buffer, 0, bytesRead); } } pOutput.close(); traceStream.close(); } catch (Exception e) { logException("FD Trace Error", e); } } }; traceThread = new Thread(traceRunnable); traceThread.start(); } private void startProcess(String command, String tracePath, String logLevel) { try { ProcessBuilder pb = new ProcessBuilder(command, "-f", "-d", logLevel, "-dt"); fdProcess = pb.start(); saveTrace(fdProcess.getInputStream(), tracePath); } catch (Exception e) { logException("FD Process start error", e); } } private boolean isRunning(Process p) { try { int eval = p.exitValue(); Log.e(LOG_TAG, "File Daemon stopped with exit code " + eval); return false; } catch (Exception e) { return true; } } private void startWatcher() { watcherRunnable = new Runnable() { public void run() { while (true) { Log.e(LOG_TAG, "CHECKING FD PROCESS..."); try { if (!isRunning(fdProcess)) { break; } Thread.sleep(1000); } catch (Exception e) { logException("Watcher Thread Error", e); break; } } } }; watcherThread = new Thread(watcherRunnable); watcherThread.start(); } @RequiresApi(Build.VERSION_CODES.O) private void startNotificationChannel() { CharSequence name = "Bacula Channel"; String description = "Channel desc"; int importance = NotificationManager.IMPORTANCE_DEFAULT; NotificationChannel channel = new NotificationChannel(CHANNEL_ID, name, importance); channel.setDescription(description); // Register the channel with the system; you can't change the importance // or other notification 
behaviors after this NotificationManager notificationManager = getSystemService(NotificationManager.class); notificationManager.createNotificationChannel(channel); } public void stopFileDaemon() { if (notification != null) { stopForeground(true); } if (fdProcess != null) { fdProcess.destroy(); } SharedPreferences prefs = getSharedPreferences(COMMAND, Context.MODE_MULTI_PROCESS); SharedPreferences.Editor editor = prefs.edit(); editor.putBoolean(FD_RUNNING, false); editor.commit(); readTrace = false; if (traceThread != null) { try { traceThread.join(); } catch (InterruptedException e) { logException("FD Trace Error - join()", e); } } } /** No need to bind this service to the caller activity */ @Override public IBinder onBind(Intent intent) { return null; } /** The service is starting, due to a call to startService() */ @Override public int onStartCommand(Intent intent, int flags, int startId) { int ret = super.onStartCommand(intent, flags, startId); Log.i(LOG_TAG, "Service created"); SharedPreferences prefs = getSharedPreferences(COMMAND, Context.MODE_MULTI_PROCESS); String command = prefs.getString(COMMAND, null); String tracePath = prefs.getString(TRACE_PATH, null); String logLevel = prefs.getString(LOG_LEVEL, null); if (intent != null) { String iaction = intent.getAction(); if (iaction != null && iaction.equals(ACTION_STOP_SERVICE)) { stopSelf(); return ret; } } if (command != null && fdProcess == null) { Log.i(LOG_TAG, "Starting FD Process"); startProcess(command, tracePath, logLevel); startWatcher(); if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) { startNotificationChannel(); } sendNotification("File Daemon is Running"); } return ret; } @Override public void onDestroy() { super.onDestroy(); stopFileDaemon(); Log.i(LOG_TAG, "Service destroyed"); } } ././@LongLink0000644000000000000000000000015700000000000011606 Lustar rootrootbacula-15.0.3/src/qt-console/tray-monitor/android/src/org/baculasystems/bmob/alfa/BaculaBroadcastReceiver.javabacula-15.0.3/src/qt-console/tray-monitor/android/src/org/baculasystems/bmob/alfa/BaculaBroadcastRec0000644000175000017500000000261214771010173033270 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2020 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ package org.baculasystems.bmob.alfa; import android.content.BroadcastReceiver; import android.content.Context; import android.content.Intent; import android.content.SharedPreferences; import static org.baculasystems.bmob.alfa.BaculaFDService.COMMAND; import static org.baculasystems.bmob.alfa.BaculaFDService.FD_RUNNING; /** Receiver that is called when the phone boots **/ public class BaculaBroadcastReceiver extends BroadcastReceiver { @Override public void onReceive(Context context, Intent intent) { SharedPreferences prefs = context.getSharedPreferences(COMMAND, Context.MODE_PRIVATE); boolean fdRunning = prefs.getBoolean(FD_RUNNING, false); if(fdRunning) { BaculaFDService.start(context, null, null, null); } } } bacula-15.0.3/src/qt-console/tray-monitor/android/build-depkgs-android.sh0000755000175000017500000000732014771010173026152 0ustar bsbuildbsbuild#!/bin/sh # # This script builds the dependency packages that # are needed to cross compile the android version of the Bacula # Tray Monitor. # # Dependencies will be installed on ~/android-depkgs # get_source() { URL=$1 HTTP_HEADERS=$2 if [ ! -e "$SRC_DIR" ]; then mkdir ${SRC_DIR} fi cd ${SRC_DIR} echo "Downloading ${URL} into ${SRC_DIR}" ARCHIVE=`basename ${URL}` case ${ARCHIVE} in *.tar.gz) ARCHIVER="tar xzf" [ -z "${SRC_DIR}" ] && SRC_DIR=`expr "${ARCHIVE}" : '\(.*\)\.tar\.gz'` ;; *.tar.bz2) ARCHIVER="tar xjf" [ -z "${SRC_DIR}" ] && SRC_DIR=`expr "${ARCHIVE}" : '\(.*\)\.tar\.bz2'` ;; *.zip) ARCHIVER="unzip -d ." [ -z "${SRC_DIR}" ] && SRC_DIR=`expr "${ARCHIVE}" : '\(.*\)\.zip'` ;; *.xz) ARCHIVER="tar xf" [ -z "${SRC_DIR}" ] && SRC_DIR=`expr "${ARCHIVE}" : '\(.*\)\.tar\.xz'` ;; *) echo "Unsupported archive type - ${ARCHIVE}" exit 1 ;; esac if [ ! -e "${ARCHIVE}" ]; then echo Downloading "${URL}" if wget --passive-ftp --header "${HTTP_HEADERS}" "${URL}" then : else echo "Unable to download ${ARCHIVE}" exit 1 fi fi echo Extracting ${ARCHIVE} ${ARCHIVER} ${ARCHIVE} > ${ARCHIVE}.log 2>&1 return 1 } process_jdk() { HTTP_HEADERS="Cookie: oraclelicense=accept-securebackup-cookie" JDK_URL="http://download.oracle.com/otn-pub/java/jdk/8u131-b11/d54c1d3a095b4ff2b6607d096fa80163/jdk-8u131-linux-x64.tar.gz" JDK_SRC_DIR=${SRC_DIR}/jdk1.8.0_131 JDK_TARGET_DIR="${DEPKGS_DIR}/java" rm -rf ${JDK_SRC_DIR} get_source "${JDK_URL}" "${HTTP_HEADERS}" rm -rf ${JDK_TARGET_DIR} mkdir ${JDK_TARGET_DIR} mv ${JDK_SRC_DIR}/* ${JDK_TARGET_DIR} rm -rf ${JDK_SRC_DIR} } process_android_sdk() { SDK_URL="https://dl.google.com/android/repository/sdk-tools-linux-3859397.zip" SDK_SRC_DIR=${SRC_DIR}/tools SDK_TARGET_DIR="${DEPKGS_DIR}/sdk" rm -rf ${SDK_SRC_DIR} get_source "${SDK_URL}" rm -rf ${SDK_TARGET_DIR} mkdir ${SDK_TARGET_DIR} mv ${SDK_SRC_DIR} ${SDK_TARGET_DIR} rm -rf ${SDK_SRC_DIR} export ANDROID_HOME=${SDK_TARGET_DIR} export PATH="$ANDROID_HOME/tools:$ANDROID_HOME/platform-tools:$PATH" yes | ${SDK_TARGET_DIR}/tools/bin/sdkmanager --licenses ${SDK_TARGET_DIR}/tools/bin/sdkmanager "tools" "build-tools;28.0.0" "extras;android;m2repository" "extras;google;google_play_services" "platforms;android-28" "platform-tools" } process_android_ndk() { NDK_URL="https://dl.google.com/android/repository/android-ndk-r10e-linux-x86_64.zip" NDK_SRC_DIR=${SRC_DIR}/android-ndk-r10e NDK_TARGET_DIR="${DEPKGS_DIR}/ndk" rm -rf ${NDK_SRC_DIR} get_source "${NDK_URL}" rm -rf ${NDK_TARGET_DIR} mkdir ${NDK_TARGET_DIR} mv ${NDK_SRC_DIR}/* ${NDK_TARGET_DIR} rm -rf ${NDK_SRC_DIR} } process_openssl() { OPENSSL_URL="https://www.openssl.org/source/openssl-1.0.2r.tar.gz" 
OPENSSL_SRC_DIR=${SRC_DIR}/openssl-1.0.2r OPENSSL_TARGET_DIR="${SRC_DIR}/openssl" rm -rf ${OPENSSL_SRC_DIR} get_source "${OPENSSL_URL}" rm -rf ${OPENSSL_TARGET_DIR} mkdir ${OPENSSL_TARGET_DIR} mv ${OPENSSL_SRC_DIR}/* ${OPENSSL_TARGET_DIR} rm -rf ${OPENSSL_SRC_DIR} } process_qt() { QT_TARGET_DIR="${DEPKGS_DIR}/qt" if [ ! -e "${QT_TARGET_DIR}" ]; then mkdir ${QT_TARGET_DIR} fi } DEPKGS_DIR=$HOME/android-depkgs ANDROID_CFG_DIR=$HOME/.android SRC_DIR=$DEPKGS_DIR/src if [ ! -e "${DEPKGS_DIR}" ]; then mkdir ${DEPKGS_DIR} fi if [ ! -e "${ANDROID_CFG_DIR}" ]; then mkdir ${ANDROID_CFG_DIR} fi touch ${ANDROID_CFG_DIR}/repositories.cfg process_android_sdk process_android_ndk process_jdk process_openssl process_qt bacula-15.0.3/src/qt-console/tray-monitor/android/make_release.sh0000755000175000017500000002134414771010173024601 0ustar bsbuildbsbuild#!/bin/sh # # usage() { echo "usage: $0 [-f -p -a ] ..." echo " -h Displays this usage" echo " -f Path to Keystore File" echo " -p Keystore Password" echo " -a Desired android architecture" echo " (Accepted values: "arm", "aarch64", "x86, "x86_64", "mips", "mips64)" echo " -s Skip clean" echo " -e Generate enterprise version" echo "" } setenvs() { _TOOLCHAIN_NAME=$1 #Used to create NDK's standalone toolchain compiler _GCC_VERSION="4.9" # Set _ANDROID_API to the API you want to use. You should set it # to one of: android-26, android-24, etc. export ANDROID_API="21" ##################################################################### # Error checking # ANDROID_NDK_ROOT should always be set by the user (even when not running this script) # http://groups.google.com/group/android-ndk/browse_thread/thread/a998e139aca71d77 if [ -z "$ANDROID_NDK_ROOT" ] || [ ! -d "$ANDROID_NDK_ROOT" ]; then echo "Error: ANDROID_NDK_ROOT not found. Please, create this Environment Variable" exit 1 fi if [ ! -d "$ANDROID_NDK_ROOT/build/tools" ]; then echo "Error: ANDROID_NDK_ROOT/build/tools is not a valid path. Please edit this script." exit 1 fi if [ ! -e "$ANDROID_NDK_ROOT/build/tools/make-standalone-toolchain.sh" ]; then echo "Error: $ANDROID_NDK_ROOT/build/tools/make-standalone-toolchain.sh not found. Please edit this script." exit 1 fi if [ -z "$QT_ROOT" ] || [ ! -d "$QT_ROOT" ]; then echo "Error: QT_ROOT not found. Please, create this Environment Variable" exit 1 fi ##################################################################### #Uses config.guess script to save the system's canonical name #This will be used by Bacula's configure script #(needed to perform a cross-compilation) export BUILD_SYSTEM=$(${TOP_DIR}/autoconf/config.guess) ###################################################################### # Figure it out the Standalone Toolchain. 
# If it isn't been created yet, we create one # with the "make-standalone-toolchain.sh" script # Also, set up $ANDROID_ARCH variable to be used # with Bacula's "configure" script _FULL_TOOLCHAIN_NAME="" _ANDROID_ARCH="" _QT_LIB_FOLDER="" _QT_BIN_FOLDER="" _OPENSSL_MACHINE="" _OPENSSL_RELEASE=2.6.37 _OPENSSL_SYSTEM="" _OPENSSL_ARCH="" case ${_TOOLCHAIN_NAME} in "arm") _FULL_TOOLCHAIN_NAME="arm-linux-androideabi" _ANDROID_ARCH="armv7-none-linux-androideabi" _QT_LIB_FOLDER="armeabi-v7a" _QT_BIN_FOLDER="armv7" _OPENSSL_MACHINE=armv7 _OPENSSL_SYSTEM=armv7 _OPENSSL_ARCH=arm ;; "aarch64") _FULL_TOOLCHAIN_NAME="aarch64-linux-android" _ANDROID_ARCH="aarch64-none-linux-android" _QT_LIB_FOLDER="arm64-v8a" _QT_BIN_FOLDER="$_TOOLCHAIN_NAME" _OPENSSL_MACHINE=armv7 _OPENSSL_SYSTEM=armv7 _OPENSSL_ARCH=arm ;; "x86") _FULL_TOOLCHAIN_NAME="x86" _ANDROID_ARCH="i686-none-linux-android" _QT_LIB_FOLDER="$_TOOLCHAIN_NAME" _QT_BIN_FOLDER="$_TOOLCHAIN_NAME" _OPENSSL_MACHINE=i686 _OPENSSL_SYSTEM=x86 _OPENSSL_ARCH=x86 ;; "x86_64") _FULL_TOOLCHAIN_NAME="x86_64-linux-android" _ANDROID_ARCH="x86_64-none-linux-android" _QT_LIB_FOLDER="$_TOOLCHAIN_NAME" _QT_BIN_FOLDER="$_TOOLCHAIN_NAME" _OPENSSL_MACHINE=i686 _OPENSSL_SYSTEM=x86 _OPENSSL_ARCH=x86 ;; "mips") _FULL_TOOLCHAIN_NAME="mipsel-linux-android" _ANDROID_ARCH="mipsel-none-linux-android" _QT_LIB_FOLDER="$_TOOLCHAIN_NAME" _QT_BIN_FOLDER="$_TOOLCHAIN_NAME" ;; "mips64") _FULL_TOOLCHAIN_NAME="mips64el-linux-android" _ANDROID_ARCH="mips64el-none-linux-android" _QT_LIB_FOLDER="$_TOOLCHAIN_NAME" _QT_BIN_FOLDER="$_TOOLCHAIN_NAME" ;; *) echo "Invalid Android architecture. Possible values are: 'arm', 'aarch64', 'x86', 'x86_64', 'mips', 'mips64'" exit 1 ;; esac export ANDROID_ARCH="$_ANDROID_ARCH" export ANDROID_SYSROOT="$ANDROID_NDK_ROOT/platforms/android-$ANDROID_API/arch-$_TOOLCHAIN_NAME/" export ANDROID_QT_BUILD_LIBS="libs/$_QT_LIB_FOLDER" export ANDROID_QT_QMAKE="$QT_ROOT/android_$_QT_BIN_FOLDER/bin/qmake" export ANDROID_QT_APK_MAKE="$QT_ROOT/android_$_QT_BIN_FOLDER/bin/androiddeployqt" export MACHINE="$_OPENSSL_MACHINE" export RELEASE="$_OPENSSL_RELEASE" export SYSTEM="$_OPENSSL_SYSTEM" export ARCH="$_OPENSSL_ARCH" export TOOLCHAIN_PATH="$ANDROID_NDK_ROOT"/standalone/"$_TOOLCHAIN_NAME-android-$ANDROID_API" if [ ! -d "$TOOLCHAIN_PATH" ]; then echo "Creating Standalone Toolchain... (this may take a few seconds)" "$ANDROID_NDK_ROOT"/build/tools/make-standalone-toolchain.sh \ --platform="android-$ANDROID_API" \ --toolchain="$_FULL_TOOLCHAIN_NAME-$_GCC_VERSION" \ --install-dir="$TOOLCHAIN_PATH" fi ###################################################################### # Tries to find the C and C++ compilers # inside the Standalone Toolchain location CC_NAME="" CXX_NAME="" case ${_TOOLCHAIN_NAME} in "x86") CC_NAME="i686-linux-android-gcc" CXX_NAME="i686-linux-android-g++" ;; *) CC_NAME="$_FULL_TOOLCHAIN_NAME-gcc" CXX_NAME="$_FULL_TOOLCHAIN_NAME-g++" ;; esac export CC="$TOOLCHAIN_PATH/bin/$CC_NAME" export CXX="$TOOLCHAIN_PATH/bin/$CXX_NAME" export CPPFLAGS="-D__ANDROID_API__=$ANDROID_API -fPIC" export CXXFLAGS="-fPIC" export LDFLAGS="-fPIE -pie" export ANDROID_INSTALL_DIR="/data/user/0/org.baculasystems.bmob.alfa/files" if [ ! -e "$CC" ]; then echo "Error: Toolchain C compiler not found at $CC. Please edit this script." exit 1 fi if [ ! -e "$CXX" ]; then echo "Error: Toolchain C++ compiler not found at $CXX. Please edit this script." exit 1 fi VERBOSE=1 if [ ! 
-z "$VERBOSE" ] && [ "$VERBOSE" != "0" ]; then echo "" echo "######### COMPILER VARIABLES ########" echo "" echo "BUILD_SYSTEM: $BUILD_SYSTEM" echo "CC: $CC" echo "CXX: $CXX" echo "CPPFLAGS: $CPPFLAGS" echo "LDFLAGS: $LDFLAGS" echo "" echo "######### ANDROID VARIABLES ########" echo "" echo "ANDROID_API_LEVEL: $ANDROID_API" echo "ANDROID_ARCH: $ANDROID_ARCH" echo "TOOLCHAIN_FULL_NAME: $_FULL_TOOLCHAIN_NAME" echo "ANDROID_INSTALL_DIR: $ANDROID_INSTALL_DIR" echo "ANDROID_NDK_ROOT: $ANDROID_NDK_ROOT" echo "ANDROID_SYSROOT: $ANDROID_SYSROOT" echo "TOOLCHAIN_PATH: $TOOLCHAIN_PATH" echo "" echo "######### QT VARIABLES ########" echo "" echo "ANDROID_QT_QMAKE: $ANDROID_QT_QMAKE" echo "ANDROID_QT_APK_MAKE: $ANDROID_QT_APK_MAKE" echo "ANDROID_QT_BUILD_LIBS: (ANDROID-BUILD-DIR)/$ANDROID_QT_BUILD_LIBS" echo "" echo "######### OPENSSL VARIABLES ########" echo "" echo "OPENSSL_MACHINE: $MACHINE" echo "OPENSSL_RELEASE: $RELEASE" echo "OPENSSL_SYSTEM: $SYSTEM" echo "OPENSSL_ARCH: $ARCH" echo "" fi } #TODO - 'guess' top dir TOP_DIR=../../../.. CWD=${PWD} FILED_DIR=${TOP_DIR}/src/filed TRAYMON_DIR=${TOP_DIR}/src/qt-console/tray-monitor SKIP_CLEAN= ENTERPRISE= while getopts ":f:p:a:sehH:" opt; do case ${opt} in f) export KEYSTORE_FILE="${OPTARG}" ;; p) export KEYSTORE_PASSWORD="${OPTARG}" ;; a) export BMOB_ARCH="${OPTARG}" ;; s) export SKIP_CLEAN="true" ;; e) export ENTERPRISE="true" ;; H|h|\?) usage exit 1 ;; esac done if [ ! -e "${KEYSTORE_FILE}" ]; then echo "Error: Keystore file not found" exit 1 fi export DEPKGS_DIR="${HOME}/android-depkgs" if [ ! -d "$DEPKGS_DIR" ]; then echo "Error: Android Depkgs folder not found. Please, run 'build-depkgs-android.sh' first." exit 1 fi export QT_ROOT=${HOME}/android-depkgs/qt export ANDROID_OPENSSL_DIR=${DEPKGS_DIR}/openssl/build-${BMOB_ARCH} export ANDROID_NDK_ROOT=${DEPKGS_DIR}/ndk export ANDROID_SDK_ROOT=${DEPKGS_DIR}/sdk export ANDROID_HOME=${ANDROID_SDK_ROOT} export PATH=${PATH}:${ANDROID_HOME} setenvs $BMOB_ARCH if [ ! -d "${ANDROID_OPENSSL_DIR}" ]; then rm -rf $ANDROID_OPENSSL_DIR mkdir $ANDROID_OPENSSL_DIR (cd ${DEPKGS_DIR}/src/openssl; make clean) (cd ${DEPKGS_DIR}/src/openssl; ./Configure android-${SYSTEM} no-shared no-asm no-comp no-hw no-engine zlib threads --prefix="${ANDROID_OPENSSL_DIR}") (cd ${DEPKGS_DIR}/src/openssl; make depend) (cd ${DEPKGS_DIR}/src/openssl; make install) fi if [ -z "${SKIP_CLEAN}" ]; then (cd ${TOP_DIR}; ./platforms/android/configure_android.sh) (cd ${TOP_DIR}; make clean) (cd ${TOP_DIR}/src/lib; make) (cd ${TOP_DIR}/src/findlib; make) (cd ${FILED_DIR}; make static-bacula-fd) fi cd ${TRAYMON_DIR} ./make_release_apk.sh cd ${CWD} cp -a ${TRAYMON_DIR}/android-build/build/outputs/apk/release/android-build-release.apk ./bacula_${BMOB_ARCH}.apk bacula-15.0.3/src/qt-console/tray-monitor/android/AndroidManifest.xml0000644000175000017500000001603314771010173025415 0ustar bsbuildbsbuild bacula-15.0.3/src/qt-console/tray-monitor/android/gradlew0000755000175000017500000001232214771010173023174 0ustar bsbuildbsbuild#!/usr/bin/env sh ############################################################################## ## ## Gradle start up script for UN*X ## ############################################################################## # Attempt to set APP_HOME # Resolve links: $0 may be a link PRG="$0" # Need this for relative symlinks. 
while [ -h "$PRG" ] ; do ls=`ls -ld "$PRG"` link=`expr "$ls" : '.*-> \(.*\)$'` if expr "$link" : '/.*' > /dev/null; then PRG="$link" else PRG=`dirname "$PRG"`"/$link" fi done SAVED="`pwd`" cd "`dirname \"$PRG\"`/" >/dev/null APP_HOME="`pwd -P`" cd "$SAVED" >/dev/null APP_NAME="Gradle" APP_BASE_NAME=`basename "$0"` # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. DEFAULT_JVM_OPTS="-Xmx1024m -Dfile.encoding=UTF-8" # Use the maximum available, or set MAX_FD != -1 to use that value. MAX_FD="maximum" warn ( ) { echo "$*" } die ( ) { echo echo "$*" echo exit 1 } # OS specific support (must be 'true' or 'false'). cygwin=false msys=false darwin=false nonstop=false case "`uname`" in CYGWIN* ) cygwin=true ;; Darwin* ) darwin=true ;; MINGW* ) msys=true ;; NONSTOP* ) nonstop=true ;; esac CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar # Determine the Java command to use to start the JVM. if [ -n "$JAVA_HOME" ] ; then if [ -x "$JAVA_HOME/jre/sh/java" ] ; then # IBM's JDK on AIX uses strange locations for the executables JAVACMD="$JAVA_HOME/jre/sh/java" else JAVACMD="$JAVA_HOME/bin/java" fi if [ ! -x "$JAVACMD" ] ; then die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME Please set the JAVA_HOME variable in your environment to match the location of your Java installation." fi else JAVACMD="java" which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. Please set the JAVA_HOME variable in your environment to match the location of your Java installation." fi # Increase the maximum file descriptors if we can. if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then MAX_FD_LIMIT=`ulimit -H -n` if [ $? -eq 0 ] ; then if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then MAX_FD="$MAX_FD_LIMIT" fi ulimit -n $MAX_FD if [ $? 
-ne 0 ] ; then warn "Could not set maximum file descriptor limit: $MAX_FD" fi else warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" fi fi # For Darwin, add options to specify how the application appears in the dock if $darwin; then GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" fi # For Cygwin, switch paths to Windows format before running java if $cygwin ; then APP_HOME=`cygpath --path --mixed "$APP_HOME"` CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` JAVACMD=`cygpath --unix "$JAVACMD"` # We build the pattern for arguments to be converted via cygpath ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` SEP="" for dir in $ROOTDIRSRAW ; do ROOTDIRS="$ROOTDIRS$SEP$dir" SEP="|" done OURCYGPATTERN="(^($ROOTDIRS))" # Add a user-defined pattern to the cygpath arguments if [ "$GRADLE_CYGPATTERN" != "" ] ; then OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" fi # Now convert the arguments - kludge to limit ourselves to /bin/sh i=0 for arg in "$@" ; do CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` else eval `echo args$i`="\"$arg\"" fi i=$((i+1)) done case $i in (0) set -- ;; (1) set -- "$args0" ;; (2) set -- "$args0" "$args1" ;; (3) set -- "$args0" "$args1" "$args2" ;; (4) set -- "$args0" "$args1" "$args2" "$args3" ;; (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; esac fi # Escape application args save ( ) { for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done echo " " } APP_ARGS=$(save "$@") # Collect all arguments for the java command, following the shell quoting and substitution rules eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS" # by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong if [ "$(uname)" = "Darwin" ] && [ "$HOME" = "$PWD" ]; then cd "$(dirname "$0")" fi exec "$JAVACMD" "$@" bacula-15.0.3/src/qt-console/tray-monitor/android/res/0000755000175000017500000000000014771010173022412 5ustar bsbuildbsbuildbacula-15.0.3/src/qt-console/tray-monitor/android/res/mipmap-hdpi/0000755000175000017500000000000014771010173024617 5ustar bsbuildbsbuildbacula-15.0.3/src/qt-console/tray-monitor/android/res/mipmap-hdpi/ic_launcher.png0000644000175000017500000000576014771010173027611 0ustar bsbuildbsbuildPNG  IHDRHHUG IDATx{PTњ>g#Rkg^FQԘ( ( i>h1Q%/veePb&u41AF4jmM6MLԪ,ίﹻ{v9{gp{9=~> -%2!Mii^`lt_?t^}S`˲`˼qB9;e2´5ڶufc+ֵLמĶaKĎs =;泣Ψ;|/C^x0֔XUJj?Ր J[AKW*iѾ*];lGώs =;:jK[`31eSQ+Sh٠j'\n{wa>zveQ7Q^HSEﰋC>s/4P-.;Iq剝еta=;΍2iwz&ڲ1Csw _TI,G__4t\I^!2@ [`-,50@mP̖? 
qKa5m8j٠4 ܴC6gǹQu/_եyu415k#r K\ PKZ6ڟnga>zveQ7Q6 0%M/1ŹŢ6BBBBB1 A3*rYd>sHXyRea=iB.g ?;ꖶB'%Z Zj!QC.8wޥ6:|"-dOsllGx._6o9'uː'8[z,σ~N#l# r>=K\&.6a",:uOF_ݺE13~Hw<}ob;Ycy-${d={a6y[ m=qV4G|8\AW8yȥ@^yop}VK'k4iK8'9ʹ[Zm9l>'b a7EwPJe)MuQ5OI}:݃LO~ݴ_ză#RH !g=,@F-dՇQ̖OQy2uئTgeFIB34.diPB BbdȀ$Lر@@4G&@AJ`Zp*P^^H}v?HS@ %.҂(2 82bM8h5P}WyP}Rj8(7"8rHZZmLعx:)nXGwU!iG̊Y Y%%=é ŠdUKJeq+>>89ZP)T + a&de=Y]FUvJ`kU,ZRwRyuӂJd*r[=Mw\ -I٭4X٭ >6 vERh9Vy6P|J).O?@@P@:yȬR92 ?1' P\vsv"#$+$!- 1m <c2h6ٵO nlS/3Tv(5ýPYr[I$ȅʋA@2XG€/"cH'i^#z|n;&Nɟf -`n%1hlQ/|-(ʲC3$Y7ϑi9 ]JRÑP Ո5m81p[vE+N }0th Z1'rJV9ĜϢ0'hR1`@V?+kT H;?kO2~^ _x~G󳛣m (IQo mR=;,KZ;Y;<qOzfze- 1Df,\Gr9yc 8R4 @Nx01)6n1A ))񪭞]lJ \<rJ|f*w .z<JyB۴TqcXr򰰀Q?G&#VTwUk^Xa{R7#֮~J YTc{jϾXVī+k~*Vxa_+sb}{,ܯT> FN0{aH؈YZ|+KxXxA/'?0|?#ȇu{x?>d Qh -% +3zIENDB`bacula-15.0.3/src/qt-console/tray-monitor/android/res/mipmap-hdpi/ic_launcher_round.png0000644000175000017500000001164014771010173031012 0ustar bsbuildbsbuildPNG  IHDRHHUGgIDATx\ |U/((W6i(XPK ^/\R m$m~CU\]/W@*ETpmsdNB3'|?|s`GZZOEHlE;Ӯħ?KWGi4BPmMD>(RpL;?}^mquҼ"-ӔZ㫌HKOsߏEY WWk6EoчO6^#q+f0t˽5XN/e@m ,w\G;wFK=Drc_LSk&XYTӖ2 \[DITa6puɁsޖPkm,k *'/R()Љ$mully>(3uު2j00ך+pau0LKZ1FKs+#ĻZƖc]&A.Y a\Y\G^KN"$4́4<Sߕ\M+2)zA US8nrӉO] $ w d$IE卢;uH"F_..Ar{Hk֏j]\kj4¬[Ϋ4gpF6,!IB"b` )TYȔK%ԕxlh x$hβ𼘛 jg=m;Vּ[k{{Kvivn6_~aSw|M6'k7DHs \8P(i L3)?9D|b8={#"|IGr䑾y3S_9bP9'Na$h{mO<ѥw~-~ ~Ar`Eص$~nGcx ϭΞ D_|z^ z""2Z56>s38!Azj6A897.HxgGJћݜ9\1h4܆r+BPDE,wmP$+>; /J4I/o)|BC-?R8P#F>w!#9hܔjEDW]l҂AAZL\2T(LLx~}"L%Gnny/C$RxSSRd HА  B֋*IZ7ASKN[XӍ'ДN%jKJ o[VϿÔc(缷C b>[/wq *?5hc4U!h{o{CM,\ K2OeoݩXJndOQ;o0U"h˜n ^,Lk֟İ'ݿrrTLm`CZ3s|A%΋r$F(樓ϧ=#_y5%lT&mMD 'vqDObZ뙊 $y_%1Ť0OMk> rS".˾$!Zz }p3,ʬ#dq ::$'֤{yPBm4釋UFgc V˃kVwg!.hekn..5^Z%I($L&~xG3K.N윌*pTzfWӾ.Vr)R*bLK*E=\(-V燓r5oﺤ zRmv&EJ©!Z'H2~K,_ ʡtTL596x,<#Ab"j+c4Rr}ؖ9ß_a'%ݡ}ұ۪p7I(EQz *vJZ4rSmgqCcӟȧ={՞jvULa)`Ȏ$2le3*rdr!1MM7ױMU?̈́5X=H}OQ7怜Gwr6%P'=z;7ۛD4TXognPqg"0o҅N.owwtw0!hSr,/:dJvA34"M(tf99%dQfjE aDqv4K{]r9` /w&&^QKiUY65}ܸdߘ$=S ]2 fR*r;AnȁָJ4F,he'4He̱B/Eqxk#@㏰m<:L%sJr=&(UH)c'5 ɑZHZk5?mvq~I-ݵ 꺃:ڣH$aRNUrH#dԝN! 
ꄉW[@lٽ0(Y'够wi9v<$A&@c*xR"֤.ja_aN8qb v ^4Y!öd{#ߺ׻Hh49 (FRY@ PB@Dp DPhs9H|o/ͷ J{ ?)8v|wѶFNҢֆ;[|~gJmkb5SrYoab3rhgؾhPɓ>,!# DPg 9~rj!dk%9ѳt6#7dYr;#bdZi MhI@;$vKdAcܜOrqAF=O>wmw9/:M!\@#Ymr9k+,4bc6/a[1`҄9xsh܅{!6U^PK"i}O]Wf՘ܒ *2R9On 50:z  QCW.]qG}gjK#OrSh V)UW^8>d$^ e5Ga1"0&z'W7OC*BYUnz@ PԽ#dV$.sP*; --x7/9~`vjnB}|׼"[ Vrmx`n_f$e99qf  TDJa>˟^%k`{!dedىM῱hUOڤ:uE>&ߡ`-UEI{GnZcؐw*Uhv *!KZk?Gm/TGx&.6kĺM|E!j,Gr^fa/?ySlrϢ5}D 116d +hgHh a o).矨M'2+vQww?1c[=[,7xQ[g?YtWLt@ѯW^㯦IѪfFEMhoy} fЧԂEBxn% D<7m04}H1QTPr6^́8'[z^L@MMRՖ;IENDB`bacula-15.0.3/src/qt-console/tray-monitor/android/res/layout/0000755000175000017500000000000014771010173023727 5ustar bsbuildbsbuildbacula-15.0.3/src/qt-console/tray-monitor/android/res/layout/layout_barcode.xml0000644000175000017500000000160314771010173027445 0ustar bsbuildbsbuild bacula-15.0.3/src/qt-console/tray-monitor/android/res/mipmap-mdpi/0000755000175000017500000000000014771010173024624 5ustar bsbuildbsbuildbacula-15.0.3/src/qt-console/tray-monitor/android/res/mipmap-mdpi/ic_launcher.png0000644000175000017500000000406014771010173027606 0ustar bsbuildbsbuildPNG  IHDR00WIDATxYklTVAb&j;3B;m:-uC# j^AH4*ByPF܀DTDEHd}LigNe|k;w+w{ ׶f7x0s̕kF|T[,F<=rF%8IfLɧMfLLܦ%fNEr5 Auy"Z`侙gF݃%>T0?V"uӔږEïw;ڮFv[w;i|&FV:HUvuuA8ȌatkG`JL3LqTHx780{6Vs\"*xe>hx+emK#c)}3$PE+ uDn]kF}qI~8' >8Bʄw <R5@Vޚ};odl:8,1)M+s S ?A L,R O_Нc{ץM 81GSCo8-PW8E=q+P+,U  rNStgdSkЭA23rfR}_)}[y+mYo#'ӧ'ztV}?F&9~}b &GcL|S/eXG(I?OAVnCതa˷_ ϫJ `DFQV6%- g(¶9>s%D*,eоt/R&pa/aZ{Sg DrVWmSIU!#;}G;=pv9ns{7oԲ,u;!.Gg\TH?V*-Pt(vm,/tl xG>%չM qDzO Ȅ:oVBQ"< TA!אs8͖l}Yd&%Ǝ =ngiyGsJg9A1}XI 52]=}1jW&.r=t3W˄3.ٚ5펴qۀbymTPy856y\G ґm ȃP@EAZ(q "Rndְp5y@LEbY>ԋ)g|!`e&ȃV9* z%Y|%J$ m:F& 8yE y11y3qW%2 ࠎZʂ29P9+/) Wf>T2e$ʠm(76_B|Z%`*בaiC m %/ɪ'd]%@MJ?€Aùapњ5?ItM^  Q21O+0Y)[gu=6d)k7[0A-MXHywë/t1KrG6"]qͽ8$u΍#(ޤhPr'-Y5Yrۼ>$ o%A(lo xwLk&Yh0,.O%(-m\`^yVQVkI}Q1Ez&p+..k e,\;K '`P&Iledsܸ~ʅ%©AIENDB`bacula-15.0.3/src/qt-console/tray-monitor/android/res/mipmap-mdpi/ic_launcher_round.png0000644000175000017500000000545214771010173031023 0ustar bsbuildbsbuildPNG  IHDR00W IDATxZ pS g$l,S| .s:(M:Sl a:)hNXuXuKLIʕ!Ё-G%m{aK4۷ovW0q? `>#SH$?Q:tN6&TkU:g ln.]ctxFy鉄js[TFk-r`]|]'/N=okk##vADZT$p|,rm_}߇sZdGWS+v*=RcXI\69f_"l;ő_2E]"jT _ 묮k]Ww>B"uiVg@ mšK ;,B/lm!"x lH2cUe+%yF{qZ^刷T->/^Y_a ͜x3ۦ)BKj,ٶR㰠D 2 =*G&rUL '" ǦP G'`Bn@Hb"’yXaqJqT~isz_'xv9i`(jxru_)ELLsZf$q1ڠ2@UMt?%Eg#] :y#~NY6BG ΑOhkY.iP,pXk1=G wp9;NOz Į=VV/jVgW[z0B)rSg"#ŊN 4O.92LCMJӞD?LS(&;ڬtq֏iP|(wvT牦<)f=F 1 ok>*2Pk)lX3PD p Jua!w5aq p~ 8+7y%s,0Ȼ &OSv:|O(Kx) ҉dU,PDSk#7IsHm$ eZ6-ymtXyu 9H+ d^P"mscnў}KŠ=?80 *G3mx#ϙ v†;V,ȱ<=3^8A-c'qzų orI}LcX:6em>TOZ^֋gSz;d:_Ztrr8|܇Ř;\{MEV@ aԑ\mvbc7AN~]wn5Ѹ Cw_Pehma(xܶ-fB9I ҃5*H lט\$8I|mtG 0lYvhp\Vd~~L۝m9QN؟zC>D&փҙ#nƑR@LT"B^ }xwiQvW>FLD9w{G #Ai{#bsh/xդ;[c#_H/]I:ھS.p){?j$a yͦKV< :f㺝}Y܁Ϭ"Puȅ$J_`.1RwAI hR L˚َ&XPg>\$6BU%=p f7Zã̀|x0 }9.6%tcO{SʶXuᵙ?"f ElK6jemw0hO?(E:ab 7dVL uFAK$ϱ~W,eX~;QJ%IENDB`bacula-15.0.3/src/qt-console/tray-monitor/android/res/mipmap-xxxhdpi/0000755000175000017500000000000014771010173025367 5ustar bsbuildbsbuildbacula-15.0.3/src/qt-console/tray-monitor/android/res/mipmap-xxxhdpi/ic_launcher.png0000644000175000017500000002204214771010173030351 0ustar bsbuildbsbuildPNG  IHDRRl#IDATxxTU , ! 4R( EJ:eR!*((2=s'I޹s!LQ4+AYRLCͿUɘbCw"7^|cKR`)T8 @}ށ0h[w܍(No/ܑUP! @bpR$ Pj%xp  `?fȘ-pӊ'zm勝_N7(TS'`LC3DBUG\::Eo#MaiŻ@VB^!SbB&h\`_}Oڿ·͚,s}e䋨ąeIz"/b:ڽM k`k㻯T]3#kgμy.-g  /U]?s(M'5ZyS\pݴG| `ܼd8\1ƍk+\p>1`%5 8(0 4qkGWhg b2e 0 @s hA%GTRlۘl0 0^>P&L +&%SLJ8)&@V̑Y ]ĎB %X)+arb"lOx-#PCxz{3=zϵ&Tnٖ. 
b?=, +ʽ;D7K|yb.F|p?<-)&?!9|PsCؑCb]^z˜[ɢA}"*G%LlԴ& QOeqipt9邓>9q4 ݝ]׹&@/Kht'wg190ЄA' ﯿ2ð̄s@wVXO\IԆ#Wď>Iõ4-!T1Mߖ.<$9LUosh?Bv'艵xWy>؋QQhPDnfϗ:_x< 'u F8 Did**ΩD)ɃI5w_^|O;n4pt~:}ZQ>&Ƚy{SlT'ӻM~Ŋ=} =Osʓ)4W?A%C7sG:.lP-45):#x:4P@Å'd~ btȪ9 6{`e<@h`0Κ <֤?4ruwy- `7(F z@pQ.<^Z4gEpl1C??Xht"&Estbs@%uM)C4 m)%lޖpysW'bsbI GK; ׽7j\VmrA:t~ժk9)NG?l5ѹs;śt[̠i1rR`|_C{f@l~wΊ͜[+E1hE ZFuepmߩи|08=NwBLsAb$+0Ls'8DBS MNp#V*gVs4!h ؞4h8澳͉c g3ޝ4Z({Aɴ/PϣUSd::L<&:6:FYkrh46ƇBG崉+* }3b=t yG(h˻lq>|y3hK !(=yP ;KJOc1@,oCЉ YO颼8fY))l-lbϥS'a%­w`߷_78t޺e9|BFN I=tO R#tk:q2׉ӿx杀c7C'K"ȍ}v'I9"/xQhN:q4Lɴ/#p;ܝISıg*@ ٞU* lg`=PQUu^,l|/S\>Q_J_}nUyI pW @,;J4x2Θp;(H8y]:~/1!?.dY+e|A w@gh~Oďp18ywXb_'~:)tnGԘ. yқ?,fг;@@:+n3 =A)@ \g7o6XIˍܗb.JȬXH9moؤXߝ&Cg]FnhItOriŊQqyi˾ sRJlm%C4CSΩoOLNk>ƚxavYnρ5`=$egˋa'\̠2*)!B:F2ĎB{?z;6PCarf" Lβ끬$Ǯ"xZaJ~kO ,<#شדE &aТ] ehhFX"sOAHX3u5fЌ4[~ RPua鱶  M؜ ~#=#*Bhx~4GL@;%cnQ8`@I^Aw]r) hESCC.8 ;.5046)Ӱl~M"% )ɇUUClU%ߖb~U" Ю[N~S4[$0:XwT${zEYֻ0葰U̅hlR,-ʇ) ^'3qA agxzd pUVB1!KC2޽<@ QdCimtt+KG"],J`w1sx,?KN[:ڪ5E37єeCd _:C^w" ^T@th:@pNW=LP`Z5: eN7HMQk`j1? A `z] H@1;̘_wY{@ H Y]HA֙63fAg5"=  c@eMOii; 5{ .op0CLF9nFyjb 1mZ24 0S ̯ c0LǚulrWCjw C '` Mi<m $hpZJh3mLaFs&D3#j,vȄp=*T]/DO m%VQL4 sR}~=Ehd'?es(RChJ]7l4O^{BDk (8&FvR JR| T90VamhI-S G1k*Jzk49؞~? veO天+׮ysþ2xeWv\k߯ ugZDyhUO= ڃ}o/׋twɝ04tmUМiv = S "ۼ5&V2(6}x T={)o)KAĄmY" VAB;L{ٷI䳫B8%ŏN͍)-R,u ʊI;S̥%c >ݚӲM{w>Fӭ)GKBE;mBMsSŊn[g*(rx)0h@CXo(aVtnԕk5l퉝0h󻾝OZ*“|omB_'EDؑC|y&rSq"s`|g^(s{zo~tʀ9vQ"*RXYVhZة)hnule̿HQ.Օ*k^b{r6kͺ@MaSAMs` 4@fcu?5Kz+M i㣥kts4C 5{MuT hEM͞`oqF&t9vU:=N`Np:) 5`lIJ`6?5o'{҈Tτ5;2`֓5r>:Y:k`51Z Iܓ'HHf&*3T0h~/ةݯ L.UL1yy ^UD[L㉱4Fv賛4?P)vgD6A@|~$%@f;a!6q!U :ؐN$7"h~Z0S'fKg2i yq ~__/vĮx2wgZ4`+4G\/5W0e=kʲco؝!J'K2Pq.2S OcS< ΈM+E(Or7D('=,0:ϧ1[Ed)}K[:?6펤p]'l$#r`>]7^OlA;޵mz+z—h!k t}g%BJo?pXR@c-"KF;@I,|U YVGᱫLp!eȬ CS&ب^@Q}z_m? MYH޺5גm{_7NV- ^mw䝠8*&`6]#OWAyC84t_67[P]oynQVWʨ=T2hoVMYZ@VŜD{q|DԪ e&ޏ 46 Rr\7 +aH\^]BIY%֭o{_2z%m[<0w_`HZ(=FOt۪et'MI1gfI9.+QĜ_cf4NjmHRmŮQlm˶ ")6ځ6}xi U)+˓hJ`si1ڟ<@ >{8=ԝx*c|\GA]?VZu~p؉d7axF hMǚ=%0m{XQXʊ99?]Ĝ[g*:G2QCiʢ)zD`ME; v`wڂȭ84It @ ~V9-0&5Fyn[̧OZL!)^N5?&. Bu{Kn|lN͋|n3?o*OUU6SUUz~_|QbRժ[L[^nҪ/-kUlj۩eOZV[^Y^5pU{ _/NQGY>ߦ.IG+ z8``/ؽqEZ_x/u CY~~Yw)жMVmFѣ6W=oԜc:W1M@/؛; G+/uBpGc(ڡ:(Ӈ `5kuRAƍWTsqc2].Tl[+m+^P~#Y (޸RJk;66 .Rne*d_ k]+YB׼o\xec6~]-ʇf5?9E'Z֩@ Mz`5oq… .\p… .\p… .\p… .\p1_VJ IENDB`bacula-15.0.3/src/qt-console/tray-monitor/android/res/mipmap-xxxhdpi/ic_launcher_round.png0000644000175000017500000003624314771010173031570 0ustar bsbuildbsbuildPNG  IHDRRlkpeSٛ.~ynfչ \AopΞ,>su.FO^c!>0VcuBnty)D<j8s-:#p6=ҎG5ܛ+s`f5k^[} ZFdPH @קgJ# 29|LP2^(Gf BI6q8[)#ңkZUUppRO8U8ĿE՚Ÿqw -g+(óhfqnE1:|C8 oPf`pa7.#/fVNx8%;5@_A` -??峏ѦI7ϷJ}4 ̱\#߱\Aad?`9$}zߺN~{z~qn|N3';K\VO۳ it8ͯ8Zw|Ǡc9)}O-qǍY1U^@aRIN 71^_M<)Rv `=0bR<&<98&#١$>O`Mu:u'~xjBy࣮[?i4'|L~^S т[Brۗ5ft'[r^ wW)1Ņ ]IzTV;o1[w{azCh0r<}!7x{\sX() " ?]vԸ!(8[JU q<݊w;JGoWE@@QtpGNOc_h>E/ r-u9 AN5yZ >MDv$d)BW()a z`ʮ4LZ]np6"+$mkn֞+_w|&z,O@=\ʿ ߣ% hXX prР3Bw1P .6,? cJ@]n55H,K}(5R WPϟ&0 A- ?ls-LG_|JDϽ~` gpUszU=GVXߘ3tTWA}kn~{ݺa)Io?Rzy|N@p.W~N@o٢M}6:\o_rh v-w+mP \!]twb/Rs \,p0([:~_?b>R%|zCF89@E}47 {#"Z47ڸ=-6Wjqr2C*]=44sӥ&fmLQ&l^ %bt&swKcHPk/#ZψBxr _ApO,}MO{Vy!^o ;]mH ӥ nɑߐ`W&<c5^WsSLMC~;5`Fkl\iꓦLph|Sq)<":A(H =4%>i`>'EFCM7_j|c>|Cҍ _f1M%`VE1E1U}tW@W#ݛ+6[F 1=B`%>ןk9zЫ@~j-TХ{wmv_wxS?xOw_?A;.#cg|fSW J?j# Lv>o%s^  Bi;߿! 
Z@p=iմx}ןZDGзx_KϜ$S=^f~ǏHzE׈ջHz`?vHD rN2iyuuy bѧjŵz]S m5Nx>mcLh2cqG:D툫Χĺ?=ūc٠&;="w?EQDjұ4D@ ~x-c³ Jn.=w7~5ؠ7S ҔTq87 >ԠRfApnӡU=X7lWH.c>$7#o\[w(jWfWo\ŽdwȔ ]rP A.6bAAvp˰o+0pۇg-kӷE<ydV6١'큖5j#|VcͶ%epf [=^VV@=A>wFlc(4,]Y{Aż{;4GL\ $}{.v ӈ8 )NAgV^q@ y#{`A1goj}%,i`7 z\g=#Sk30Q-Oۣ`k1F @ZLD@x@Ka53uK~/z3P1]w^6H4LfLG%5J`]evn0 2C50xzȾV~~$ׯfG!PbüwKu0ksU<n2}^v x@KOuE&N3Z-&DZ)>h* T>|RPWg%~/0`\;kΑB:"LdYIya 0bi |)Ga,yPQo]~VKMW/HfgAm-٨Q-`V{VmA؃vg|)|pmut,QT`)Z~ zq_Hq,|Ge#0/OEWĐ ڸIKP4 fBn[0?s`F{a-M,ͩ) S-7v|[BI@q1W3˳5wmq@IqsX3ڦ@:ttK p~(1±1jWF$k Z >e[,ߙw>5_QwKrUߚ$pp.|E"VC|W7cr`ǃ.^5ͭm.xhL;Y$7xDC[Ay[0) %%?1xB2ɹ -Mv<P\zđe;7'CǭׯxśN~hTVT;.^T(3ƮlTv>'k1`{t1174 ꑩ~GigmxF|Q6TP:`U"~n=DGkAh]@SY%=jjAZ'/kq;z>6( _mSƏ-S=ysd<jQX]uC7M֭_~FUEBcsR pBB` xΓ' ~yN35 skl:WRcHWWbUlr]co\AݱEG)Gh u5{Rď6%ϵxA=u:d/E1CZ>6J?KRA`0t9%2#t. [bצF+"(@?iQu,T(X0`}S2@fxY=i-_zD0֦na6C>꜎Gx`>W2{=@N2r\skؖh\[ hr=|ҔXZSȿ Vm\QY# bæU ƚhtSfB1!kkk%>bJ+r-D-[JsOaqnB0r3ͧCӊpg'א=qL=G{4FX,lW̋ S-k3_xRvsxwvedVqQ=6D{?:O({JV.C~ر$b,D;/TXK|ki2ɄXl>ء_?K n+w u6]8v^4d]/n?h koLbKTԖas ^O.Pud 蝌DIs .zAhɜW9=],>8L6s3nZ=8x*64.7IƖ"cc-./B-Ccxk8V]z ~ako,-"Ih4ŗԆ ^{!U?|#ĭ~ɑ/gq!//CpL0i-Fev|ߦOXlifcNи6;q@a ]?|a`^Έ#@0$;L kyq.a: k1V vu(7!nh4*U A2?ΈGn^ ڥ6f..VC % EUe-(sv 3Дc4mxlnh$ Oq5KF=+ ^;ħa~kVDfv[hYt6LV^Y 7 M(`ؗB? J~Ev3@ &vgHvҗ@nIF j$Vz$?kbp\ΦAd @|( NwWsLXVym?W, thQf;OWwzaVfUZGѱOoH8Uh;|NVv]+I!_gfEy3L*uEkxUx+Xp} F^X0[sމW4Y)f7@6CߦHdMC,wrLPW7|/ xA @2UT x~@M#qK G7f5?Ъ]u efE $ W#k*=Q W W'A@ӟpmWc2Zwܶv) O|_%<_|_1:_ A88܎:|shj>|ТS0GrO7HZC\|_)<_|>I ?GpغW cAh}ڱc;2%xw{5 WWj)~.+K%]##U*YܠmkՋ@Ӡ^He8zwgPZ`&h"|H=d5mff'뮴O3>ZthvKkd7Hi /.~Nua UB1P_3+nhYu,Z[ـd} 4<$DBr5(~Z4? v.sNMRmZs̺t8#]7'ū x@Vĭ=įmϘXwkyy2hҬs.|?Af۷7kHߖ kӣj{Ed3hMuy൮7kScR,׎%#/,&C$Ue=@feX< 1 Vo h,姎#|ZbBN GK}XBjKTfțZZQ&Zu MrGj^wJD{|I"$hA13mI4dҷ.-;[&^v$V-Ga7.OoՕHH -Bںb~,~$;WjDb?WAϡ;p<-8r}< :s _y ~'vR 5""x0䕋()X;ʎ3V!kqƵ%%h3(e\%}DTҪbF oO]Zlme A13&>=5B|0 JkU@3eK(uR$KR4lv1b9m,Cskq6,N ԛF+NPvvrZL婵^u.Pe CݱC]IOչ9.P/xh 4@!-^ɸ7 \^(2pw.2* ܝ&ڌ: h'`Š@A$/rIނ`!`A.bq pw( xLOhhN64#J u ݋lV,E4ض_e+k93F:MڠP^I;9ZcQ$)@Ju AC1 qIւZtp * P<VB` Q擛L@j 7ů1:_B5(?mw#%߁j!_#d}G:OkGl9>N%z`)h=x?:JCʃ:LCG'ώio_S- pܶ![{$E/`,`Krc=įO=x?\%zOp:u[0d~@sLM} 6t }Hh3GȘEݡz=ů[ů=[~tada/>7mS)Q,],]Oۀ%A4Hj1E`]3;w+Gg+'+@}/q7ޏGΖ!=scbKVl mvG0O=5FWANa XX|_d貘<_!Lxq ًlĬu~DcZ̏}ELNl/` =+A@`뽆Wktp\- 1W0,7Y'\ 15;Ů<:e~z߬E X s*XW P09zoZ! @KXSntM:;$Z3ET73 w Xxn1 %& `5zB4\̢:g&GɈ/nT["u@L+8^[םxR 2=+TvEY>_=Rvmlܛhh5Wy@Y;jܳÜvmTLM!\)MLx 1u`Nq>9qBwDS@ y(u{XTP'~ޯ˛SA+T3vQ z,Nq AI (Ioj cZt}R" 8܌~34BҲ3'c򮏪r fnS.!{H]/dp{u8=Jx@PUd|nE6kAjZvy~+.aHP(B'R u{Te ۑҹ7[XDSAnQh(;ٞ[(b?dt̊"|V<^VSq[{b^BOK_cPm% +UP~Y Z[|0h_9_\u 8F hӥKO3֝FZ6;>`I6̻t 89#w7Idzk! z]WI.t~[!Y hHF)\m=S|k:CfmwKPx-;-:8A/7 ٸA1;H'lޡ-}ԨקFO!$JicKwǾ %2A" a-/gnQC;р--?T `W'M2CH+[}N |B`0( aBx jq%@z4BO#ev '!_z̥MpAzYf~C;<1O18ؼ>g+5$ T=7@fh#p1l;u$c|a\Ih:v`jQӾmZrѨELr,z7G zU G 5w7lqZ~V+ZDÅ!4o޼oRO,R 0!ra@ eLעeýd|6=#/g&񫂀fm۲<ͽ7>'ҩ<\D)J0 Pu.v1-ߋ GLӖ; =Qq8}̶[#( Uh%: @'$+ Rx c=CIȃ$odεC^@3U@8 n=%ܼ7h޼c?'uK!AؤPs{|s_zdpʭՋ:)_9~Ӆ+nGiR6.hq ,z2fֵX@l2(@Ém:J.Q4F,dq?]fosՇ) ^.O;/dz 4wA_>v[61hc2!y)է&5">-fuo*Rjx'w"-w?՗µ%_k=JI[7KĦJ;{1:]nlmkFFq>Dn-@̶ Z0F4bŧ;e뺚n f]8zJVfӋVߒ3Mpy*ll[&OiӇvKL ~8*6YG%a#>`w`n+䥩@aTĒ%~vkZǺ;$׸B{;;#u 3Њ?nK 6#0>eH{MQ*3—H{}2hvj;ž~#Ɲ5Z@>]fiDF"I| B'v9-6`m5b]AC @0RD*%ü*u}f=}JEeO1x\'zM9  ݢΜw[`bP'?5-߰n aP< D bM'0W -=.) ;7!uKvNI15 M`Z}4'>Py b}iL`6m>Cd {l|PPpn ψ|D;6b/k`ƩNqZ>p-Tc[|;-552<-?[^$}Z0%0x{u3|Ƿ@14 IA ~o0igmi޺a#W&0XZmݾ\7 ZжVV>C|fM9ϮYV سӓbOa@g%ݗf|RшdR zhF^=^?FGFoDy0o^YUJu2i͐f±{C gݜZXk#  )W|bMbD,e߲Cmr=|[ou{09oex~łkl])M+qܲ{~r޶Ǡokkwúw, Y^m~9l;~ f9~{zbB ֝+FϭAO_ֈ+P5ul!ɺWhy[[Yome՟W > "aV֛s@+=yѷk텬+H{. 
mО,6ԕprWbS1vیR61A;ݷ;.WݎXl8D=y i 9'f``{+Vt.- ?WgGO{wvY\ " 5u+,5i*Xlq_cׅ~֚w{][sZxED/l "(ZR1m9 sm8"oEC ^ y0X8s 1ȸ~ b6BMHMU\<(IENDB`bacula-15.0.3/src/qt-console/tray-monitor/android/res/values/0000755000175000017500000000000014771010173023711 5ustar bsbuildbsbuildbacula-15.0.3/src/qt-console/tray-monitor/android/res/values/colors.xml0000644000175000017500000000255714771010173025745 0ustar bsbuildbsbuild #d32f2f #b71c1c #f44336 #FAFAFA #F5F5F5 #EEEEEE #E0E0E0 #BDBDBD #9E9E9E #757575 #616161 #424242 #212121 #de000000 #8a000000 #61000000 @color/material_grey_palette_300 #d32f2f #388E3C #e65100 #F9A825 #FFFFFF #000000 bacula-15.0.3/src/qt-console/tray-monitor/android/res/values/libs.xml0000644000175000017500000000121414771010173025362 0ustar bsbuildbsbuild https://download.qt.io/ministro/android/qt5/qt-5.9 bacula-15.0.3/src/qt-console/tray-monitor/android/res/values/styles.xml0000644000175000017500000000035014771010173025754 0ustar bsbuildbsbuild bacula-15.0.3/src/qt-console/tray-monitor/android/res/mipmap-xhdpi/0000755000175000017500000000000014771010173025007 5ustar bsbuildbsbuildbacula-15.0.3/src/qt-console/tray-monitor/android/res/mipmap-xhdpi/ic_launcher.png0000644000175000017500000001073114771010173027773 0ustar bsbuildbsbuildPNG  IHDR``w8IDATx] pיcg"vMD>,}Xp.YTBbv˻^5#iFC3FHqLbcB,outJ{E/zы^TmRucw}*ڂV%[3ʾc5kf\q))8ej""\35'>rM ;cjd8A1[`}u39*;7;;RY ֎LU. Zt9>Lσqu{{ykؼF;٣mXdlc M۵{569EM>W9n׬i7>4;mqN/NCmT_s~x K{b^9jX{]7Qi]дK!_ Z އZܚvI5FsRv?sb`3h1wfx5?Zk+)V7ˬ05 Q]-j:]rM9IBhxleK] Ӝjvv AkDF*fӦ[솷̦HukYo*̮,:{!oDb]LTKdU'>d[7ݻO`X 8EB/ݳ]P꛸~,p nyS. .{]A:I[76}pi@;=i{Ogi|·wMGvq v]B]u\H0eU>L/dU j_ǴWXH5mQ 쨢pe b1eqhjvsUUט:'oz]]]]]]]]]1,MirpSjdkdEx>~z;21- jyKSr5r. {EJQUraV]˶;[-:|X^Pӣ)slݷ!A`=OD^>še&7WN^p[=^s#smGذ{ LZ@Ch7 j| }wk袃"YUoUŖP :4eS(PA(=yv+٣vԪԎ`33&bL }b 7q>weL'ra6>>>ΟcVv:/j HԀ?('$ I|Xb,l +Zkr(ǏސpNcXb:l`ԃW\u\:0|S)@ߎ$B}Um/g')>Ql{2KsYJFdCE+oܾVSc~fvas0*:@Ioސ{Pzh_|c [O5DhgO:}Fn)ckjO/}tZܺZ6j`rc$B!a ]}6`|a3ru%8O7N8uS!KEGA'ٍV_k0c "cjםh3 3\ԢjR, tJ#pK Cu1d/ȈEr%'_"y*\GTK%Ř/([6|bȮ{j'leڌnJ\ l0( cg da(AT[uX& YDdF9Oֱ,rabY>3~vo3[@* w?AW.ԎȚU9Y>VQ2+FUEKaa $ 7!z+{n_ {cnj=򿿧-#[ V6ְr`E]XMW\RZMD A#pQ-ͥ+vģ QNP!v5˅qgAu,-Ba%jmsgbk<F2 @>& \"U$ UBc YDKxDCx%(%(+#"q_JUQ8YRU*Tȗ`7(/td>OxBc ˈ(Jbq^:vIii;$$}C[J)/} :?$VBv{6j"Hbk([~=l-XFB|F7"q$(8mK]:KgBd_ڷ)-YQC~-;<::euVm5<>RhfX ۭt`` ,V>}t>9Pti.i4 G(5X} )R b^YjdLÜP1?>`f*!^nyf*ID W"!FJ+@# BXUD/?~npzanUJԲdKf\ &|5#&M$& $"|@="T$ CLr+b yf ]o|{LL4]oOp’D"/@f+@9U@'Lf_\!OGsZϏHD0pKPp(Qpt#O38 p n?kpKL5<]pT )s$|K/bLa0 kXFb: #Euq kZTK)$D+/]TtCTMѹIE(؁x'|Cqxnkl譵6CfIa>@ @-,c ˈ0 Iug|?]kujBF82*4iZJʂm˳WfWu6xQٵb&LsIj+TiV޵jT+/c+g_aG VvOQ2j+7t5I[~Xsgr"ptĉ!${Rr|`YߙQ ƤѹC.ls_- ߿O>󎬬q NmNGF FDu]wG?c pnkR*:7kQ&L:a5 F:\?77E]rܗ8n pq]׋^E/zކ>iIENDB`bacula-15.0.3/src/qt-console/tray-monitor/android/res/mipmap-xhdpi/ic_launcher_round.png0000644000175000017500000001567214771010173031213 0ustar bsbuildbsbuildPNG  IHDR``w8IDATx]xTUe!$: RTP" .X%J!u&( VXi"*.̝nyd{}s}9ݗc-"##[yyBWK\C_DNjC_t<v Yry{d$T%Ld%Jg΃D=Bɐ Q&Z6<@pNAI$]%KE—RLO MYs\8akglGʳgȑ+9Nƕe{آU U]V@PBc ڡi k*kKU[95ڨQƸ?VQhyn_V)SUJgeJXYY[ /g!$(΁skQpփ+K#N*6"OGBR~Әps4RspApM-HA r|켁U|W,jm#*IJ\jZ[7&b$dG\G]Qgkr[z /?U,,S#TP+&z2y)$7K1[㞚}oERo03lf'ꞃ{я 53TGzn>'AiiC_E`@0Z[7| `y/L2CvGl#d坍9e-?8 y/V BufWHGt]bsKYpv؂lPnaɼP:A:#'+ HJr%nM0~Oc\MU-H?a/\Ė^l2QT }ISki_i[TT3%-ݵɤ /H9el\{ȹ0CTKRmMLT*U~c၊ǫ `Uŋۘ{Mt|tIjuX~;z_{Zܭ4ʿuɿ+2eJT3[kPtA_No:U~LGOk׾(jl·;|>_fXrpNaOU1k\oVf7յ/\B'ӵĔ FB)Wf [s=q<"zi1P&V/L,ѠZ4 [e?4jcWg<~"݅Q@0{p 9SWbj/lǖXS/SFH߹ \ idRvҚS_ x 1v4 BZZk 2;~Wfw'Ԙ)HI{M ^lJ8Ohn2ϊuK;.]h?l#tF h9ڱQ LP:Z Ɏ0ln,܊#]=q:uvى _"-_$VKꝰ-`grl1&[P:g ? 
woN}Xo` 3 ̟=Tqڇ }> Aߠ,ގ>Q%ÿ-u~/<W }>Q_}.~m!-?ǯuO|پ5wG@oX|8Cq7Z-},UY_On{{"+H, E*ytWk΢\BV[p<ն7x7x7x7x㍷j~y!IENDB`bacula-15.0.3/src/qt-console/tray-monitor/android/res/mipmap-xxhdpi/ic_launcher_round.png0000644000175000017500000002466414771010173031404 0ustar bsbuildbsbuildPNG  IHDRF){IDATx\Sq  K]h_Eok]u⪨{ܨUmêZ&d$//yws={-,̇0| mI$7Z|~c>BH0#jB[i=|5|Rp5`Ұ뗘h Hp͎͌ˎg?|W#$T3PO4Spr| 2=_R73<_K=DED?{H+ @qySU@fd4}2Gg=$?$_X[K̚*|)~_YUY|D׈5\܋I S;dq%%6!/TmrC,H*-:~c!eXrD 6Ŀ}~ͩ#dΏdv7G{2(f>m&yȀKI?fl_fO'Ÿ5o+AE{\Pp75ZkHđG2k%# K]A دe|b,I[b2㑪ﰑ40CwSO >JʢǙÀȾ'4 &#VĶ$L\{Ӡlf L ^ I7>/4Z7|S$D3ῃr=E|2 ?G7e ưT0JXUT<=)ɚ<qYqCd}=?NoPĪBBA M+Is\S&~§nKMAP3@2M#݆:h 6RuZ'n3Z'VT=MILᆐ Y{}}ɘ+SPc &FuTOߘ>=}3+h+9( ) a{t2nI j-Ԩ7YCYqal ͸J[4k(|ZfJftxUċh#m;yjٚ,3@삐}7<z3ùwޯV,SgJn%#@ft`d~J£/p {^,v~f'"}%9}M[TUWVrو֦ll 5i|c7[ n mer- o 3@&P6Lm 'kS3e/7?A&O ɔ$v#Vό쑼 03@S6IwΛ.k>5q3aUfLl`;j fQ1k3@&Z6Fs-vMNOZ & ɖCq;ǦU#Sjl^KOj1dbe+jY܆F7eˈP2deSCWה#-bܤq"3@OV$m9>Ԕ47=&m: -(mD ޭD"JWbUhd&vc3 =u<2m)~''Ip+ KFagNn?z=~.sd(l-{f`vlo-DZ(˫;6 WmD b'z$(ϣJϝD1_%Cp)l9[n ~X2Ze *YI*ѥn,+/4.;Ìi< MJQg;%T CU2?s>{;4b~mTyU .½Q#>"?u[(h{iZDs5_~ܟEB2IMliLE￐}s3_7Gx%{7Ԟ/EЍn%FnW@s7c|Λ_{`3y)D=VcCW~RA<Y+mQ[sq6rh5hW uzP%^Zl~*?J Y~GO MrTp;4`T+Ah4Ǚ?|b4ʎ+:/݌zTA/gha::4q іs:xA?5q؞_/({SӀBWE]} "Nj=@MQIhкV?s`6Xn_hjAfzuqXT޻{f&f_F,1$޳։U,rM/nn)7~A+??S؏(7.|,tlfK]>\ꄔVxpuiO~?O&Xd1b@~)%ϗf1eBuU`DXR߾{?-H . g>vnRcrP@HoG`P;S'zHi| ׯ>uH?I4U\$Dm[3aCh ήO/1H̝_~"uTC=mdѡaCmW^@l0'?@e9F(_ovbxd3aPm/&VaW;=7Г֌t|-/܍MY{Ot_T PϨ7m_ѓ iyl|+FE+GmkCýz. qj E߳ҟ5@=l ~1J, OGC3 h/gxz!' ģ s mR#c۸|8豱?.tf]v!Rہ=?~lcX5I!@=#P"t H oV2Vr^8oSo#F/#ܠfp: #R=r)7@1;$VOmΝ 1WT+i7#@ e?}gp~X{\[oQ >,D7aM &Er>oFc`Tܾ3a9}tb N"auW[L~˨a ä1>1J΍m=L|ExWGv4sO1H_m>uܣH &7@oz i='E=2Hl q?}Odb!Dž4+ϟװYTgMIw208*JOgADS[Y8:ǯC<00-zT@ Js}2qLmƦqH$NϪÈ~p|C΍ݻk\{MF *XC+MgV-N(@hUfw._B0qܕY xy^q;Q@b@b&X‚G}_);9_w`dHC%R"D=DqÁDl p:]^I9LN}&יaLC#Pj4#<Q QF[s#,ti??4"nGz -8P/c%w8Žװ*r09Brr%YpI*d().Tɇa4|(ZP])' T-TE$)nqЙ:u^'fyc}Bk Ig"uQ䦱:]VI(2<3ͮ*FCO*$B^"PܘW2HU8R0@!(Y / M38ЁhE?3߄]$Csd1exf2@Z£ .,G"R|g~އ0 fvYXd ~WH QF@x 2@!-'"iތDhvdF?^uuzohlE9 @EGxV2 /5 uF= 4.`Ƽ\6 ՙ31Uq=@8K'xQZH+?8i۷5cTZG<('2xU,/a'gxag+<%<r 5*A"ψM7àm̗Ԍ988޵}O]dG4@ 3@  ã vތizKm1ކ3cxN6;/lMbxek잸gSIhNE!3(e9E(lF%HfsR|0KyJ$$MEo> LS!CdJe!wΈE={GU' [r;x;v|x$*oڔQ?6ƶy`QQ@4X ZS,̂Ϸ!4WL|ޙͪR%=hch*E)! 
ZlT38 ;yEEsq9PAP4̭D'DmmkkۉkK9*maaݾkWGrZmJN4n0c*%$QƂs}l͡}ؔ FW"PFyh5zC),+WU 6W|l4DJ|zx?ڀI&J)4ϣUAM>3P>9U>4ajaǗF@EgYVdN{{\sߴ8^,7J`P!T Kx<WKx@&c  Yx z& g큶m,I @gʢcGBn/y^s B2c *،mIJ ,@0+JEv[ v߶ Qo?p BPI"=EC/y7<$dD*?(M!``MBXREIRU _*9̐ ίIJ ذGrmbi(u@@[gAkWǚ W>CLT=y| %'*H>tIo+" 98Mn)<>C}65Ҳ˧T{ĉDڬۧ/@_3%|`a:e!*xR%AP3:um#sG4Y89y;6BNXHc~:dŽi u`}s(rN۾DI "G 4c6BrO}Dh-h!`qaH IE}#)HH2~Tt R2ʘhLq^0D~'ԡ.iݶhlFcGyXv5 5nk>Ҟ gVBjl+8 y`֭X k?6neѺuױ)2 M"s݅nʊhû͙aH$('~=H"5qK%VyDzEmAnkq>U:nיY~a(6ӹx3J7zhIUBc${FUHKxǁy`1YImGNs+squemQʶ}/C%+_JF5HEƴ |[LqGl"aZ̮,4 N/Rb_Ǖ֍ VO;8Q'j@e[vnAP^C8n-lrQaV; δZxw1( U$UwN}=+/}ȠIޤbsN뢓Yh 'RVVVf6.U"7w0-du;=1 10XD7%$А]=63CF\aRee8:h4:\Qk;Dn?򷲵>Xw8`Jź.[tm{j&*7{P N]R8$9@؄ NnmLzNқr۸뒅flǁ߂{bǃՎL @k$#YމXHt!>(E7gf;~YIJN}@Af}rpP~p@^_n_No&!Jĉ 3!YO*DP?F^;Pϝ,<Ԩ.=g{H9#ԖV-s ?=Vh%h~v<ϗ'ׂYqV.yݥvo|B$z0R0hLYS`jRFt4(Mn?q> 7#kEv/SMӗh|ppp ]iH YѰ}G3Fǂ-$05j&)PX0qӧnxAע+~T[>_}ڿm ooYߦϮ[O貋Kպ/S;ofjs¹[n=^/D==N,Vfp R+#hN20ؐ' LvГ4^{iH@~TC<~d{QBlycAo GRk5=Ie1 )j%Y3}(!Pt܇t> ";!& `s5k;{FQۘ1Q[g:PGՅjw^Ĵ@㋇S@xxsttװ״` m4KZӘJ?IBMuh) H@E5kV0OfhpdeE-m|k5D7V2b)J w@##{6IENDB`bacula-15.0.3/src/qt-console/tray-monitor/android/res/drawable/0000755000175000017500000000000014771010173024173 5ustar bsbuildbsbuildbacula-15.0.3/src/qt-console/tray-monitor/android/res/drawable/splash_screen.xml0000644000175000017500000000046114771010173027547 0ustar bsbuildbsbuild bacula-15.0.3/src/qt-console/tray-monitor/android/res/drawable/logo.png0000644000175000017500000001105214771010173025640 0ustar bsbuildbsbuildPNG  IHDReʓPLTE7 p%60, ~fOE]tX zo#a}>;eeeӚʃyyyqqqppp׹))) vvvhhhbccRSSBBB122¦===ǻXXXEEEsssmnn]\\PPP777--- J&%%"""ڸkkkaaakR VUU ŌKKKd냎:eeZ7YY$$?+{{{:;;#ﶶ{buuOffFee)SS DKss//qxx7FF#FF>>44((i?ll==t+nrtRNSd #u_ܸT8,ٚJ zlcS'tgCB⧤]:/I$ˋxbmIDATx՜TGw8P;{]5c,A.R* RPT (FKL4&&ٙao &/e}7;?<0vtgp(VǺ~0,m>5ދcc8duh6-' u][ց b#z~.t}[byšц],Zk_ OqőY8By]?=?y2՟clalYzQR=`l6NPKA쐾MGW6bx2[ mA'Ȏ} v]\x!-y~DތMaYʩؖ0eljK_*Oi:4#:VzbxmIs-Kwt8B ~ \nA5^c&Fnx8]pUtL1-v %f09 պ,st,s8~Yy઩ D]qg ohG{֙vڽeg}]jkP`=Q5/[}X98mLj rWqa4TpJ`e)U붃wQєM| HV0Y.׋PڞhoPʫ~ךw`6}a쨶pHmEc{8#aMӐrRHF3ֵ J@ >Mk-2mlӠeO*\Sهi~3.^Jt:,7~R2ʌWm{ 3"lB]jWVy&{ދЙqQi ueǏa\A"_؎Fg >E||8O'>I\(qK#_ I/["APO~kc&%(l%䍺ۼAkָgY6'iCh@l.꣓bzq_kخ*)5kT$6"&9Y׮%@h[&$fY.`1h֪E6"21l}|sJ5u. GS CA<8Cml`d gWb9Kq~g@d;@X)Rs;9;y\DZ_\y@/Lxg= 2B6.YJBYv;8uwmKtC 9?EbqG J /Xu 1lsڷ0r,e,+L^TY@/AD8@:wAb9Jre*𕩋R.ml _L M&H#aɎ5]K\KJh˾ֹg? ~ !/:o>Y$?k-FqFuTIGCY^4MdYY>G|'")B<x`H/D""TjusE#{q2)Ւո7Dmm[\JKbQ腾JAFVƮ)(tgU`?+2+M*&l6 ٤-`v b.]ZFěl:<dŃ<,2V;졶6i/Rq-x 3aJ%)9O;/=FVB$B*cGe'V"=?IPSQǔK{ֻ1xgyf`Vb'>H*CPҸ-Ϻ-$($Ġm a^{ShL݉G^(g0_DN ^ ME/ PFdǓsni?*Dƃ4o{ƘOTC8izflA\p SNIHv};씥نͫaN9 )E fmT@v@0l(lQSq3%n Tsf-DTVFnu`(\%:z%&zeh($\y9"MDF&*w:*)##zゔGSQEے'cib#X_ z wFjV;~H 0  GblP@)!0H Sz`R cyZE/`SV? DC([,~ @VYp_e`HD6s-LUUHv ~мuB]! *|R˙# /\3\:ҋ`d7 Te{G9\`ҶSO"@zⴅwDڀaI`"S@KTUN u?KZFA᳨ E*"(V,P ,FJX1D >qoE1iYE*`YPA#ƏjY'"_=)ΚB[]]uo~m(qaҷ,M%U *#Lldji47aKg2bp03GGeWe-+83]NٙI fCL(ڐW-?g NqToZbeoNgڂ/¿--~[NlgqF3/!/]9ˎp$p<~ss` %H_7? 
g<7wlq-l#*ُ|uNEݻ>י5i&l]O1}Ee]%ѹ:6C9Ӧt}XN"v R[ΟG]<'0MęcȔᑎ: p!a}LB w}Ʌ^6}.ӔLfD}Q69/)磺;>4N{F߆>;&i>@[{-Ipyۂix}zs;ڸ X:j=yE6x`V'R]{ if (project.hasProperty("android")) { android { compileSdkVersion 28 buildToolsVersion '28.0.3' } } } } sourceSets { main { manifest.srcFile 'AndroidManifest.xml' java.srcDirs = [qt5AndroidDir + '/src', 'src', 'java'] aidl.srcDirs = [qt5AndroidDir + '/src', 'src', 'aidl'] res.srcDirs = [qt5AndroidDir + '/res', 'res'] resources.srcDirs = ['src'] renderscript.srcDirs = ['src'] assets.srcDirs = ['assets'] jniLibs.srcDirs = ['libs'] } } lintOptions { abortOnError false } splits { abi { reset() include "x86", "x86_64" /*include "armeabi-v7a", "arm64-v8a"*/ } } signingConfigs { release { storeFile file("android_release.keystore") storePassword "b@cUl@Is.Co0L" keyAlias "bmob" keyPassword "b@cUl@Is.Co0L" } } buildTypes { release { signingConfig signingConfigs.release } } } bacula-15.0.3/src/qt-console/tray-monitor/android/bacula-fd.conf0000644000175000017500000000147214771010173024312 0ustar bsbuildbsbuild# # List Directors who are permitted to contact this File daemon # Director { Name = @DEVICE-ID@-dir Password = "@DEVICE-PWD@" } # # Restricted Director, used by tray-monitor to get the # status of the file daemon # Director { Name = @DEVICE-ID@-mon Password = "@DEVICE-PWD@" Monitor = yes } # # "Global" File daemon configuration specifications # FileDaemon { Name = @DEVICE-ID@-fd FDport = 9102 WorkingDirectory = /data/user/0/org.baculasystems.bmob.alfa/files/etc Pid Directory = /data/user/0/org.baculasystems.bmob.alfa/files/etc Maximum Concurrent Jobs = 20 Plugin Directory = /data/user/0/org.baculasystems.bmob.alfa/files/lib } # Send all messages except skipped files back to Director Messages { Name = Standard director = @DEVICE-ID@-dir = all, !skipped, !restored, !verified, !saved } bacula-15.0.3/src/qt-console/tray-monitor/android/gradlew.bat0000644000175000017500000000423714771010173023744 0ustar bsbuildbsbuild@if "%DEBUG%" == "" @echo off @rem ########################################################################## @rem @rem Gradle startup script for Windows @rem @rem ########################################################################## @rem Set local scope for the variables with windows NT shell if "%OS%"=="Windows_NT" setlocal set DIRNAME=%~dp0 if "%DIRNAME%" == "" set DIRNAME=. set APP_BASE_NAME=%~n0 set APP_HOME=%DIRNAME% @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. set DEFAULT_JVM_OPTS=-Xmx1024m -Dfile.encoding=UTF-8 @rem Find java.exe if defined JAVA_HOME goto findJavaFromJavaHome set JAVA_EXE=java.exe %JAVA_EXE% -version >NUL 2>&1 if "%ERRORLEVEL%" == "0" goto init echo. echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. echo. echo Please set the JAVA_HOME variable in your environment to match the echo location of your Java installation. goto fail :findJavaFromJavaHome set JAVA_HOME=%JAVA_HOME:"=% set JAVA_EXE=%JAVA_HOME%/bin/java.exe if exist "%JAVA_EXE%" goto init echo. echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% echo. echo Please set the JAVA_HOME variable in your environment to match the echo location of your Java installation. goto fail :init @rem Get command-line arguments, handling Windows variants if not "%OS%" == "Windows_NT" goto win9xME_args :win9xME_args @rem Slurp the command line arguments. 
set CMD_LINE_ARGS= set _SKIP=2 :win9xME_args_slurp if "x%~1" == "x" goto execute set CMD_LINE_ARGS=%* :execute @rem Setup the command line set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar @rem Execute Gradle "%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS% :end @rem End local scope for the variables with windows NT shell if "%ERRORLEVEL%"=="0" goto mainEnd :fail rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of rem the _cmd.exe /c_ return code! if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 exit /b 1 :mainEnd if "%OS%"=="Windows_NT" endlocal :omega bacula-15.0.3/src/qt-console/tray-monitor/jobsmodel.h0000644000175000017500000000303714771010173022333 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #ifndef JOBSMODEL_H #define JOBSMODEL_H #include #include "common.h" class JobsModel : public QAbstractTableModel { Q_OBJECT public: enum { ID_COLUMN =0, TDATE_COLUMN, HASCACHE_COLUMN, NAME_COLUMN, NUM_COLUMN }; struct row_struct { uint64_t id; QDateTime tdate; QString hasCache; QString name; }; explicit JobsModel(const QList& t, QObject *parent = NULL); // Header: QVariant headerData(int section, Qt::Orientation orientation, int role = Qt::DisplayRole) const; // Basic functionality: int rowCount(const QModelIndex &parent = QModelIndex()) const; int columnCount(const QModelIndex &parent = QModelIndex()) const; QVariant data(const QModelIndex &index, int role = Qt::DisplayRole) const; private: QList table; }; #endif // JOBSMODEL_H bacula-15.0.3/src/qt-console/tray-monitor/pluginwizardpage.cpp0000644000175000017500000001371314771010173024266 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * Restore Wizard: Plugin selection page * * Written by Norbert Bizet, May MMXVII * */ #include "common.h" #include "pluginwizardpage.h" #include "ui_pluginwizardpage.h" #include "pluginmodel.h" #include "task.h" #include #include #include #include #include #include "lib/ini.h" PluginWizardPage::PluginWizardPage(QWidget *parent) : QWizardPage(parent), ui(new Ui::PluginWizardPage), res(NULL) { ui->setupUi(this); ui->tabWidget->clear(); registerField("pluginKeysStr", this, "pluginKeysStr", "pluginKeysStrChanged"); } PluginWizardPage::~PluginWizardPage() { delete ui; } void PluginWizardPage::initializePage() { /* build plugin form UI dynamically*/ task t; t.init(res, -1); QStringList idsList = field("pluginIds").toString().split(","); QStringList nameList = field("pluginNames").toString().split(","); /* process ids and name lists with the assumption that indexes match */ ASSERT(idsList.count() == nameList.count()); for( int c=0; ctabWidget->count(); ++j) { if (ui->tabWidget->tabText(j) == pluginName) { exists=true; break; } } if (exists) continue; /* create a tab widget */ QWidget *pluginWidget = new QWidget(); /* insert a tab widget with an empty form layout */ QFormLayout *layout = new QFormLayout(); pluginWidget->setLayout(layout); ui->tabWidget->addTab(pluginWidget, pluginName); /* parse each plugin fields*/ t.plugin(nameList[c], field("jobIds").toString(), pluginId.toInt()); ConfigFile cf; cf.unserialize(pluginName.toLatin1().data()); /* for each field */ for (int i=0; i < MAX_INI_ITEMS && cf.items[i].name; i++) { ini_items f = cf.items[i]; /* create the w Widget dynamically, based on the field value type */ QWidget *w(NULL); if (f.handler == ini_store_str || f.handler == ini_store_name || f.handler == ini_store_alist_str) /*FIXME: treat alist separatly*/ { QLineEdit *l = new QLineEdit(); w=l; if (f.default_value) l->setText(f.default_value); } else if (f.handler == ini_store_pint64 || f.handler == ini_store_int64 || f.handler == ini_store_pint32 || f.handler == ini_store_int32) { QLineEdit *l = new QLineEdit(); w=l; l->setValidator(new QIntValidator()); if (f.default_value) l->setText(f.default_value); } else if (f.handler == ini_store_bool) { QCheckBox *c = new QCheckBox(); w=c; if (f.default_value) c->setChecked(f.default_value); } else if (f.handler == ini_store_date) { QDateTimeEdit *d = new QDateTimeEdit(); w=d; if (f.default_value) d->setDateTime(QDateTime::fromString(f.default_value, "yyyy-MM-dd hh:mm:ss")); } if (w) { w->setToolTip(f.comment); QString field_name = QString("%1_%2").arg(nameList[c]).arg(f.name); /* This doesn't work FIXME * if (f.required) { field_name.append("*"); }*/ registerField(field_name, w); /* there's no way to iterate thru page-register field */ /* As a workaround we keep track of registered fields in a separate list */ registeredFields.append(field_name); layout->addRow(f.name, w); emit completeChanged(); } } } } bool PluginWizardPage::validatePage() { QStringList pluginKeys; QStringList idsList = field("pluginIds").toString().split(","); QStringList nameList = field("pluginNames").toString().split(","); for (int idx=0; idxtabWidget->count(); ++idx) { QString name = ui->tabWidget->tabText(idx); ASSERT(name.compare(nameList[idx]) == 0); QFile file(name); if (file.open(QIODevice::WriteOnly)) { QTextStream outputStream(&file); foreach(QString fld, registeredFields) { QStringList sl = fld.split("_"); if ( (name.compare(sl[0]) == 0) && field(fld).isValid()) { QString s = QString("%1=%2\n").arg(sl[1]).arg(field(fld).toString()); outputStream << s; } } } 
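/*
 * The block above serializes each registered wizard field of this plugin
 * as a plain "name=value" line into a local file named after the plugin.
 * task::prepare_restore() (task.cpp, later in this tree) reads that same
 * file back, uploads it to the Director with ".putfile key=..." and then
 * references it via "restore pluginrestoreconf=...", so the one-entry-per-line
 * key=value layout written here must be preserved.
 */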
/* create the key */ QString key = QString("j%1i%2").arg(field("jobIds").toString().remove(',').simplified()).arg(idsList[idx].simplified()); QString restoreKey = QString("%1:%2").arg(idsList[idx].simplified()).arg(key); pluginKeys.append(restoreKey); } m_pluginKeysStr = pluginKeys.join(","); emit pluginKeysStrChanged(); return true; } bacula-15.0.3/src/qt-console/tray-monitor/config-storage.h0000644000175000017500000000624114771010173023264 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #ifndef CONFIG_STORAGE_H #define CONFIG_STORAGE_H #include "common.h" #include "tray_conf.h" #include "android-fd-service.h" /* ConfigStorage - Responsible for managing all operations (reads, updates, writes) in the bacula-tray-monitor.conf file. */ class ConfigStorage { public: static ConfigStorage& getInstance() { static ConfigStorage instance; return instance; } // Creates the singleton instance static bool init(const char *config_path) { struct stat statp; ConfigStorage *storage = &ConfigStorage::getInstance(); storage->config_path = config_path; /** * If tray-monitor.conf does not exist yet, * create it with default resources. */ if (stat(config_path, &statp) != 0) { const char *err_msg; char *deviceId = bstrdup(AndroidFD::deviceId().toLatin1().constData()); err_msg = storage->addResource(deviceId, R_CLIENT, true); if (err_msg != NULL) { Dmsg1(0, "Error - could not save default client resource - %s\n", err_msg); return false; } err_msg = storage->saveMonitor(deviceId); if (err_msg != NULL) { Dmsg1(0, "Error - could not save default monitor resource - %s\n", err_msg); return false; } free(deviceId); } return storage->reloadResources(); } private: CONFIG *config = NULL; RES_HEAD **rhead; explicit ConfigStorage() {} void writeMonitor(FILE *fp, MONITOR *mon); void writeResource(FILE *fp, RESMON *res, rescode code); const char *validateResource(RESMON *res); public: QString config_path; bool reloadResources(); bool reloadResources(bool encodePassword); const char *saveMonitor(const char *deviceId); const char *saveMonitor(MONITOR *mon); MONITOR *getMonitor(); const char *addResource(const char *resName, rescode code); const char *addResource(const char *resName, rescode code, bool managed); const char *addResource(RESMON *res, rescode code); const char *editResource(RESMON *oldRes, RESMON *newRes, rescode code); const char *saveResources( QList *clients, QList *directors, QList *storages ); QList *getAllResources(); QList *getResources(rescode resCode); RESMON *getResourceByName(rescode resCode, const char* resName); //Debug Only void dump_storage(); }; #endif // CONFIG_STORAGE_H bacula-15.0.3/src/qt-console/tray-monitor/fd-config-ui-controller.h0000644000175000017500000000231014771010173024776 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. 
You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #ifndef FDCONFIGUICONTROLLER_H #define FDCONFIGUICONTROLLER_H #include #include #include #include "conf.h" #include "tray_conf.h" #include "resmodel.h" #include "config-storage.h" class FdConfigUiController : public QObject { Q_OBJECT public: explicit FdConfigUiController(QObject *parent = nullptr); ~FdConfigUiController(); public slots: //FD Config File void readFileDaemonConfig(); void writeFileDaemonConfig(QString fcontents); signals: void fileDaemonConfigRead(const QString &text); }; #endif // FDCONFIGUICONTROLLER_H bacula-15.0.3/src/qt-console/tray-monitor/restorewizard.h0000644000175000017500000000241114771010173023254 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Restore Wizard * * Written by Norbert Bizet, May MMXVII * */ #ifndef RESTOREWIZARD_H #define RESTOREWIZARD_H #include #include namespace Ui { class RestoreWizard; } class task; class RESMON; class QStandardItemModel; class RestoreWizard : public QWizard { Q_OBJECT public: enum { RW_CLIENT_PAGE = 0, RW_JOB_PAGE = 1, RW_FILE_PAGE = 2, RW_PLUGIN_PAGE = 3, RW_ADVANCEDOPTIONS_PAGE = 4 }; explicit RestoreWizard(RESMON *r, QWidget *parent = 0); ~RestoreWizard(); private: RESMON *res; Ui::RestoreWizard *ui; }; #endif // RESTOREWIZARD_H bacula-15.0.3/src/qt-console/tray-monitor/task.cpp0000644000175000017500000013136114771010173021654 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ #include "task.h" #include "jcr.h" #include "filesmodel.h" #include "pluginmodel.h" #include #include "../util/fmtwidgetitem.h" #define dbglvl 10 int authenticate_daemon(JCR *jcr, MONITOR *monitor, RESMON *res); static void *handle_task(void *data) { task *t; worker *wrk = (worker *)data; lmgr_init_thread(); wrk->set_running(); Dmsg0(dbglvl, "Worker started\n"); while (!wrk->is_quit_state()) { if (wrk->is_wait_state()) { wrk->wait(); continue; } t = (task *)wrk->dequeue(); if (!t) { continue; } /* Do the work */ switch(t->type) { case TASK_STATUS: t->do_status(); break; case TASK_RESOURCES: t->get_resources(); break; case TASK_DEFAULTS: t->get_job_defaults(); break; case TASK_RUN: t->run_job(); break; case TASK_BWLIMIT: t->set_bandwidth(); break; case TASK_INFO: t->get_job_info(t->arg2); break; case TASK_DISCONNECT: t->disconnect_bacula(); t->mark_as_done(); break; case TASK_LIST_CLIENT_JOBS: t->get_client_jobs(t->arg); break; case TASK_LIST_JOB_FILES: t->get_job_files(t->arg, t->pathId); break; case TASK_RESTORE: t->restore(); break; default: Mmsg(t->errmsg, "Unknown task"); t->mark_as_failed(); break; } } Dmsg0(dbglvl, "Worker stopped\n"); lmgr_cleanup_thread(); return NULL; } bool task::set_bandwidth() { bool ret = false; btimer_t *tid = NULL; if (res->type != R_CLIENT) { mark_as_failed(); Mmsg(errmsg, _("Bandwidth can set only set on Client")); return false; } if (!arg || !*arg) { mark_as_failed(); Mmsg(errmsg, _("Bandwidth parameter is invalid")); return false; } if (res->proxy_sent) { free_bsock(res->bs); } if (!res->bs || !res->bs->is_open() || res->bs->is_error()) { if (!connect_bacula()) { mark_as_failed(); return false; } } tid = start_thread_timer(NULL, pthread_self(), (uint32_t)120); res->bs->fsend("setbandwidth limit=%s\n", NPRTB(arg)); while (get_next_line(res)) { Dmsg1(dbglvl, "-> %s\n", curline); } if (tid) { stop_thread_timer(tid); } /* Do not reuse the same socket */ disconnect_bacula(); if (ret) { mark_as_done(); } else { mark_as_failed(); } return ret; } RESMON *task::get_res() { return res; } void task::lock_res() { res->mutex->lock(); } void task::unlock_res() { res->mutex->unlock(); } bool task::disconnect_bacula() { free_bsock(res->bs); return true; } bool task::connect_bacula() { JCR jcr; bool ret = false; memset(&jcr, 0, sizeof(jcr)); curend = curline = NULL; #ifdef Q_OS_ANDROID ConfigStorage * config = &ConfigStorage::getInstance(); RESMON *r = get_res(); r->psk_ctx = new_psk_context(NULL /*r->password*/); MONITOR *monitor = config->getMonitor(); #else RESMON *r = get_res(); MONITOR *monitor = (MONITOR*)GetNextRes(R_MONITOR, NULL); #endif if (r->type == R_CLIENT) { r->proxy_sent = false; if (r->bs && (r->bs->is_error() || !r->bs->is_open())) { free_bsock(r->bs); } if (!r->bs) { r->bs = new_bsock(); Dmsg0(dbglvl, "Trying to connect to FD\n"); if (r->bs->connect(NULL, r->connect_timeout, 0, 0, _("Client daemon"), r->address, NULL, r->port, 0)) { Dmsg0(dbglvl, "Connect done!\n"); jcr.file_bsock = r->bs; if (!authenticate_daemon(&jcr, monitor, r)) { Dmsg0(dbglvl, "Unable to authenticate\n"); Mmsg(errmsg, "Unable to authenticate with the FileDaemon"); free_bsock(r->bs); return false; } Dmsg0(dbglvl, "Authenticate OK\n"); ret = true; } else { Mmsg(errmsg, "Unable to connect to the FileDaemon"); Dmsg0(dbglvl, "Connect error!\n"); } } else { ret = true; } } if (r->type == R_STORAGE) { if (r->bs && (r->bs->is_error() || !r->bs->is_open())) { free_bsock(r->bs); } if (!r->bs) { r->bs = new_bsock(); Dmsg0(dbglvl, "Trying to connect to FD\n"); if (r->bs->connect(NULL, 
r->connect_timeout, 0, 0, _("Storage daemon"), r->address, NULL, r->port, 0)) { Dmsg0(dbglvl, "Connect done!\n"); jcr.store_bsock = r->bs; if (!authenticate_daemon(&jcr, monitor, r)) { Dmsg0(dbglvl, "Unable to authenticate\n"); Mmsg(errmsg, "Unable to authenticate with the Storage Daemon"); free_bsock(r->bs); return false; } Dmsg0(dbglvl, "Authenticate OK\n"); ret = true; } else { Mmsg(errmsg, "Unable to connect to the Storage Daemon"); Dmsg0(dbglvl, "Connect error!\n"); } } else { ret = true; } } if (r->type == R_DIRECTOR) { if (r->bs && (r->bs->is_error() || !r->bs->is_open())) { free_bsock(r->bs); } if (!r->bs) { r->bs = new_bsock(); Dmsg0(dbglvl, "Trying to connect to DIR\n"); if (r->bs->connect(NULL, r->connect_timeout, 0, 0, _("Director daemon"), r->address, NULL, r->port, 0)) { Dmsg0(dbglvl, "Connect done!\n"); jcr.dir_bsock = r->bs; if (!authenticate_daemon(&jcr, monitor, r)) { Dmsg0(dbglvl, "Unable to authenticate\n"); Mmsg(errmsg, "Unable to authenticate with the Director"); free_bsock(r->bs); return false; } Dmsg0(dbglvl, "Authenticate OK\n"); ret = true; } else { Mmsg(errmsg, "Unable to connect to the Director"); Dmsg0(dbglvl, "Connect error!\n"); } } else { ret = true; } } return ret; } bool task::read_status_running(RESMON *r) { bool ret = false; char *start, *end; struct s_running_job *item = NULL; alist *running_jobs = New(alist(10, owned_by_alist)); while (r->bs->recv() >= -1) { if (r->bs->msglen < 0 && r->bs->msglen != BNET_CMD_BEGIN && r->bs->msglen != BNET_CMD_OK) { Dmsg1(dbglvl, "Got Signal %s\n", bnet_sig_to_ascii(r->bs->msglen)); break; } Dmsg2(dbglvl, "RECV -> %s:%d\n", r->bs->msg, r->bs->msglen); start = r->bs->msg; while ((end = strchr(start, '\n')) != NULL) { *end = 0; Dmsg1(dbglvl, "line=[%s]\n", start); if (strncasecmp(start, "jobid=", 6) == 0) { if (item) { Dmsg1(dbglvl, "Append item %ld\n", item->JobId); running_jobs->append(item); } item = (struct s_running_job *)malloc(sizeof(struct s_running_job)); memset(item, 0, sizeof(struct s_running_job)); item->JobId = str_to_uint64(start + 6); } else if (!item) { Dmsg0(dbglvl, "discard line\n"); } else if (strncasecmp(start, "level=", 6) == 0) { item->JobLevel = start[6]; } else if (strncasecmp(start, "type=", 5) == 0) { item->JobType = start[5]; } else if (strncasecmp(start, "status=", 7) == 0) { item->JobStatus = start[7]; } else if (strncasecmp(start, "jobbytes=", 9) == 0) { item->JobBytes = str_to_uint64(start + 9); } else if (strncasecmp(start, "jobfiles=", 9) == 0) { item->JobFiles = str_to_uint64(start + 9); } else if (strncasecmp(start, "job=", 4) == 0) { bstrncpy(item->Job, start + 4, sizeof(item->Job)); } else if (strncasecmp(start, "starttime_epoch=", 16) == 0) { item->start_time = str_to_uint64(start + 16); } else if (strncasecmp(start, "schedtime_epoch=", 16) == 0) { item->sched_time = str_to_uint64(start + 16); } else if (strncasecmp(start, "bytes/sec=", 10) == 0) { item->bytespersec = str_to_uint64(start + 10); } else if (strncasecmp(start, "avebytes_sec=", 13) == 0) { item->bytespersec = str_to_uint64(start + 13); } else if (strncasecmp(start, "errors=", 7) == 0) { item->Errors = str_to_uint64(start + 7); } else if (strncasecmp(start, "readbytes=", 10) == 0) { item->ReadBytes = str_to_uint64(start + 10); } else if (strncasecmp(start, "processing file=", 16) == 0) { bstrncpy(item->CurrentFile, start + 16, sizeof(item->CurrentFile)); } else if (strncasecmp(start, "clientname=", 11) == 0) { bstrncpy(item->Client, start + 11, sizeof(item->Client)); } else if (strncasecmp(start, "fileset=", 8) == 0) { 
bstrncpy(item->FileSet, start + 8, sizeof(item->FileSet)); } else if (strncasecmp(start, "storage=", 8) == 0) { bstrncpy(item->Storage, start + 8, sizeof(item->Storage)); } else if (strncasecmp(start, "rstorage=", 8) == 0) { bstrncpy(item->RStorage, start + 8, sizeof(item->Storage)); } else if (strncasecmp(start, "sdtls=", 6) == 0) { item->SDtls = str_to_uint64(start + 6); } start = end+1; } r->last_update = time(NULL); if (r->bs->is_error()) { Mmsg(errmsg, "Got error on the socket communication line"); goto bail_out; } } if (item) { Dmsg1(dbglvl, "Append item %ld\n", item->JobId); running_jobs->append(item); } ret = true; bail_out: r->mutex->lock(); if (r->running_jobs) { delete r->running_jobs; } r->running_jobs = running_jobs; r->mutex->unlock(); return ret; } bool task::read_status_terminated(RESMON *r) { bool ret = false; char *start, *end; struct s_last_job *item = NULL; r->mutex->lock(); if (r->terminated_jobs) { delete r->terminated_jobs; } r->terminated_jobs = New(dlist(item, &item->link)); r->mutex->unlock(); while (r->bs->recv() >= -1) { if (r->bs->msglen < 0 && r->bs->msglen != BNET_CMD_BEGIN && r->bs->msglen != BNET_CMD_OK) { Dmsg1(dbglvl, "Got Signal %s\n", bnet_sig_to_ascii(r->bs->msglen)); break; } Dmsg2(dbglvl, "RECV -> %s:%d\n", r->bs->msg, r->bs->msglen); r->mutex->lock(); start = r->bs->msg; while ((end = strchr(start, '\n')) != NULL) { *end = 0; Dmsg1(dbglvl, "line=[%s]\n", start); if (strncasecmp(start, "jobid=", 6) == 0) { if (item) { Dmsg1(dbglvl, "Append item %ld\n", item->JobId); r->terminated_jobs->append(item); } item = (struct s_last_job *)malloc(sizeof(struct s_last_job)); memset(item, 0, sizeof(struct s_last_job)); item->JobId = str_to_uint64(start + 6); } else if (!item) { Dmsg0(dbglvl, "discard line\n"); } else if (strncasecmp(start, "level=", 6) == 0) { item->JobLevel = start[6]; } else if (strncasecmp(start, "type=", 5) == 0) { item->JobType = start[5]; } else if (strncasecmp(start, "status=", 7) == 0) { item->JobStatus = start[7]; } else if (strncasecmp(start, "jobbytes=", 9) == 0) { item->JobBytes = str_to_uint64(start + 9); } else if (strncasecmp(start, "jobfiles=", 9) == 0) { item->JobFiles = str_to_uint64(start + 9); } else if (strncasecmp(start, "job=", 4) == 0) { bstrncpy(item->Job, start + 4, sizeof(item->Job)); } else if (strncasecmp(start, "starttime_epoch=", 16) == 0) { item->start_time = str_to_uint64(start + 16); } else if (strncasecmp(start, "endtime_epoch=", 14) == 0) { item->end_time = str_to_uint64(start + 14); } else if (strncasecmp(start, "errors=", 7) == 0) { item->Errors = str_to_uint64(start + 7); } start = end+1; } r->last_update = time(NULL); r->mutex->unlock(); if (r->bs->is_error()) { Mmsg(errmsg, "Got error on the socket communication line"); goto bail_out; } } if (item) { r->mutex->lock(); Dmsg1(dbglvl, "Append item %ld\n", item->JobId); r->terminated_jobs->append(item); r->mutex->unlock(); } ret = true; bail_out: return ret; } bool task::read_status_header(RESMON *r) { bool ret = false; char *start, *end; while (r->bs->recv() >= -1) { if (r->bs->msglen < 0 && r->bs->msglen != BNET_CMD_BEGIN && r->bs->msglen != BNET_CMD_OK) { Dmsg1(dbglvl, "Got Signal %d\n", r->bs->msglen); break; } Dmsg2(dbglvl, "RECV -> %s:%d\n", r->bs->msg, r->bs->msglen); r->mutex->lock(); start = r->bs->msg; while ((end = strchr(start, '\n')) != NULL) { *end = 0; Dmsg1(dbglvl, "line=[%s]\n", start); if (strncasecmp(start, "name=", 5) == 0) { bstrncpy(r->name, start + 5, sizeof(r->name)); } else if (strncasecmp(start, "version=", 8) == 0) { 
bstrncpy(r->version, start + 8, sizeof(r->version)); } else if (strncasecmp(start, "plugins=", 8) == 0) { bstrncpy(r->plugins, start + 8, sizeof(r->plugins)); } else if (strncasecmp(start, "bwlimit=", 8) == 0) { r->bwlimit = str_to_uint64(start + 8); } else if (strncasecmp(start, "started=", 8) == 0) { bstrncpy(r->started, start + 8, sizeof(r->started)); } else if (strncasecmp(start, "reloaded=", 9) == 0) { bstrncpy(r->reloaded, start + 9, sizeof(r->reloaded)); } start = end+1; } if (r->bs->is_error()) { r->mutex->unlock(); Mmsg(errmsg, "Got error on the socket communication line"); goto bail_out; } r->last_update = time(NULL); r->mutex->unlock(); } ret = true; bail_out: return ret; } bool task::do_status() { bool ret = false; btimer_t *tid = NULL; /* We don't want to use a proxy session */ if (res->type == R_CLIENT && res->proxy_sent) { free_bsock(res->bs); } if (!res->bs || !res->bs->is_open() || res->bs->is_error()) { if (!connect_bacula()) { goto bail_out; } } /* TODO: */ tid = start_thread_timer(NULL, pthread_self(), (uint32_t)120); if (res->type == R_CLIENT || res->type == R_STORAGE) { Dmsg0(dbglvl, "Send status command header\n"); res->bs->fsend(".status header api=2\n"); // TODO: Update a local set of variables and commit everything when it's done ret = read_status_header(res); if (ret) { res->bs->fsend(".status terminated api=2\n"); ret = read_status_terminated(res); } if (ret) { res->bs->fsend(".status running api=2\n"); ret = read_status_running(res); } } if (res->type == R_DIRECTOR) { Dmsg0(dbglvl, "-> .api 2\n"); res->bs->fsend(".api 2\n"); while (get_next_line(res)) { Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); } Dmsg0(dbglvl, "Send status command header\n"); res->bs->fsend(".status dir header\n"); // TODO: Update a local set of variables and commit everything when it's done ret = read_status_header(res); if (ret) { Dmsg0(dbglvl, "Send status command terminated\n"); res->bs->fsend(".status dir terminated\n"); ret = read_status_terminated(res); } if (ret) { Dmsg0(dbglvl, "Send status command running\n"); res->bs->fsend(".status dir running\n"); ret = read_status_running(res); } } bail_out: if (tid) { stop_thread_timer(tid); } /* Use a new socket the next time */ disconnect_bacula(); if (ret) { mark_as_done(); } else { mark_as_failed(); } return ret; } bool task::get_next_line(RESMON *r) { /* We are currently reading a line */ if (curline && curend && r->bs->msglen > 0 && curend < (r->bs->msg + r->bs->msglen - 1)) { curline = curend + 1; /* skip \0 */ if ((curend = strchr(curline, '\n')) != NULL) { *curend = '\0'; } return true; } curline = curend = NULL; do { r->bs->recv(); if (r->bs->msglen < 0) { Dmsg1(dbglvl, "<- %s\n", bnet_sig_to_ascii(r->bs->msglen)); switch(r->bs->msglen) { case BNET_ERROR_MSG: r->bs->recv(); strip_trailing_junk(r->bs->msg); Dmsg1(0, "ERROR: %s\n", r->bs->msg); break; case BNET_MAIN_PROMPT: // stop return false; case BNET_CMD_OK: case BNET_CMD_BEGIN: case BNET_MSGS_PENDING: break; case BNET_TERMINATE: return false; default: // error or question? 
return false; } } else if (r->bs->msglen == 0) { // strange return false; } else { Dmsg1(10, "<- %s\n", r->bs->msg); curline = r->bs->msg; curend = strchr(curline, '\n'); if (curend) { *curend = 0; } return true; // something to read } } while (!r->bs->is_error()); return false; } bool task::get_job_defaults() { bool ret = false; btimer_t *tid = NULL; char *p; if (!res->bs || !res->bs->is_open() || res->bs->is_error()) { if (!connect_bacula()) { goto bail_out; } } res->mutex->lock(); bfree_and_null(res->defaults.client); bfree_and_null(res->defaults.pool); bfree_and_null(res->defaults.storage); bfree_and_null(res->defaults.level); bfree_and_null(res->defaults.type); bfree_and_null(res->defaults.fileset); bfree_and_null(res->defaults.catalog); bfree_and_null(res->defaults.where); tid = start_thread_timer(NULL, pthread_self(), (uint32_t)120); if (res->type == R_CLIENT && !res->proxy_sent) { res->proxy_sent = true; res->bs->fsend("proxy\n"); while (get_next_line(res)) { if (strncmp(curline, "2000", 4) != 0) { pm_strcpy(errmsg, curline); goto bail_out; } Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); } } res->bs->fsend(".api 2\n"); while (get_next_line(res)) { Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); } res->bs->fsend(".defaults job=%s\n", res->defaults.job); while (get_next_line(res)) { Dmsg1(dbglvl, "line = [%s]\n", curline); if ((p = strchr(curline, '=')) == NULL) { continue; } *p++ = 0; if (strcasecmp(curline, "client") == 0) { res->defaults.client = bstrdup(p); } else if (strcasecmp(curline, "pool") == 0) { res->defaults.pool = bstrdup(p); } else if (strcasecmp(curline, "storage") == 0) { res->defaults.storage = bstrdup(p); } else if (strcasecmp(curline, "level") == 0) { res->defaults.level = bstrdup(p); } else if (strcasecmp(curline, "type") == 0) { res->defaults.type = bstrdup(p); } else if (strcasecmp(curline, "fileset") == 0) { res->defaults.fileset = bstrdup(p); } else if (strcasecmp(curline, "catalog") == 0) { res->defaults.catalog = bstrdup(p); } else if (strcasecmp(curline, "priority") == 0) { res->defaults.priority = str_to_uint64(p); } else if (strcasecmp(curline, "where") == 0) { res->defaults.where = bstrdup(p); } else if (strcasecmp(curline, "replace") == 0) { res->defaults.replace = str_to_uint64(p); } } ret = true; bail_out: if (tid) { stop_thread_timer(tid); } if (ret) { mark_as_done(); } else { mark_as_failed(); } res->mutex->unlock(); return ret; } bool task::get_job_info(const char *level) { bool ret = false; btimer_t *tid = NULL; char *p; if (!res->bs || !res->bs->is_open() || res->bs->is_error()) { if (!connect_bacula()) { goto bail_out; } } res->mutex->lock(); memset(&res->infos, 0, sizeof(res->infos)); tid = start_thread_timer(NULL, pthread_self(), (uint32_t)120); if (res->type == R_CLIENT && !res->proxy_sent) { res->proxy_sent = true; res->bs->fsend("proxy\n"); while (get_next_line(res)) { if (strncmp(curline, "2000", 4) != 0) { pm_strcpy(errmsg, curline); goto bail_out; } Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); } } res->bs->fsend(".api 2\n"); while (get_next_line(res)) { Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); } if (level) { res->bs->fsend(".estimate job=\"%s\" level=%s\n", arg, level); } else { res->bs->fsend(".estimate job=\"%s\"\n", arg); } while (get_next_line(res)) { Dmsg1(dbglvl, "line = [%s]\n", curline); if ((p = strchr(curline, '=')) == NULL) { continue; } *p++ = 0; if (strcasecmp(curline, "level") == 0) { res->infos.JobLevel = p[0]; } else if (strcasecmp(curline, "jobbytes") == 0) { res->infos.JobBytes = 
str_to_uint64(p); } else if (strcasecmp(curline, "jobfiles") == 0) { res->infos.JobFiles = str_to_uint64(p); } else if (strcasecmp(curline, "corrbytes") == 0) { res->infos.CorrJobBytes = str_to_uint64(p); } else if (strcasecmp(curline, "corrfiles") == 0) { res->infos.CorrJobFiles = str_to_uint64(p); } else if (strcasecmp(curline, "nbjob") == 0) { res->infos.CorrNbJob = str_to_uint64(p); } } ret = true; bail_out: res->mutex->unlock(); if (tid) { stop_thread_timer(tid); } if (ret) { mark_as_done(); } else { mark_as_failed(); } return ret; } bool task::run_job() { bool ret = false; char *p; btimer_t *tid = NULL; if (!res->bs || !res->bs->is_open() || res->bs->is_error()) { if (!connect_bacula()) { goto bail_out; } } tid = start_thread_timer(NULL, pthread_self(), (uint32_t)120); if (res->type == R_CLIENT && !res->proxy_sent) { res->proxy_sent = true; res->bs->fsend("proxy\n"); while (get_next_line(res)) { if (strncmp(curline, "2000", 4) != 0) { pm_strcpy(errmsg, curline); goto bail_out; } Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); } } res->bs->fsend(".api 2\n"); while (get_next_line(res)) { Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); } if (res->type == R_DIRECTOR && res->use_setip) { res->bs->fsend("setip\n"); while (get_next_line(res)) { Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); } } res->bs->fsend("%s\n", arg); while (get_next_line(res)) { if ((p = strstr(curline, "JobId=")) != NULL && sscanf(p, "JobId=%d\n", &result.i) == 1) { ret = true; } } // Close the socket, it's over or we don't want to reuse it disconnect_bacula(); bail_out: if (tid) { stop_thread_timer(tid); } if (ret) { mark_as_done(); } else { mark_as_failed(); } return ret; } bool task::get_client_jobs(const char* client) { bool ret = false; btimer_t *tid = NULL; int row=0; QStringList headers; struct s_last_job *ljob=NULL; if (!model) { goto bail_out; } model->clear(); headers << tr("JobId") << tr("Job") << tr("Level") << tr("Date") << tr("Files") << tr("Bytes"); model->setHorizontalHeaderLabels(headers); if (!res->bs || !res->bs->is_open() || res->bs->is_error()) { if (!connect_bacula()) { goto bail_out; } } tid = start_thread_timer(NULL, pthread_self(), (uint32_t)120); if (res->type == R_CLIENT && !res->proxy_sent) { res->proxy_sent = true; res->bs->fsend("proxy\n"); while (get_next_line(res)) { if (strncmp(curline, "2000", 4) != 0) { pm_strcpy(errmsg, curline); goto bail_out; } Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); } } res->bs->fsend(".api 2\n"); while (get_next_line(res)) { Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); } if (res->type == R_DIRECTOR && res->use_setip) { res->bs->fsend("setip\n"); while (get_next_line(res)) { Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); } } res->bs->fsend(".bvfs_get_jobs client=%s\n", client); while (get_next_line(res)) { QString line(curline); QStringList line_lst = line.split(" ", QString::SkipEmptyParts); model->setItem(row, 0, new QStandardItem(line_lst[0])); model->setItem(row, 1, new QStandardItem(line_lst[3])); QDateTime date; date.setTime_t(line_lst[1].toUInt()); QStandardItem *dateItem = new QStandardItem(); dateItem->setData(date, Qt::DisplayRole); model->setItem(row, 3, dateItem); /* find the job in res terminated list */ if (res->terminated_jobs) { foreach_dlist(ljob, res->terminated_jobs) { if (ljob->JobId == line_lst[0].toUInt()) { model->setItem(row, 2, new QStandardItem(QString(job_level_to_str(ljob->JobLevel)))); model->setItem(row, 4, new QStandardItem(QString::number(ljob->JobFiles))); model->setItem(row, 
5, new QStandardItem(QString::number(ljob->JobBytes))); break; } } } ret = true; ++row; } // Close the socket, it's over or we don't want to reuse it disconnect_bacula(); bail_out: if (tid) { stop_thread_timer(tid); } if (ret) { mark_as_done(); } else { mark_as_failed(); } return ret; } extern int decode_stat(char *buf, struct stat *statp, int stat_size, int32_t *LinkFI); bool task::get_job_files(const char* job, uint64_t pathid) { bool ret = false; btimer_t *tid = NULL; QString jobs; struct stat statp; int32_t LinkFI; if (!model) { goto bail_out; } model->removeRows(0, model->rowCount()); if (!res->bs || !res->bs->is_open() || res->bs->is_error()) { if (!connect_bacula()) { goto bail_out; } } tid = start_thread_timer(NULL, pthread_self(), (uint32_t)120); if (res->type == R_CLIENT && !res->proxy_sent) { res->proxy_sent = true; res->bs->fsend("proxy\n"); while (get_next_line(res)) { if (strncmp(curline, "2000", 4) != 0) { pm_strcpy(errmsg, curline); goto bail_out; } Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); } } if (res->type == R_DIRECTOR && res->use_setip) { res->bs->fsend("setip\n"); while (get_next_line(res)) { Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); } } res->bs->fsend(".api 2\n"); while (get_next_line(res)) { Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); } /* retrieve all job ids*/ res->bs->fsend(".bvfs_get_jobids jobid=%s\n", job); while (get_next_line(res)) { jobs = QString(curline); Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); } /* cache the file set */ res->bs->fsend(".bvfs_update jobid=%s\n", jobs.toLatin1().data()); while (get_next_line(res)) { Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); } if (pathid == 0) { res->bs->fsend(".bvfs_lsdirs jobid=%s path=\"\"\n", jobs.toLatin1().data()); } else { res->bs->fsend(".bvfs_lsdirs jobid=%s pathid=%lld\n", jobs.toLatin1().data(), pathid); } //+ " limit=" + limit + " offset=" + offset ; while (get_next_line(res)) { QString line(curline); QStringList line_lst = line.split("\t", QString::KeepEmptyParts); if ((line_lst.size() == 6) && line_lst[5] != ".") { DirectoryItem *d = new DirectoryItem(); d->setData(QVariant(line_lst[0]), PathIdRole); d->setData(QVariant(line_lst[1]), FilenameIdRole); d->setData(QVariant(line_lst[2]), FileIdRole); d->setData(QVariant(jobs), JobIdRole); d->setData(QVariant(line_lst[4]), LStatRole); d->setData(QVariant(line_lst[5]), PathRole); QFileInfo fi(QDir(QString(arg3)), line_lst[5]); d->setData(QVariant(fi.absoluteFilePath()), Qt::ToolTipRole); d->setData(QVariant(line_lst[5]), Qt::DisplayRole); model->appendRow(d); ret = true; } } /* then, request files */ if (strcmp(arg2,"") == 0) { if (pathid == 0) { res->bs->fsend(".bvfs_lsfiles jobid=%s path=\"\"\n", jobs.toLatin1().data()); } else { res->bs->fsend(".bvfs_lsfiles jobid=%s pathid=%lld\n", jobs.toLatin1().data(), pathid); } } else { if (pathid == 0) { res->bs->fsend(".bvfs_lsfiles jobid=%s path=\"\" pattern=\"%s\"\n", jobs.toLatin1().data(), arg2); } else { res->bs->fsend(".bvfs_lsfiles jobid=%s pathid=%lld pattern=\"%s\"\n", jobs.toLatin1().data(), pathid, arg2); } } //+ " limit=" + limit + " offset=" + offset ; while (get_next_line(res)) { QString line(curline); QStringList line_lst = line.split("\t", QString::SkipEmptyParts); if ((line_lst.size() == 6) && line_lst[5] != ".") { FileItem *f = new FileItem(); f->setData(QVariant(line_lst[0]), PathIdRole); f->setData(QVariant(line_lst[1]), FilenameIdRole); f->setData(QVariant(line_lst[2]), FileIdRole); f->setData(QVariant(jobs), JobIdRole); 
f->setData(QVariant(line_lst[4]), LStatRole); f->setData(QVariant(line_lst[5]), PathRole); QFileInfo fi(QDir(QString(arg3)), line_lst[5]); f->setData(QVariant(fi.absoluteFilePath()), Qt::ToolTipRole); f->setData(QVariant(line_lst[5]), Qt::DisplayRole); QList colums; decode_stat(line_lst[4].toLocal8Bit().data(), &statp, sizeof(statp), &LinkFI); char buf[200]; bstrutime(buf, sizeof(buf), statp.st_mtime); colums << f << new QStandardItem(convertBytesSI(statp.st_size)) << new QStandardItem(buf); model->appendRow(colums); ret = true; } } // Close the socket, it's over or we don't want to reuse it disconnect_bacula(); bail_out: if (tid) { stop_thread_timer(tid); } if (ret) { mark_as_done(); } else { mark_as_failed(); } return ret; } bool task::prepare_restore() { bool ret = false; btimer_t *tid = NULL; QString q; /* in the restore prepare phase, we apply plugins settings. * Upload and restoration must be done in one shot within the same connection since * the director uses UA adress to create a local file unique name */ FILE *fp; int idx = 0; QStringList pluginKeys; QStringList pluginNames; if (!restore_field.pluginkeys.isEmpty()) { pluginKeys = restore_field.pluginkeys.split(','); } if (!restore_field.pluginnames.isEmpty()) { pluginNames = restore_field.pluginnames.split(','); } ASSERT(pluginKeys.count() == pluginNames.count()); if (!res->bs || !res->bs->is_open() || res->bs->is_error()) { if (!connect_bacula()) { goto bail_out; } } tid = start_thread_timer(NULL, pthread_self(), (uint32_t)120); if (res->type == R_CLIENT && !res->proxy_sent) { res->proxy_sent = true; res->bs->fsend("proxy\n"); while (get_next_line(res)) { if (strncmp(curline, "2000", 4) != 0) { pm_strcpy(errmsg, curline); goto bail_out; } Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); } } if (res->type == R_DIRECTOR && res->use_setip) { res->bs->fsend("setip\n"); while (get_next_line(res)) { Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); } } res->bs->fsend(".api 2\n"); while (get_next_line(res)) { Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); } /* retrieve all job ids*/ q = QString(".bvfs_restore path=%1 jobid=%2").arg(restore_field.tableName, restore_field.jobIds); if (!restore_field.fileIds.isEmpty()) { q += QString(" fileid=%1").arg(restore_field.fileIds); } if (!restore_field.dirIds.isEmpty()) { q += QString(" dirid=%1").arg(restore_field.dirIds); } if (!restore_field.hardlinks.isEmpty()) { q += QString(" hardlink=%1").arg(restore_field.hardlinks); } q += "\n"; res->bs->fsend(q.toLatin1().data()); while (get_next_line(res)) { ret = true; Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); } /* parse plugins files, upload them and call restore pluginrestoreconf*/ foreach(QString k, pluginKeys) { QStringList keysplit=k.split(':'); if (keysplit.count() > 1) { QString key = keysplit[1]; QString name = pluginNames[idx]; fp = fopen(name.toLatin1().data(), "r"); if (!fp) { berrno be; Dmsg2(dbglvl, "Unable to open %s. 
ERR=%s\n", name.toLatin1().data(), be.bstrerror(errno)); goto bail_out; } res->bs->fsend(".putfile key=\"%s\"\n", key.toLatin1().data()); /* Just read the file and send it to the director */ while (!feof(fp)) { int i = fread(res->bs->msg, 1, sizeof_pool_memory(res->bs->msg) - 1, fp); if (i > 0) { res->bs->msg[i] = 0; res->bs->msglen = i; res->bs->send(); } } res->bs->signal(BNET_EOD); fclose(fp); res->bs->fsend("restore pluginrestoreconf=\"%s\"\n", k.toLatin1().data()); while (get_next_line(res)) { Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); } } ++idx; } // Close the socket, it's over or we don't want to reuse it disconnect_bacula(); bail_out: if (tid) { stop_thread_timer(tid); } return ret; } bool task::run_restore() { bool ret = false; btimer_t *tid = NULL; QString q; if (!res->bs || !res->bs->is_open() || res->bs->is_error()) { if (!connect_bacula()) { goto bail_out; } } tid = start_thread_timer(NULL, pthread_self(), (uint32_t)120); if (res->type == R_CLIENT && !res->proxy_sent) { res->proxy_sent = true; res->bs->fsend("proxy\n"); while (get_next_line(res)) { if (strncmp(curline, "2000", 4) != 0) { pm_strcpy(errmsg, curline); goto bail_out; } Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); } } if (res->type == R_DIRECTOR && res->use_setip) { res->bs->fsend("setip\n"); while (get_next_line(res)) { Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); } } res->bs->fsend(".api 2\n"); while (get_next_line(res)) { Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); } q = QString("restore client=\"%1\" restoreclient=\"%2\"").arg(restore_field.client, restore_field.restoreclient); if (!restore_field.where.isEmpty()) { restore_field.where.replace("\"", ""); q += QString(" where=\"%1\"").arg(restore_field.where); } if (!restore_field.comment.isEmpty()) { restore_field.comment.replace("\"", ""); q += QString(" comment=\"%1\"").arg(restore_field.comment); } q += QString(" file=\"?%1\"").arg(restore_field.tableName); q += " done yes\n"; res->bs->fsend(q.toLatin1().data()); /* drain the messages */ while (get_next_line(res)) { ret = true; // FIXME : report a signal to have a progress feedback Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); } // Close the socket, it's over or we don't want to reuse it disconnect_bacula(); bail_out: if (tid) { stop_thread_timer(tid); } return ret; } bool task::clean_restore() { bool ret = false; btimer_t *tid = NULL; QString q; if (!res->bs || !res->bs->is_open() || res->bs->is_error()) { if (!connect_bacula()) { goto bail_out; } } tid = start_thread_timer(NULL, pthread_self(), (uint32_t)120); if (res->type == R_CLIENT && !res->proxy_sent) { res->proxy_sent = true; res->bs->fsend("proxy\n"); while (get_next_line(res)) { if (strncmp(curline, "2000", 4) != 0) { pm_strcpy(errmsg, curline); goto bail_out; } Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); } } if (res->type == R_DIRECTOR && res->use_setip) { res->bs->fsend("setip\n"); while (get_next_line(res)) { Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); } } res->bs->fsend(".api 2\n"); while (get_next_line(res)) { Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); } q = QString(".bvfs_cleanup path=%1\n").arg(restore_field.tableName); res->bs->fsend(q.toLatin1().data()); while (get_next_line(res)) { ret = true; // FIXME : report a signal to have a progress feedback Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); } // Close the socket, it's over or we don't want to reuse it disconnect_bacula(); bail_out: if (tid) { stop_thread_timer(tid); } return ret; } bool 
task::restore() { bool ret=prepare_restore(); if (ret) { ret = run_restore(); if (ret) { ret = clean_restore(); } } if (ret) { mark_as_done(); } else { mark_as_failed(); } return ret; } QString task::plugins_ids(const QString& jobIds) { return parse_plugins(jobIds, "restoreobjectid"); } QString task::plugins_names(const QString& jobIds) { return parse_plugins(jobIds, "pluginname"); } QString task::parse_plugins(const QString& jobIds, const QString& fieldName) { btimer_t *tid = NULL; QString ret; QString q; if (!res->bs || !res->bs->is_open() || res->bs->is_error()) { if (!connect_bacula()) { return ret; } } tid = start_thread_timer(NULL, pthread_self(), (uint32_t)120); if (res->type == R_CLIENT && !res->proxy_sent) { res->proxy_sent = true; res->bs->fsend("proxy\n"); while (get_next_line(res)) { if (strncmp(curline, "2000", 4) != 0) { pm_strcpy(errmsg, curline); return ret; } Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); } } if (res->type == R_DIRECTOR && res->use_setip) { res->bs->fsend("setip\n"); while (get_next_line(res)) { Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); } } q = QString("llist pluginrestoreconf jobid=%1\n").arg(jobIds); res->bs->fsend(q.toLatin1().data()); QStringList sl; while (get_next_line(res)) { QString line(curline); line = line.simplified(); QStringList line_lst = line.split(":", QString::SkipEmptyParts); if (!line_lst.empty() && fieldName.compare(line_lst[0]) == 0) { sl << line_lst[1]; } } ret = sl.join(","); // Close the socket, it's over or we don't want to reuse it disconnect_bacula(); if (tid) { stop_thread_timer(tid); } return ret; } QFile* task::plugin(const QString& name, const QString& jobIds, int id) { QFile *ret(NULL); btimer_t *tid = NULL; QString q; if (id < 0) return NULL; if (!res->bs || !res->bs->is_open() || res->bs->is_error()) { if (!connect_bacula()) { return NULL; } } tid = start_thread_timer(NULL, pthread_self(), (uint32_t)120); if (res->type == R_CLIENT && !res->proxy_sent) { res->proxy_sent = true; res->bs->fsend("proxy\n"); while (get_next_line(res)) { if (strncmp(curline, "2000", 4) != 0) { pm_strcpy(errmsg, curline); return NULL; } Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); } } if (res->type == R_DIRECTOR && res->use_setip) { res->bs->fsend("setip\n"); while (get_next_line(res)) { Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); } } res->bs->fsend(".api 1\n"); while (get_next_line(res)) { Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); } ret = new QFile(name); ret->open(QIODevice::WriteOnly); q = QString("llist pluginrestoreconf jobid=%1 id=%2\n").arg(jobIds).arg(id); res->bs->fsend(q.toLatin1().data()); while (get_next_line(res)) { if (QString(curline).contains(":")) continue; ret->write(curline); ret->write("\n"); } ret->close(); // Close the socket, it's over or we don't want to reuse it disconnect_bacula(); if (tid) { stop_thread_timer(tid); } return ret; } /* Get resources to run a job */ bool task::get_resources() { bool ret = false; btimer_t *tid = NULL; if (!res->bs || !res->bs->is_open() || res->bs->is_error()) { if (!connect_bacula()) { goto bail_out; } } res->mutex->lock(); if (res->jobs) { delete res->jobs; } res->jobs = New(alist(10, owned_by_alist)); if (res->clients) { delete res->clients; } res->clients = New(alist(10, owned_by_alist)); if (res->filesets) { delete res->filesets; } res->filesets = New(alist(10, owned_by_alist)); if (res->pools) { delete res->pools; } res->pools = New(alist(10, owned_by_alist)); if (res->storages) { delete res->storages; } res->storages = 
New(alist(10, owned_by_alist)); if (res->catalogs) { delete res->catalogs; } res->catalogs = New(alist(10, owned_by_alist)); tid = start_thread_timer(NULL, pthread_self(), (uint32_t)120); if (res->type == R_CLIENT && !res->proxy_sent) { res->proxy_sent = true; res->bs->fsend("proxy\n"); while (get_next_line(res)) { if (strncmp(curline, "2000", 4) != 0) { pm_strcpy(errmsg, curline); goto bail_out; } Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); } Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); } res->bs->fsend(".api 2\n"); while (get_next_line(res)) { Dmsg2(dbglvl, "<- %d %s\n", res->bs->msglen, curline); } res->bs->fsend(".jobs type=B\n"); while (get_next_line(res)) { res->jobs->append(bstrdup(curline)); } res->bs->fsend(".pools\n"); while (get_next_line(res)) { res->pools->append(bstrdup(curline)); } res->bs->fsend(".clients\n"); while (get_next_line(res)) { res->clients->append(bstrdup(curline)); } res->bs->fsend(".filesets\n"); while (get_next_line(res)) { res->filesets->append(bstrdup(curline)); } res->bs->fsend(".storage\n"); while (get_next_line(res)) { res->storages->append(bstrdup(curline)); } res->bs->fsend(".catalogs\n"); while (get_next_line(res)) { res->catalogs->append(bstrdup(curline)); } bail_out: res->mutex->unlock(); if (tid) { stop_thread_timer(tid); } if (ret) { mark_as_done(); } else { mark_as_failed(); } return ret; } worker *worker_start() { worker *w = New(worker()); w->start(handle_task, w); return w; } void worker_stop(worker *w) { if (w) { w->stop(); delete w; } } bacula-15.0.3/src/qt-console/tray-monitor/filesmodel.h0000644000175000017500000001451414771010173022502 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * definition of models related to file selection wizard page. 
* * Written by Norbert Bizet, May MMXVII * */ #ifndef FILESMODEL_H #define FILESMODEL_H #include #include #include #include "task.h" #include #include "../util/fmtwidgetitem.h" enum { PathIdRole = Qt::UserRole+1, FilenameIdRole = Qt::UserRole+2, FileIdRole = Qt::UserRole+3, JobIdRole = Qt::UserRole+4, LStatRole = Qt::UserRole+5, PathRole = Qt::UserRole+6, TypeRole = Qt::UserRole+7, FullPathRole = Qt::UserRole+8 }; enum { TYPEROLE_DIRECTORY = 0, TYPEROLE_FILE }; class DirectoryItem : public QStandardItem { public: DirectoryItem() : QStandardItem() { /* explicit set the data, so it can be serialized in Mime Data for D&D */ setData(QVariant(TYPEROLE_DIRECTORY), TypeRole); } QVariant data(int role = Qt::UserRole + 1) const { if (role == Qt::DecorationRole) { QFileIconProvider provider; return provider.icon(QFileIconProvider::Folder); } return QStandardItem::data(role); } }; class FileItem : public QStandardItem { public: FileItem() : QStandardItem() { /* explicit set the data, so it can be serialized in Mime Data for D&D */ setData(QVariant(TYPEROLE_FILE), TypeRole); } enum { FILE_TYPE = UserType +2 }; QVariant data(int role = Qt::UserRole + 1) const { if (role == Qt::DecorationRole) { QFileIconProvider provider; return provider.icon(QFileIconProvider::File); } return QStandardItem::data(role); } }; #define BATCH_SIZE 100 class FileSourceModel : public QStandardItemModel { public: FileSourceModel() : QStandardItemModel(), m_cursor(0), m_batchSize(BATCH_SIZE), m_canFetchMore(true) {} bool canFetchMore(const QModelIndex &parent) const { Q_UNUSED(parent) return false/*m_canFetchMore*/; } void fetchMore(const QModelIndex &parent) { Q_UNUSED(parent) } public slots: void taskComplete(task *t) { t->deleteLater(); } private: u_int64_t m_cursor; u_int64_t m_batchSize; bool m_canFetchMore; }; extern int decode_stat(char *buf, struct stat *statp, int stat_size, int32_t *LinkFI); class FileDestModel : public QStandardItemModel { bool canDropMimeData(const QMimeData *data, Qt::DropAction action, int row, int column, const QModelIndex &parent) const { Q_UNUSED(action) Q_UNUSED(row) Q_UNUSED(column) Q_UNUSED(parent) if (data->hasFormat("application/x-qstandarditemmodeldatalist")) { QByteArray encoded = data->data("application/x-qabstractitemmodeldatalist"); QDataStream stream(&encoded, QIODevice::ReadOnly); while (!stream.atEnd()) { int row, col; QMap roleDataMap; stream >> row >> col >> roleDataMap; /* do something with the data */ int type = roleDataMap[TypeRole].toInt(); switch(type) { case TYPEROLE_DIRECTORY: case TYPEROLE_FILE: break; default: return false; } } return true; } return false; } bool dropMimeData(const QMimeData * data, Qt::DropAction action, int row, int column, const QModelIndex & parent) { Q_UNUSED(action) Q_UNUSED(row) Q_UNUSED(column) Q_UNUSED(parent) QByteArray encoded = data->data("application/x-qabstractitemmodeldatalist"); QDataStream stream(&encoded, QIODevice::ReadOnly); while (!stream.atEnd()) { int row, col; QMap roleDataMap; stream >> row >> col >> roleDataMap; if (col == 0) { QStandardItem *item; /* do something with the data */ int type = roleDataMap[TypeRole].toInt(); switch(type) { case TYPEROLE_DIRECTORY: item = new DirectoryItem(); break; case TYPEROLE_FILE: item = new FileItem(); break; default: return false; } item->setData(roleDataMap[PathIdRole], PathIdRole); item->setData(roleDataMap[FilenameIdRole], FilenameIdRole); item->setData(roleDataMap[FileIdRole], FileIdRole); item->setData(roleDataMap[JobIdRole], JobIdRole); item->setData(roleDataMap[LStatRole], LStatRole); 
item->setData(roleDataMap[PathRole], PathRole); item->setData(roleDataMap[Qt::DisplayRole], Qt::DisplayRole); item->setData(roleDataMap[Qt::ToolTipRole], Qt::ToolTipRole); if (type == TYPEROLE_FILE) { QList colums; struct stat statp; int32_t LinkFI; decode_stat(roleDataMap[LStatRole].toString().toLocal8Bit().data(), &statp, sizeof(statp), &LinkFI); char buf[200]; bstrutime(buf, sizeof(buf), statp.st_mtime); colums << item << new QStandardItem(convertBytesSI(statp.st_size)) << new QStandardItem(buf); appendRow(colums); } else { appendRow(item); } } } return true; } }; #endif // FILESMODEL_H bacula-15.0.3/src/qt-console/tray-monitor/RestoreJobPage.qml0000644000175000017500000000374314771010173023576 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2020 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ import QtQuick 2.10 import QtQuick.Window 2.10 import QtQuick.Layouts 1.3 import QtQuick.Controls 2.3 import QtQuick.Controls.Material 2.1 import QtQuick.Dialogs 1.2 import io.qt.bmob.restorejobcontroller 1.0 Page { id: restoreJobPage visible: true height: parent ? parent.height : 0 width: parent ? parent.width : 0 property var model; RestoreUiController { id: restoreJobController Component.onCompleted: { if(typeof restoreJobPage.model !== "undefined") { model = restoreJobPage.model } } onFileModelChanged: { fileSelectTab.fileModel = restoreJobController.files } onDialogTextChanged: { restoreJobDialog.text = restoreJobController.dialogText restoreJobDialog.visible = true } } StackLayout { id: restorePageStack anchors.fill: parent currentIndex: 0 JobSelectTab { id: jobSelectTab } FileSelectTab { id: fileSelectTab } RestoreConfirmTab { id: restoreConfirmTab } } MessageDialog { id: restoreJobDialog modality: Qt.ApplicationModal standardButtons: StandardButton.Ok visible: false onAccepted: { stackView.pop(); } } } bacula-15.0.3/src/qt-console/tray-monitor/tray-ui-controller.cpp0000644000175000017500000001023714771010173024463 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
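// Illustrative sketch, not part of the Bacula sources: how a drop payload for the
// FileDestModel defined in filesmodel.h above could be built by hand (for example in a
// small test).  The MIME type and the (row, column, role-map) stream layout mirror what
// FileDestModel::dropMimeData() decodes; the role constants come from filesmodel.h and
// the lstat string is whatever decode_stat() expects.  All values here are made up.
// Note, as an observation only: canDropMimeData() tests
// hasFormat("application/x-qstandarditemmodeldatalist") while the data it then reads is
// "application/x-qabstractitemmodeldatalist"; the sketch uses the latter, which is the
// format Qt's item views actually produce.
#include <QMimeData>
#include <QDataStream>
#include <QIODevice>
#include <QMap>
#include <QVariant>
#include "filesmodel.h"

static QMimeData *makeFileDropPayload(const QString &displayName, const QString &lstat)
{
   QByteArray encoded;
   QDataStream stream(&encoded, QIODevice::WriteOnly);
   QMap<int, QVariant> roles;
   roles[Qt::DisplayRole] = displayName;
   roles[TypeRole]  = TYPEROLE_FILE;       // enum values from filesmodel.h
   roles[LStatRole] = lstat;               // encoded stat, later fed to decode_stat()
   stream << 0 << 0 << roles;              // row, column, role map
   QMimeData *mime = new QMimeData();
   mime->setData("application/x-qabstractitemmodeldatalist", encoded);
   return mime;
}
// A FileDestModel instance would then accept it with:
//   destModel.dropMimeData(payload, Qt::CopyAction, -1, -1, QModelIndex());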
*/ #include "tray-ui-controller.h" TrayUiController::TrayUiController(QObject *parent): QObject(parent), m_monitorName(""), m_infoDialogText(""), m_config(NULL), m_clients(NULL), m_directors(NULL), m_storages(NULL) { m_config = &ConfigStorage::getInstance(); m_config->dump_storage(); m_traceWatcher = new QFileSystemWatcher(this); connect(m_traceWatcher, SIGNAL(fileChanged(QString)), this, SLOT(handleFdTraceChange(QString)), Qt::QueuedConnection); m_traceWatcher->addPath(AndroidFD::tracePath()); MONITOR *mon = m_config->getMonitor(); m_monitorName = QString(mon->hdr.name); fetchClients(); fetchDirectors(); fetchStorages(); } QString TrayUiController::fetchFile(QString fpath) { QByteArray fileBytes; QFile file(fpath); file.open(QIODevice::ReadOnly); file.seek(file.size() - 20000); fileBytes = file.read(20000); return QString(fileBytes); } void TrayUiController::fetchClients() { QList *clients = fetchResources(R_CLIENT); setClients(clients); } void TrayUiController::fetchDirectors() { QList *directors = fetchResources(R_DIRECTOR); setDirectors(directors); } void TrayUiController::fetchStorages() { QList *storages = fetchResources(R_STORAGE); setStorages(storages); } QList *TrayUiController::fetchResources(rescode resCode) { QList *resources = m_config->getResources(resCode); QList *models = new QList(); for(RESMON *res : *resources) { ResourceModel *model = new ResourceModel(); model->setResource(res, resCode); models->append(model); } delete resources; return models; } void TrayUiController::deleteResource(ResourceModel *resModel) { const char *error_msg = NULL; switch(resModel->resource()->code) { case R_CLIENT: m_clients->removeAll(resModel); error_msg = m_config->saveResources(m_clients, m_directors, m_storages); emit clientsChanged(); break; case R_DIRECTOR: m_directors->removeAll(resModel); error_msg = m_config->saveResources(m_clients, m_directors, m_storages); emit directorsChanged(); break; case R_STORAGE: m_storages->removeAll(resModel); error_msg = m_config->saveResources(m_clients, m_directors, m_storages); emit storagesChanged(); break; default: break; } } void TrayUiController::startFileDaemon() { AndroidFD::start(); } void TrayUiController::stopFileDaemon() { AndroidFD::stop(); } void TrayUiController::composeTraceMail() { AndroidFD::composeTraceMail(); } QString TrayUiController::getMonitorName() { return m_monitorName; } void TrayUiController::setMonitorName(QString name) { MONITOR *newMon = new MONITOR(); const char *err_msg = NULL; newMon->hdr.name = bstrdup(name.toLatin1().constData()); err_msg = m_config->saveMonitor(newMon); if (err_msg != NULL) { Dmsg1(0, "Error - could not save monitor name - %s\n", err_msg); } m_monitorName = name; free(newMon->hdr.name); } void TrayUiController::handleFdTraceChange(QString) { emit fdTraceChanged(); } TrayUiController::~TrayUiController() { if (m_clients != NULL) { delete m_clients; } if (m_directors != NULL) { delete m_directors; } if (m_storages != NULL) { delete m_storages; } if (m_traceWatcher != NULL) { m_traceWatcher->removePath(AndroidFD::tracePath()); delete m_traceWatcher; } } bacula-15.0.3/src/qt-console/tray-monitor/MainMenuPage.qml0000644000175000017500000003420314771010173023224 0ustar bsbuildbsbuildimport QtQuick 2.10 import QtQuick.Window 2.10 import QtQuick.Layouts 1.3 import QtQuick.Controls 2.3 import QtQuick.Controls.Material 2.1 import QtQuick.Dialogs 1.2 import io.qt.bmob.traycontroller 1.0 Page { id: mainMenuPage visible: true width: parent.width height: parent.height property bool main: true // Our C++ component 
TrayUiController { id: controller // Events triggered by our c++ code onClientsChanged: { clientsGrid.model = controller.clients } onDirectorsChanged: { directorsGrid.model = controller.directors } onStoragesChanged: { storagesGrid.model = controller.storages } onInfoDialogTextChanged: { } } onVisibleChanged: { if(visible) { controller.fetchClients() controller.fetchDirectors() controller.fetchStorages() } } header: ToolBar { id: toolbar height: 48 background: Rectangle { color: "#d32f2f" } RowLayout { anchors.fill: parent anchors.leftMargin: 8 anchors.rightMargin: 8 Label { id: title text: "Main Menu" color: "white" font.pixelSize: 18 anchors.centerIn: parent } } } Flickable { id: scrollableBox anchors.top: parent.top anchors.bottom: resDetailsPanel.visible? resDetailsPanel.top : divider.top width: parent.width contentWidth: parent.width contentHeight: scrollContent.height clip: true Item { id: scrollContent width: parent.width height: childrenRect.height Text { id: clientsLabel text: "Clients:" font.pixelSize: 18 anchors.left: parent.left anchors.leftMargin: 16 anchors.top: parent.top anchors.topMargin: 24 } Rectangle { id: clientsDivider width: parent.width height: 1 color: "#d32f2f" anchors.top: clientsLabel.bottom anchors.topMargin: 16 anchors.left: parent.left anchors.leftMargin: 16 anchors.right: parent.right anchors.rightMargin: 16 } GridView { id: clientsGrid height: childrenRect.height interactive: false anchors.top: clientsDivider.bottom anchors.topMargin: 8 anchors.left: parent.left anchors.right: parent.right model: controller.clients Button { id: emptyClientsButton text: "+ Add a Client" onClicked: { resDetailsPanel.resType = 0 resDetailsPanel.visible = true } anchors.left: parent.left anchors.leftMargin: 24 anchors.top: parent.top anchors.topMargin: 8 visible: parent.count == 0 contentItem: Text { text: emptyClientsButton.text font.pixelSize: 18 color: emptyClientsButton.down ? 
"#ef9a9a" : "#d32f2f" } background: Rectangle { color: "white" } } delegate: Item { id: gridItem width: clientsGrid.cellWidth height: clientsGrid.cellHeight Rectangle { anchors.left: parent.left anchors.right: parent.right anchors.top: parent.top anchors.bottom: parent.bottom color: "#e0e0e0" visible: clickArea.pressed } Item { height: childrenRect.height width: parent.width anchors.verticalCenter: parent.verticalCenter Image { id: resIcon width: 48 height: 48 source: "images/client_icon_48dp.png" anchors.horizontalCenter: parent.horizontalCenter } Text { anchors.top: resIcon.bottom anchors.topMargin: 8 anchors.right: parent.right anchors.left: parent.left anchors.rightMargin: 8 anchors.leftMargin: 8 font.pixelSize: 13 horizontalAlignment: Text.AlignHCenter text: model.modelData.resourceName elide: Text.ElideRight } } MouseArea { id: clickArea anchors.left: parent.left anchors.right: parent.right anchors.top: parent.top anchors.bottom: parent.bottom onClicked: { stackView.push( Qt.resolvedUrl("ResourceDetailsPage.qml"), {"resModel" : model.modelData} ) } } } } Text { id: directorsLabel text: "Directors:" font.pixelSize: 18 anchors.left: parent.left anchors.leftMargin: 16 anchors.top: clientsGrid.bottom anchors.topMargin: 24 } Rectangle { id: directorsDivider width: parent.width height: 1 color: "#d32f2f" anchors.top: directorsLabel.bottom anchors.topMargin: 16 anchors.left: parent.left anchors.leftMargin: 16 anchors.right: parent.right anchors.rightMargin: 16 } GridView { id: directorsGrid height: childrenRect.height interactive: false anchors.top: directorsDivider.bottom anchors.topMargin: 8 anchors.left: parent.left anchors.right: parent.right model: controller.directors Button { id: emptyDirsButton text: "+ Add a Director" onClicked: { resDetailsPanel.resType = 1 resDetailsPanel.visible = true } anchors.left: parent.left anchors.leftMargin: 24 anchors.top: parent.top anchors.topMargin: 8 visible: parent.count == 0 contentItem: Text { text: emptyDirsButton.text font.pixelSize: 18 color: emptyDirsButton.down ? 
"#ef9a9a" : "#d32f2f" } background: Rectangle { color: "white" } } delegate: Item { id: dirGridItem width: directorsGrid.cellWidth height: directorsGrid.cellHeight Rectangle { anchors.left: parent.left anchors.right: parent.right anchors.top: parent.top anchors.bottom: parent.bottom color: "#e0e0e0" visible: dirClickArea.pressed } Item { height: childrenRect.height width: parent.width anchors.verticalCenter: parent.verticalCenter Image { id: dirResIcon width: 48 height: 48 source: "images/director_icon_48dp.png" anchors.horizontalCenter: parent.horizontalCenter } Text { anchors.top: dirResIcon.bottom anchors.topMargin: 8 anchors.right: parent.right anchors.left: parent.left anchors.rightMargin: 8 anchors.leftMargin: 8 font.pixelSize: 13 horizontalAlignment: Text.AlignHCenter text: model.modelData.resourceName elide: Text.ElideRight } } MouseArea { id: dirClickArea anchors.left: parent.left anchors.right: parent.right anchors.top: parent.top anchors.bottom: parent.bottom onClicked: { stackView.push( Qt.resolvedUrl("ResourceDetailsPage.qml"), {"resModel" : model.modelData} ) } } } } Text { id: storagesLabel text: "Storages:" font.pixelSize: 18 anchors.left: parent.left anchors.leftMargin: 16 anchors.top: directorsGrid.bottom anchors.topMargin: 24 } Rectangle { id: storagesDivider width: parent.width height: 1 color: "#d32f2f" anchors.top: storagesLabel.bottom anchors.topMargin: 16 anchors.left: parent.left anchors.leftMargin: 16 anchors.right: parent.right anchors.rightMargin: 16 } GridView { id: storagesGrid height: childrenRect.height interactive: false anchors.top: storagesDivider.bottom anchors.topMargin: 8 anchors.left: parent.left anchors.right: parent.right model: controller.storages Button { id: emptyStoragesButton text: "+ Add a Storage" onClicked: { resDetailsPanel.resType = 2 resDetailsPanel.visible = true } anchors.left: parent.left anchors.leftMargin: 24 anchors.top: parent.top anchors.topMargin: 8 visible: parent.count == 0 contentItem: Text { text: emptyStoragesButton.text font.pixelSize: 18 color: emptyStoragesButton.down ? 
"#ef9a9a" : "#d32f2f" } background: Rectangle { color: "white" } } delegate: Item { id: sdGridItem width: storagesGrid.cellWidth height: storagesGrid.cellHeight Rectangle { anchors.left: parent.left anchors.right: parent.right anchors.top: parent.top anchors.bottom: parent.bottom color: "#e0e0e0" visible: sdClickArea.pressed } Item { height: childrenRect.height width: parent.width anchors.verticalCenter: parent.verticalCenter Image { id: sdResIcon width: 48 height: 48 source: "images/storage_icon_48dp.png" anchors.horizontalCenter: parent.horizontalCenter } Text { anchors.top: sdResIcon.bottom anchors.topMargin: 8 anchors.right: parent.right anchors.left: parent.left anchors.rightMargin: 8 anchors.leftMargin: 8 font.pixelSize: 13 horizontalAlignment: Text.AlignHCenter text: model.modelData.resourceName elide: Text.ElideRight } } MouseArea { id: sdClickArea anchors.left: parent.left anchors.right: parent.right anchors.top: parent.top anchors.bottom: parent.bottom onClicked: { stackView.push( Qt.resolvedUrl("ResourceDetailsPage.qml"), {"resModel" : model.modelData} ) } } } } // Visual margin in the bottom Item { width: parent.width height: 32 anchors.top: storagesGrid.bottom } } // Item (ScrollView contents) } // ScrollView Rectangle { id: divider visible: !resDetailsPanel.visible width: parent.width height: 1 color: "#d32f2f" anchors.bottom: tconfigButton.top } Button { id: tconfigButton visible: !resDetailsPanel.visible text: "Tray Config" onClicked: stackView.push(Qt.resolvedUrl("TrayUiPage.qml")) anchors.leftMargin: 12 anchors.left: parent.left anchors.bottom: parent.bottom contentItem: Text { text: tconfigButton.text font.pixelSize: 18 color: tconfigButton.down ? "#ef9a9a" : "#d32f2f" } background: Rectangle { color: "white" } } Button { id: fdButton visible: !resDetailsPanel.visible text: "File Daemon" onClicked: stackView.push(Qt.resolvedUrl("FileDaemonPage.qml")) anchors.rightMargin: 12 anchors.right: parent.right anchors.bottom: parent.bottom contentItem: Text { text: fdButton.text font.pixelSize: 18 color: fdButton.down ? "#ef9a9a" : "#d32f2f" } background: Rectangle { color: "white" } } ResourcePanel { id: resDetailsPanel onVisibleChanged: { if (!visible) { controller.fetchClients() controller.fetchDirectors() controller.fetchStorages() } } } } bacula-15.0.3/src/qt-console/tray-monitor/restore-ui-controller.h0000644000175000017500000001620014771010173024630 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ #ifndef RESTOREUICONTROLLER_H #define RESTOREUICONTROLLER_H #include #include #include "tray_conf.h" #include "config-storage.h" #include "resmodel.h" #include #include "jcr.h" #include "task.h" #include "common.h" #include "jobmodel.h" #include "runjobmodel.h" #include "restorejobmodel.h" #include "filesmodel.h" #include #include /* RestoreJobUiController - controls the screen where the user specifies which Job it wants to restore */ class RestoreUiController : public QObject { Q_OBJECT // Contains the Director that will restore the Job Q_PROPERTY(RestoreJobModel *model WRITE setModel) // List of Jobs that can be restored Q_PROPERTY(QList jobs READ getJobs NOTIFY jobsChanged()) // List of clients that the user can choose // The Job will be restored into the selected client Q_PROPERTY(QStringList clients READ getClients NOTIFY clientsChanged()) // Restore Options Q_PROPERTY(QString where READ getWhereParam WRITE setWhereParam NOTIFY whereParamChanged()) Q_PROPERTY(int replaceIndex READ getReplaceIndex WRITE setReplaceIndex NOTIFY replaceIndexChanged()) Q_PROPERTY(QStringList replaceOptions READ getReplaceOptions NOTIFY replaceOptionsChanged()) // Optional comment Q_PROPERTY(QString comment READ getComment WRITE setComment NOTIFY commentChanged()) // Model usued to show the whole directory structure associated with the selected Job // It starts at "/", and the user can move down into the directory tree // to select specific files Q_PROPERTY(QStandardItemModel *files READ getFileModel NOTIFY fileModelChanged()) //Information dialog message Q_PROPERTY(QString dialogText READ getDialogText NOTIFY dialogTextChanged()) Q_PROPERTY(bool isConnecting READ isConnecting WRITE setIsConnecting NOTIFY isConnectingChanged) private: ConfigStorage *m_config; RestoreJobModel *m_model; FileSourceModel *m_sourceModel = new FileSourceModel(); QStringList *m_clients = new QStringList(); QStringList m_replaceOptions; QList *m_jobs = new QList(); QString m_where; QString m_comment; QString m_selectedJobId; QString m_dialogText; int m_replaceIndex = 0; int m_selectedClient = 0; int m_targetClient = 0; int m_currentSourceId; QString m_currentPathStr; QMap m_selectedFileIds; QMap m_selectedDirIds; bool m_connecting = false; void setDefaultValues(RESMON *dir); public: explicit RestoreUiController(QObject *parent = nullptr); ~RestoreUiController(); // Getters / Setters for data that is used by our GUI bool isConnecting() { return m_connecting; } void setIsConnecting(bool isConnecting) { if (m_connecting != isConnecting) { m_connecting = isConnecting; emit isConnectingChanged(); } } QString getDialogText() { return m_dialogText; } void setDialogText(QString dialogText) { if (m_dialogText == dialogText) { return; } m_dialogText = dialogText; emit dialogTextChanged(); } // Clients QStringList getClients() { return *m_clients; } void setClients(alist *clients) { char *cli; foreach_alist(cli, clients) { m_clients->append(QString(cli)); } emit clientsChanged(); } // Model void setModel(RestoreJobModel *model) { m_model = model; RESMON *dir = model->getDirector(); setClients(dir->clients); QStringList replaceOptions; replaceOptions << "Never" << "Always" << "IfNewer" << "IfOlder"; setReplaceOptions(replaceOptions); } // Jobs QList getJobs() { return *m_jobs; } void setJobs(QStandardItemModel *jobs) { JobModel *job; m_jobs->clear(); for(;;) { QList jobFields = jobs->takeRow(0); if (jobFields.size() < 2) { break; } job = new JobModel(); job->setData(jobFields); m_jobs->append(job); } emit jobsChanged(); } // Where Param 
QString getWhereParam() { return m_where; } void setWhereParam(QString where) { if (m_where == where) { return; } m_where = where; emit whereParamChanged(); } // Replace Options QStringList getReplaceOptions() { return m_replaceOptions; } void setReplaceOptions(QStringList replaceOptions) { m_replaceOptions = replaceOptions; emit replaceOptionsChanged(); } // Replace Index int getReplaceIndex() { return m_replaceIndex; } // Comment QString getComment() { return m_comment; } void setComment(QString comment) { if (comment == m_comment) return; m_comment = comment; emit commentChanged(); } QStandardItemModel *getFileModel() { return m_sourceModel; } void fetchDefaultValues(); void fetchPath(); signals: void clientsChanged(); void jobsChanged(); void whereParamChanged(); void replaceOptionsChanged(); void replaceIndexChanged(); void commentChanged(); void fileModelChanged(); void dialogTextChanged(); void isConnectingChanged(); public slots: void setTargetClient(int targetClient) { m_targetClient = targetClient; } void setReplaceIndex(int index) { if(index == m_replaceIndex) return; m_replaceIndex = index; emit replaceIndexChanged(); } // Called when the user selects a client. Calls the Director to fetch Jobs associated with it void handleClientChange(int clientIndex); void clientJobsCallback(task *t); // Called when the user selects a job. Calls the Director to fetch Files associated with it void handleJobSelection(QString jobId, QString jobName); void jobFilesCallback(task *t); // Called when the user navigates under the directory tree // to selected files to be restored void handleFileTreeNodeClick(int fileIndex); void handleFileSelection(int fileIndex, bool checked); bool fileIsSelected(int fileIndex); // Called when the user wishes to start the restore void restore(); // Called after we asked the resource to start the restore void restoreJobCallback(task *t); }; #endif // RESTOREUICONTROLLER_H bacula-15.0.3/src/qt-console/tray-monitor/JobListPage.qml0000644000175000017500000000213114771010173023054 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2020 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ import QtQuick 2.10 import QtQuick.Window 2.10 import QtQuick.Layouts 1.3 import QtQuick.Controls 2.3 import QtQuick.Controls.Material 2.1 import QtQuick.Dialogs 1.2 ListView { anchors.fill: parent boundsBehavior: Flickable.StopAtBounds delegate: JobListItem { jobId: model.modelData.id name: model.modelData.name level: model.modelData.level finfo: model.modelData.fileInfo errorCount: model.modelData.errorCount } } bacula-15.0.3/src/qt-console/tray-monitor/restorejobmodel.cpp0000644000175000017500000000144614771010173024111 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. 
You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #include "restorejobmodel.h" #include RestoreJobModel::RestoreJobModel(QObject *parent): QObject(parent) {} RestoreJobModel::~RestoreJobModel() {} bacula-15.0.3/src/qt-console/tray-monitor/clientselectwizardpage.cpp0000644000175000017500000000313014771010173025436 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Restore Wizard: Client selection page * * Written by Norbert Bizet, May MMXVII * */ #include "common.h" #include "clientselectwizardpage.h" #include "ui_clientselectwizardpage.h" #include "conf.h" ClientSelectWizardPage::ClientSelectWizardPage(QWidget *parent) : QWizardPage(parent), ui(new Ui::ClientSelectWizardPage) { ui->setupUi(this); /* currentClient field is mandatory */ registerField("currentClient*", ui->backupClientComboBox); } ClientSelectWizardPage::~ClientSelectWizardPage() { delete ui; } void ClientSelectWizardPage::initializePage() { if (res && res->clients && res->clients->size() > 0) { ui->backupClientComboBox->clear(); QStringList list; char *str; foreach_alist(str, res->clients) { list << QString(str); } ui->backupClientComboBox->addItems(list); ui->backupClientComboBox->setEnabled(true); } else { ui->backupClientComboBox->setEnabled(false); } } bacula-15.0.3/src/qt-console/tray-monitor/task.h0000644000175000017500000000742214771010173021321 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
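// Illustrative sketch, not part of the Bacula sources: the typical life cycle of the
// task declared below, as used by the UI controllers and by
// FileSourceModel::taskComplete() in filesmodel.h.  A task is pointed at a RESMON, its
// done(task*) signal is connected to a GUI-side slot, and it is handed to the background
// worker so the director/client socket I/O never runs in the GUI thread.  The enqueue
// call used here ("queue") is an assumption for the sketch; the real method is declared
// in the worker class, which is not part of this header.
#include <QObject>
#include "task.h"

static void start_resources_task(RESMON *r, worker *w, QObject *receiver)
{
   task *t = new task();
   t->init(r, TASK_RESOURCES);            // see the TASK_* enum below
   QObject::connect(t, SIGNAL(done(task*)),
                    receiver, SLOT(taskComplete(task*)),
                    Qt::QueuedConnection);
   w->queue(t);                           // assumed enqueue call on the worker thread
}
// The slot then checks t->status (set by mark_as_done()/mark_as_failed()), reads any
// result from t->res or t->model, and releases the task with t->deleteLater(), as
// FileSourceModel::taskComplete() does.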
*/ #ifndef TASK_H #define TASK_H #include "common.h" #include #include #include "tray_conf.h" #ifdef Q_OS_ANDROID #include "config-storage.h" #endif enum { TASK_NONE, TASK_STATUS, TASK_RESOURCES, TASK_QUERY, TASK_RUN, TASK_LIST_CLIENT_JOBS, TASK_LIST_JOB_FILES, TASK_RESTORE, TASK_PLUGIN, TASK_DEFAULTS, TASK_CLOSE, TASK_INFO, TASK_BWLIMIT, TASK_DISCONNECT }; /* The task should emit a signal when done */ class task: public QObject { Q_OBJECT public: RESMON *res; POOLMEM *errmsg; int type; bool status; char *curline; char *curend; const char *arg; /* Argument that can be used by some tasks */ const char *arg2; const char *arg3; QStandardItemModel *model; /* model to fill, depending on context */ uint64_t pathId; union { bool b; int i; char c[256]; } result; /* The task might return something */ struct r_field { QString tableName; QString jobIds; QString fileIds; QString dirIds; QString hardlinks; QString client; QString restoreclient; QString where; QString replace; QString comment; QString pluginnames; QString pluginkeys; } restore_field; task(): QObject(), res(NULL), type(TASK_NONE), status(false), curline(NULL), curend(NULL), arg(NULL), arg2(NULL), arg3(NULL), model(NULL), pathId(0) { errmsg = get_pool_memory(PM_FNAME); *errmsg = 0; memset(result.c, 0, sizeof(result.c)); } ~task() { Enter(); disconnect(); /* disconnect all signals */ free_pool_memory(errmsg); } void init(int t) { res = NULL; type = t; status = false; arg = NULL; arg2 = NULL; arg3 = NULL; model = NULL; pathId = 0; } void init(RESMON *s, int t) { init(t); res = s; } RESMON *get_res(); void lock_res(); void unlock_res(); bool connect_bacula(); bool do_status(); bool read_status_terminated(RESMON *res); bool read_status_header(RESMON *res); bool read_status_running(RESMON *res); bool set_bandwidth(); bool disconnect_bacula(); void mark_as_done() { status = true; emit done(this); } void mark_as_failed() { status = false; emit done(this); } bool get_resources(); bool get_next_line(RESMON *res); bool get_job_defaults(); /* Look r->defaults.job */ bool run_job(); bool get_job_info(const char *level); /* look r->info */ bool get_client_jobs(const char* client); bool get_job_files(const char* job, uint64_t pathId); bool prepare_restore(); bool run_restore(); bool clean_restore(); bool restore(); QString plugins_ids(const QString &jobIds); QString plugins_names(const QString& jobIds); QString parse_plugins(const QString& jobIds, const QString& fieldName); QFile* plugin(const QString &name, const QString& jobIds, int id); signals: void done(task *t); }; worker *worker_start(); void worker_stop(worker *); #endif bacula-15.0.3/src/qt-console/tray-monitor/sd-monitor.ui0000644000175000017500000001056714771010173022644 0ustar bsbuildbsbuild sdStatus 0 0 518 435 Form Storage Daemon Status Name: Started: Version: Plugins: Running Jobs QAbstractItemView::SingleSelection false Terminated Jobs QAbstractItemView::SingleSelection Qt::Horizontal 40 20 :/images/view-refresh.png:/images/view-refresh.png bacula-15.0.3/src/qt-console/tray-monitor/tray-monitor.cpp0000644000175000017500000002427414771010173023362 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. 
You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #include "tray-monitor.h" #include #include #ifdef Q_OS_ANDROID #include #include #include #ifdef ENTERPRISE #include "enterprise-tray-ui-controller.h" #else #include "tray-ui-controller.h" #endif #include "resdetails-ui-controller.h" #include "respanel-ui-controller.h" #include "runjob-ui-controller.h" #include "restore-ui-controller.h" #include "fd-config-ui-controller.h" #include "android-fd-service.h" #include "app-boot-ui-controller.h" int64_t AndroidFD::logLevel; #endif /* Static variables */ char *configfile = NULL; static MONITOR *monitor = NULL; static CONFIG *config = NULL; static TrayUI *mainwidget = NULL; static TSched *scheduler = NULL; #define CONFIG_FILE "./bacula-tray-monitor.conf" /* default configuration file */ #ifdef HAVE_WIN32 #define HOME_VAR "APPDATA" #define CONFIG_FILE_HOME "bacula-tray-monitor.conf" /* In $HOME */ #else #define HOME_VAR "HOME" #define CONFIG_FILE_HOME ".bacula-tray-monitor.conf" /* In $HOME */ #endif static void usage() { fprintf(stderr, _( PROG_COPYRIGHT "\n%sVersion: %s (%s) %s %s %s\n\n" "Usage: tray-monitor [-c config_file] [-d debug_level]\n" " -c set configuration file to file\n" " -d set debug level to \n" " -dt print timestamp in debug output\n" " -t test - read configuration and exit\n" " -W 0/1 force the detection of the systray\n" " -? print this message.\n" "\n"), 2004, BDEMO, VERSION, BDATE, HOST_OS, DISTNAME, DISTVER); } void terminate_tray_monitor(int /*sig*/) { #ifdef HAVE_WIN32 WSACleanup(); /* TODO: check when we have to call it */ #endif exit(0); } void refresh_tray(TrayUI *t) { RESMON *r; MONITOR *mon; if (!t) { return; } t->clearTabs(); if (!config) { return; } mon = (MONITOR *) GetNextRes(R_MONITOR, NULL); t->spinRefresh->setValue(mon?mon->RefreshInterval:60); foreach_res(r, R_CLIENT) { t->addTab(r); } foreach_res(r, R_DIRECTOR) { t->addTab(r); } foreach_res(r, R_STORAGE) { t->addTab(r); } } void display_error(const char *fmt, ...) { va_list arg_ptr; POOL_MEM tmp(PM_MESSAGE); QMessageBox msgBox; int maxlen; if (!fmt || !*fmt) { return; } maxlen = tmp.size() - 1; va_start(arg_ptr, fmt); bvsnprintf(tmp.c_str(), maxlen, fmt, arg_ptr); va_end(arg_ptr); msgBox.setIcon(QMessageBox::Critical); msgBox.setText(tmp.c_str()); msgBox.exec(); } void error_handler(const char *file, int line, LEX */* lc */, const char *msg, ...) { POOL_MEM tmp; va_list arg_ptr; va_start(arg_ptr, msg); vsnprintf(tmp.c_str(), tmp.size(), msg, arg_ptr); va_end(arg_ptr); display_error("Error %s:%d %s\n", file, line, tmp.c_str()); } int tls_pem_callback(char *buf, int size, const void * /*userdata*/) { bool ok; QString text = QInputDialog::getText(mainwidget, _("TLS PassPhrase"), buf, QLineEdit::Normal, QDir::home().dirName(), &ok); if (ok) { bstrncpy(buf, text.toUtf8().data(), size); return 1; } else { return 0; } } bool reload() { bool displaycfg=false; int nitems = 0; struct stat sp; Dmsg0(50, "reload the configuration!\n"); scheduler->stop(); if (config) { delete config; } config = NULL; monitor = NULL; if (stat(configfile, &sp) != 0) { berrno be; Dmsg2(50, "Unable to find %s. 
ERR=%s\n", configfile, be.bstrerror()); displaycfg = true; goto bail_out; } config = New(CONFIG()); if (!parse_tmon_config(config, configfile, M_ERROR)) { Dmsg1(50, "Error while parsing %s\n", configfile); // TODO: Display a warning message an open the configuration // window displaycfg = true; } LockRes(); foreach_res(monitor, R_MONITOR) { nitems++; } if (!displaycfg && nitems != 1) { Mmsg(config->m_errmsg, _("Error: %d Monitor resources defined in %s. " "You must define one Monitor resource.\n"), nitems, configfile); displaycfg = true; } monitor = (MONITOR*)GetNextRes(R_MONITOR, (RES *)NULL); UnlockRes(); if (displaycfg) { display_error(config->m_errmsg); } refresh_tray(mainwidget); if (monitor && monitor->command_dir) { scheduler->init(monitor->command_dir); scheduler->start(); } else { Dmsg0(50, "Do not start the scheduler\n"); } bail_out: return displaycfg; } /********************************************************************* * * Main Bacula Tray Monitor -- User Interface Program * */ int main(int argc, char *argv[]) { int ch; bool test_config = false, display_cfg = false; #ifdef Q_OS_ANDROID QFont font("Roboto"); QGuiApplication::setFont(font); QCoreApplication::setAttribute(Qt::AA_EnableHighDpiScaling); QGuiApplication app(argc, argv); QQmlApplicationEngine engine; AndroidFD::install(); #ifdef ENTERPRISE qmlRegisterType("io.qt.bmob.traycontroller", 1, 0, "TrayUiController"); engine.rootContext()->setContextProperty("IS_ENTERPRISE", QVariant(true)); #else qmlRegisterType("io.qt.bmob.traycontroller", 1, 0, "TrayUiController"); engine.rootContext()->setContextProperty("IS_ENTERPRISE", QVariant(false)); #endif // ENTERPRISE qmlRegisterType("io.qt.bmob.restorejobcontroller", 1, 0, "RestoreUiController"); qmlRegisterType("io.qt.bmob.runjobcontroller", 1, 0, "RunJobUiController"); qmlRegisterType("io.qt.bmob.resdetailscontroller", 1, 0, "ResDetailsUiController"); qmlRegisterType("io.qt.bmob.respanelcontroller", 1, 0, "ResPanelUiController"); qmlRegisterType("io.qt.bmob.fdconfigcontroller", 1, 0, "FdConfigUiController"); qmlRegisterType("io.qt.bmob.bootuicontroller", 1, 0, "BootUiController"); #else QApplication app(argc, argv); TrayUI tray; #endif TSched sched; setlocale(LC_ALL, ""); bindtextdomain("bacula", LOCALEDIR); textdomain("bacula"); init_stack_dump(); my_name_is(argc, argv, "tray-monitor"); lmgr_init_thread(); init_msg(NULL, NULL, NULL); #if defined(HAVE_WIN32) working_directory = getenv("TMP"); #elif defined(Q_OS_ANDROID) debug_level = 0; AndroidFD::logLevel = debug_level; working_directory = QStandardPaths::writableLocation(QStandardPaths::AppDataLocation) .toLatin1() .constData(); set_trace(true); #else if (working_directory == NULL) { working_directory = "/tmp"; } #endif start_watchdog(); #ifndef HAVE_WIN32 struct sigaction sigignore; sigignore.sa_flags = 0; sigignore.sa_handler = SIG_IGN; sigfillset(&sigignore.sa_mask); sigaction(SIGPIPE, &sigignore, NULL); #endif while ((ch = getopt(argc, argv, "c:d:th?TW:")) != -1) { switch (ch) { case 'c': /* configuration file */ if (configfile != NULL) { free(configfile); } configfile = bstrdup(optarg); break; #ifndef Q_OS_ANDROID case 'W': tray.have_systray = (atoi(optarg) != 0); break; #endif case 'T': set_trace(true); break; case 'd': if (*optarg == 't') { dbg_timestamp = true; } else { debug_level = atoi(optarg); if (debug_level <= 0) { debug_level = 1; } } break; case 't': test_config = true; break; case 'h': case '?': default: usage(); exit(1); } } argc -= optind; //argv += optind; #ifndef Q_OS_ANDROID 
init_signals(terminate_tray_monitor); #endif if (argc) { usage(); exit(1); } /* Keep generated files for ourself */ umask(0077); if (configfile == NULL) { if (getenv(HOME_VAR) != NULL) { int len = strlen(getenv(HOME_VAR)) + strlen(CONFIG_FILE_HOME) + 5; configfile = (char *) malloc(len); bsnprintf(configfile, len, "%s/%s", getenv(HOME_VAR), CONFIG_FILE_HOME); } else { configfile = bstrdup(CONFIG_FILE); } } #ifndef Q_OS_ANDROID Dmsg1(0, "configfile=%s\n", configfile); #else Dmsg0(0, "Opening App...\n"); #endif // We need to initialize the scheduler before the reload() command scheduler = &sched; OSDependentInit(); /* Initialize Windows path handling */ (void)WSA_Init(); /* Initialize Windows sockets */ if (init_crypto() != 0) { Emsg0(M_ERROR_TERM, 0, _("Cryptography library initialization failed.\n")); } #ifdef Q_OS_ANDROID ConfigStorage::init(configfile); engine.load(QUrl(QStringLiteral("qrc:/main.qml"))); if (engine.rootObjects().isEmpty()) return -1; #else display_cfg = reload(); if (test_config) { exit(0); } /* If we have a systray, we always keep the application*/ if (tray.have_systray) { app.setQuitOnLastWindowClosed(false); } else { /* Without a systray, we quit when we close */ app.setQuitOnLastWindowClosed(true); } tray.setupUi(&tray, monitor); refresh_tray(&tray); mainwidget = &tray; if (display_cfg) { new Conf(); } #endif app.exec(); sched.stop(); stop_watchdog(); (void)WSACleanup(); /* Cleanup Windows sockets */ if (config) { delete config; } config = NULL; bfree_and_null(configfile); term_msg(); return 0; } bacula-15.0.3/src/qt-console/tray-monitor/install_conf_file.in0000755000175000017500000000106114771010173024204 0ustar bsbuildbsbuild#!/bin/sh # Copyright (C) 2000-2023 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # sbindir=@sbindir@ sysconfdir=@sysconfdir@ INSTALL_CONFIG="@INSTALL@ -m 640" DESTDIR=`echo ${DESTDIR}` srcconf=bacula-tray-monitor.conf if test -f ${DESTDIR}${sysconfdir}/${srcconf}; then destconf=${srcconf}.new echo " ==> Found existing $srcconf, installing new conf file as ${destconf}" else destconf=${srcconf} fi echo "${INSTALL_CONFIG} ${srcconf} ${DESTDIR}${sysconfdir}/${destconf}" ${INSTALL_CONFIG} ${srcconf} ${DESTDIR}${sysconfdir}/${destconf} bacula-15.0.3/src/qt-console/tray-monitor/pluginwizardpage.h0000644000175000017500000000277414771010173023740 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
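// Illustrative sketch, not part of the Bacula sources: how the plugin restore-object
// helpers declared in task.h (plugins_ids(), plugins_names() and plugin()) can be
// combined before a wizard page such as the one declared below lets the user review the
// plugin restore options.  Both helpers parse the same
// "llist pluginrestoreconf jobid=..." output, so the comma-separated lists are expected
// to line up; the ".conf" file naming is only an example.
#include <QFile>
#include <QStringList>
#include "task.h"

static void fetch_plugin_objects(task *t, const QString &jobIds)
{
   QStringList ids   = t->plugins_ids(jobIds).split(",", QString::SkipEmptyParts);
   QStringList names = t->plugins_names(jobIds).split(",", QString::SkipEmptyParts);
   for (int i = 0; i < ids.size() && i < names.size(); i++) {
      // Dumps the "llist pluginrestoreconf jobid=... id=..." payload into a local file
      // that the wizard page can present for review or editing.
      QFile *f = t->plugin(names.at(i) + ".conf", jobIds, ids.at(i).toInt());
      if (f) {
         delete f;     // plugin() has already closed the file; the caller owns the QFile
      }
   }
}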
*/ /* * Restore Wizard: Plugin selection page * * Written by Norbert Bizet, May MMXVII * */ #ifndef PLUGINWIZARDPAGE_H #define PLUGINWIZARDPAGE_H #include namespace Ui { class PluginWizardPage; } class RESMON; class PluginWizardPage : public QWizardPage { Q_OBJECT Q_PROPERTY(QString pluginKeysStr READ pluginKeysStr NOTIFY pluginKeysStrChanged) private: QString m_pluginKeysStr; public: QString pluginKeysStr() const { return m_pluginKeysStr; } signals: void pluginKeysStrChanged(); public: explicit PluginWizardPage(QWidget *parent = 0); ~PluginWizardPage(); /* QWizardPage interface */ void initializePage(); bool validatePage(); /* local interface */ inline void setRes(RESMON *r) {res=r;} private: Ui::PluginWizardPage *ui; RESMON *res; QStringList registeredFields; }; #endif // PLUGINWIZARDPAGE_H bacula-15.0.3/src/qt-console/tray-monitor/jobselectwizardpage.h0000644000175000017500000000324614771010173024407 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Restore Wizard: Job selection page * * Written by Norbert Bizet, May MMXVII * */ #ifndef JOBSELECTWIZARDPAGE_H #define JOBSELECTWIZARDPAGE_H #include "common.h" #include class QStandardItemModel; class QItemSelection; class RESMON; namespace Ui { class JobSelectWizardPage; } class JobSelectWizardPage : public QWizardPage { Q_OBJECT Q_PROPERTY(qlonglong currentJob READ currentJob NOTIFY currentJobChanged) public: explicit JobSelectWizardPage(QWidget *parent = 0); ~JobSelectWizardPage(); /* QWizardPage interface */ void initializePage(); bool isComplete() const; /* local interface */ inline void setRes(RESMON *r) {res=r;} /* currentJob READ */ qlonglong currentJob() const; signals: /* currentJob NOTIFY */ void currentJobChanged(); protected slots: void populateModel(); private: Ui::JobSelectWizardPage *ui; RESMON *res; QStandardItemModel *model; qlonglong m_jobId; }; #endif // JOBSELECTWIZARDPAGE_H bacula-15.0.3/src/qt-console/tray-monitor/android-fd-service.h0000644000175000017500000002233314771010173024022 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #ifndef ANDROID_FD_SERVICE_H #define ANDROID_FD_SERVICE_H #include "bacula.h" #include "common.h" #include #include /* AndroidFD - Contains the methods we use to setup the Android File Daemon. 
Many methods involve calling Java code, such as: 1 - The Android Service that creates the Android File Daemon process 2 - The Screen that scans QR Codes 3 - Some data that requires native Java calls (deviceId and devicePwd) */ class AndroidFD { public: static int64_t logLevel; // Starts the screen that scans QR Codes. The receiver will be responsible // for handling the scanned QR Code data. static void startQRCodeReader(QAndroidActivityResultReceiver *receiver) { bool hasPermission = AndroidFD::getPermission("android.permission.CAMERA"); if(!hasPermission) { return; } QAndroidJniObject jPackageName = QAndroidJniObject::fromString("org.baculasystems.bmob.alfa"); QAndroidJniObject jClassName = QAndroidJniObject::fromString("org.baculasystems.bmob.alfa.QRCodeReaderActivity"); QAndroidJniObject intent("android/content/Intent","()V"); intent.callObjectMethod("setClassName", "(Ljava/lang/String;Ljava/lang/String;)Landroid/content/Intent;", jPackageName.object(), jClassName.object()); QtAndroid::startActivity(intent, 0, receiver); } static QString deviceId() { return QAndroidJniObject::callStaticObjectMethod("org/baculasystems/bmob/alfa/BAndroidUtil", "getDeviceId", "(Landroid/content/Context;)Ljava/lang/String;", QtAndroid::androidActivity().object() ).toString(); } static QString devicePwd() { return QAndroidJniObject::callStaticObjectMethod("org/baculasystems/bmob/alfa/BAndroidUtil", "getDevicePwd", "(Landroid/content/Context;)Ljava/lang/String;", QtAndroid::androidActivity().object() ).toString(); } static QString sdcardPath() { return QAndroidJniObject::callStaticObjectMethod("org/baculasystems/bmob/alfa/BAndroidUtil", "getSDcardPath", "()Ljava/lang/String;", QtAndroid::androidActivity().object() ).toString(); } static QString tracePath() { return QStandardPaths::writableLocation(QStandardPaths::AppDataLocation) + "/bacula-fd.trace"; } // Reads Bacula FD trace and send it in a request for Android to start an Mail App static void composeTraceMail() { QString mailSubject = "Bacula File Daemon Log"; QString fdTrace = AndroidFD::tracePath(); QByteArray fileBytes; QFile file(fdTrace); file.open(QIODevice::ReadOnly); file.seek(file.size() - 20000); fileBytes = file.read(20000); QString mailText = QString(fileBytes); QAndroidJniObject::callStaticMethod("org/baculasystems/bmob/alfa/BAndroidUtil", "startMailApp", "(Landroid/content/Context;Ljava/lang/String;Ljava/lang/String;)V", QtAndroid::androidActivity().object(), QAndroidJniObject::fromString(mailSubject).object(), QAndroidJniObject::fromString(mailText).object()); } // Changes the bacula-fd.conf template by adding information related to the device static void setupFdConfig(QString configPath) { QByteArray fileBytes; QFile file(configPath); file.open(QIODevice::ReadWrite); fileBytes = file.readAll(); QString text(fileBytes); text.replace("@DEVICE-ID@", AndroidFD::deviceId()); text.replace("@DEVICE-PWD@", AndroidFD::devicePwd()); file.seek(0); file.write(text.toUtf8()); file.close(); } // Creates the dialog that asks the user for a permission static bool getPermission(QString permission) { QtAndroid::PermissionResult res = QtAndroid::checkPermission(permission); if(res == QtAndroid::PermissionResult::Denied) { QtAndroid::requestPermissionsSync(QStringList() << permission); res = QtAndroid::checkPermission(permission); if(res == QtAndroid::PermissionResult::Denied) { Dmsg1(0, "Permission denied by the user: %s\n", permission.toLatin1().constData()); return false; } } return true; } // Copies the file to a folder where we have permissions to do // 
whatever we want (read, write, execute) static void unpackAsset(QString srcAsset, QString destPath) { if (QFile::exists(srcAsset)) { Dmsg2(0, "Found %s. Copying to %s\n", srcAsset.toLatin1().constData(), destPath.toLatin1().constData()); QFile asset(srcAsset); if (asset.copy(destPath)) { QFile::setPermissions(destPath, QFile::WriteOwner | QFile::ReadOwner | QFile::ExeGroup | QFile::ExeOther | QFile::ExeOwner | QFile::ExeUser); } else { Dmsg1(0, "ERROR - could no create %s\n", destPath.toLatin1().constData()); } } else { Dmsg1(0, "%s not found.\n", srcAsset.toLatin1().constData()); } } // Installs the Android File Daemon into the phone static void install() { // Ask the user for permissions needed for performing backups AndroidFD::getPermission("android.permission.READ_EXTERNAL_STORAGE"); AndroidFD::getPermission("android.permission.WRITE_EXTERNAL_STORAGE"); QString appDir = QStandardPaths::writableLocation(QStandardPaths::AppDataLocation); QString fdAsset = "assets:/static-bacula-fd"; QString fdPath = appDir + "/bin/static-bacula-fd"; QString configAsset = "assets:/bacula-fd.conf"; QString configPath = appDir + "/etc/bacula-fd.conf"; QString fdTrace = AndroidFD::tracePath(); // If the Android File Daemon is not found, then install it if (!QFile::exists(fdPath)) { Dmsg0(0, "Unpacking Bacula FD..."); QDir qd(appDir); qd.mkdir("bin"); qd.mkdir("etc"); qd.mkdir("lib"); AndroidFD::unpackAsset(fdAsset, fdPath); AndroidFD::unpackAsset(configAsset, configPath); AndroidFD::setupFdConfig(configPath); // Create empty Trace File with proper permissions const char *tPath = fdTrace.toLatin1().constData(); fclose(fopen(tPath, "ab+")); QFile::setPermissions(fdTrace, QFile::WriteOther | QFile::ReadOther | QFile::WriteUser | QFile::ReadUser | QFile::WriteOwner | QFile::ReadOwner | QFile::WriteGroup | QFile::ReadGroup ); } } // Starts the Android Service that's responsible for creating the Android File Daemon process static void start () { QString fdDir = QStandardPaths::writableLocation(QStandardPaths::AppDataLocation); QString fdCommand = fdDir + "/bin/static-bacula-fd"; QString fdTrace = AndroidFD::tracePath(); QString lLevel = QString::number(logLevel); QAndroidJniObject::callStaticMethod("org/baculasystems/bmob/alfa/BaculaFDService", "start", "(Landroid/content/Context;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;)V", QtAndroid::androidActivity().object(), QAndroidJniObject::fromString(fdCommand).object(), QAndroidJniObject::fromString(fdTrace).object(), QAndroidJniObject::fromString(lLevel).object()); } // Stops our Android Service static void stop() { QAndroidJniObject::callStaticMethod("org/baculasystems/bmob/alfa/BaculaFDService", "stop", "(Landroid/content/Context;)V", QtAndroid::androidActivity().object()); } }; #endif // ANDROID_FD_SERVICE_H bacula-15.0.3/src/qt-console/tray-monitor/JobSelectTab.qml0000644000175000017500000001215314771010173023217 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2020 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ import QtQuick 2.10 import QtQuick.Window 2.10 import QtQuick.Layouts 1.3 import QtQuick.Controls 2.3 import QtQuick.Controls.Material 2.1 import QtQuick.Dialogs 1.2 import io.qt.bmob.traycontroller 1.0 Page { visible: true height: parent ? parent.height : 0 width: parent ? parent.width : 0 header: ToolBar { id: toolbar height: 48 background: Rectangle { color: "#d32f2f" } RowLayout { anchors.fill: parent anchors.leftMargin: 8 anchors.rightMargin: 8 ToolButton { id: backButton onClicked: stackView.pop() anchors.left: parent.left contentItem: Text { text: qsTr("<") font.pixelSize: 24 opacity: enabled ? 1.0 : 0.3 color: "white" } background: Rectangle { color: backButton.down ? "#b71c1c" : "#d32f2f" } } Label { id: titleLabel text: "1 - Select a Job" color: "white" font.pixelSize: 18 anchors.centerIn: parent } } } Text { id: clientsLabel text: "Client:" anchors.verticalCenter: clientsCombo.verticalCenter anchors.left: parent.left anchors.leftMargin: 16 } ComboBox { id: clientsCombo model: restoreJobController.clients anchors.top: parent.top anchors.topMargin: 16 anchors.left: clientsLabel.right anchors.leftMargin: 8 anchors.right: parent.right anchors.rightMargin: 16 delegate: ItemDelegate { width: clientsCombo.width contentItem: Text { text: model.modelData color: "#000000" font: clientsCombo.font elide: Text.ElideRight verticalAlignment: Text.AlignVCenter } highlighted: clientsCombo.highlightedIndex === index } indicator: Canvas { } contentItem: Text { leftPadding: 8 rightPadding: clientsCombo.indicator.width + clientsCombo.spacing text: clientsCombo.displayText font: clientsCombo.font color: clientsCombo.pressed ? "#ef9a9a" : "#000000" verticalAlignment: Text.AlignVCenter elide: Text.ElideRight } background: Rectangle { implicitWidth: 120 implicitHeight: 40 border.color: clientsCombo.pressed ? "#ef9a9a" : "#d32f2f" border.width: clientsCombo.visualFocus ? 2 : 1 radius: 2 } popup: Popup { y: clientsCombo.height - 1 width: clientsCombo.width implicitHeight: contentItem.implicitHeight padding: 1 contentItem: ListView { clip: true implicitHeight: contentHeight model: clientsCombo.popup.visible ? 
clientsCombo.delegateModel : null currentIndex: clientsCombo.highlightedIndex ScrollIndicator.vertical: ScrollIndicator { } } background: Rectangle { border.color: "#d32f2f" radius: 2 } } onCurrentIndexChanged: { restoreJobController.handleClientChange( clientsCombo.currentIndex ) } } Rectangle { id: divider anchors.left: parent.left anchors.right: parent.right anchors.top: clientsCombo.bottom anchors.topMargin: 16 height: 1 color: "#e0e0e0" } ListView { id: jobsListView clip: true anchors.top: divider.bottom anchors.right: parent.right anchors.left: parent.left anchors.bottom: parent.bottom model:restoreJobController.jobs boundsBehavior: Flickable.StopAtBounds delegate: JobListItem { jobId: model.modelData.id name: model.modelData.name level: model.modelData.level finfo: model.modelData.fileInfo errorCount: model.modelData.errorCount clickArea.onClicked: { restoreJobController.handleJobSelection( model.modelData.id, model.modelData.name ) restorePageStack.currentIndex = 1 } } } PulseLoader { useDouble: true visible: restoreJobController.isConnecting radius: 28 color: "#d32f2f" anchors.horizontalCenter: parent.horizontalCenter anchors.bottom: parent.bottom anchors.bottomMargin: 24 } } bacula-15.0.3/src/qt-console/tray-monitor/restorejobmodel.h0000644000175000017500000000253214771010173023553 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #ifndef RESTOREJOBMODEL_H #define RESTOREJOBMODEL_H #include #include #include "tray_conf.h" /* RestoreJobModel - contains the director that the App is connected with. The only reason for the existence of this class is the fact we can't send a RESMON struct between QML screens. Therefore, we need to wrap it into a class that extends QObject. */ class RestoreJobModel : public QObject { Q_OBJECT private: RESMON *m_director; public: explicit RestoreJobModel(QObject *parent = nullptr); ~RestoreJobModel(); void setDirector(RESMON *dir) { m_director = dir; } RESMON *getDirector() { return m_director; } signals: }; #endif // RESTOREJOBMODEL_H bacula-15.0.3/src/qt-console/tray-monitor/jobmodel.h0000644000175000017500000000764214771010173022156 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ #ifndef JOBMODEL_H #define JOBMODEL_H #include #include #include "tray_conf.h" #include "jcr.h" #include "../util/fmtwidgetitem.h" #include "android/log.h" class JobModel : public QObject { Q_OBJECT Q_PROPERTY(QString id READ getId NOTIFY idChanged) Q_PROPERTY(QString name READ getName NOTIFY nameChanged) Q_PROPERTY(QString level READ getLevel NOTIFY levelChanged) Q_PROPERTY(QString fileInfo READ getFileInfo NOTIFY fileInfoChanged) Q_PROPERTY(QString errorCount READ getErrorCount NOTIFY errorCountChanged) private: QString m_id; QString m_name; QString m_level; QString m_fileInfo; QString m_errorCount; public: explicit JobModel(QObject *parent = nullptr); ~JobModel(); //Job Id QString getId() { return m_id; } void setId(const QString &id) { if (m_id == id) return; m_id = id; emit idChanged(); } //Job Name QString getName() { return m_name; } void setName(const QString &name) { if (m_name == name) return; m_name = name; emit nameChanged(); } //Job Level QString getLevel() { return m_level; } void setLevel(const QString &level) { if (m_level == level) return; m_level = level; emit levelChanged(); } //Job File Info QString getFileInfo() { return m_fileInfo; } void setFileInfo(uint64_t bytesInt, uint32_t fileCount) { char buf[50]; qint64 bytes = QString(edit_uint64(bytesInt, buf)) .trimmed() .toLongLong(); QString bytesStr = convertBytesIEC(bytes); m_fileInfo = QString("%1 Files (%2)") .arg(fileCount) .arg(bytesStr); emit fileInfoChanged(); } //Job Error Count QString getErrorCount() { return m_errorCount; } void setErrorCount(int32_t count) { QString temp; if(count == 1) { temp = "%1 Error"; } else { temp = "%1 Errors"; } m_errorCount = QString(temp).arg(count); emit errorCountChanged(); } void setData(s_last_job *job) { setId(QString::number(job->JobId)); setName(job->Job); setLevel(job_level_to_str(job->JobLevel)); setFileInfo(job->JobBytes, job->JobFiles); if(job->Errors > 0) { setErrorCount(job->Errors); } } void setData(s_running_job *job) { setId(QString::number(job->JobId)); setName(job->Job); setLevel(job_level_to_str(job->JobLevel)); setFileInfo(job->JobBytes, job->JobFiles); if(job->Errors > 0) { setErrorCount(job->Errors); } } void setData(QListjobFields) { setId(jobFields[0]->text()); setName(jobFields[1]->text()); if(jobFields[2] != NULL) { setLevel(jobFields[2]->text()); } if(jobFields[4] != NULL && jobFields[5] != NULL) { // setFileInfo(jobFields[5]->text().toUInt(), jobFields[4]->text().toUInt()); } } signals: void idChanged(); void nameChanged(); void levelChanged(); void fileInfoChanged(); void errorCountChanged(); }; #endif // JOBMODEL_H bacula-15.0.3/src/qt-console/tray-monitor/make_debug_apk.sh.in0000644000175000017500000000220714771010173024061 0ustar bsbuildbsbuild#!/bin/sh MAKE=make QMAKE=@ANDROID_QT_QMAKE@ APK_MAKE=@ANDROID_QT_APK_MAKE@ TRAYMON_DIR=@TOP_DIR@/bacula/src/qt-console/tray-monitor BUILD_DIR=${TRAYMON_DIR}/android-build BUILD_TARGET=android-@ANDROID_API@ BUILD_JSON_FILE=android-libbacula-tray-monitor.so-deployment-settings.json rm -rf ${BUILD_DIR} mkdir ${BUILD_DIR} ${QMAKE} ${TRAYMON_DIR}/tray-monitor.android.pro -spec android-g++ CONFIG+=debug CONFIG+=force_debug_info CONFIG+=separate_debug_info #libfix.sh sed -i -e 's/\.so/\.la/g' Makefile ${MAKE} clean ${MAKE} ${MAKE} install INSTALL_ROOT=${BUILD_DIR} #mvdyn.sh.in cp ${TRAYMON_DIR}/.libs/libbacula-tray-monitor.so ${TRAYMON_DIR} cp ${TRAYMON_DIR}/.libs/libbacula-tray-monitor.so ${BUILD_DIR}/@ANDROID_QT_BUILD_LIBS@ cp @TOP_DIR@/bacula/src/lib/.libs/libbaccfg-@VERSION@.so 
${BUILD_DIR}/@ANDROID_QT_BUILD_LIBS@ cp @TOP_DIR@/bacula/src/lib/.libs/libbac-@VERSION@.so ${BUILD_DIR}/@ANDROID_QT_BUILD_LIBS@ cp @TOP_DIR@/bacula/src/findlib/.libs/libbacfind-@VERSION@.so ${BUILD_DIR}/@ANDROID_QT_BUILD_LIBS@ #build android apk ${APK_MAKE} --output ${BUILD_DIR} --verbose --input ${BUILD_JSON_FILE} --android-platform ${BUILD_TARGET} --gradle bacula-15.0.3/src/qt-console/tray-monitor/clientselectwizardpage.ui0000644000175000017500000000331414771010173025275 0ustar bsbuildbsbuild ClientSelectWizardPage 0 0 714 330 WizardPage true 0 0 0 0 Qt::DefaultContextMenu Backup Client: 0 0 595 0 -1 bacula-15.0.3/src/qt-console/tray-monitor/fileselectwizardpage.h0000644000175000017500000000747214771010173024561 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Restore Wizard: File selection page * * Written by Norbert Bizet, May MMXVII * */ #ifndef FILESELECTWIZARDPAGE_H #define FILESELECTWIZARDPAGE_H #include class QStandardItemModel; class QModelIndex; class RESMON; class task; namespace Ui { class FileSelectWizardPage; } class FileSelectWizardPage : public QWizardPage { Q_OBJECT Q_PROPERTY(qulonglong currentSourceId READ currentSourceId NOTIFY currentSourceIdChanged) Q_PROPERTY(QString currentPathStr READ currentPathStr NOTIFY currentPathStrChanged) Q_PROPERTY(QString jobIds READ jobIds NOTIFY jobIdsChanged) Q_PROPERTY(QString fileIds READ fileIds NOTIFY fileIdsChanged) Q_PROPERTY(QString dirIds READ dirIds NOTIFY dirIdsChanged) Q_PROPERTY(QString hardlinks READ hardlinks NOTIFY hardlinksChanged) Q_PROPERTY(QString pluginIds READ pluginIds NOTIFY pluginIdsChanged) Q_PROPERTY(QString pluginNames READ pluginNames NOTIFY pluginNamesChanged) private: qulonglong m_currentSourceId; public: qulonglong currentSourceId() const { return m_currentSourceId; } signals: void currentSourceIdChanged(); private: QString m_currentPathStr; public: QString currentPathStr() const { return m_currentPathStr; } signals: void currentPathStrChanged(); private: QString m_jobIds; public: QString jobIds() const { return m_jobIds; } signals: void jobIdsChanged(); private: QString m_fileIds; public: QString fileIds() const { return m_fileIds; } signals: void fileIdsChanged(); private: QString m_dirIds; public: QString dirIds() const { return m_dirIds; } signals: void dirIdsChanged(); private: QString m_hardlinks; public: QString hardlinks() const { return m_hardlinks; } signals: void hardlinksChanged(); private: QString m_pluginIds; public: QString pluginIds() const { return m_pluginIds; } signals: void pluginIdsChanged(); private: QString m_pluginNames; public: QString pluginNames() const { return m_pluginNames; } signals: void pluginNamesChanged(); public: explicit FileSelectWizardPage(QWidget *parent = 0); ~FileSelectWizardPage(); /* QWizardPage interface */ void initializePage(); bool isComplete() const; int nextId() const; bool validatePage(); /* local interface */ void setRes(RESMON *r) {res=r;} protected slots: void updateSourceModel(); void 
optimizeSize(); void changeCurrentFolder(const QModelIndex& current); void changeCurrentText(const QString ¤t); void deleteDestSelection(); void delayedFilter(); void freezeSrcView(); void unFreezeSrcView(); private: Ui::FileSelectWizardPage *ui; QStandardItemModel *src_files_model; QStandardItemModel *dest_files_model; QTimer *m_filterTimer; RESMON *res; bool need_optimize; }; #endif // FILESELECTWIZARDPAGE_H bacula-15.0.3/src/qt-console/tray-monitor/PulseLoader.qml0000644000175000017500000001042014771010173023130 0ustar bsbuildbsbuildimport QtQuick 2.10 Item { // ----- Public Properties ----- // property int radius: 25 property bool useDouble: false property bool running: true property color color: "white" // ----- Private Properties ----- // property int _innerRadius: radius * 0.7 property int _circleRadius: (radius - _innerRadius) * 0.5 id: root width: radius * 2 height: radius * 2 onRunningChanged: { if (running === false) { for (var i = 0; i < repeater.model; i++) { if (repeater.itemAt(i)) { repeater.itemAt(i).stopAnimation(); } } } else { for (var i = 0; i < repeater.model; i++) { if (repeater.itemAt(i)) { repeater.itemAt(i).playAnimation(); } } } } Repeater { id: repeater model: root.useDouble ? 10 : 4 delegate: Component { Rectangle { // ----- Private Properties ----- // property int _currentAngle: _getStartAngle() id: rect width: _getWidth() height: width radius: width color: root.color transformOrigin: Item.Center x: root._getPosOnCircle(_currentAngle).x y: root._getPosOnCircle(_currentAngle).y antialiasing: true SequentialAnimation { id: anim loops: Animation.Infinite NumberAnimation { target: rect property: "_currentAngle" duration: root.useDouble ? 1800 : 1000 from: rect._getStartAngle() to: 360 + rect._getStartAngle() easing.type: Easing.OutQuad } PauseAnimation { duration: 500 } } // ----- Public Functions ----- // function playAnimation() { if (anim.running == false) { anim.start(); } else if (anim.paused) { anim.resume(); } } function stopAnimation() { if (anim.running) { anim.pause(); } } // ----- Private Functions ----- // function _getStartAngle() { var ang = 90; if (root.useDouble) { ang = index < 5 ? 90 : 270; } return ang; } function _getWidth() { var w = (root._circleRadius) * 0.5 * (repeater.model - index); if (root.useDouble) { w = (root._circleRadius) * 0.5 * ((repeater.model / 2) - Math.abs(repeater.model / 2 - index)) } return w; } } } } Timer { // ----- Private Properties ----- // property int _circleIndex: 0 id: timer interval: 100 repeat: true running: true onTriggered: { var maxIndex = root.useDouble ? 
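// In double mode only the first half of the circles is staggered by this timer;
// each pass below also starts the mirrored circle (repeater.model - _circleIndex - 1),
// so maxIndex is half the model count.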
repeater.model / 2 : repeater.model; if (_circleIndex === maxIndex) { stop(); _circleIndex = 0; } else { repeater.itemAt(_circleIndex).playAnimation(); if (root.useDouble) { repeater.itemAt(repeater.model - _circleIndex - 1).playAnimation(); } _circleIndex++; } } } // ----- Private Functions ----- // function _toRadian(degree) { return (degree * 3.14159265) / 180.0; } function _getPosOnCircle(angleInDegree) { var centerX = root.width / 2, centerY = root.height / 2; var posX = 0, posY = 0; posX = centerX + root._innerRadius * Math.cos(_toRadian(angleInDegree)); posY = centerY - root._innerRadius * Math.sin(_toRadian(angleInDegree)); return Qt.point(posX, posY); } } bacula-15.0.3/src/qt-console/tray-monitor/dirstatus.h0000644000175000017500000000207014771010173022373 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #include "common.h" #include "ui_dir-monitor.h" #include "task.h" #include "status.h" class DIRStatus: public ResStatus { Q_OBJECT public: Ui::dirStatus status; DIRStatus(RESMON *d): ResStatus(d) { status.setupUi(this); QObject::connect(status.pushButton, SIGNAL(clicked()), this, SLOT(doUpdate()), Qt::QueuedConnection); }; ~DIRStatus() { }; public slots: void doUpdate(); void taskDone(task *); }; bacula-15.0.3/src/qt-console/tray-monitor/tray-monitor.conf.in0000644000175000017500000000122414771010173024120 0ustar bsbuildbsbuild# # Bacula Tray Monitor Configuration File # # Copyright (C) 2000-2023 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # Monitor { Name = @basename@-mon Password = "@mon_dir_password@" # password for the Directors RefreshInterval = 30 seconds } Client { Name = @basename@-fd Address = @hostname@ FDPort = @fd_port@ Password = "@mon_fd_password@" # password for FileDaemon } Storage { Name = @basename@-sd Address = @hostname@ SDPort = @sd_port@ Password = "@mon_sd_password@" # password for StorageDaemon } Director { Name = @basename@-dir DIRport = @dir_port@ address = @hostname@ } bacula-15.0.3/src/qt-console/tray-monitor/TutorialPage.qml0000644000175000017500000000243614771010173023321 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2020 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ import QtQuick 2.10 import QtQuick.Window 2.10 import QtQuick.Layouts 1.3 import QtQuick.Controls 2.3 import QtQuick.Controls.Material 2.1 import QtQuick.Dialogs 1.2 Page { id: tutorialPage visible: true height: parent ? parent.height : 0 width: parent ? 
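// parent can be null before the page is attached to a parent item, so fall back to 0
// (same guard as the height binding above).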
parent.width : 0 SwipeView { id: tutorialSwipe height: parent.height width: parent.width FeaturesTutorialPage {} ConfigTutorialPage {} } PageIndicator { id: indicator count: tutorialSwipe.count currentIndex: tutorialSwipe.currentIndex anchors.bottom: tutorialSwipe.bottom anchors.horizontalCenter: parent.horizontalCenter } } bacula-15.0.3/src/qt-console/tray-monitor/jobmodel.cpp0000644000175000017500000000135114771010173022500 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #include "jobmodel.h" JobModel::JobModel(QObject *parent): QObject(parent) {} JobModel::~JobModel() {} bacula-15.0.3/src/qt-console/tray-monitor/ts/0000755000175000017500000000000014771010173020627 5ustar bsbuildbsbuildbacula-15.0.3/src/qt-console/tray-monitor/ts/tm_fr.ts0000644000175000017500000006245114771010173022316 0ustar bsbuildbsbuild Conf Configuration Monitor Configuration The Monitor name will be used during the authentication phase. Name: Refresh Interval: Specify the "Command Directory" where the tray-monitor program will check regularly for jobs to run Command Directory: ... Display or Hide advanced options in the "Run Job" window Display Advanced Options: Save and Apply the changes Save Cancel Show/Hide Passwords Password Add Client resource to monitor Client Add Storage resource to monitor Storage Add Director resource to monitor Director Select Command Directory ConfTab Select CA Certificate File PEM file Select CA Certificate Directory Select TLS Certificate File DIRStatus JobId Job Level Client Status Storage Files Bytes Errors FDStatus JobId Job Level Files Bytes Errors Current File Status QObject Invalid job status %1 Yes No ResConf Form General The Name will be used only in the Tray Monitor interface Name: Description: Password: Address: Port: Timeout: Use Client Initiated backup/restore feature Remote Update the tray monitor icon with the status of this component Monitor: Use SetIp: TLS ... CA Certificate File: Enabled Key File: Certificate File: CA Certificate Directory: RunJob Computed over %1 job%2, the correlation is %3. Computed over %1 job%2, The correlation is %3. SDStatus JobId Job Level Client Storage Files Bytes Errors Status TrayMonitor Display Quit About Run... Configure... 
Bacula Tray Monitor dirStatus Form Director Status Name: Started: Version: Plugins: Reloaded: Running Jobs Terminated Jobs fdStatus Form FileDaemon Status Name: Bandwidth Limit: Started: Version: Plugins: Running Jobs Terminated Jobs runForm Run job <h3>Run a Job</h3> Properties Job: When: yyyy-MM-dd hh:mm:ss <html><head/><body><p>Job statistics computed from the Catalog with previous jobs.</p><p>For accurate information, it is possible to use the bconsole &quot;estimate&quot; command.</p></body></html> Estimate: Job Bytes: Job Files: Level: Advanced Client: FileSet: Pool: Storage: Catalog: Priority: OK Cancel sdStatus Form Storage Daemon Status Name: Started: Version: Plugins: Running Jobs Terminated Jobs bacula-15.0.3/src/qt-console/tray-monitor/ts/tm_de.ts0000644000175000017500000006245114771010173022277 0ustar bsbuildbsbuild Conf Configuration Monitor Configuration The Monitor name will be used during the authentication phase. Name: Refresh Interval: Specify the "Command Directory" where the tray-monitor program will check regularly for jobs to run Command Directory: ... Display or Hide advanced options in the "Run Job" window Display Advanced Options: Save and Apply the changes Save Cancel Show/Hide Passwords Password Add Client resource to monitor Client Add Storage resource to monitor Storage Add Director resource to monitor Director Select Command Directory ConfTab Select CA Certificate File PEM file Select CA Certificate Directory Select TLS Certificate File DIRStatus JobId Job Level Client Status Storage Files Bytes Errors FDStatus JobId Job Level Files Bytes Errors Current File Status QObject Invalid job status %1 Yes No ResConf Form General The Name will be used only in the Tray Monitor interface Name: Description: Password: Address: Port: Timeout: Use Client Initiated backup/restore feature Remote Update the tray monitor icon with the status of this component Monitor: Use SetIp: TLS ... CA Certificate File: Enabled Key File: Certificate File: CA Certificate Directory: RunJob Computed over %1 job%2, the correlation is %3. Computed over %1 job%2, The correlation is %3. SDStatus JobId Job Level Client Storage Files Bytes Errors Status TrayMonitor Display Quit About Run... Configure... Bacula Tray Monitor dirStatus Form Director Status Name: Started: Version: Plugins: Reloaded: Running Jobs Terminated Jobs fdStatus Form FileDaemon Status Name: Bandwidth Limit: Started: Version: Plugins: Running Jobs Terminated Jobs runForm Run job <h3>Run a Job</h3> Properties Job: When: yyyy-MM-dd hh:mm:ss <html><head/><body><p>Job statistics computed from the Catalog with previous jobs.</p><p>For accurate information, it is possible to use the bconsole &quot;estimate&quot; command.</p></body></html> Estimate: Job Bytes: Job Files: Level: Advanced Client: FileSet: Pool: Storage: Catalog: Priority: OK Cancel sdStatus Form Storage Daemon Status Name: Started: Version: Plugins: Running Jobs Terminated Jobs bacula-15.0.3/src/qt-console/tray-monitor/ts/tm_ja.ts0000644000175000017500000006245114771010173022301 0ustar bsbuildbsbuild Conf Configuration Monitor Configuration The Monitor name will be used during the authentication phase. Name: Refresh Interval: Specify the "Command Directory" where the tray-monitor program will check regularly for jobs to run Command Directory: ... 
Display or Hide advanced options in the "Run Job" window Display Advanced Options: Save and Apply the changes Save Cancel Show/Hide Passwords Password Add Client resource to monitor Client Add Storage resource to monitor Storage Add Director resource to monitor Director Select Command Directory ConfTab Select CA Certificate File PEM file Select CA Certificate Directory Select TLS Certificate File DIRStatus JobId Job Level Client Status Storage Files Bytes Errors FDStatus JobId Job Level Files Bytes Errors Current File Status QObject Invalid job status %1 Yes No ResConf Form General The Name will be used only in the Tray Monitor interface Name: Description: Password: Address: Port: Timeout: Use Client Initiated backup/restore feature Remote Update the tray monitor icon with the status of this component Monitor: Use SetIp: TLS ... CA Certificate File: Enabled Key File: Certificate File: CA Certificate Directory: RunJob Computed over %1 job%2, the correlation is %3. Computed over %1 job%2, The correlation is %3. SDStatus JobId Job Level Client Storage Files Bytes Errors Status TrayMonitor Display Quit About Run... Configure... Bacula Tray Monitor dirStatus Form Director Status Name: Started: Version: Plugins: Reloaded: Running Jobs Terminated Jobs fdStatus Form FileDaemon Status Name: Bandwidth Limit: Started: Version: Plugins: Running Jobs Terminated Jobs runForm Run job <h3>Run a Job</h3> Properties Job: When: yyyy-MM-dd hh:mm:ss <html><head/><body><p>Job statistics computed from the Catalog with previous jobs.</p><p>For accurate information, it is possible to use the bconsole &quot;estimate&quot; command.</p></body></html> Estimate: Job Bytes: Job Files: Level: Advanced Client: FileSet: Pool: Storage: Catalog: Priority: OK Cancel sdStatus Form Storage Daemon Status Name: Started: Version: Plugins: Running Jobs Terminated Jobs bacula-15.0.3/src/qt-console/tray-monitor/fd-monitor.ui0000644000175000017500000001142414771010173022620 0ustar bsbuildbsbuild fdStatus 0 0 518 435 Form FileDaemon Status Name: Bandwidth Limit: Started: Version: Plugins: Running Jobs QAbstractItemView::SingleSelection false Terminated Jobs QAbstractItemView::SingleSelection Qt::Horizontal 40 20 :/images/view-refresh.png:/images/view-refresh.png bacula-15.0.3/src/qt-console/tray-monitor/tray-ui-controller.h0000644000175000017500000001174614771010173024136 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ #ifndef TRAYUICONTROLLER_H #define TRAYUICONTROLLER_H #include #include #include #include #include #include #include #include "conf.h" #include "tray_conf.h" #include "resmodel.h" #include "config-storage.h" #include "android-fd-service.h" /* TrayUiController - Controls the first screen displayed by our Qt App */ class TrayUiController : public QObject { Q_OBJECT // Where to find the Android File Daemon trace file Q_PROPERTY(QString fdTracePath READ getDaemonTracePath) // Where to find the Tray Monitor trace file Q_PROPERTY(QString trayTracePath READ getTrayTracePath) // Controls the Tray Monitor log level Q_PROPERTY(QString trayLogLevel READ getTrayLogLevel WRITE setTrayLogLevel) // Controls the File Daemon log level Q_PROPERTY(QString fdLogLevel READ getFdLogLevel WRITE setFdLogLevel) // Text that is displayed in an information dialog Q_PROPERTY(QString infoDialogText READ getInfoDialogText WRITE setInfoDialogText) // Resources that were obtained from the bacula-tray-monitor.conf file Q_PROPERTY(QList clients READ getClients NOTIFY clientsChanged()) Q_PROPERTY(QList directors READ getDirectors NOTIFY directorsChanged()) Q_PROPERTY(QList storages READ getStorages NOTIFY storagesChanged()) private: QString m_monitorName; QString m_infoDialogText; QFileSystemWatcher *m_traceWatcher; ConfigStorage *m_config; QList *m_clients; QList *m_directors; QList *m_storages; QList *fetchResources(rescode resCode); public: explicit TrayUiController(QObject *parent = nullptr); ~TrayUiController(); // Getters / Setters for data that is used by our GUI QString getDaemonTracePath() { return AndroidFD::tracePath(); } QString getTrayTracePath() { return QStandardPaths::writableLocation(QStandardPaths::AppDataLocation) + "/tray-monitor.trace"; } QString getTrayLogLevel() { return QString::number(debug_level); } void setTrayLogLevel(QString level) { debug_level = level.toInt(); } QString getFdLogLevel() { char buf[50]; edit_int64(AndroidFD::logLevel, buf); return QString(buf); } void setFdLogLevel(QString level) { AndroidFD::logLevel = level.toInt(); } QList getClients() { return *m_clients; } void setClients(QList *clients) { if(m_clients != NULL) { delete m_clients; } m_clients = clients; emit clientsChanged(); } QList getDirectors() { return *m_directors; } void setDirectors(QList *directors) { if(m_directors != NULL) { delete m_directors; } m_directors = directors; emit directorsChanged(); } QList getStorages() { return *m_storages; } void setStorages(QList *storages) { if(m_storages != NULL) { delete m_storages; } m_storages = storages; emit storagesChanged(); } QString getInfoDialogText() { return m_infoDialogText; } void setInfoDialogText(const QString &text) { m_infoDialogText = text; emit infoDialogTextChanged(); } public slots: // Reads and updates data from bacula-tray-monitor.conf void fetchClients(); void fetchDirectors(); void fetchStorages(); void deleteResource(ResourceModel* resModel); QString getMonitorName(); void setMonitorName(QString name); // Controls the execution of our Android File Daemon Service void startFileDaemon(); void stopFileDaemon(); // Send FD Trace to a Mail App void composeTraceMail(); // Reads and returns the contents of a file in the device QString fetchFile(QString fpath); // Called when the File Daemon updates it's trace file void handleFdTraceChange(QString path); signals: // Events that are emitted to our GUI code to inform changes in our data void clientsChanged(); void directorsChanged(); void storagesChanged(); void fdStatusChanged(); void fdTraceChanged(); void 
infoDialogTextChanged(); }; #endif // TRAYUICONTROLLER_H bacula-15.0.3/src/qt-console/tray-monitor/tray-monitor.h0000644000175000017500000000157414771010173023025 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #ifndef TRAY_MONITOR_H #define TRAY_MONITOR_H #include "common.h" #include "tray-ui.h" #include "tray_conf.h" bool parse_tmon_config(CONFIG *config, const char *configfile, int exit_code); bool reload(); void display_error(const char *msg, ...); #endif bacula-15.0.3/src/qt-console/tray-monitor/pluginwizardpage.ui0000644000175000017500000000151514771010173024116 0ustar bsbuildbsbuild PluginWizardPage 0 0 637 503 WizardPage Tab 1 Tab 2 bacula-15.0.3/src/qt-console/tray-monitor/fileselectwizardpage.cpp0000644000175000017500000002440714771010173025111 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * Restore Wizard: File selection page * * Written by Norbert Bizet, May MMXVII * */ #include "common.h" #include "fileselectwizardpage.h" #include "filesmodel.h" #include "ui_fileselectwizardpage.h" #include "restorewizard.h" #include "task.h" #include #include FileSelectWizardPage::FileSelectWizardPage(QWidget *parent) : QWizardPage(parent), m_currentSourceId(0), m_currentPathStr(""), ui(new Ui::FileSelectWizardPage), src_files_model(new FileSourceModel), dest_files_model(new FileDestModel), m_filterTimer(new QTimer), res(NULL), need_optimize(true) { ui->setupUi(this); /* keep track of the current source view selection */ registerField("currentSourceId", this, "currentSourceId", "currentSourceIdChanged"); registerField("currentPathStr", this, "currentPathStr", "currentPathStrChanged"); registerField("fileFilter", ui->FilterEdit); /* usefull to the following pages */ registerField("jobIds", this, "jobIds", "jobIdsChanged"); registerField("fileIds", this, "fileIds", "fileIdsChanged"); registerField("dirIds", this, "dirIds", "dirIdsChanged"); registerField("hardlinks", this, "hardlinks", "hardlinksChanged"); registerField("pluginIds", this, "pluginIds", "pluginIdsChanged"); registerField("pluginNames", this, "pluginNames", "pluginNamesChanged"); QStringList headers; headers << tr("Name") << tr("Size") << tr("Date"); ui->sourceTableView->setModel(src_files_model); //ui->sourceTableView->setLayoutMode(QListView::Batched); //ui->sourceTableView->setBatchSize(BATCH_SIZE); src_files_model->setHorizontalHeaderLabels(headers); connect(ui->sourceTableView, SIGNAL(activated(const QModelIndex &)), this, SLOT(changeCurrentFolder(const QModelIndex&))); connect(ui->sourceTableView, SIGNAL(activated(const QModelIndex &)), this, SLOT(updateSourceModel())); ui->destTableView->setModel(dest_files_model); dest_files_model->setHorizontalHeaderLabels(headers); connect(dest_files_model, SIGNAL(rowsInserted(const QModelIndex &, int, int)), this, SIGNAL(completeChanged())); connect(dest_files_model, SIGNAL(rowsRemoved(const QModelIndex &, int, int)), this, SIGNAL(completeChanged())); connect(ui->filePathComboPath, SIGNAL(activated(const QString &)), this, SLOT(changeCurrentText(const QString &))); connect(ui->filePathComboPath, SIGNAL(activated(const QString &)), this, SLOT(updateSourceModel())); connect(ui->FilterEdit, SIGNAL(textChanged(const QString &)), this, SLOT(delayedFilter())); m_filterTimer->setSingleShot(true); connect(m_filterTimer, SIGNAL(timeout()), this, SLOT(unFreezeSrcView())); connect(m_filterTimer, SIGNAL(timeout()), this, SLOT(updateSourceModel())); QShortcut *shortcut = new QShortcut(QKeySequence(Qt::Key_Delete), this); QObject::connect(shortcut, SIGNAL(activated()), this, SLOT(deleteDestSelection())); } FileSelectWizardPage::~FileSelectWizardPage() { delete ui; } void FileSelectWizardPage::initializePage() { /* first request synchronous */ if (res) { task t; t.init(res, -1); const char *p = field("fileFilter").toString().toLatin1().data(); POOL_MEM info2; pm_strcpy(info2, p); t.arg2 = info2.c_str(); p = currentPathStr().toLatin1().data(); POOL_MEM info3; pm_strcpy(info3, p); t.arg3 = info3.c_str(); t.model = src_files_model; t.get_job_files(field("currentJob").toString().toLatin1().data(), currentSourceId()); } need_optimize = true; } bool FileSelectWizardPage::isComplete() const { return dest_files_model->rowCount() != 0; } int FileSelectWizardPage::nextId() const { /* if some plugins are used in current selection, move to plugin page, otherwise options */ if 
(field("pluginIds").toString().isEmpty()) { return RestoreWizard::RW_ADVANCEDOPTIONS_PAGE; } return RestoreWizard::RW_PLUGIN_PAGE; } bool FileSelectWizardPage::validatePage() { /* compute result fields based on destination model content */ QStringList fileids, jobids, dirids, findexes; struct stat statp; int32_t LinkFI; for (int row=0; row < dest_files_model->rowCount(); ++row) { QModelIndex idx = dest_files_model->index(row, 0); if (idx.data(TypeRole) == TYPEROLE_FILE) { fileids << idx.data(FileIdRole).toString(); jobids << idx.data(JobIdRole).toString(); decode_stat(idx.data(LStatRole).toString().toLocal8Bit().data(), &statp, sizeof(statp), &LinkFI); if (LinkFI) { findexes << idx.data(JobIdRole).toString() + "," + QString().setNum(LinkFI); } } else /* TYPEROLE_DIRECTORY */ { dirids << idx.data(PathIdRole).toString(); jobids << idx.data(JobIdRole).toString().split(","); /* Can have multiple jobids */ } } fileids.removeDuplicates(); jobids.removeDuplicates(); dirids.removeDuplicates(); findexes.removeDuplicates(); m_jobIds = jobids.join(","); m_fileIds = fileids.join(","); m_dirIds = dirids.join(","); m_hardlinks = findexes.join(","); /* plugin Ids and Names are retrieved now, so NextId() can decide to schedule the PluginPage before RestoreOptionsPage */ /* Stored as properties so thru the next Wizard Page can use them */ task t; t.init(res, -1); /* pass res, ID is set to -1 because task method is called synchronously */ m_pluginIds = t.plugins_ids(m_jobIds); m_pluginNames = t.plugins_names(m_jobIds); return true; } void FileSelectWizardPage::updateSourceModel() { /* subsequent request async */ if (res) { task *t = new task(); /* optimize size only once */ if (need_optimize) { connect(t, SIGNAL(done(task*)), this, SLOT(optimizeSize())); need_optimize = false; } connect(t, SIGNAL(done(task*)), t, SLOT(deleteLater())); t->init(res, TASK_LIST_JOB_FILES); const char *p = field("currentJob").toString().toLatin1().data(); POOL_MEM info; pm_strcpy(info, p); t->arg = info.c_str(); p = field("fileFilter").toString().toLatin1().data(); POOL_MEM info2; pm_strcpy(info2, p); t->arg2 = info2.c_str(); p = currentPathStr().toLatin1().data(); POOL_MEM info3; pm_strcpy(info3, p); t->arg3 = info3.c_str(); t->pathId = currentSourceId(); t->model = src_files_model; res->wrk->queue(t); } } void FileSelectWizardPage::optimizeSize() { int w = ui->destTableView->width()/4; ui->destTableView->horizontalHeader()->resizeSection( 0, w*2); ui->destTableView->horizontalHeader()->resizeSection( 1, w); ui->destTableView->horizontalHeader()->resizeSection( 2, w ); ui->destTableView->horizontalHeader()->setStretchLastSection(true); w = ui->sourceTableView->width()/4; ui->sourceTableView->horizontalHeader()->resizeSection( 0, w*2); ui->sourceTableView->horizontalHeader()->resizeSection( 1, w); ui->sourceTableView->horizontalHeader()->resizeSection( 2, w ); ui->sourceTableView->horizontalHeader()->setStretchLastSection(true); } void FileSelectWizardPage::changeCurrentFolder(const QModelIndex& current) { if (current.isValid()) { QStandardItem *item = src_files_model->itemFromIndex(current); if (item && item->data(TypeRole) == TYPEROLE_DIRECTORY) { QString path = item->text(); m_currentSourceId = item->data(PathIdRole).toULongLong(); if (m_currentSourceId > 0) { // Choose .. 
update current path to parent dir if (path == "..") { if (m_currentPathStr == "/") { m_currentPathStr = ""; } else { m_currentPathStr.remove(QRegExp("[^/]+/$")); } } else if (path == "/" && m_currentPathStr == "") { m_currentPathStr += path; } else if (path != "/") { m_currentPathStr += path; } } emit currentSourceIdChanged(); emit currentPathStrChanged(); int idx = ui->filePathComboPath->findText(m_currentPathStr); if (idx >= 0) { ui->filePathComboPath->setCurrentIndex(idx); } else { ui->filePathComboPath->insertItem(-1, m_currentPathStr, QVariant(m_currentSourceId)); ui->filePathComboPath->model()->sort(0); ui->filePathComboPath->setCurrentIndex(ui->filePathComboPath->findText(m_currentPathStr)); } } } } void FileSelectWizardPage::changeCurrentText(const QString& current) { int idx = ui->filePathComboPath->findText(current); m_currentSourceId = ui->filePathComboPath->itemData(idx).toULongLong(); m_currentPathStr = ui->filePathComboPath->itemText(idx); emit currentSourceIdChanged(); } void FileSelectWizardPage::deleteDestSelection() { QMap rows; foreach (QModelIndex index, ui->destTableView->selectionModel()->selectedIndexes()) rows.insert(index.row(), 0); QMapIterator r(rows); r.toBack(); while (r.hasPrevious()) { r.previous(); ui->destTableView->model()->removeRow(r.key()); } } void FileSelectWizardPage::delayedFilter() { freezeSrcView(); m_filterTimer->start( 100 ); } void FileSelectWizardPage::freezeSrcView() { ui->sourceTableView->setUpdatesEnabled(false); } void FileSelectWizardPage::unFreezeSrcView() { ui->sourceTableView->setUpdatesEnabled(true); ui->sourceTableView->update(); } bacula-15.0.3/src/qt-console/tray-monitor/runjob-ui-controller.cpp0000644000175000017500000001174114771010173025004 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ #include "runjob-ui-controller.h" RunJobUiController::RunJobUiController(QObject *parent): QObject(parent) { m_config = &ConfigStorage::getInstance(); } void RunJobUiController::handleSelectedJobChange(int jobIndex, QString jobLevel) { if (isConnecting()) { return; } setIsConnecting(true); RESMON *dir = m_rJobModel->getDirector(); POOL_MEM jobName; POOL_MEM level; char *p = (char *) dir->jobs->get(jobIndex); pm_strcpy(jobName, p); p = jobLevel.toUtf8().data(); pm_strcpy(level, p); task *t = new task(); t->init(dir, TASK_INFO); connect(t, SIGNAL(done(task *)), this, SLOT(jobInfoCallback(task *)), Qt::QueuedConnection); t->arg = jobName.c_str(); t->arg2 = level.c_str(); dir->wrk->queue(t); m_selectedJobIndex = jobIndex; } void RunJobUiController::jobInfoCallback(task *t) { RESMON *dir = m_rJobModel->getDirector(); char buf[50]; edit_uint64_with_suffix(dir->infos.JobBytes, buf); bstrncat(buf, "B", sizeof(buf)); setFilesDescription( QString("%1 Files (%2)") .arg(dir->infos.JobFiles) .arg(QString(buf)) ); delete t; // Now fetch job defaults t = new task(); QString selectedJob = m_jobNames[m_selectedJobIndex]; char *job = bstrdup(selectedJob.toUtf8().data()); Dmsg1(10, "get defaults for %s\n", job); dir->mutex->lock(); bfree_and_null(dir->defaults.job); dir->defaults.job = job; dir->mutex->unlock(); connect(t, SIGNAL(done(task *)), this, SLOT(jobDefaultsCallback(task *)), Qt::QueuedConnection); t->init(dir, TASK_DEFAULTS); dir->wrk->queue(t); setIsConnecting(false); } int RunJobUiController::findIndex(QStringList list, QString str) { for (int i = 0; i < list.size(); i++) { if (list[i] == str) { return i; } } Dmsg1(0, "Error: could not find default index. Value: %s\n", str.toLatin1().constData()); return 0; } void RunJobUiController::jobDefaultsCallback(task *t) { if (t->status == true) { RESMON *dir = m_rJobModel->getDirector(); dir->mutex->lock(); int levelIndex = this->findIndex(m_jobLevel, dir->defaults.level); int filesetIndex = this->findIndex(m_filesetNames, dir->defaults.fileset); int clientIndex = this->findIndex(m_clientNames, dir->defaults.client); int storageIndex = this->findIndex(m_storageNames, dir->defaults.storage); int poolIndex = this->findIndex(m_poolNames, dir->defaults.pool); int catalogIndex = this->findIndex(m_catalogNames, dir->defaults.catalog); setLevel(levelIndex); setFileset(filesetIndex); setClient(clientIndex); setStorage(storageIndex); setPool(poolIndex); setCatalog(catalogIndex); dir->mutex->unlock(); } t->deleteLater(); setIsConnecting(false); } void RunJobUiController::runJob() { if (isConnecting()) { return; } setIsConnecting(true); RESMON *dir = m_rJobModel->getDirector(); POOL_MEM command; POOL_MEM tmp; char *p; p = (char *) dir->jobs->get(m_selectedJobIndex); Mmsg(command, "run job=\"%s\" yes", p); p = (char *) dir->storages->get(m_selectedStorage); Mmsg(tmp, " storage=\"%s\"", p); pm_strcat(command, tmp.c_str()); p = (char *) dir->clients->get(m_selectedClient); RESMON *client = m_config->getResourceByName(R_CLIENT, p); if (client != NULL && client->managed) { AndroidFD::start(); } Mmsg(tmp, " client=\"%s\"", p); pm_strcat(command, tmp.c_str()); p = m_jobLevel[m_selectedLevel].toUtf8().data(); Mmsg(tmp, " level=\"%s\"", p); pm_strcat(command, tmp.c_str()); p = (char *) dir->pools->get(m_selectedPool); Mmsg(tmp, " pool=\"%s\"", p); pm_strcat(command, tmp.c_str()); p = (char *) dir->filesets->get(m_selectedFileset); Mmsg(tmp, " fileset=\"%s\"", p); pm_strcat(command, tmp.c_str()); Mmsg(tmp, " priority=\"%d\"", m_priority.toUInt()); pm_strcat(command, 
tmp.c_str()); // if (res->type == R_CLIENT) { // pm_strcat(command, " fdcalled=1"); // } task *t = new task(); t->init(dir, TASK_RUN); connect(t, SIGNAL(done(task *)), this, SLOT(runJobCallback(task *)), Qt::QueuedConnection); t->arg = command.c_str(); dir->wrk->queue(t); } void RunJobUiController::runJobCallback(task *t) { emit jobSuccess(); delete t; setIsConnecting(false); } RunJobUiController::~RunJobUiController() {} bacula-15.0.3/src/qt-console/tray-monitor/RestoreConfirmTab.qml0000644000175000017500000002374514771010173024317 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2020 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ import QtQuick 2.10 import QtQuick.Window 2.10 import QtQuick.Layouts 1.3 import QtQuick.Controls 2.3 import QtQuick.Controls.Material 2.1 import QtQuick.Dialogs 1.2 import io.qt.bmob.traycontroller 1.0 Page { id: restoreJobPage visible: true height: parent ? parent.height : 0 width: parent ? parent.width : 0 header: ToolBar { id: toolbar height: 48 background: Rectangle { color: "#d32f2f" } RowLayout { anchors.fill: parent anchors.leftMargin: 8 anchors.rightMargin: 8 ToolButton { id: backButton onClicked: restorePageStack.currentIndex = 1 anchors.left: parent.left contentItem: Text { text: qsTr("<") font.pixelSize: 24 opacity: enabled ? 1.0 : 0.3 color: "white" } background: Rectangle { color: backButton.down ? "#b71c1c" : "#d32f2f" } } Label { id: titleLabel text: "3 - Destination" color: "white" font.pixelSize: 18 anchors.centerIn: parent } } } Flickable { id: scrollableBox anchors.top: parent.top anchors.bottom: restoreButton.top width: parent.width contentWidth: parent.width contentHeight: body.height clip: true Item { id: body width: parent.width height: childrenRect.height Text { id: clientsLabel text: "Client:" anchors.top: parent.top anchors.left: parent.left anchors.topMargin: 24 anchors.leftMargin: 16 } ComboBox { id: clientsCombo model: restoreJobController.clients anchors.top: clientsLabel.bottom anchors.left: parent.left anchors.right: parent.right anchors.topMargin: 8 anchors.leftMargin: 24 anchors.rightMargin: 24 delegate: ItemDelegate { width: clientsCombo.width contentItem: Text { text: model.modelData color: "#000000" font: clientsCombo.font elide: Text.ElideRight verticalAlignment: Text.AlignVCenter } highlighted: clientsCombo.highlightedIndex === index } indicator: Canvas { } contentItem: Text { leftPadding: 8 rightPadding: clientsCombo.indicator.width + clientsCombo.spacing text: clientsCombo.displayText font: clientsCombo.font color: clientsCombo.pressed ? "#ef9a9a" : "#000000" verticalAlignment: Text.AlignVCenter elide: Text.ElideRight } background: Rectangle { implicitWidth: 120 implicitHeight: 40 border.color: clientsCombo.pressed ? "#ef9a9a" : "#d32f2f" border.width: clientsCombo.visualFocus ? 
2 : 1 radius: 2 } popup: Popup { y: clientsCombo.height - 1 width: clientsCombo.width implicitHeight: contentItem.implicitHeight padding: 1 contentItem: ListView { clip: true implicitHeight: contentHeight model: clientsCombo.popup.visible ? clientsCombo.delegateModel : null currentIndex: clientsCombo.highlightedIndex ScrollIndicator.vertical: ScrollIndicator { } } background: Rectangle { border.color: "#d32f2f" radius: 2 } } onCurrentIndexChanged: { restoreJobController.setTargetClient(clientsCombo.currentIndex) } } Text { id: whereLabel text: "Where:" anchors.top: clientsCombo.bottom anchors.left: parent.left anchors.leftMargin: 16 anchors.topMargin: 24 } TextField { id: whereField text: restoreJobController.where anchors.top: whereLabel.bottom anchors.left: parent.left anchors.right: parent.right anchors.topMargin: 8 anchors.leftMargin: 24 anchors.rightMargin: 24 topPadding: 10 bottomPadding: 10 background: Rectangle { radius: 2 border.color: "#d32f2f" border.width: 1 } onTextChanged: restoreJobController.where = text } Text { id: replaceLabel text: "Replace:" anchors.top: whereField.bottom anchors.left: parent.left anchors.topMargin: 24 anchors.leftMargin: 16 } ComboBox { id: replaceCombo model: restoreJobController.replaceOptions anchors.top: replaceLabel.bottom anchors.left: parent.left anchors.right: parent.right anchors.topMargin: 8 anchors.leftMargin: 24 anchors.rightMargin: 24 delegate: ItemDelegate { width: replaceCombo.width contentItem: Text { text: model.modelData color: "#000000" font: replaceCombo.font elide: Text.ElideRight verticalAlignment: Text.AlignVCenter } highlighted: replaceCombo.highlightedIndex === index } indicator: Canvas { } contentItem: Text { leftPadding: 8 rightPadding: replaceCombo.indicator.width + replaceCombo.spacing text: replaceCombo.displayText font: replaceCombo.font color: replaceCombo.pressed ? "#ef9a9a" : "#000000" verticalAlignment: Text.AlignVCenter elide: Text.ElideRight } background: Rectangle { implicitWidth: 120 implicitHeight: 40 border.color: replaceCombo.pressed ? "#ef9a9a" : "#d32f2f" border.width: replaceCombo.visualFocus ? 2 : 1 radius: 2 } popup: Popup { y: replaceCombo.height - 1 width: replaceCombo.width implicitHeight: contentItem.implicitHeight padding: 1 contentItem: ListView { clip: true implicitHeight: contentHeight model: replaceCombo.popup.visible ? replaceCombo.delegateModel : null currentIndex: replaceCombo.highlightedIndex ScrollIndicator.vertical: ScrollIndicator { } } background: Rectangle { border.color: "#d32f2f" radius: 2 } } onCurrentIndexChanged: { restoreJobController.setReplaceIndex(replaceCombo.currentIndex) } } Text { id: commentLabel text: "Comment:" anchors.top: replaceCombo.bottom anchors.left: parent.left anchors.topMargin: 24 anchors.leftMargin: 16 } TextArea { id: commentField text: restoreJobController.comment height: 200 anchors.top: commentLabel.bottom anchors.left: parent.left anchors.right: parent.right anchors.topMargin: 8 anchors.leftMargin: 24 anchors.rightMargin: 24 topPadding: 10 bottomPadding: 10 background: Rectangle { radius: 2 border.color: "#d32f2f" border.width: 1 } wrapMode: TextEdit.WrapAnywhere onTextChanged: restoreJobController.comment = text } // Visual margin in the bottom Item { width: parent.width height: 32 anchors.top: commentField.bottom } } } Rectangle { id: restoreButton height: childrenRect.height anchors.left: parent.left anchors.right: parent.right anchors.bottom: parent.bottom color: clickArea.pressed ? 
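// clickArea is the MouseArea laid over this button further below; the darker red
// is shown while it is pressed, as press feedback.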
"#b71c1c" : "#d32f2f" Text { text: qsTr("Restore >") topPadding: 8 bottomPadding: 8 anchors.horizontalCenter: parent.horizontalCenter verticalAlignment: Text.AlignVCenter font.pixelSize: 18 color: "white" } } PulseLoader { useDouble: true visible: restoreJobController.isConnecting radius: 28 color: "#d32f2f" anchors.horizontalCenter: parent.horizontalCenter anchors.bottom: restoreButton.top anchors.bottomMargin: 24 } MouseArea { id: clickArea anchors.left: restoreButton.left anchors.right: restoreButton.right anchors.top: restoreButton.top anchors.bottom: restoreButton.bottom onClicked: restoreJobController.restore() } } bacula-15.0.3/src/qt-console/tray-monitor/jobselectwizardpage.cpp0000644000175000017500000000647714771010173024753 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Restore Wizard: Job selection page * * Written by Norbert Bizet, May MMXVII * */ #include "common.h" #include "jobselectwizardpage.h" #include "conf.h" #include "task.h" #include "ui_jobselectwizardpage.h" #include JobSelectWizardPage::JobSelectWizardPage(QWidget *parent) : QWizardPage(parent), ui(new Ui::JobSelectWizardPage), res(NULL), model(new QStandardItemModel), m_jobId(-1) { ui->setupUi(this); /* currentJob in mandatory */ registerField("currentJob*", this, "currentJob", SIGNAL(currentJobChanged())); /* assign model to widget */ ui->BackupTableView->setModel(model); /* when selection change, change the field value and the next button state */ connect(ui->BackupTableView->selectionModel(), SIGNAL(selectionChanged(const QItemSelection&, const QItemSelection&)), this, SIGNAL(currentJobChanged())); connect(ui->BackupTableView->selectionModel(), SIGNAL(selectionChanged(const QItemSelection&, const QItemSelection&)), this, SIGNAL(completeChanged())); } JobSelectWizardPage::~JobSelectWizardPage() { delete model; delete ui; } void JobSelectWizardPage::initializePage() { /* populate model */ if (res && (!res->terminated_jobs || res->terminated_jobs->empty())) { /* get terminated_jobs info if not present. 
Queue populateModel() at end of task */ task *t = new task(); connect(t, SIGNAL(done(task*)), this , SLOT(populateModel()), Qt::QueuedConnection); connect(t, SIGNAL(done(task*)), t, SLOT(deleteLater())); t->init(res, TASK_STATUS); res->wrk->queue(t); } else { /* populate Model directly */ populateModel(); } } bool JobSelectWizardPage::isComplete() const { /* any selection will do since it's single selection */ return (ui->BackupTableView->selectionModel() && !ui->BackupTableView->selectionModel()->selectedRows().isEmpty() ); } qlonglong JobSelectWizardPage::currentJob() const { /* single selection */ QModelIndex idx = ui->BackupTableView->selectionModel()->currentIndex(); /* return the JobId in column 0 */ QModelIndex idIdx = idx.sibling(idx.row(), 0); return idIdx.data().toLongLong(); } void JobSelectWizardPage::populateModel() { if (res) { /* populate model with jobs listed in currentClient */ task *t = new task(); connect(t, SIGNAL(done(task*)), t, SLOT(deleteLater())); t->init(res, TASK_LIST_CLIENT_JOBS); int idx = field("currentClient").toInt(); char *p = (char*) res->clients->get(idx); POOL_MEM info; pm_strcpy(info, p); t->arg = info.c_str(); t->model = model; res->wrk->queue(t); } } bacula-15.0.3/src/qt-console/tray-monitor/resdetails-ui-controller.cpp0000644000175000017500000000721214771010173025642 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ #include "resdetails-ui-controller.h" ResDetailsUiController::ResDetailsUiController(QObject *parent): QObject(parent) { m_storage = &ConfigStorage::getInstance(); } void ResDetailsUiController::connectToResource() { if(m_res == NULL) { return; } if (!isConnecting()) { setIsConnecting(true); if (m_res->managed) { AndroidFD::start(); } task *t = new task(); connect(t, SIGNAL(done(task *)), this, SLOT(connectCallback(task *)), Qt::QueuedConnection); //We get a version of the Resource that contains an encoded password m_storage->reloadResources(true); m_res = m_storage->getResourceByName(m_res->code, m_res->hdr.name); m_storage->reloadResources(); t->init(m_res, TASK_STATUS); m_res->wrk->queue(t); emit connectionStarted(); } } void ResDetailsUiController::connectCallback(task *t) { if (!t->status) { setConnectionError(t->errmsg); } else { emit connectionSuccess(); setStartedDate(m_res->started); setResourceVersion(m_res->version); setResourcePlugins(m_res->plugins); setBandwidthLimit(QString::number(m_res->bwlimit)); QList *tJobs = new QList(); struct s_last_job *job; foreach_dlist(job, m_res->terminated_jobs) { JobModel *model = new JobModel(); model->setData(job); tJobs->append(model); } setTerminatedJobs(tJobs); QList *rJobs = new QList(); struct s_running_job *rjob; foreach_alist(rjob, m_res->running_jobs) { JobModel *model = new JobModel(); model->setData(rjob); rJobs->append(model); } setRunningJobs(rJobs); } t->deleteLater(); setIsConnecting(false); } void ResDetailsUiController::createRunJobModel() { if (!isConnecting()) { setIsConnecting(true); task *t = new task(); connect(t, SIGNAL(done(task *)), this, SLOT(createBackupModelCallback(task *)), Qt::QueuedConnection); t->init(m_res, TASK_RESOURCES); m_res->wrk->queue(t); } } void ResDetailsUiController::createBackupModelCallback(task *t) { RunJobModel *model = new RunJobModel(); model->setDirector(t->res); t->deleteLater(); setIsConnecting(false); emit runJobModelCreated(model); } void ResDetailsUiController::createRestoreJobModel() { if (!isConnecting()) { setIsConnecting(true); task *t = new task(); connect(t, SIGNAL(done(task *)), this, SLOT(createRestoreModelCallback(task *)), Qt::QueuedConnection); t->init(m_res, TASK_RESOURCES); m_res->wrk->queue(t); } } void ResDetailsUiController::createRestoreModelCallback(task *t) { RestoreJobModel *model = new RestoreJobModel(); model->setDirector(t->res); t->deleteLater(); setIsConnecting(false); emit restoreModelCreated(model); } ResDetailsUiController::~ResDetailsUiController() {} bacula-15.0.3/src/qt-console/tray-monitor/RunJobPage.qml0000644000175000017500000005333314771010173022717 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2020 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ import QtQuick 2.10 import QtQuick.Window 2.10 import QtQuick.Layouts 1.3 import QtQuick.Controls 2.3 import QtQuick.Controls.Material 2.1 import QtQuick.Dialogs 1.2 import io.qt.bmob.runjobcontroller 1.0 Page { id: runJobPage visible: true height: parent ? parent.height : 0 width: parent ? 
parent.width : 0 property var model; RunJobUiController { id: runJobController Component.onCompleted: { if(typeof runJobPage.model !== "undefined") { runJobController.model = runJobPage.model } } onJobSuccess: { successDialog.text = "Job Started" successDialog.visible = true } } header: ToolBar { id: toolbar height: 48 background: Rectangle { color: "#d32f2f" } RowLayout { anchors.fill: parent anchors.leftMargin: 8 anchors.rightMargin: 8 ToolButton { id: backButton onClicked: stackView.pop() anchors.left: parent.left contentItem: Text { text: qsTr("<") font.pixelSize: 24 opacity: enabled ? 1.0 : 0.3 color: "white" } background: Rectangle { color: backButton.down ? "#b71c1c" : "#d32f2f" } } Label { id: titleLabel text: "Run Job" color: "white" font.pixelSize: 18 anchors.centerIn: parent } } } Text { id: selectJobLabel text: "Job:" anchors.verticalCenter: jobsCombo.verticalCenter anchors.right: jobsCombo.left anchors.rightMargin: 8 } ComboBox { id: jobsCombo model: runJobController.jobNames anchors.top: parent.top anchors.topMargin: 16 anchors.left: storageCombo.left anchors.right: parent.right anchors.rightMargin: 32 delegate: ItemDelegate { width: jobsCombo.width contentItem: Text { text: model.modelData color: "#000000" font: jobsCombo.font elide: Text.ElideRight verticalAlignment: Text.AlignVCenter } highlighted: jobsCombo.highlightedIndex === index } indicator: Canvas { } contentItem: Text { leftPadding: 8 rightPadding: jobsCombo.indicator.width + jobsCombo.spacing text: jobsCombo.displayText font: jobsCombo.font color: jobsCombo.pressed ? "#ef9a9a" : "#000000" verticalAlignment: Text.AlignVCenter elide: Text.ElideRight } background: Rectangle { implicitWidth: 120 implicitHeight: 40 border.color: jobsCombo.pressed ? "#ef9a9a" : "#d32f2f" border.width: jobsCombo.visualFocus ? 2 : 1 radius: 2 } popup: Popup { y: jobsCombo.height - 1 width: jobsCombo.width implicitHeight: contentItem.implicitHeight padding: 1 contentItem: ListView { clip: true implicitHeight: contentHeight model: jobsCombo.popup.visible ? jobsCombo.delegateModel : null currentIndex: jobsCombo.highlightedIndex ScrollIndicator.vertical: ScrollIndicator { } } background: Rectangle { border.color: "#d32f2f" radius: 2 } } onCurrentIndexChanged: { runJobController.handleSelectedJobChange( jobsCombo.currentIndex, selectedLevel.text ) } } Text { id: levelLabel text: "Level:" anchors.verticalCenter: levelCombo.verticalCenter anchors.right: levelCombo.left anchors.rightMargin: 8 } ComboBox { id: levelCombo currentIndex: runJobController.selectedLevel model: runJobController.jobLevels anchors.top: jobsCombo.bottom anchors.topMargin: 8 anchors.left: storageCombo.left anchors.right: jobsCombo.right onCurrentIndexChanged: { runJobController.setLevel(currentIndex) } delegate: ItemDelegate { width: levelCombo.width contentItem: Text { text: model.modelData color: "#000000" font: levelCombo.font elide: Text.ElideRight verticalAlignment: Text.AlignVCenter } highlighted: levelCombo.highlightedIndex === index } indicator: Canvas { } contentItem: Text { id: selectedLevel leftPadding: 8 rightPadding: levelCombo.indicator.width + levelCombo.spacing text: levelCombo.displayText font: levelCombo.font color: levelCombo.pressed ? "#ef9a9a" : "#000000" verticalAlignment: Text.AlignVCenter elide: Text.ElideRight onTextChanged: { if(runJobController.loadedModel) { runJobController.handleSelectedJobChange( jobsCombo.currentIndex, selectedLevel.text ) } } } background: Rectangle { implicitWidth: 120 implicitHeight: 40 border.color: levelCombo.pressed ? 
"#ef9a9a" : "#d32f2f" border.width: levelCombo.visualFocus ? 2 : 1 radius: 2 } popup: Popup { y: levelCombo.height - 1 width: levelCombo.width implicitHeight: contentItem.implicitHeight padding: 1 contentItem: ListView { clip: true implicitHeight: contentHeight model: levelCombo.popup.visible ? levelCombo.delegateModel : null currentIndex: levelCombo.highlightedIndex ScrollIndicator.vertical: ScrollIndicator { } } background: Rectangle { border.color: "#d32f2f" radius: 2 } } } Text { id: filesLabel text: "Files:" anchors.top: levelCombo.bottom anchors.topMargin: 12 anchors.right: files.left anchors.rightMargin: 8 } Text { id: files text: runJobController.filesDescription anchors.top: levelCombo.bottom anchors.topMargin: 12 anchors.left: storageCombo.left } Rectangle { id: divider anchors.left: parent.left anchors.right: parent.right anchors.top: filesLabel.bottom anchors.topMargin: 16 height: 1 color: "#e0e0e0" } Text { id: clientLabel text: "Client:" anchors.verticalCenter: clientCombo.verticalCenter anchors.right: clientCombo.left anchors.rightMargin: 8 } ComboBox { id: clientCombo currentIndex: runJobController.selectedClient model: runJobController.clientNames anchors.top: divider.bottom anchors.topMargin: 16 anchors.left: storageLabel.right anchors.leftMargin: 8 anchors.right: jobsCombo.right onCurrentIndexChanged: { runJobController.setClient(currentIndex) } delegate: ItemDelegate { width: clientCombo.width contentItem: Text { text: model.modelData color: "#000000" font: clientCombo.font elide: Text.ElideRight verticalAlignment: Text.AlignVCenter } highlighted: clientCombo.highlightedIndex === index } indicator: Canvas { } contentItem: Text { id: selectedClient leftPadding: 8 rightPadding: clientCombo.indicator.width + clientCombo.spacing text: clientCombo.displayText font: clientCombo.font color: clientCombo.pressed ? "#ef9a9a" : "#000000" verticalAlignment: Text.AlignVCenter elide: Text.ElideRight onTextChanged: { } } background: Rectangle { implicitWidth: 120 implicitHeight: 40 border.color: clientCombo.pressed ? "#ef9a9a" : "#d32f2f" border.width: clientCombo.visualFocus ? 2 : 1 radius: 2 } popup: Popup { y: clientCombo.height - 1 width: clientCombo.width implicitHeight: contentItem.implicitHeight padding: 1 contentItem: ListView { clip: true implicitHeight: contentHeight model: clientCombo.popup.visible ? clientCombo.delegateModel : null currentIndex: clientCombo.highlightedIndex ScrollIndicator.vertical: ScrollIndicator { } } background: Rectangle { border.color: "#d32f2f" radius: 2 } } } Text { id: filesetLabel text: "FileSet:" anchors.verticalCenter: filesetCombo.verticalCenter anchors.right: filesetCombo.left anchors.rightMargin: 8 } ComboBox { id: filesetCombo currentIndex: runJobController.selectedFileset model: runJobController.filesetNames anchors.top: clientCombo.bottom anchors.topMargin: 8 anchors.left: storageCombo.left anchors.right: jobsCombo.right onCurrentIndexChanged: { runJobController.setFileset(currentIndex) } delegate: ItemDelegate { width: filesetCombo.width contentItem: Text { text: model.modelData color: "#000000" font: filesetCombo.font elide: Text.ElideRight verticalAlignment: Text.AlignVCenter } highlighted: filesetCombo.highlightedIndex === index } indicator: Canvas { } contentItem: Text { id: selectedFileset leftPadding: 8 rightPadding: filesetCombo.indicator.width + filesetCombo.spacing text: filesetCombo.displayText font: filesetCombo.font color: filesetCombo.pressed ? 
"#ef9a9a" : "#000000" verticalAlignment: Text.AlignVCenter elide: Text.ElideRight onTextChanged: { } } background: Rectangle { implicitWidth: 120 implicitHeight: 40 border.color: filesetCombo.pressed ? "#ef9a9a" : "#d32f2f" border.width: filesetCombo.visualFocus ? 2 : 1 radius: 2 } popup: Popup { y: filesetCombo.height - 1 width: filesetCombo.width implicitHeight: contentItem.implicitHeight padding: 1 contentItem: ListView { clip: true implicitHeight: contentHeight model: filesetCombo.popup.visible ? filesetCombo.delegateModel : null currentIndex: filesetCombo.highlightedIndex ScrollIndicator.vertical: ScrollIndicator { } } background: Rectangle { border.color: "#d32f2f" radius: 2 } } } Text { id: poolLabel text: "Pool:" anchors.verticalCenter: poolCombo.verticalCenter anchors.right: poolCombo.left anchors.rightMargin: 8 } ComboBox { id: poolCombo currentIndex: runJobController.selectedPool model: runJobController.poolNames anchors.top: filesetCombo.bottom anchors.topMargin: 8 anchors.left: storageCombo.left anchors.right: jobsCombo.right onCurrentIndexChanged: { runJobController.setPool(currentIndex) } delegate: ItemDelegate { width: poolCombo.width contentItem: Text { text: model.modelData color: "#000000" font: poolCombo.font elide: Text.ElideRight verticalAlignment: Text.AlignVCenter } highlighted: poolCombo.highlightedIndex === index } indicator: Canvas { } contentItem: Text { id: selectedPool leftPadding: 8 rightPadding: poolCombo.indicator.width + poolCombo.spacing text: poolCombo.displayText font: poolCombo.font color: poolCombo.pressed ? "#ef9a9a" : "#000000" verticalAlignment: Text.AlignVCenter elide: Text.ElideRight onTextChanged: { } } background: Rectangle { implicitWidth: 120 implicitHeight: 40 border.color: poolCombo.pressed ? "#ef9a9a" : "#d32f2f" border.width: poolCombo.visualFocus ? 2 : 1 radius: 2 } popup: Popup { y: poolCombo.height - 1 width: poolCombo.width implicitHeight: contentItem.implicitHeight padding: 1 contentItem: ListView { clip: true implicitHeight: contentHeight model: poolCombo.popup.visible ? poolCombo.delegateModel : null currentIndex: poolCombo.highlightedIndex ScrollIndicator.vertical: ScrollIndicator { } } background: Rectangle { border.color: "#d32f2f" radius: 2 } } } Text { id: storageLabel text: "Storage:" anchors.verticalCenter: storageCombo.verticalCenter anchors.left: parent.left anchors.leftMargin: 16 } ComboBox { id: storageCombo currentIndex: runJobController.selectedStorage model: runJobController.storageNames anchors.top: poolCombo.bottom anchors.topMargin: 8 anchors.left: storageLabel.right anchors.leftMargin: 8 anchors.right: jobsCombo.right onCurrentIndexChanged: { runJobController.setStorage(currentIndex) } delegate: ItemDelegate { width: storageCombo.width contentItem: Text { text: model.modelData color: "#000000" font: storageCombo.font elide: Text.ElideRight verticalAlignment: Text.AlignVCenter } highlighted: storageCombo.highlightedIndex === index } indicator: Canvas { } contentItem: Text { id: selectedStorage leftPadding: 8 rightPadding: storageCombo.indicator.width + storageCombo.spacing text: storageCombo.displayText font: storageCombo.font color: storageCombo.pressed ? "#ef9a9a" : "#000000" verticalAlignment: Text.AlignVCenter elide: Text.ElideRight onTextChanged: { } } background: Rectangle { implicitWidth: 120 implicitHeight: 40 border.color: storageCombo.pressed ? "#ef9a9a" : "#d32f2f" border.width: storageCombo.visualFocus ? 
2 : 1 radius: 2 } popup: Popup { y: storageCombo.height - 1 width: storageCombo.width implicitHeight: contentItem.implicitHeight padding: 1 contentItem: ListView { clip: true implicitHeight: contentHeight model: storageCombo.popup.visible ? storageCombo.delegateModel : null currentIndex: storageCombo.highlightedIndex ScrollIndicator.vertical: ScrollIndicator { } } background: Rectangle { border.color: "#d32f2f" radius: 2 } } } Text { id: catalogLabel text: "Catalog:" anchors.verticalCenter: catalogCombo.verticalCenter anchors.right: catalogCombo.left anchors.rightMargin: 8 } ComboBox { id: catalogCombo currentIndex: runJobController.selectedCatalog model: runJobController.catalogNames anchors.top: storageCombo.bottom anchors.topMargin: 8 anchors.left: storageCombo.left anchors.right: jobsCombo.right onCurrentIndexChanged: { runJobController.setCatalog(currentIndex) } delegate: ItemDelegate { width: catalogCombo.width contentItem: Text { text: model.modelData color: "#000000" font: catalogCombo.font elide: Text.ElideRight verticalAlignment: Text.AlignVCenter } highlighted: catalogCombo.highlightedIndex === index } indicator: Canvas { } contentItem: Text { id: selectedCatalog leftPadding: 8 rightPadding: catalogCombo.indicator.width + catalogCombo.spacing text: catalogCombo.displayText font: catalogCombo.font color: catalogCombo.pressed ? "#ef9a9a" : "#000000" verticalAlignment: Text.AlignVCenter elide: Text.ElideRight onTextChanged: { } } background: Rectangle { implicitWidth: 120 implicitHeight: 40 border.color: catalogCombo.pressed ? "#ef9a9a" : "#d32f2f" border.width: catalogCombo.visualFocus ? 2 : 1 radius: 2 } popup: Popup { y: catalogCombo.height - 1 width: catalogCombo.width implicitHeight: contentItem.implicitHeight padding: 1 contentItem: ListView { clip: true implicitHeight: contentHeight model: catalogCombo.popup.visible ? catalogCombo.delegateModel : null currentIndex: catalogCombo.highlightedIndex ScrollIndicator.vertical: ScrollIndicator { } } background: Rectangle { border.color: "#d32f2f" radius: 2 } } } Text { id: priorityLabel text: "Priority:" anchors.verticalCenter: priorityBox.verticalCenter anchors.right: priorityBox.left anchors.rightMargin: 8 } TextField { id: priorityBox text: runJobController.priority maximumLength: 2 anchors.top: catalogCombo.bottom anchors.topMargin: 12 anchors.left: jobsCombo.left anchors.right: jobsCombo.right inputMethodHints: Qt.ImhDigitsOnly topPadding: 10 bottomPadding: 10 background: Rectangle { radius: 2 border.color: "#d32f2f" border.width: 1 } onTextChanged: runJobController.priority = text } Rectangle { id: runButton height: childrenRect.height anchors.left: parent.left anchors.right: parent.right anchors.bottom: parent.bottom color: clickArea.pressed ? 
"#b71c1c" : "#d32f2f" Text { text: qsTr("Run >") topPadding: 8 bottomPadding: 8 anchors.horizontalCenter: parent.horizontalCenter verticalAlignment: Text.AlignVCenter font.pixelSize: 18 color: "white" } } MouseArea { id: clickArea anchors.left: runButton.left anchors.right: runButton.right anchors.top: runButton.top anchors.bottom: runButton.bottom onClicked: runJobController.runJob() } MessageDialog { id: successDialog modality: Qt.ApplicationModal standardButtons: StandardButton.Ok visible: false onAccepted: { stackView.pop() } } PulseLoader { useDouble: true visible: runJobController.isConnecting radius: 28 color: "#d32f2f" anchors.horizontalCenter: parent.horizontalCenter anchors.bottom: runButton.top anchors.bottomMargin: 24 } // Text { // id: startDateLabel // text: "When:" // anchors.verticalCenter: startDate.verticalCenter // anchors.left: parent.left // anchors.leftMargin: 16 // } // TextField { // id: startDate // text: runJobController.startDate // placeholderText: qsTr("When") // anchors.top: catalogCombo.bottom // anchors.topMargin: 8 // anchors.left: startDateLabel.right // anchors.leftMargin: 8 // anchors.right: poolCombo.right // } } bacula-15.0.3/src/qt-console/tray-monitor/FileSelectTab.qml0000644000175000017500000001241014771010173023360 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2020 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ import QtQuick 2.10 import QtQuick.Window 2.10 import QtQuick.Layouts 1.3 import QtQuick.Controls 2.3 import QtQuick.Controls.Material 2.1 import QtQuick.Dialogs 1.2 import io.qt.bmob.traycontroller 1.0 Page { id: restoreJobPage visible: true height: parent ? parent.height : 0 width: parent ? parent.width : 0 property alias fileModel: fileTree.model header: ToolBar { id: toolbar height: 48 background: Rectangle { color: "#d32f2f" } RowLayout { anchors.fill: parent anchors.leftMargin: 8 anchors.rightMargin: 8 ToolButton { id: backButton onClicked: restorePageStack.currentIndex = 0 anchors.left: parent.left contentItem: Text { text: qsTr("<") font.pixelSize: 24 opacity: enabled ? 1.0 : 0.3 color: "white" } background: Rectangle { color: backButton.down ? 
"#b71c1c" : "#d32f2f" } } Label { id: titleLabel text: "2 - Select Files" color: "white" font.pixelSize: 18 anchors.centerIn: parent } } } ListView { id: fileTree clip: true anchors.left: parent.left anchors.right: parent.right anchors.top: parent.top anchors.bottom: nextButton.top model: restoreJobController.files boundsBehavior: Flickable.StopAtBounds delegate: Item { width: parent.width height: childrenRect.height Rectangle { anchors.left: parent.left anchors.right: parent.right anchors.top: parent.top anchors.bottom: divider.bottom color: "#e0e0e0" visible: clickArea.pressed } Text { id: node text: model.display padding: 12 font.pixelSize: 18 } MouseArea { id: clickArea anchors.left: parent.left anchors.right: parent.right anchors.top: parent.top anchors.bottom: divider.bottom onClicked: { restoreJobController.handleFileTreeNodeClick(index) } } CheckBox { id: checkbox visible: node.text !== ".." anchors.right: parent.right anchors.rightMargin: 8 anchors.verticalCenter: node.verticalCenter indicator: Rectangle { implicitWidth: 20 implicitHeight: 20 x: checkbox.leftPadding y: parent.height / 2 - height / 2 radius: 3 border.color: checkbox.down ? "#ef9a9a" : "#d32f2f" Rectangle { width: 8 height: 8 x: 6 y: 6 radius: 2 color: checkbox.down ? "#ef9a9a" : "#d32f2f" visible: checkbox.checked } } onCheckedChanged: { nextButton.visible = true nextClickArea.visible = true restoreJobController.handleFileSelection(index, checked) } Component.onCompleted: { checked = restoreJobController.fileIsSelected(index) } } Rectangle { id: divider anchors.left: parent.left anchors.right: parent.right anchors.top: node.bottom height: 1 color: "#e0e0e0" } } } PulseLoader { useDouble: true visible: restoreJobController.isConnecting radius: 28 color: "#d32f2f" anchors.horizontalCenter: parent.horizontalCenter anchors.bottom: nextButton.top anchors.bottomMargin: 24 } Rectangle { id: nextButton visible: false height: childrenRect.height anchors.left: parent.left anchors.right: parent.right anchors.bottom: parent.bottom color: nextClickArea.pressed ? "#b71c1c" : "#d32f2f" Text { text: qsTr("Next >") topPadding: 8 bottomPadding: 8 anchors.horizontalCenter: parent.horizontalCenter verticalAlignment: Text.AlignVCenter font.pixelSize: 18 color: "white" } } MouseArea { id: nextClickArea visible: false anchors.left: nextButton.left anchors.right: nextButton.right anchors.top: nextButton.top anchors.bottom: nextButton.bottom onClicked: restorePageStack.currentIndex = 2 } } bacula-15.0.3/src/qt-console/tray-monitor/runjob.h0000644000175000017500000000503514771010173021654 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ #ifndef RUN_H #define RUN_H #include "common.h" #include "ui_run.h" #include "tray_conf.h" #include "task.h" #include class RunJob: public QDialog { Q_OBJECT public: RESMON *res; QWidget *tabAdvanced; POOL_MEM command; POOL_MEM info; POOL_MEM level; POOL_MEM curjob; Ui::runForm ui; RunJob(RESMON *r); ~RunJob(); public slots: void jobChanged(int); void levelChanged(int); void jobStarted(task *); void jobInfo(task *); void fill_defaults(task *); void tabChange(int idx); void runjob(); /* close the window properly */ void close_cb(task *t); void close_cb(); }; /* Object that can scan a directory to find jobs */ class TSched: public QObject { Q_OBJECT private: char *command_dir; bool read_command_file(const char *file, alist *lst, btime_t mtime); int timer; public: TSched(); ~TSched(); void init(const char *cmd_dir); bool scan_for_commands(alist *lst); void start() { timer = startTimer(60000); // 1-minute timer }; void stop() { if (timer >= 0) { killTimer(timer); timer = -1; } }; public slots: void jobStarted(task *t); protected: void timerEvent(QTimerEvent *event); }; /* Job found in the command directory */ class TSchedJob: public QObject { Q_OBJECT public: char *component; // Name of the daemon char *command; // job command btime_t create_date; // When the command file was created TSchedJob() : component(NULL), command(NULL) {}; TSchedJob(const char *comp, const char *cmd, btime_t cd) { component = bstrdup(comp); command = bstrdup(cmd); create_date = cd; }; ~TSchedJob() { clear(); }; void clear() { if (component) { bfree_and_null(component); } if (command) { bfree_and_null(command); } create_date = 0; }; }; #endif bacula-15.0.3/src/qt-console/tray-monitor/pluginmodel.h0000644000175000017500000000161314771010173022672 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #ifndef PLUGINMODEL_H #define PLUGINMODEL_H enum { PluginNameRole = Qt::UserRole+1, PluginTypeRole = Qt::UserRole+2, PluginCommentRole = Qt::UserRole+3, PluginRequiredRole = Qt::UserRole+4, PluginDefaultRole = Qt::UserRole+5 }; #endif // PLUGINMODEL_H bacula-15.0.3/src/qt-console/tray-monitor/conf.cpp0000644000175000017500000002757614771010173021653 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ #include "conf.h" #include "tray-monitor.h" #include extern char *configfile; // defined in tray-monitor.cpp extern RES_TABLE resources[]; extern int32_t r_first; extern int32_t r_last; extern int32_t res_all_size; extern URES res_all; bool Conf::parse_config() { bool ret; config = New(CONFIG()); config->encode_password(false); config->init(configfile, NULL, M_ERROR, (void *)&res_all, res_all_size, r_first, r_last, resources, &rhead); ret = config->parse_config(); return ret; } /* Check for \ at the end */ static char *is_str_valid(POOLMEM **buf, const char *p) { char *p1; if (!p || !*p) { return NULL; } p1 = *buf = check_pool_memory_size(*buf, (strlen(p) + 1)); for (; *p ; p++) { if (*p == '\\') { *p1++ = '/'; } else if (*p == '"') { return NULL; } else { *p1++ = *p; } } *p1 = 0; return *buf; } /* The .toUtf8().data() function can be called only one time at a time */ void Conf::accept() { POOLMEM *buf = get_pool_memory(PM_FNAME); POOL_MEM tmp, tmp2, name; QString str; const char *restype=NULL, *p=NULL, *pname; struct stat sp; FILE *fp=NULL; int n; bool commit=false; bool doclose=true; bool ok; Mmsg(tmp, "%s.temp", configfile); fp = fopen(tmp.c_str(), "w"); if (!fp) { berrno be; display_error("Unable to open %s to write the new configuration file. ERR=%s\n", tmp.c_str(), be.bstrerror()); goto bail_out; } str = UIConf.editName->text(); p = is_str_valid(&buf, str.toUtf8().data()); if (!p) { display_error(_("The Name of the Monitor should be set")); doclose = false; goto bail_out; } fprintf(fp, "Monitor {\n Name=\"%s\"\n", p); n = UIConf.spinRefresh->value(); fprintf(fp, " Refresh Interval = %d\n", n); if (UIConf.cbDspAdvanced->isChecked()) { fprintf(fp, " Display Advanced Options = yes\n"); } str = UIConf.editCommandDir->text(); p = is_str_valid(&buf, str.toUtf8().data()); if (p) { // TODO: Check for \ at the end of the string fprintf(fp, " Command Directory = \"%s\"\n", p); } fprintf(fp, "}\n"); for (int i = 1; i < UIConf.tabWidget->count() ; i++) { ConfTab *t = (ConfTab *) UIConf.tabWidget->widget(i); if (t->isEnabled() == false) { continue; // This one was deleted } for(int i = 0; resources[i].name ; i++) { if (resources[i].rcode == t->res->type) { restype = resources[i].name; break; } } if (!restype) { goto bail_out; } str = t->ui.editName->text(); pname = is_str_valid(&buf, str.toUtf8().data()); if (!pname) { display_error(_("The name of the Resource should be set")); doclose = false; goto bail_out; } pm_strcpy(name, pname); str = t->ui.editAddress->text(); p = is_str_valid(&buf, str.toUtf8().data()); if (!p) { display_error(_("The address of the Resource should be set for resource %s"), name.c_str()); doclose = false; goto bail_out; } fprintf(fp, "%s {\n Name = \"%s\"\n Address = \"%s\"\n", restype, name.c_str(), p); str = t->ui.editPassword->text(); p = is_str_valid(&buf, str.toUtf8().data()); if (!p) { display_error(_("The Password of should be set for resource %s"), name.c_str()); doclose = false; goto bail_out; } fprintf(fp, " Password = \"%s\"\n", p); str = t->ui.editDescription->text(); p = is_str_valid(&buf, str.toUtf8().data()); if (p) { fprintf(fp, " Description = \"%s\"\n", p); } n = t->ui.editPort->text().toInt(&ok, 10); if (ok && n > 0 && n < 65636) { fprintf(fp, " Port = %d\n", n); } n = t->ui.editTimeout->text().toInt(&ok, 10); if (ok && n > 0) { fprintf(fp, " Connect Timeout = %d\n", n); } str = t->ui.editCaCertificateFile->text(); p = is_str_valid(&buf, str.toUtf8().data()); if (p) { if (stat(p, &sp) != 0 || !S_ISREG(sp.st_mode)) { display_error(_("The TLS CA 
Certificate File should be a PEM file for resource %s"), name.c_str()); doclose = false; goto bail_out; } fprintf(fp, " TLSCaCertificateFile = \"%s\"\n", p); } str = t->ui.editCaCertificateDir->text(); p = is_str_valid(&buf, str.toUtf8().data()); if (p) { if (stat(p, &sp) != 0 || !S_ISDIR(sp.st_mode)) { display_error(_("The TLS CA Certificate Directory should be a directory for resource %s"), name.c_str()); doclose = false; goto bail_out; } fprintf(fp, " TLSCaCertificateDir = \"%s\"\n", p); } str = t->ui.editCertificate->text(); p = is_str_valid(&buf, str.toUtf8().data()); if (p) { if (stat(p, &sp) != 0 || !S_ISREG(sp.st_mode)) { display_error(_("The TLS Certificate File should be a file for resource %s"), name.c_str()); doclose = false; goto bail_out; } fprintf(fp, " TLSCertificate = \"%s\"\n", p); } str = t->ui.editKey->text(); p = is_str_valid(&buf, str.toUtf8().data()); if (p) { if (stat(p, &sp) != 0 || !S_ISREG(sp.st_mode)) { display_error(_("The TLS Key File should be a file for resource %s"), name.c_str()); doclose = false; goto bail_out; } fprintf(fp, " TLSKey = \"%s\"\n", p); } if (t->ui.cbTLSEnabled->isChecked()) { fprintf(fp, " TLS Enable = yes\n"); } if (strcmp(restype, "client") == 0 && t->ui.cbRemote->isChecked()) { fprintf(fp, " Remote = yes\n"); } if (strcmp(restype, "director") == 0 && t->ui.cbUseSetIp->isChecked()) { fprintf(fp, " UseSetIp = yes\n"); } if (t->ui.cbMonitor->isChecked()) { fprintf(fp, " Monitor = yes\n"); } fprintf(fp, "}\n"); } commit = true; // Save the configuration file bail_out: if (fp) { fclose(fp); } if (commit) { // TODO: We probably need to load the configuration file to see if it works unlink(configfile); if (rename(tmp.c_str(), configfile) == 0) { reload(); } else { berrno be; display_error("Unable to write to the configuration file %s ERR=%s\n", configfile, be.bstrerror()); } } if (doclose) { close(); deleteLater(); } } Conf::~Conf() { if (config) { delete config; } } Conf::Conf(): QDialog() { RESMON *res; MONITOR *mon; rhead = NULL; items = 0; UIConf.setupUi(this); if (parse_config()) { for(res=NULL; (res=(RESMON *)GetNextRes(rhead, R_CLIENT, (RES*)res));) { addResource(res, res->hdr.name); } for(res=NULL; (res=(RESMON *)GetNextRes(rhead, R_DIRECTOR, (RES*)res));) { addResource(res, res->hdr.name); } for(res=NULL; (res=(RESMON *)GetNextRes(rhead, R_STORAGE, (RES*)res));) { addResource(res, res->hdr.name); } mon = (MONITOR *)GetNextRes(rhead, R_MONITOR, NULL); UIConf.editName->setText(QString(mon->hdr.name)); UIConf.spinRefresh->setValue(mon->RefreshInterval); UIConf.editCommandDir->setText(QString(NPRTB(mon->command_dir))); if (mon->display_advanced_options) { UIConf.cbDspAdvanced->setChecked(true); } } setAttribute(Qt::WA_DeleteOnClose, true); show(); } void Conf::addResource(RESMON *res, const char *title) { char ed1[50]; ConfTab *w = new ConfTab(res); w->ui.editName->setText(QString(res->hdr.name)); if (res->password) { w->ui.editPassword->setText(QString(res->password)); } if (res->type != R_CLIENT) { w->ui.cbRemote->hide(); w->ui.labelRemote->hide(); } else if (res->use_remote) { w->ui.cbRemote->setChecked(true); } if (res->type != R_DIRECTOR) { w->ui.cbUseSetIp->hide(); w->ui.labelSetIp->hide(); } else if (res->use_setip) { w->ui.cbUseSetIp->setChecked(true); } if (res->use_monitor) { w->ui.cbMonitor->setChecked(true); } w->ui.editAddress->setText(QString(res->address)); w->ui.editPort->setText(QString(edit_uint64(res->port, ed1))); w->ui.editTimeout->setText(QString(edit_uint64(res->connect_timeout, ed1))); if (!res->tls_enable) { if 
(w->ui.cbTLSEnabled->isChecked()) { emit w->ui.cbTLSEnabled->click(); } } if (res->tls_ca_certfile) { w->ui.editCaCertificateFile->setText(QString(res->tls_ca_certfile)); } if (res->tls_ca_certdir) { w->ui.editCaCertificateDir->setText(QString(res->tls_ca_certdir)); } if (res->tls_certfile) { w->ui.editCertificate->setText(QString(res->tls_certfile)); } if (res->tls_keyfile) { w->ui.editKey->setText(QString(res->tls_keyfile)); } UIConf.tabWidget->addTab(w, QString(title)); items++; } void Conf::addRes(int type, const char *title) { RESMON *res = (RESMON *) malloc(sizeof(RESMON)); init_resource(config, type, res, sizeof(RESMON)); res->type = type; // Not sure it's set by init_resource res->new_resource = true; // We want to free this resource with the ConfTab addResource(res, title); } void Conf::addDir() { addRes(R_DIRECTOR, "New Director"); } void Conf::addStore() { addRes(R_STORAGE, "New Storage"); } void Conf::addClient() { addRes(R_CLIENT, "New Client"); } void Conf::togglePassword() { if (passtype == QLineEdit::Normal) { passtype = QLineEdit::PasswordEchoOnEdit; } else { passtype = QLineEdit::Normal; } for (int i = 1; i < UIConf.tabWidget->count() ; i++) { ConfTab *tab = (ConfTab *) UIConf.tabWidget->widget(i); tab->ui.editPassword->setEchoMode(passtype); } } void Conf::selectCommandDir() { QString directory = QFileDialog::getExistingDirectory(this, tr("Select Command Directory"), QDir::currentPath()); UIConf.editCommandDir->setText(directory); } void ConfTab::selectCaCertificateFile() { QString directory = QFileDialog::getOpenFileName(this, tr("Select CA Certificate File PEM file"), QDir::currentPath()); ui.editCaCertificateFile->setText(directory); } void ConfTab::selectCaCertificateDir() { QString directory = QFileDialog::getExistingDirectory(this, tr("Select CA Certificate Directory"), QDir::currentPath()); ui.editCaCertificateDir->setText(directory); } void ConfTab::selectCertificate() { QString file = QFileDialog::getOpenFileName(this, tr("Select TLS Certificate File"), QDir::currentPath()); ui.editCertificate->setText(file); } void ConfTab::selectKey() { QString file = QFileDialog::getOpenFileName(this, tr("Select TLS Certificate File"), QDir::currentPath()); ui.editKey->setText(file); } bacula-15.0.3/src/qt-console/tray-monitor/restoreoptionswizardpage.cpp0000644000175000017500000000701714771010173026067 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * Restore Wizard : Options page * * Written by Norbert Bizet, May MMXVII * */ #include "common.h" #include "restoreoptionswizardpage.h" #include "ui_restoreoptionswizardpage.h" #include "common.h" #include "filesmodel.h" #include "task.h" #define TABLENAME "b21234" RestoreOptionsWizardPage::RestoreOptionsWizardPage(QWidget *parent) : QWizardPage(parent), ui(new Ui::RestoreOptionsWizardPage), res(0) { ui->setupUi(this); registerField("restoreClient", ui->restoreClientComboxBox); registerField("restoreWhere", ui->whereLineEdit); registerField("restoreReplace", ui->replaceComboBox); registerField("restoreComment", ui->commentLineEdit); } RestoreOptionsWizardPage::~RestoreOptionsWizardPage() { delete ui; } void RestoreOptionsWizardPage::initializePage() { /* first request synchronous */ if (res) { QStringList list; char *str; foreach_alist(str, res->clients) { list << QString(str); } ui->restoreClientComboxBox->clear(); if (!list.isEmpty()) { ui->restoreClientComboxBox->addItems(list); ui->restoreClientComboxBox->setEnabled(true); ui->restoreClientComboxBox->setCurrentIndex(0); } else { ui->restoreClientComboxBox->setEnabled(false); } /* find RestoreFiles default */ const char *p = "RestoreFiles"; POOL_MEM info; pm_strcpy(info, p); res->mutex->lock(); bfree_and_null(res->defaults.job); res->defaults.job = bstrdup(info.c_str()); res->mutex->unlock(); task t; t.init(res, -1); t.get_job_defaults(); ui->whereLineEdit->setText(res->defaults.where); ui->replaceComboBox->setCurrentIndex(res->defaults.replace); } } bool RestoreOptionsWizardPage::validatePage() { task *t = new task(); connect(t, SIGNAL(done(task*)), wizard(), SLOT(deleteLater())); connect(t, SIGNAL(done(task*)), t, SLOT(deleteLater())); t->init(res, TASK_RESTORE); t->restore_field.tableName = QString(TABLENAME); t->restore_field.jobIds = field("jobIds").toString(); t->restore_field.fileIds = field("fileIds").toString(); t->restore_field.dirIds = field("dirIds").toString(); t->restore_field.hardlinks = field("hardlinks").toString(); int idx = field("currentClient").toInt(); t->restore_field.client = QString((char*)res->clients->get(idx)); idx = field("restoreClient").toInt(); t->restore_field.restoreclient = QString((char*)res->clients->get(idx)); t->restore_field.where = field("restoreWhere").toString(); t->restore_field.replace = ui->replaceComboBox->currentText(); t->restore_field.comment = field("restoreComment").toString(); t->restore_field.pluginnames = field("pluginNames").toString(); t->restore_field.pluginkeys = field("pluginKeysStr").toString(); res->wrk->queue(t); return true; } bacula-15.0.3/src/qt-console/tray-monitor/tray-monitor.pro.mingw64.in0000644000175000017500000000725214771010173025274 0ustar bsbuildbsbuild# # Copyright (C) 2000-2023 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # ###################################################################### # # !!!!!!! IMPORTANT !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # # Edit only tray-monitor.pro.mingw64.in -- tray-monitor.pro.mingw64 is built by the ./configure program # # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
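#
# A minimal sketch of the substitution ./configure performs on this
# template (the resulting path below is illustrative, not a default):
# every @name@ placeholder, e.g. @sbindir@ or @sysconfdir@, is replaced
# with its configured value, so a line such as
#
#    bins.path = /$(DESTDIR)@sbindir@
#
# comes out in the generated tray-monitor.pro.mingw64 as something like
#
#    bins.path = /$(DESTDIR)/opt/bacula/bin
#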
# # CONFIG options for Windows are pulled from win32/qmake.conf # # Copyright (C) 2000-2022 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # CONFIG += qt cross-win32 #CONFIG += qt debug QT += widgets gui core cross-win32 { LIBS += ../../win32/lib/obj64/ini.o -mwindows -L../../win32/release64 -lbacula -lwinpthread INCLUDEPATH += ../../win32/compat } !cross-win32 { LIBS += -L../../lib -lbaccfg -lbac -L../../findlib -lbacfind @OPENSSL_LIBS@ } bins.path = /$(DESTDIR)@sbindir@ bins.files = bacula-tray-monitor confs.path = /$(DESTDIR)@sysconfdir@ confs.commands = ./install_conf_file TEMPLATE = app TARGET = bacula-tray-monitor QMAKE_EXTRA_TARGETS += depend DEPENDPATH += . INCLUDEPATH += ../.. . LIBTOOL_LINK = @QMAKE_LIBTOOL@ --silent --tag=CXX --mode=link LIBTOOL_INSTALL = @QMAKE_LIBTOOL@ --silent --mode=install QMAKE_LINK = $${LIBTOOL_LINK} $(CXX) QMAKE_INSTALL_PROGRAM = $${LIBTOOL_INSTALL} install -m @SBINPERM@ -p QMAKE_CLEAN += .libs/* bacula-tray-monitor release/bacula-tray-monitor QMAKE_CXXFLAGS += -DTRAY_MONITOR QMAKE_CFLAGS += -DTRAY_MONITOR RESOURCES = ../main.qrc MOC_DIR = moc64 OBJECTS_DIR = obj64 UI_DIR = ui64 QMAKE_CC = $(CXX) QMAKE_CXX = x86_64-w64-mingw32-g++ QMAKE_INCDIR = $(DEPKGS)/depkgs-mingw-w64/include/ ../win32/compat $(DEPKGS)/depkgs-mingw-w64/include/QtGui $(DEPKGS)/depkgs-mingw-w64/include/QtCore $(DEPKGS)/depkgs-mingw-w64/include/QtWidgets ui64/ QMAKE_INCDIR_QT = $(DEPKGS)/depkgs-mingw-w64/include/qt QMAKE_LIBDIR_QT = $(DEPKGS)/depkgs-mingw-w64/lib/qt QMAKE_LINK = x86_64-w64-mingw32-g++ QMAKE_LFLAGS = -mthreads -Wl,-enable-stdcall-fixup -Wl,-enable-auto-import -m64 -fno-strict-aliasing -Wl,-enable-runtime-pseudo-reloc QMAKE_LIB = x86_64-w64-mingw32-ar -ru QMAKE_RC = x86_64-w64-mingw32-windres # Main directory HEADERS += tray-monitor.h tray_conf.h tray-ui.h fdstatus.h task.h ../util/fmtwidgetitem.h dirstatus.h conf.h sdstatus.h runjob.h status.h restorewizard.h filesmodel.h clientselectwizardpage.h jobselectwizardpage.h fileselectwizardpage.h restoreoptionswizardpage.h pluginwizardpage.h SOURCES += tray-monitor.cpp tray_conf.cpp fdstatus.cpp task.cpp authenticate.cpp ../util/fmtwidgetitem.cpp dirstatus.cpp sdstatus.cpp conf.cpp runjob.cpp status.cpp restorewizard.cpp clientselectwizardpage.cpp jobselectwizardpage.cpp fileselectwizardpage.cpp restoreoptionswizardpage.cpp pluginwizardpage.cpp FORMS += fd-monitor.ui dir-monitor.ui sd-monitor.ui main-conf.ui res-conf.ui run.ui restorewizard.ui clientselectwizardpage.ui jobselectwizardpage.ui fileselectwizardpage.ui restoreoptionswizardpage.ui pluginwizardpage.ui # CDP Client CDP_DIR = ../../tools/cdp-client HEADERS += $$CDP_DIR/backupservice.h $$CDP_DIR/folderwatcher.h desktop-gui/cdp-main-ui.h $$CDP_DIR/cdp.h SOURCES += $$CDP_DIR/backupservice.cpp $$CDP_DIR/folderwatcher.cpp INCLUDEPATH += $$CDP_DIR # Journal JOURNAL_DIR = ../../plugins/fd HEADERS += $$JOURNAL_DIR/journal.h $$JOURNAL_DIR/file-record.h $$JOURNAL_DIR/folder-record.h $$JOURNAL_DIR/settings-record.h SOURCES += $$JOURNAL_DIR/journal.c INCLUDEPATH += $$JOURNAL_DIR TRANSLATIONS += ts/tm_fr.ts ts/tm_de.ts ts/tm_ja.ts bacula-15.0.3/src/qt-console/tray-monitor/enterprise-tray-ui-controller.cpp0000644000175000017500000000506514771010173026644 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. 
You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #include "enterprise-tray-ui-controller.h" EnterpriseTrayUiController::EnterpriseTrayUiController(QObject *parent): TrayUiController(parent) { QString configDir = QStandardPaths::writableLocation(QStandardPaths::AppDataLocation) + "/etc"; ConfigStorage *storage = &ConfigStorage::getInstance(); m_bweb = new BWebService(configDir, storage->config_path); m_bweb->m_fdPath = QStandardPaths::writableLocation(QStandardPaths::AppDataLocation); m_bweb->m_monitorName = AndroidFD::deviceId(); m_bweb->m_monitorPwd = AndroidFD::devicePwd(); connect(this, SIGNAL(onQRCodeRead(QString)), this, SLOT(handleQRCodeData(QString))); } void EnterpriseTrayUiController::handleActivityResult(int receiverRequestCode, int resultCode, const QAndroidJniObject & data) { jint RESULT_OK = QAndroidJniObject::getStaticField("android/app/Activity", "RESULT_OK"); if (resultCode == RESULT_OK && data.isValid()) { QAndroidJniObject key = QAndroidJniObject::fromString("barcode.data"); QString bwebUrl = data.callObjectMethod("getStringExtra" , "(Ljava/lang/String;)Ljava/lang/String;" , key.object() ).toString(); // We can't use the network here // because this method is called from a secondary thread. // So we just emit a signal and do the network call elsewhere; emit onQRCodeRead(bwebUrl); } } void EnterpriseTrayUiController::handleQRCodeData(QString bwebUrl) { if (!isConnecting()) { setIsConnecting(true); m_bweb->doLinkRegistration(bwebUrl, this); } } void EnterpriseTrayUiController::onBWebSuccess(QString msg) { setIsConnecting(false); setInfoDialogText(msg); } void EnterpriseTrayUiController::onBWebError(QString msg) { setIsConnecting(false); setInfoDialogText(msg); } EnterpriseTrayUiController::~EnterpriseTrayUiController() { delete m_bweb; } bacula-15.0.3/src/qt-console/tray-monitor/runjobmodel.h0000644000175000017500000000247614771010173022703 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #ifndef RUNJOBMODEL_H #define RUNJOBMODEL_H #include #include #include "tray_conf.h" /* RunJobModel - contains the director that the App is connected with. The only reason for the existence of this class is the fact we can't send a RESMON struct between QML screens. Therefore, we need to wrap it into a class that extends QObject. 
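
   For illustration, the flow visible in the surrounding sources is:
   ResDetailsUiController::createBackupModelCallback() wraps the director
   with setDirector(t->res) and emits runJobModelCreated(model); the QML
   page (RunJobPage.qml) receives the object in its "model" property and
   hands it to RunJobUiController, whose setModel() reads the RESMON back
   through getDirector(). A hypothetical QML handler forwarding the signal
   to the page could look like:

      onRunJobModelCreated: stackView.push("RunJobPage.qml", { "model": model })

   (the component name and push arguments are illustrative only).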
*/ class RunJobModel : public QObject { Q_OBJECT private: RESMON *m_director; public: explicit RunJobModel(QObject *parent = nullptr); ~RunJobModel(); void setDirector(RESMON *dir) { m_director = dir; } RESMON *getDirector() { return m_director; } signals: }; #endif // RUNJOBMODEL_H bacula-15.0.3/src/qt-console/tray-monitor/restorewizard.cpp0000644000175000017500000000316514771010173023616 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Restore Wizard * * Written by Norbert Bizet, May MMXVII * */ #include "common.h" #include "restorewizard.h" #include "ui_restorewizard.h" #include "filesmodel.h" #include "task.h" #include RestoreWizard::RestoreWizard(RESMON *r, QWidget *parent) : QWizard(parent), res(r), ui(new Ui::RestoreWizard) { ui->setupUi(this); ui->RestWizClientPage->setRes(res); ui->RestWizJobSelectPage->setRes(res); ui->RestWiFileSelectionPage->setRes(res); ui->RestWizPluginPage->setRes(res); ui->RestWizAdvancedOptionsPage->setRes(res); /* avoid connect warnings */ qRegisterMetaType("Qt::Orientation"); qRegisterMetaType< QList < QPersistentModelIndex > >("QList"); qRegisterMetaType< QVector < int > >("QVector"); #if QT_VERSION >= 0x050000 qRegisterMetaType< QAbstractItemModel::LayoutChangeHint >("QAbstractItemModel::LayoutChangeHint"); #endif } RestoreWizard::~RestoreWizard() { delete ui; } bacula-15.0.3/src/qt-console/tray-monitor/common.h0000644000175000017500000000300314771010173021636 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ #ifndef TRAY_MONITOR_COMMON_H #define TRAY_MONITOR_COMMON_H #if defined(HAVE_WIN32) #if !defined(_STAT_H) #define _STAT_H /* don't pull in MinGW stat.h */ #endif #ifndef _STAT_DEFINED #define _STAT_DEFINED /* don't pull in MinGW stat.h */ #endif #endif #if defined(HAVE_WIN32) #if defined(HAVE_MINGW) #include "mingwconfig.h" #else #include "winconfig.h" #endif #else #include "config.h" #endif #define __CONFIG_H #if QT_VERSION >= 0x050000 #include #else #include #endif #include #include "bacula.h" class DbgLine { public: const char *funct; DbgLine(const char *fun): funct(fun) { Dmsg2(0, "[%p] -> %s\n", bthread_get_thread_id(), funct); } ~DbgLine() { Dmsg2(0, "[%p] <- %s\n", bthread_get_thread_id(), funct); } }; #ifdef Enter #undef Enter #endif //#define Enter() DbgLine _enter(__PRETTY_FUNCTION__) #define Enter() #endif bacula-15.0.3/src/qt-console/tray-monitor/runjob-ui-controller.h0000644000175000017500000002232414771010173024450 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #ifndef RUNJOBUICONTROLLER_H #define RUNJOBUICONTROLLER_H #include #include #include #include #include "conf.h" #include "tray_conf.h" #include "runjobmodel.h" #include "task.h" /* RunJobUiController - controls the screen where the user specifies which Job it wants to run */ class RunJobUiController : public QObject { Q_OBJECT // Contains the Director that will run the Job Q_PROPERTY(RunJobModel *model WRITE setModel) Q_PROPERTY(bool loadedModel READ loadedModel) // Lists of options that the user can choose for the desired Job Q_PROPERTY(QStringList jobNames READ getJobNames NOTIFY jobNamesChanged()) Q_PROPERTY(QStringList jobLevels READ getJobLevels NOTIFY jobLevelsChanged()) Q_PROPERTY(QStringList clientNames READ getClientNames NOTIFY clientNamesChanged()) Q_PROPERTY(QStringList filesetNames READ getFilesetNames NOTIFY filesetNamesChanged()) Q_PROPERTY(QStringList poolNames READ getPoolNames NOTIFY poolNamesChanged()) Q_PROPERTY(QStringList storageNames READ getStorageNames NOTIFY storageNamesChanged()) Q_PROPERTY(QStringList catalogNames READ getCatalogNames NOTIFY catalogNamesChanged()) // Indexes of user selected options Q_PROPERTY(int selectedLevel READ getLevel WRITE setLevel NOTIFY levelChanged()) Q_PROPERTY(int selectedClient READ getClient WRITE setClient NOTIFY clientChanged()) Q_PROPERTY(int selectedFileset READ getFileset WRITE setFileset NOTIFY filesetChanged()) Q_PROPERTY(int selectedPool READ getPool WRITE setPool NOTIFY poolChanged()) Q_PROPERTY(int selectedStorage READ getStorage WRITE setStorage NOTIFY storageChanged()) Q_PROPERTY(int selectedCatalog READ getCatalog WRITE setCatalog NOTIFY catalogChanged()) // Information about the Job files (total size, number of files) Q_PROPERTY(QString filesDescription READ getFilesDescription WRITE setFilesDescription NOTIFY filesDescriptionChanged()) // Job priority Q_PROPERTY(QString priority READ getPriority WRITE setPriority NOTIFY priorityChanged()) Q_PROPERTY(bool isConnecting READ 
isConnecting WRITE setIsConnecting NOTIFY isConnectingChanged) // Q_PROPERTY(QString startDate READ getStartDate WRITE setStartDate NOTIFY startDateChanged()) private: int m_selectedJobIndex; int m_selectedLevel = 0; int m_selectedClient= 0; int m_selectedFileset = 0; int m_selectedPool= 0; int m_selectedStorage = 0; int m_selectedCatalog = 0; ConfigStorage *m_config; QStringList m_jobNames; QStringList m_jobLevel; QStringList m_clientNames; QStringList m_filesetNames; QStringList m_poolNames; QStringList m_storageNames; QStringList m_catalogNames; QString m_startDate; QString m_filesDescription; QString m_priority = "10"; RunJobModel *m_rJobModel; bool m_loadedModel = false; bool m_connecting = false; int findIndex(QStringList list, QString str); public: explicit RunJobUiController(QObject *parent = nullptr); ~RunJobUiController(); // Getters / Setters for data that is used by our GUI bool loadedModel() { return m_loadedModel; } QString getPriority() { return m_priority; } QString getFilesDescription() { return m_filesDescription; } QStringList getJobNames() { return m_jobNames; } QStringList getJobLevels() { return m_jobLevel; } QStringList getFilesetNames() { return m_filesetNames; } QStringList getClientNames() { return m_clientNames; } QStringList getPoolNames() { return m_poolNames; } QStringList getStorageNames() { return m_storageNames; } QStringList getCatalogNames() { return m_catalogNames; } int getLevel() { return m_selectedLevel; } int getClient() { return m_selectedClient; } int getFileset() { return m_selectedFileset; } int getPool() { return m_selectedPool; } int getStorage() { return m_selectedStorage; } int getCatalog() { return m_selectedCatalog; } bool isConnecting() { return m_connecting; } // QString getStartDate() { return m_startDate; } void setIsConnecting(bool isConnecting) { if (m_connecting != isConnecting) { m_connecting = isConnecting; emit isConnectingChanged(); } } void setJobLevels(QStringList jobLevels) { m_jobLevel = jobLevels; emit jobLevelsChanged(); } void setFilesDescription(QString filesDesc) { m_filesDescription = filesDesc; emit filesDescriptionChanged(); } void setJobNames(alist *jobNames) { char *name; foreach_alist(name, jobNames) { m_jobNames << QString(name); } emit jobNamesChanged(); } void setClientNames(alist *clientNames) { char *name; foreach_alist(name, clientNames) { m_clientNames << QString(name); } emit clientNamesChanged(); } void setFilesetNames(alist *filesetNames) { char *name; foreach_alist(name, filesetNames) { m_filesetNames << QString(name); } emit filesetNamesChanged(); } void setPoolNames(alist *poolNames) { char *name; foreach_alist(name, poolNames) { m_poolNames << QString(name); } emit poolNamesChanged(); } void setStorageNames(alist *storageNames) { char *name; foreach_alist(name, storageNames) { m_storageNames << QString(name); } emit storageNamesChanged(); } void setCatalogNames(alist *catalogNames) { char *name; foreach_alist(name, catalogNames) { m_catalogNames << QString(name); } emit catalogNamesChanged(); } void setPriority(const QString &priority) { m_priority = priority; } // void setStartDate(QString startDate) { // m_startDate = startDate; // emit startDateChanged(); // } // Model provided by previous screen void setModel(RunJobModel *model) { RESMON *dir = model->getDirector(); m_rJobModel = model; setJobNames(model->getDirector()->jobs); // int year = QDate::currentDate().year(); // int month = QDate::currentDate().month(); // int day = QDate::currentDate().day(); // QString date("%1 / %2 / %3"); // date = 
date.arg(year).arg(month).arg(day); // QString time = QTime::currentTime().toString(); // setStartDate(date.append(" - ").append(time)); setClientNames(dir->clients); setFilesetNames(dir->filesets); setPoolNames(dir->pools); setStorageNames(dir->storages); setCatalogNames(dir->catalogs); QStringList levels; levels << "Full" << "Incremental" << "Differential"; setJobLevels(levels); m_loadedModel = true; } public slots: // When the User selects a Job, we ask the Director for information related to it void handleSelectedJobChange(int newIndex, QString jobLevel); void jobInfoCallback(task *t); void jobDefaultsCallback(task *t); void setLevel(int level) { if (m_selectedLevel != level) { m_selectedLevel = level; emit levelChanged(); } } void setClient(int client) { if (m_selectedClient != client) { m_selectedClient = client; emit clientChanged(); } } void setFileset(int fs) { if (m_selectedFileset != fs) { m_selectedFileset = fs; emit filesetChanged(); } } void setPool(int pool) { if (m_selectedPool != pool) { m_selectedPool = pool; emit poolChanged(); } } void setStorage(int storage) { if (m_selectedStorage != storage) { m_selectedStorage = storage; emit storageChanged(); } } void setCatalog(int catalog) { if(m_selectedCatalog != catalog) { m_selectedCatalog = catalog; emit catalogChanged(); } } // Called when the user wishes to run the Job void runJob(); // Called after we ask the Director to run the Job void runJobCallback(task *t); signals: // void startDateChanged(); // Events that are emitted when the lists changed void jobNamesChanged(); void jobLevelsChanged(); void clientNamesChanged(); void filesetNamesChanged(); void poolNamesChanged(); void storageNamesChanged(); void catalogNamesChanged(); // Events that are emitted when the User selects one option void levelChanged(); void clientChanged(); void filesetChanged(); void poolChanged(); void storageChanged(); void catalogChanged(); void filesDescriptionChanged(); void priorityChanged(); // Emitted when we successfully requested the Director to run the Job void jobSuccess(); void isConnectingChanged(); }; #endif // RUNJOBUICONTROLLER_H bacula-15.0.3/src/qt-console/tray-monitor/clientselectwizardpage.h0000644000175000017500000000240014771010173025102 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * Restore Wizard: Client selection page * * Written by Norbert Bizet, May MMXVII * */ #ifndef CLIENTSELECTWIZARDPAGE_H #define CLIENTSELECTWIZARDPAGE_H #include class RESMON; namespace Ui { class ClientSelectWizardPage; } class ClientSelectWizardPage : public QWizardPage { Q_OBJECT public: explicit ClientSelectWizardPage(QWidget *parent = 0); ~ClientSelectWizardPage(); /* QWizardPage interface */ void initializePage(); /* local interface */ inline void setRes(RESMON *r) {res=r;} private: Ui::ClientSelectWizardPage *ui; RESMON *res; }; #endif // CLIENTSELECTWIZARDPAGE_H bacula-15.0.3/src/qt-console/tray-monitor/ResourceListItem.qml0000644000175000017500000000647714771010173024174 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2020 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ import QtQuick 2.10 import QtQuick.Window 2.10 import QtQuick.Layouts 1.3 import QtQuick.Controls 2.3 import QtQuick.Controls.Material 2.1 import QtQuick.Dialogs 1.2 Item { id: listItem width: parent.width height: childrenRect.height Rectangle { anchors.left: parent.left anchors.right: parent.right anchors.top: parent.top anchors.bottom: divider.bottom color: "#e0e0e0" visible: clickArea.pressed } property alias name: resName.text property alias address: resAddress.text property alias port: resPort.text property alias delButton: deleteButton property alias clickArea: clickArea property alias deletable: deleteButton.visible Text { id: nameLabel text: "Name:" anchors.top: parent.top anchors.topMargin: 16 anchors.left: parent.left anchors.leftMargin: 16 } Text { id: resName anchors.top: parent.top anchors.topMargin: 16 anchors.left: nameLabel.right anchors.leftMargin: 8 } Text { id: addressLabel text: "Address:" anchors.top: resName.bottom anchors.left: parent.left anchors.leftMargin: 16 anchors.topMargin: 8 } Text { id: resAddress anchors.top: nameLabel.bottom anchors.left: addressLabel.right anchors.leftMargin: 8 anchors.topMargin: 8 } Text { id: portLabel text: "Port:" anchors.top: addressLabel.bottom anchors.left: parent.left anchors.leftMargin: 16 anchors.topMargin: 8 } Text { id: resPort anchors.top: addressLabel.bottom anchors.left: portLabel.right anchors.leftMargin: 8 anchors.topMargin: 8 } Rectangle { id: divider anchors.left: parent.left anchors.right: parent.right anchors.top: resPort.bottom anchors.topMargin: 16 height: 1 color: "#e0e0e0" } MouseArea { id: clickArea anchors.left: parent.left anchors.right: parent.right anchors.top: parent.top anchors.bottom: divider.bottom } ToolButton { id: deleteButton anchors.right: parent.right anchors.rightMargin: 10 anchors { top: parent.top topMargin: 8 right: parent.right rightMargin: 8 } contentItem: Text { text: qsTr("x") font.pixelSize: 20 color: deleteButton.down ? 
"#ffcdd2" : "#d32f2f" } background: Rectangle { color: "#ffffff" } } } bacula-15.0.3/src/qt-console/tray-monitor/restoreoptionswizardpage.h0000644000175000017500000000251714771010173025534 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Restore Wizard: Options page * * Written by Norbert Bizet, May MMXVII * */ #ifndef RESTOREOPTIONSWIZARDPAGE_H #define RESTOREOPTIONSWIZARDPAGE_H #include class alist; class QStandardItemModel; class RESMON; class task; namespace Ui { class RestoreOptionsWizardPage; } class RestoreOptionsWizardPage : public QWizardPage { Q_OBJECT public: explicit RestoreOptionsWizardPage(QWidget *parent = 0); ~RestoreOptionsWizardPage(); /* QWizardPage interface */ void initializePage(); bool validatePage(); /* local interface */ inline void setRes(RESMON *r) {res=r;} private: Ui::RestoreOptionsWizardPage *ui; RESMON *res; }; #endif // RESTOREOPTIONSWIZARDPAGE_H bacula-15.0.3/src/qt-console/tray-monitor/fileselectwizardpage.ui0000644000175000017500000001055414771010173024742 0ustar bsbuildbsbuild FileSelectWizardPage 0 0 698 484 WizardPage Drag from left to right false QAbstractItemView::NoEditTriggers false true QAbstractItemView::DragOnly false QAbstractItemView::ExtendedSelection QAbstractItemView::SelectRows false false false false Filter false QAbstractItemView::NoEditTriggers true false QAbstractItemView::DropOnly Qt::CopyAction QAbstractItemView::ExtendedSelection QAbstractItemView::SelectRows false false false false false bacula-15.0.3/src/qt-console/tray-monitor/ResourceStatusPage.qml0000644000175000017500000000753614771010173024517 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2020 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ import QtQuick 2.10 import QtQuick.Window 2.10 import QtQuick.Layouts 1.3 import QtQuick.Controls 2.3 import QtQuick.Controls.Material 2.1 import QtQuick.Dialogs 1.2 Item { width: parent.width height: childrenRect.height property bool connected: false Text { id: versionLabel text: "Version:" visible: connected anchors.top: parent.top anchors.topMargin: 16 anchors.left: parent.left anchors.leftMargin: 16 } Text { id: version visible: connected text: resDetailsController.resourceVersion anchors.top: parent.top anchors.topMargin: 16 anchors.left: versionLabel.right anchors.leftMargin: 8 } Text { id: startedAtLabel visible: connected text: "Started:" anchors.top: versionLabel.bottom anchors.left: parent.left anchors.leftMargin: 16 anchors.topMargin: 8 } Text { id: startedAt visible: connected text: resDetailsController.startedDate anchors.top: versionLabel.bottom anchors.left: startedAtLabel.right anchors.leftMargin: 8 anchors.topMargin: 8 } Text { id: pluginsLabel visible: connected text: "Plugins:" anchors.top: startedAtLabel.bottom anchors.left: parent.left anchors.leftMargin: 16 anchors.topMargin: 8 } Text { id: plugins visible: connected text: resDetailsController.resourcePlugins anchors.top: startedAtLabel.bottom anchors.left: pluginsLabel.right anchors.leftMargin: 8 anchors.topMargin: 8 } Text { id: bandwidthLabel visible: connected text: "Bandwidth Limit:" anchors.top: pluginsLabel.bottom anchors.left: parent.left anchors.leftMargin: 16 anchors.topMargin: 8 } Text { id: bandwidthLimit visible: connected text: resDetailsController.bandwidthLimit anchors.top: pluginsLabel.bottom anchors.left: bandwidthLabel.right anchors.leftMargin: 8 anchors.topMargin: 8 } // Bottom Margin Item { height: 16 anchors.top: bandwidthLabel.bottom } Button { id: runJobButton visible: connected && resDetailsController.canRunJobs() text: "Run Job" onClicked: resDetailsController.createRunJobModel() anchors.right: parent.right anchors.rightMargin: 8 anchors.verticalCenter: bandwidthLimit.verticalCenter contentItem: Text { text: runJobButton.text font: runJobButton.font color: runJobButton.down ? "#ef9a9a" : "#d32f2f" } background: Rectangle { color: "white" } } Button { id: restoreJobButton visible: connected && resDetailsController.canRunJobs() text: "Restore" onClicked: resDetailsController.createRestoreJobModel() anchors.right: runJobButton.left anchors.rightMargin: 8 anchors.verticalCenter: bandwidthLimit.verticalCenter contentItem: Text { text: restoreJobButton.text font: restoreJobButton.font color: restoreJobButton.down ? "#ef9a9a" : "#d32f2f" } background: Rectangle { color: "white" } } } bacula-15.0.3/src/qt-console/tray-monitor/sdstatus.h0000644000175000017500000000206314771010173022225 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
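*/

/*
 * SDStatus -- status tab for a Storage Daemon resource.
 *
 * The refresh button is wired to doUpdate(), which -- like the other status
 * tabs in this directory -- queues an asynchronous TASK_STATUS task on the
 * resource's worker thread; the result comes back through the task's done()
 * signal into taskDone().  A minimal sketch of that flow, using the
 * task/worker API as it appears in status.cpp:
 *
 *   task *t = new task();
 *   connect(t, SIGNAL(done(task *)), this, SLOT(taskDone(task *)),
 *           Qt::QueuedConnection);
 *   t->init(res, TASK_STATUS);
 *   res->wrk->queue(t);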
*/ #include "common.h" #include "ui_sd-monitor.h" #include "task.h" #include "status.h" class SDStatus: public ResStatus { Q_OBJECT public: Ui::sdStatus status; SDStatus(RESMON *d): ResStatus(d) { status.setupUi(this); QObject::connect(status.pushButton, SIGNAL(clicked()), this, SLOT(doUpdate()), Qt::QueuedConnection); }; ~SDStatus() { }; public slots: void doUpdate(); void taskDone(task *); }; bacula-15.0.3/src/qt-console/tray-monitor/tray_conf.cpp0000644000175000017500000003264414771010173022702 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Main configuration file parser for Bacula Tray Monitor. * * Adapted from dird_conf.c * * Note, the configuration file parser consists of three parts * * 1. The generic lexical scanner in lib/lex.c and lib/lex.h * * 2. The generic config scanner in lib/parse_config.c and * lib/parse_config.h. * These files contain the parser code, some utility * routines, and the common store routines (name, int, * string). * * 3. The daemon specific file, which contains the Resource * definitions as well as any specific store routines * for the resource records. * * Nicolas Boichat, August MMIV * */ #include "common.h" #include "tray_conf.h" worker *worker_start(); void worker_stop(worker *); /* Define the first and last resource ID record * types. Note, these should be unique for each * daemon though not a requirement. */ int32_t r_first = R_FIRST; int32_t r_last = R_LAST; RES_HEAD **res_head; /* We build the current resource here as we are * scanning the resource configuration definition, * then move it to allocated memory when the resource * scan is complete. */ URES res_all; int32_t res_all_size = sizeof(res_all); /* Definition of records permitted within each * resource with the routine to process the record * information. NOTE! quoted names must be in lower case. 
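*/

/*
 * For illustration only: a hypothetical bacula-tray-monitor.conf fragment
 * built from the directive names defined in the tables below (all values
 * are made up):
 *
 *   Monitor {
 *     Name = "local-monitor"
 *     RefreshInterval = 60
 *   }
 *   Client {
 *     Name = "local-fd"
 *     Address = "127.0.0.1"
 *     Port = 9102
 *     Password = "notarealpassword"
 *   }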
*/ /* * Monitor Resource * * name handler value code flags default_value */ static RES_ITEM mon_items[] = { {"Name", store_name, ITEM(res_monitor.hdr.name), 0, ITEM_REQUIRED, 0}, {"Description", store_str, ITEM(res_monitor.hdr.desc), 0, 0, 0}, {"requiressl", store_bool, ITEM(res_monitor.require_ssl), 1, ITEM_DEFAULT, 0}, {"RefreshInterval", store_time,ITEM(res_monitor.RefreshInterval), 0, ITEM_DEFAULT, 60}, {"CommCompression", store_bool, ITEM(res_monitor.comm_compression), 0, ITEM_DEFAULT, true}, {"CommandDirectory", store_dir, ITEM(res_monitor.command_dir), 0, 0, 0}, {"DisplayAdvancedOptions", store_bool, ITEM(res_monitor.display_advanced_options), 0, 0, 0}, {NULL, NULL, {0}, 0, 0, 0} }; /* Director's that we can contact */ static RES_ITEM dir_items[] = { {"Name", store_name, ITEM(res_main.hdr.name), 0, ITEM_REQUIRED, 0}, {"Description", store_str, ITEM(res_main.hdr.desc), 0, 0, 0}, {"Port", store_pint32, ITEM(res_main.port), 0, ITEM_DEFAULT, 9101}, {"Address", store_str, ITEM(res_main.address), 0, ITEM_REQUIRED, 0}, {"Password", store_password, ITEM(res_main.password), 0, ITEM_REQUIRED, 0}, {"Monitor", store_bool, ITEM(res_main.use_monitor), 0, ITEM_DEFAULT, 0}, {"ConnectTimeout", store_time,ITEM(res_main.connect_timeout), 0, ITEM_DEFAULT, 10}, {"UseSetIp", store_bool, ITEM(res_main.use_setip), 0, 0, 0}, {"TlsEnable", store_bool, ITEM(res_main.tls_enable), 0, 0, 0}, {"TlsPskEnable", store_bool, ITEM(res_main.tls_psk_enable), 0, ITEM_DEFAULT, tls_psk_default}, {"TlsCaCertificateFile", store_dir, ITEM(res_main.tls_ca_certfile), 0, 0, 0}, {"TlsCaCertificateDir", store_dir, ITEM(res_main.tls_ca_certdir), 0, 0, 0}, {"TlsCertificate", store_dir, ITEM(res_main.tls_certfile), 0, 0, 0}, {"TlsKey", store_dir, ITEM(res_main.tls_keyfile), 0, 0, 0}, {NULL, NULL, {0}, 0, 0, 0} }; /* * Client or File daemon resource * * name handler value code flags default_value */ static RES_ITEM cli_items[] = { {"Name", store_name, ITEM(res_main.hdr.name), 0, ITEM_REQUIRED, 0}, {"Description", store_str, ITEM(res_main.hdr.desc), 0, 0, 0}, {"Address", store_str, ITEM(res_main.address), 0, ITEM_REQUIRED, 0}, {"Port", store_pint32, ITEM(res_main.port), 0, ITEM_DEFAULT, 9102}, {"Password", store_password, ITEM(res_main.password), 0, ITEM_REQUIRED, 0}, {"ConnectTimeout", store_time,ITEM(res_main.connect_timeout), 0, ITEM_DEFAULT, 10}, {"Remote", store_bool, ITEM(res_main.use_remote), 0, ITEM_DEFAULT, 0}, {"Monitor", store_bool, ITEM(res_main.use_monitor), 0, ITEM_DEFAULT, 0}, {"Managed", store_bool, ITEM(res_main.managed), 0, ITEM_DEFAULT, 0}, {"TlsEnable", store_bool, ITEM(res_main.tls_enable), 0, 0, 0}, {"TlsPskEnable", store_bool, ITEM(res_main.tls_psk_enable), 0, ITEM_DEFAULT, tls_psk_default}, {"TlsCaCertificateFile", store_dir, ITEM(res_main.tls_ca_certfile), 0, 0, 0}, {"TlsCaCertificateDir", store_dir, ITEM(res_main.tls_ca_certdir), 0, 0, 0}, {"TlsCertificate", store_dir, ITEM(res_main.tls_certfile), 0, 0, 0}, {"TlsKey", store_dir, ITEM(res_main.tls_keyfile), 0, 0, 0}, {NULL, NULL, {0}, 0, 0, 0} }; /* Storage daemon resource * * name handler value code flags default_value */ static RES_ITEM store_items[] = { {"Name", store_name, ITEM(res_main.hdr.name), 0, ITEM_REQUIRED, 0}, {"Description", store_str, ITEM(res_main.hdr.desc), 0, 0, 0}, {"Port", store_pint32, ITEM(res_main.port), 0, ITEM_DEFAULT, 9103}, {"Address", store_str, ITEM(res_main.address), 0, ITEM_REQUIRED, 0}, {"Password", store_password, ITEM(res_main.password), 0, ITEM_REQUIRED, 0}, {"ConnectTimeout", store_time,ITEM(res_main.connect_timeout), 0, 
ITEM_DEFAULT, 10}, {"Monitor", store_bool, ITEM(res_main.use_monitor), 0, ITEM_DEFAULT, 0}, {"TlsEnable", store_bool, ITEM(res_main.tls_enable), 0, 0, 0}, {"TlsPskEnable", store_bool, ITEM(res_main.tls_psk_enable), 0, ITEM_DEFAULT, tls_psk_default}, {"TlsCaCertificateFile", store_dir, ITEM(res_main.tls_ca_certfile), 0, 0, 0}, {"TlsCaCertificateDir", store_dir, ITEM(res_main.tls_ca_certdir), 0, 0, 0}, {"TlsCertificate", store_dir, ITEM(res_main.tls_certfile), 0, 0, 0}, {"TlsKey", store_dir, ITEM(res_main.tls_keyfile), 0, 0, 0}, {NULL, NULL, {0}, 0, 0, 0} }; /* * This is the master resource definition. * It must have one item for each of the resources. * * NOTE!!! keep it in the same order as the R_codes * or eliminate all resources[rindex].name * * name items rcode res_head */ RES_TABLE resources[] = { {"monitor", mon_items, R_MONITOR}, {"director", dir_items, R_DIRECTOR}, {"client", cli_items, R_CLIENT}, {"storage", store_items, R_STORAGE}, {NULL, NULL, 0} }; /* Dump contents of resource */ void dump_resource(int type, RES *ares, void sendit(void *sock, const char *fmt, ...), void *sock) { RES *next; URES *res = (URES *)ares; bool recurse = true; if (res == NULL) { sendit(sock, _("No %s resource defined\n"), res_to_str(type)); return; } if (type < 0) { /* no recursion */ type = - type; recurse = false; } switch (type) { case R_MONITOR: sendit(sock, _("Monitor: name=%s\n"), ares->name); break; case R_DIRECTOR: sendit(sock, _("Director: name=%s address=%s port=%d\n"), res->res_main.hdr.name, res->res_main.address, res->res_main.port); break; case R_CLIENT: sendit(sock, _("Client: name=%s address=%s port=%d\n"), res->res_main.hdr.name, res->res_main.address, res->res_main.port); break; case R_STORAGE: sendit(sock, _("Storage: name=%s address=%s port=%d\n"), res->res_main.hdr.name, res->res_main.address, res->res_main.port); break; default: sendit(sock, _("Unknown resource type %d in dump_resource.\n"), type); break; } if (recurse) { next = GetNextRes(0, (RES *)res); if (next) { dump_resource(type, next, sendit, sock); } } } /* * Free memory of resource -- called when daemon terminates. * NB, we don't need to worry about freeing any references * to other resources as they will be freed when that * resource chain is traversed. Mainly we worry about freeing * allocated strings (names). 
*/ void free_resource(RES *sres, int type) { URES *res = (URES *)sres; if (res == NULL) return; /* common stuff -- free the resource name and description */ if (res->res_monitor.hdr.name) { free(res->res_monitor.hdr.name); } if (res->res_monitor.hdr.desc) { free(res->res_monitor.hdr.desc); } switch (type) { case R_MONITOR: if (res->res_monitor.password) { free(res->res_monitor.password); } if (res->res_monitor.command_dir) { free(res->res_monitor.command_dir); } break; case R_DIRECTOR: case R_CLIENT: case R_STORAGE: delete res->res_main.mutex; free_bsock(res->res_main.bs); if (res->res_main.wrk) { worker_stop(res->res_main.wrk); res->res_main.wrk = NULL; } if (res->res_main.address) { free(res->res_main.address); } if (res->res_main.tls_ctx) { free_tls_context(res->res_main.tls_ctx); } if (res->res_main.tls_ca_certfile) { free(res->res_main.tls_ca_certfile); } if (res->res_main.tls_ca_certdir) { free(res->res_main.tls_ca_certdir); } if (res->res_main.tls_certfile) { free(res->res_main.tls_certfile); } if (res->res_main.tls_keyfile) { free(res->res_main.tls_keyfile); } if (res->res_main.jobs) { delete res->res_main.jobs; } if (res->res_main.clients) { delete res->res_main.clients; } if (res->res_main.filesets) { delete res->res_main.filesets; } if (res->res_main.pools) { delete res->res_main.pools; } if (res->res_main.storages) { delete res->res_main.storages; } if (res->res_main.running_jobs) { delete res->res_main.terminated_jobs; } break; default: printf(_("Unknown resource type %d in free_resource.\n"), type); } /* Common stuff again -- free the resource, recurse to next one */ if (res) { free(res); } } /* * Save the new resource by chaining it into the head list for * the resource. If this is pass 2, we update any resource * pointers because they may not have been defined until * later in pass 1. */ bool save_resource(CONFIG *config, int type, RES_ITEM *items, int pass) { int rindex = type - r_first; int i, size; int error = 0; /* * Ensure that all required items are present */ for (i=0; items[i].name; i++) { if (items[i].flags & ITEM_REQUIRED) { if (!bit_is_set(i, res_all.res_monitor.hdr.item_present)) { Mmsg(config->m_errmsg, _("\"%s\" directive is required in \"%s\" resource, but not found.\n"), items[i].name, resources[rindex].name); return false; } } /* If this triggers, take a look at lib/parse_conf.h */ if (i >= MAX_RES_ITEMS) { Mmsg(config->m_errmsg, _("Too many directives in \"%s\" resource\n"), resources[rindex].name); return false; } } /* * During pass 2 in each "store" routine, we looked up pointers * to all the resources referrenced in the current resource, now we * must copy their addresses from the static record to the allocated * record. */ if (pass == 2) { switch (type) { /* Resources not containing a resource */ case R_STORAGE: case R_DIRECTOR: case R_CLIENT: case R_MONITOR: break; default: Emsg1(M_ERROR, 0, _("Unknown resource type %d in save_resource.\n"), type); error = 1; break; } /* Note, the resource name was already saved during pass 1, * so here, we can just release it. 
*/ if (res_all.res_monitor.hdr.name) { free(res_all.res_monitor.hdr.name); res_all.res_monitor.hdr.name = NULL; } if (res_all.res_monitor.hdr.desc) { free(res_all.res_monitor.hdr.desc); res_all.res_monitor.hdr.desc = NULL; } return true; } /* * The following code is only executed during pass 1 */ switch (type) { case R_MONITOR: size = sizeof(MONITOR); break; case R_CLIENT: case R_STORAGE: case R_DIRECTOR: // We need to initialize the mutex res_all.res_main.mutex = new QMutex(); res_all.res_main.wrk = worker_start(); size = sizeof(RESMON); break; default: printf(_("Unknown resource type %d in save_resource.\n"), type); error = 1; size = 1; break; } /* Common */ if (!error) { res_all.res_main.type = type; if (!config->insert_res(rindex, size)) { return false; } } return true; } bool parse_tmon_config(CONFIG *config, const char *configfile, int exit_code) { config->init(configfile, error_handler, exit_code, (void *)&res_all, res_all_size, r_first, r_last, resources, &res_head); return config->parse_config(); } bacula-15.0.3/src/qt-console/tray-monitor/tray-ui.h0000644000175000017500000003702114771010173021747 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #ifndef TRAYUI_H #define TRAYUI_H #include "common.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "fdstatus.h" #include "sdstatus.h" #include "dirstatus.h" #include "conf.h" #include "runjob.h" #include "restorewizard.h" #include "desktop-gui/cdp-main-ui.h" #include "backupservice.h" #include "cdp.h" void display_error(const char *fmt, ...); int tls_pem_callback(char *buf, int size, const void *userdata); class TrayUI: public QMainWindow { Q_OBJECT public: QWidget *centralwidget; QTabWidget *tabWidget; QStatusBar *statusbar; QSystemTrayIcon *tray; QSpinBox *spinRefresh; QTimer *timer; bool have_systray; RestoreWizard *restorewiz; POOLMEM *spool_dir; POOLMEM *journal_path; Journal *journal; BackupService *bservice; CdpUi *cdpUi; TrayUI(): QMainWindow(), tabWidget(NULL), statusbar(NULL), tray(NULL), spinRefresh(NULL), timer(NULL), have_systray(QSystemTrayIcon::isSystemTrayAvailable()), restorewiz(NULL) { } ~TrayUI() { } void addTab(RESMON *r) { QWidget *tab; QString t = QString(r->hdr.name); if (r->tls_enable) { char buf[512]; /* Generate passphrase prompt */ bsnprintf(buf, sizeof(buf), "Passphrase for \"%s\" TLS private key: ", r->hdr.name); /* Initialize TLS context: * Args: CA certfile, CA certdir, Certfile, Keyfile, * Keyfile PEM Callback, Keyfile CB Userdata, DHfile, Verify Peer */ r->tls_ctx = new_tls_context(r->tls_ca_certfile, r->tls_ca_certdir, r->tls_certfile, r->tls_keyfile, tls_pem_callback, &buf, NULL, true); if (!r->tls_ctx) { display_error(_("Failed to initialize TLS context for \"%s\".\n"), r->hdr.name); } } if (r->tls_psk_enable) { r->psk_ctx = new_psk_context(NULL /*r->password*/); } switch(r->type) { case 
R_CLIENT: tab = new FDStatus(r); break; case R_STORAGE: tab = new SDStatus(r); break; case R_DIRECTOR: tab = new DIRStatus(r); break; default: return; } tabWidget->setUpdatesEnabled(false); tabWidget->addTab(tab, t); tabWidget->setUpdatesEnabled(true); } void clearTabs() { tabWidget->setUpdatesEnabled(false); for(int i = tabWidget->count() - 1; i >= 0; i--) { QWidget *w = tabWidget->widget(i); tabWidget->removeTab(i); delete w; } tabWidget->setUpdatesEnabled(true); tabWidget->update(); } void startTimer() { if (!timer) { timer = new QTimer(this); connect(timer, SIGNAL(timeout()), this, SLOT(refresh_screen())); } timer->start(spinRefresh->value()*1000); } void setupUi(QMainWindow *TrayMonitor, MONITOR *mon) { QPushButton *menubp = NULL; timer = NULL; if (TrayMonitor->objectName().isEmpty()) TrayMonitor->setObjectName(QString::fromUtf8("TrayMonitor")); TrayMonitor->setWindowIcon(QIcon(":/images/tray-monitor-logo.png")); TrayMonitor->resize(789, 595); centralwidget = new QWidget(TrayMonitor); centralwidget->setObjectName(QString::fromUtf8("centralwidget")); QVBoxLayout *verticalLayout = new QVBoxLayout(centralwidget); verticalLayout->setObjectName(QString::fromUtf8("verticalLayout")); tabWidget = new QTabWidget(centralwidget); tabWidget->setObjectName(QString::fromUtf8("tabWidget")); tabWidget->setTabPosition(QTabWidget::North); tabWidget->setTabShape(QTabWidget::Rounded); tabWidget->setTabsClosable(false); verticalLayout->addWidget(tabWidget); QDialogButtonBox *buttonBox = new QDialogButtonBox(centralwidget); buttonBox->setObjectName(QString::fromUtf8("buttonBox")); if (have_systray) { buttonBox->setStandardButtons(QDialogButtonBox::Close); connect(buttonBox, SIGNAL(rejected()), this, SLOT(cb_show())); } else { /* Here we can display something else, now it's just a simple menu */ menubp = new QPushButton(tr("&Options")); buttonBox->addButton(menubp, QDialogButtonBox::ActionRole); } TrayMonitor->setCentralWidget(centralwidget); statusbar = new QStatusBar(TrayMonitor); statusbar->setObjectName(QString::fromUtf8("statusbar")); TrayMonitor->setStatusBar(statusbar); QHBoxLayout *hLayout = new QHBoxLayout(); QLabel *refreshlabel = new QLabel(centralwidget); refreshlabel->setText("Refresh:"); hLayout->addWidget(refreshlabel); spinRefresh = new QSpinBox(centralwidget); QSizePolicy sizePolicy(QSizePolicy::Fixed, QSizePolicy::Fixed); sizePolicy.setHorizontalStretch(0); sizePolicy.setVerticalStretch(0); sizePolicy.setHeightForWidth(spinRefresh->sizePolicy().hasHeightForWidth()); spinRefresh->setSizePolicy(sizePolicy); spinRefresh->setMinimum(1); spinRefresh->setMaximum(600); spinRefresh->setSingleStep(10); spinRefresh->setValue(mon?mon->RefreshInterval:60); hLayout->addWidget(spinRefresh); hLayout->addWidget(buttonBox); verticalLayout->addLayout(hLayout); //QSystemTrayIcon::isSystemTrayAvailable tray = new QSystemTrayIcon(TrayMonitor); QMenu* stmenu = new QMenu(TrayMonitor); #if QT_VERSION >= 0x050000 QAction *actShow = new QAction(QApplication::translate("TrayMonitor", "Display", 0),TrayMonitor); QAction* actQuit = new QAction(QApplication::translate("TrayMonitor", "Quit", 0),TrayMonitor); QAction* actAbout = new QAction(QApplication::translate("TrayMonitor", "About", 0),TrayMonitor); QAction* actRun = new QAction(QApplication::translate("TrayMonitor", "Run...", 0),TrayMonitor); QAction* actRes = new QAction(QApplication::translate("TrayMonitor", "Restore...", 0),TrayMonitor); QAction* actCDP = new QAction(QApplication::translate("TrayMonitor", "Watch...", 0),TrayMonitor); QAction* actConf = new 
QAction(QApplication::translate("TrayMonitor", "Configure...", 0),TrayMonitor); #else QAction *actShow = new QAction(QApplication::translate("TrayMonitor", "Display", 0, QApplication::UnicodeUTF8),TrayMonitor); QAction* actQuit = new QAction(QApplication::translate("TrayMonitor", "Quit", 0, QApplication::UnicodeUTF8),TrayMonitor); QAction* actAbout = new QAction(QApplication::translate("TrayMonitor", "About", 0, QApplication::UnicodeUTF8),TrayMonitor); QAction* actRun = new QAction(QApplication::translate("TrayMonitor", "Run...", 0, QApplication::UnicodeUTF8),TrayMonitor); QAction* actRes = new QAction(QApplication::translate("TrayMonitor", "Restore...", 0, QApplication::UnicodeUTF8),TrayMonitor); QAction* actCDP = new QAction(QApplication::translate("TrayMonitor", "Watch...", 0, QApplication::UnicodeUTF8),TrayMonitor); QAction* actConf = new QAction(QApplication::translate("TrayMonitor", "Configure...", 0, QApplication::UnicodeUTF8),TrayMonitor); #endif stmenu->addAction(actShow); stmenu->addAction(actRun); stmenu->addAction(actRes); stmenu->addAction(actCDP); stmenu->addSeparator(); stmenu->addAction(actConf); stmenu->addSeparator(); stmenu->addAction(actAbout); stmenu->addSeparator(); stmenu->addAction(actQuit); connect(actRun, SIGNAL(triggered()), this, SLOT(cb_run())); connect(actShow, SIGNAL(triggered()), this, SLOT(cb_show())); connect(actConf, SIGNAL(triggered()), this, SLOT(cb_conf())); connect(actRes, SIGNAL(triggered()), this, SLOT(cb_restore())); connect(actCDP, SIGNAL(triggered()), this, SLOT(cb_cdp())); connect(actQuit, SIGNAL(triggered()), this, SLOT(cb_quit())); connect(actAbout, SIGNAL(triggered()), this, SLOT(cb_about())); connect(spinRefresh, SIGNAL(valueChanged(int)), this, SLOT(cb_refresh(int))); connect(tray, SIGNAL(activated(QSystemTrayIcon::ActivationReason)), this, SLOT(cb_trayIconActivated(QSystemTrayIcon::ActivationReason))); tray->setContextMenu(stmenu); QIcon icon(":/images/tray-monitor-logo.png"); tray->setIcon(icon); tray->setToolTip(QString("Bacula Tray Monitor")); tray->show(); retranslateUi(TrayMonitor); QMetaObject::connectSlotsByName(TrayMonitor); startTimer(); /* When we don't have the systemtray, we keep the menu, but disabled */ if (!have_systray) { actShow->setEnabled(false); menubp->setMenu(stmenu); TrayMonitor->show(); } spool_dir = CDP::spoolDir(); mkdir(spool_dir, 0750); journal_path = CDP::journalPath(JOURNAL_CLI_FNAME); journal = new Journal(); bservice = new BackupService(); journal->setJournalPath(journal_path, spool_dir); bservice->start(spool_dir, journal); cdpUi = new CdpUi(bservice); } // setupUi void retranslateUi(QMainWindow *TrayMonitor) { #if QT_VERSION >= 0x050000 TrayMonitor->setWindowTitle(QApplication::translate("TrayMonitor", "Bacula Tray Monitor", 0)); #else TrayMonitor->setWindowTitle(QApplication::translate("TrayMonitor", "Bacula Tray Monitor", 0, QApplication::UnicodeUTF8)); #endif } // retranslateUi private slots: void cb_quit() { QApplication::quit(); } void cb_refresh(int val) { if (timer) { timer->setInterval(val*1000); } } void cb_about() { QMessageBox::about(this, "Bacula Tray Monitor", "Bacula Tray Monitor\n" "For more information, see: www.bacula.org\n" "Copyright (C) 1999-2021, Kern Sibbald.\n" "All rights reserved."); } RESMON *get_director() { QStringList dirs; RESMON *d, *director=NULL; bool ok; foreach_res(d, R_DIRECTOR) { if (!director) { director = d; } dirs << QString(d->hdr.name); } foreach_res(d, R_CLIENT) { if (d->use_remote) { if (!director) { director = d; } dirs << QString(d->hdr.name); } } if 
(dirs.count() > 1) { /* TODO: Set Modal attribute */ QString dir = QInputDialog::getItem(this, _("Select a Director"), "Director:", dirs, 0, false, &ok, 0); if (!ok) { return NULL; } if (ok && !dir.isEmpty()) { char *p = dir.toUtf8().data(); foreach_res(d, R_DIRECTOR) { if (strcmp(p, d->hdr.name) == 0) { director = d; break; } } foreach_res(d, R_CLIENT) { if (strcmp(p, d->hdr.name) == 0) { director = d; break; } } } } if (dirs.count() == 0 || director == NULL) { /* We need the proxy feature */ display_error("No Director defined"); return NULL; } return director; } void cb_run() { RESMON *dir = get_director(); if (!dir) { return; } task *t = new task(); connect(t, SIGNAL(done(task *)), this, SLOT(run_job(task *)), Qt::QueuedConnection); t->init(dir, TASK_RESOURCES); dir->wrk->queue(t); } void cb_cdp() { cdpUi->show(); } void refresh_item() { /* Probably do only the first one */ int oldnbjobs = 0; for (int i=tabWidget->count() - 1; i >= 0; i--) { ResStatus *s = (ResStatus *) tabWidget->widget(i); if (s->res->use_monitor) { s->res->mutex->lock(); if (s->res->running_jobs) { oldnbjobs += s->res->running_jobs->size(); } s->res->mutex->unlock(); } if (isVisible() || s->res->use_monitor) { s->doUpdate(); } } /* We need to find an other way to compute running jobs */ if (oldnbjobs) { QString q; tray->setIcon(QIcon(":/images/R.png")); tray->setToolTip(q.sprintf("Bacula Tray Monitor - %d job%s running", oldnbjobs, oldnbjobs>1?"s":"")); //tray->showMessage(); Can use this function to display a popup } else { tray->setIcon(QIcon(":/images/cartridge1.png")); tray->setToolTip("Bacula Tray Monitor"); } } void cb_conf() { new Conf(); } void cb_restore() { RESMON *dir = get_director(); if (!dir) { return; } task *t = new task(); connect(t, SIGNAL(done(task *)), this, SLOT(start_restore_wizard(task *)), Qt::QueuedConnection); connect(t, SIGNAL(done(task*)), t, SLOT(deleteLater())); t->init(dir, TASK_RESOURCES); dir->wrk->queue(t); } void cb_trayIconActivated(QSystemTrayIcon::ActivationReason r) { if (r == QSystemTrayIcon::Trigger) { cb_show(); } } void refresh_screen() { refresh_item(); } void cb_show() { if (isVisible()) { hide(); } else { refresh_item(); show(); } } public slots: void task_done(task *t) { Dmsg0(0, "Task done!\n"); t->deleteLater(); } void run_job(task *t) { Dmsg0(0, "Task done!\n"); RESMON *dir = t->res; t->deleteLater(); new RunJob(dir); } void start_restore_wizard(task *t) { restorewiz = new RestoreWizard(t->res); restorewiz->show(); } }; #endif /* TRAYUI_H */ bacula-15.0.3/src/qt-console/tray-monitor/main-conf.ui0000644000175000017500000002305014771010173022407 0ustar bsbuildbsbuild Conf 0 0 556 337 Configuration 0 Monitor Configuration QFormLayout::AllNonFixedFieldsGrow The Monitor name will be used during the authentication phase. Name: The Monitor name will be used during the authentication phase. 127 Refresh Interval: 5 9999 120 Specify the "Command Directory" where the tray-monitor program will check regularly for jobs to run Command Directory: Specify the "Command Directory" where the tray-monitor program will check regularly for jobs to run ... 
Display or Hide advanced options in the "Run Job" window Display Advanced Options: Display or Hide advanced options in the "Run Job" window Save and Apply the changes Save :/images/label.png:/images/label.png Cancel :/images/A.png:/images/A.png Qt::Vertical 20 40 Show/Hide Passwords Password :/images/zoom.png:/images/zoom.png Add Client resource to monitor Client :/images/mark.png:/images/mark.png Add Storage resource to monitor Storage :/images/mark.png:/images/mark.png Add Director resource to monitor Director :/images/mark.png:/images/mark.png bpSave clicked() Conf accept() 511 30 521 46 bpCancel clicked() Conf close() 511 76 521 159 bpStrip clicked() Conf togglePassword() 511 178 496 142 bpAddClient clicked() Conf addClient() 511 239 521 245 bpAddStorage clicked() Conf addStore() 511 272 521 289 bpAddDir clicked() Conf addDir() 511 313 521 331 bpCommandDir clicked() Conf selectCommandDir() 405 135 466 112 togglePassword() addClient() addStore() addDir() selectCommandDir() bacula-15.0.3/src/qt-console/tray-monitor/JobListItem.qml0000644000175000017500000000641514771010173023107 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2020 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ import QtQuick 2.10 import QtQuick.Window 2.10 import QtQuick.Layouts 1.3 import QtQuick.Controls 2.3 import QtQuick.Controls.Material 2.1 import QtQuick.Dialogs 1.2 Item { id: listItem width: parent.width height: childrenRect.height property alias jobId: jobId.text property alias name: name.text property alias level: level.text property alias finfo: finfo.text property alias errorCount: errors.text property alias clickArea: clickArea Rectangle { anchors.left: parent.left anchors.right: parent.right anchors.top: parent.top anchors.bottom: divider.bottom color: "#e0e0e0" visible: clickArea.pressed } Text { id: idLabel text: "Id:" anchors.top: parent.top anchors.topMargin: 16 anchors.left: parent.left anchors.leftMargin: 16 } Text { id: jobId anchors.top: parent.top anchors.topMargin: 16 anchors.left: idLabel.right anchors.leftMargin: 8 } Text { id: errors color: "#d32f2f" anchors.top: parent.top anchors.topMargin: 16 anchors.right: parent.right anchors.rightMargin: 12 } Text { id: nameLabel text: "Name:" anchors.top: idLabel.bottom anchors.left: parent.left anchors.leftMargin: 16 anchors.topMargin: 8 } Text { id: name elide: Text.ElideRight anchors.top: idLabel.bottom anchors.topMargin: 8 anchors.right: parent.right anchors.rightMargin: 12 anchors.left: nameLabel.right anchors.leftMargin: 8 } Text { id: levelLabel text: "Level:" anchors.top: nameLabel.bottom anchors.left: parent.left anchors.leftMargin: 16 anchors.topMargin: 8 } Text { id: level anchors.top: nameLabel.bottom anchors.left: levelLabel.right anchors.leftMargin: 8 anchors.topMargin: 8 } Text { id: finfo anchors.top: nameLabel.bottom anchors.topMargin: 8 anchors.right: parent.right anchors.rightMargin: 12 anchors.leftMargin: 8 } MouseArea { id: clickArea anchors.left: parent.left anchors.right: parent.right anchors.top: parent.top anchors.bottom: 
divider.bottom } Rectangle { id: divider anchors.left: parent.left anchors.right: parent.right anchors.top: levelLabel.bottom anchors.topMargin: 16 height: 1 color: "#e0e0e0" } } bacula-15.0.3/src/qt-console/tray-monitor/respanel-ui-controller.h0000644000175000017500000001413114771010173024757 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #ifndef RESPANELGUICONTROLLER_H #define RESPANELGUICONTROLLER_H #include #include #include "tray_conf.h" #include "config-storage.h" #include "resmodel.h" #include #include "jcr.h" #include "task.h" #include "common.h" #include "jobmodel.h" #include "runjobmodel.h" #include "restorejobmodel.h" /* TrayUiController - Controls the screen displayed when a user selects one resource (Director, File Daemon or Storage Daemon). It allows the user to: 1 - Change the data that's required to connect with the resource (name, password, address and port) 2 - Connect to the resource, and therefore see it's terminated jobs and running jobs */ class ResPanelUiController : public QObject { Q_OBJECT // Data related to the resource that the user selected Q_PROPERTY(ResourceModel *resModel WRITE setResModel) Q_PROPERTY(QString resType WRITE setResType) //These can be edited by the user Q_PROPERTY(QString resourceName READ resourceName WRITE setResourceName NOTIFY resourceNameChanged) Q_PROPERTY(QString resourcePassword READ resourcePassword WRITE setResourcePassword NOTIFY resourcePasswordChanged) Q_PROPERTY(QString resourceAddress READ resourceAddress WRITE setResourceAddress NOTIFY resourceAddressChanged) Q_PROPERTY(QString resourcePort READ resourcePort WRITE setResourcePort NOTIFY resourcePortChanged) Q_PROPERTY(bool remoteClient READ isRemoteClient WRITE setIsRemoteClient NOTIFY isRemoteClientChanged) //TODO merge both into a variable 'dialogMsg' // Message displayed on the GUI dialogs Q_PROPERTY(QString successMsg READ successMsg WRITE setSuccessMessage NOTIFY successMessageChanged) Q_PROPERTY(QString errorMsg READ errorMsg WRITE setErrorMessage NOTIFY errorMessageChanged) private: QString m_resourceName; QString m_resourcePassword; QString m_resourceAddress; QString m_resourcePort; QString m_successMsg; QString m_errorMsg; bool m_remoteClient; ConfigStorage *m_storage = NULL; rescode m_resCode; RESMON *m_res = NULL; public: explicit ResPanelUiController(QObject *parent = nullptr); ~ResPanelUiController(); void setResCode(rescode code) { if (m_resCode == code) { return; } m_resCode = code; emit resCodeChanged(); } // Resource Name QString resourceName() { return m_resourceName; } void setResourceName(const QString &resourceName) { if (resourceName == m_resourceName) return; m_resourceName = resourceName; emit resourceNameChanged(); } // Resource Password QString resourcePassword() { return m_resourcePassword; } void setResourcePassword(const QString &resourcePassword) { if (resourcePassword == m_resourcePassword) return; m_resourcePassword = resourcePassword; emit resourcePasswordChanged(); } // Resource 
Address QString resourceAddress() { return m_resourceAddress; } void setResourceAddress(const QString &resourceAddress) { if (resourceAddress == m_resourceAddress) return; m_resourceAddress = resourceAddress; emit resourceAddressChanged(); } // Resource Port QString resourcePort() { return m_resourcePort; } void setResourcePort(const QString &resourcePort) { if (resourcePort == m_resourcePort) return; m_resourcePort = resourcePort; emit resourcePortChanged(); } // Remote Client bool isRemoteClient() { return m_remoteClient; } void setIsRemoteClient(bool remote) { if (m_remoteClient == remote) { return; } m_remoteClient = remote; emit isRemoteClientChanged(); } // Dialog Success Message QString successMsg() { return m_successMsg; } void setSuccessMessage(const QString &successMsg) { m_successMsg = successMsg; emit successMessageChanged(); } // Dialog Error Message QString errorMsg() { return m_errorMsg; } void setErrorMessage(const QString &errorMsg) { m_errorMsg = errorMsg; emit errorMessageChanged(); } // Model (If Edition Mode) void setResModel(ResourceModel *resModel) { m_res = resModel->resource(); setResCode(m_res->code); setResourceName(m_res->hdr.name); setResourcePassword(m_res->password); setResourceAddress(m_res->address); setResourcePort(QString::number(m_res->port)); setIsRemoteClient(m_res->use_remote); } // Called if Save Mode void setResType(QString resType) { m_res = NULL; setResourceName(""); setResourcePassword(""); setResourceAddress(""); setIsRemoteClient(false); if(resType == "1") { setResCode(R_DIRECTOR); setResourcePort("9101"); } else if(resType == "2") { setResCode(R_STORAGE); setResourcePort("9103"); } else { setResCode(R_CLIENT); setResourcePort("9102"); } } signals: // Events that are emitted to our GUI code to inform changes in our data void resourceNameChanged(); void resourcePasswordChanged(); void resourceAddressChanged(); void resourcePortChanged(); void successMessageChanged(); void errorMessageChanged(); void isRemoteClientChanged(); void resCodeChanged(); public slots: // Save the changes made to this resource on bacula-tray-monitor.conf void saveChanges(); bool isClient() { return m_resCode == R_CLIENT; } }; #endif // RESPANELGUICONTROLLER_H bacula-15.0.3/src/qt-console/tray-monitor/jobsmodel.cpp0000644000175000017500000000444114771010173022666 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
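*/

/*
 * JobsModel -- read-only QAbstractTableModel with four columns (Id, Date,
 * Cache, Name), backed by the list of rows passed to the constructor.
 *
 * Illustrative use, assuming a standard Qt item view (not taken from this
 * file):
 *
 *   JobsModel *model = new JobsModel(rows, this);
 *   tableView->setModel(model);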
*/ #include "jobsmodel.h" JobsModel::JobsModel(const QList& t, QObject *parent) : QAbstractTableModel(parent) , table(t) { } QVariant JobsModel::headerData(int section, Qt::Orientation orientation, int role) const { if (role != Qt::DisplayRole) return QVariant(); if (orientation == Qt::Horizontal) { switch (section) { case ID_COLUMN: return tr("Id"); case TDATE_COLUMN: return tr("Date"); case HASCACHE_COLUMN: return tr("Cache"); case NAME_COLUMN: return tr("Name"); default: return QVariant(); } } return QVariant(); return QVariant(); } int JobsModel::rowCount(const QModelIndex &parent) const { if (parent.isValid()) return 0; return table.size(); } int JobsModel::columnCount(const QModelIndex &parent) const { if (parent.isValid()) return 0; return NUM_COLUMN; } QVariant JobsModel::data(const QModelIndex &index, int role) const { if (!index.isValid()) return QVariant(); if (index.row() >= table.size() || index.row() < 0) return QVariant(); if (index.column() >= NUM_COLUMN || index.column() < 0) return QVariant(); if (role == Qt::DisplayRole) { row_struct row = table.at(index.row()); switch(index.column()) { case 0: return quint64(row.id); break; case 1: return row.tdate; break; case 2: return row.hasCache; break; case 3: return row.name; break; } } return QVariant(); } bacula-15.0.3/src/qt-console/tray-monitor/ResourceDetailsPage.qml0000644000175000017500000001674714771010173024625 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2020 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ import QtQuick 2.10 import QtQuick.Window 2.10 import QtQuick.Layouts 1.3 import QtQuick.Controls 2.3 import QtQuick.Controls.Material 2.1 import QtQuick.Dialogs 1.2 import io.qt.bmob.resdetailscontroller 1.0 Page { id: resDetailsPage visible: true height: parent ? parent.height : 0 width: parent ? parent.width : 0 property var resModel; property var resType; ResDetailsUiController { id: resDetailsController onSuccessMessageChanged: { titleLabel.text = resDetailsController.resourceName dialog.text = resDetailsController.successMsg dialog.open() } onErrorMsgChanged: { dialog.text = resDetailsController.errorMsg dialog.open() } Component.onCompleted: { resModel = resDetailsPage.resModel connectToResource(); } onConnectionStarted: { connStatus.text = "Connecting..." 
connStatus.color = "#fff176" } onConnectionError: { connStatus.text = connectionError connStatus.color = "#ffcdd2" } onConnectionSuccess: { connStatus.text = "Connected" connStatus.color = "#66bb6a" resStatusPage.connected = true bar.visible = true } // "model" here is received as an argument onRunJobModelCreated: { stackView.push( Qt.resolvedUrl("RunJobPage.qml"), {"model" : model} ) } onRestoreModelCreated: { stackView.push( Qt.resolvedUrl("RestoreJobPage.qml"), {"model" : model} ) } } onVisibleChanged: { if (visible) { resDetailsController.connectToResource() } } header: ToolBar { id: toolbar height: 48 background: Rectangle { color: "#d32f2f" } RowLayout { anchors.fill: parent anchors.leftMargin: 8 anchors.rightMargin: 8 ToolButton { id: backButton onClicked: stackView.pop() anchors.left: parent.left contentItem: Text { text: qsTr("<") font.pixelSize: 24 opacity: enabled ? 1.0 : 0.3 color: "white" } background: Rectangle { color: backButton.down ? "#b71c1c" : "#d32f2f" } } Label { id: titleLabel text: resDetailsController.resourceName color: "white" font.pixelSize: 18 anchors.centerIn: parent } } } Rectangle { id: connStatus anchors.top: parent.top anchors.left: parent.left anchors.right: parent.right height: childrenRect.height color: "#ffcdd2" property alias text: statusLabel.text Label { id: statusLabel text: "Not Connected" anchors.horizontalCenter: parent.horizontalCenter topPadding: 4 bottomPadding: 4 color: "#000000" } } Flickable { id: scrollableBox anchors.top: connStatus.bottom anchors.bottom: parent.bottom width: parent.width contentWidth: parent.width contentHeight: scrollContent.height clip: true Item { id: scrollContent width: parent.width height: childrenRect.height ResourceStatusPage { id: resStatusPage } Rectangle { id: divider anchors.left: parent.left anchors.right: parent.right anchors.top: resStatusPage.bottom anchors.topMargin: 16 height: 1 color: "#e0e0e0" } Text { id: rjobsLabel text: "Running Jobs" font.pixelSize: 18 anchors.top: divider.bottom anchors.topMargin: 24 anchors.horizontalCenter: parent.horizontalCenter } ListView { id: runningJobsPage model: resDetailsController.runningJobs height: childrenRect.height anchors.top: rjobsLabel.bottom anchors.topMargin: 12 anchors.left: parent.left anchors.right: parent.right interactive: false boundsBehavior: Flickable.StopAtBounds delegate: JobListItem { jobId: model.modelData.id name: model.modelData.name level: model.modelData.level finfo: model.modelData.fileInfo errorCount: model.modelData.errorCount } Text { text: "No jobs are running" anchors.left: parent.left anchors.leftMargin: 24 anchors.top: parent.top anchors.topMargin: 8 visible: parent.count == 0 font.pixelSize: 18 color: "#d32f2f" } } Rectangle { id: divider2 anchors.left: parent.left anchors.right: parent.right anchors.top: runningJobsPage.bottom anchors.topMargin: 24 height: 1 color: "#e0e0e0" } Text { id: tjobsLabel text: "Terminated Jobs" font.pixelSize: 18 anchors.top: divider2.bottom anchors.topMargin: 24 anchors.horizontalCenter: parent.horizontalCenter } ListView { id: terminatedJobsPage model: resDetailsController.terminatedJobs height: childrenRect.height anchors.top: tjobsLabel.bottom anchors.topMargin: 12 anchors.left: parent.left anchors.right: parent.right interactive: false boundsBehavior: Flickable.StopAtBounds delegate: JobListItem { jobId: model.modelData.id name: model.modelData.name level: model.modelData.level finfo: model.modelData.fileInfo errorCount: model.modelData.errorCount } Text { text: "No terminated jobs" anchors.left: 
parent.left anchors.leftMargin: 24 anchors.top: parent.top anchors.topMargin: 8 visible: parent.count == 0 font.pixelSize: 18 color: "#d32f2f" } } } } PulseLoader { useDouble: true visible: resDetailsController.isConnecting radius: 28 color: "#d32f2f" anchors.horizontalCenter: parent.horizontalCenter anchors.bottom: parent.bottom anchors.bottomMargin: 24 } MessageDialog { id: dialog modality: Qt.ApplicationModal standardButtons: StandardButton.Ok visible: false } } bacula-15.0.3/src/qt-console/tray-monitor/fd-config-ui-controller.cpp0000644000175000017500000000333314771010173025337 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #include "fd-config-ui-controller.h" FdConfigUiController::FdConfigUiController(QObject *parent): QObject(parent) {} void FdConfigUiController::readFileDaemonConfig() { QString fileName = QStandardPaths::writableLocation(QStandardPaths::AppDataLocation) + "/etc/bacula-fd.conf"; if (QFile::exists(fileName)) { QFile file(fileName); if (file.open(QFile::ReadOnly)) { QTextStream stream(&file); QString fdConfig = stream.readAll(); file.close(); emit fileDaemonConfigRead(fdConfig); } } else { emit fileDaemonConfigRead("ERROR - File Daemon config file not found"); } } void FdConfigUiController::writeFileDaemonConfig(QString fcontents) { QString filePath = QStandardPaths::writableLocation(QStandardPaths::AppDataLocation) + "/etc/bacula-fd.conf"; QFile file(filePath); if (!file.open(QFile::WriteOnly | QFile::Truncate | QFile::Text)) { return; } file.write(fcontents.toUtf8().constData()); file.close(); } FdConfigUiController::~FdConfigUiController() {} bacula-15.0.3/src/qt-console/tray-monitor/ResourceListPage.qml0000644000175000017500000000575314771010173024146 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2020 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ import QtQuick 2.10 import QtQuick.Window 2.10 import QtQuick.Layouts 1.3 import QtQuick.Controls 2.3 import QtQuick.Controls.Material 2.1 import QtQuick.Dialogs 1.2 import io.qt.bmob.traycontroller 1.0 Page { anchors.fill: parent property alias model: resList.model property alias panelVisible: resDetailsPanel.visible property alias resType: resDetailsPanel.resType ListView { id: resList anchors.left: parent.left anchors.right: parent.right anchors.top: parent.top anchors.bottom: resDetailsPanel.visible? 
resDetailsPanel.top : monitorName.top boundsBehavior: Flickable.StopAtBounds clip: true delegate: ResourceListItem { name: model.modelData.resourceName address: model.modelData.resourceAddress port: model.modelData.resourcePort deletable: model.modelData.deletableResource delButton.onClicked: { confirmDialog.text = "Delete resource " + model.modelData.resourceName + " ?" confirmDialog.res = model.modelData confirmDialog.open() } clickArea.onClicked: { resDetailsPanel.resModel = model.modelData resDetailsPanel.visible = true } } } ResourcePanel { id: resDetailsPanel onVisibleChanged: { if (!visible) { controller.fetchClients() controller.fetchDirectors() controller.fetchStorages() } } } TextField { id: monitorName height: childrenRect.height width: parent.width anchors.bottom: parent.bottom placeholderText: qsTr("Monitor name") text: controller.getMonitorName() visible: !resDetailsPanel.visible } Button { id: saveMonitorBtn text: "Save" onClicked: controller.setMonitorName(monitorName.text) anchors.right: parent.right anchors.rightMargin: 16 anchors.verticalCenter: monitorName.verticalCenter visible: !resDetailsPanel.visible contentItem: Text { text: saveMonitorBtn.text font: saveMonitorBtn.font color: saveMonitorBtn.down ? "#ef9a9a" : "#d32f2f" } background: Rectangle { color: "white" } } onVisibleChanged: { if (visible) { monitorName.text = controller.getMonitorName() } } } bacula-15.0.3/src/qt-console/tray-monitor/status.cpp0000644000175000017500000000213014771010173022224 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #include "status.h" #include "lib/worker.h" void ResStatus::doUpdate() { if (count == 0) { task *t = new task(); connect(t, SIGNAL(done(task *)), this, SLOT(taskDone(task *)), Qt::QueuedConnection); t->init(res, TASK_STATUS); res->wrk->queue(t); Dmsg0(0, "doUpdate()\n"); count++; } } void ResStatus::taskDone(task *t) { if (!t->status) { Dmsg2(0, " Task %p failed => %s\n", t, t->errmsg); } delete t; count--; } bacula-15.0.3/src/qt-console/tray-monitor/res-conf.ui0000644000175000017500000003523014771010173022257 0ustar bsbuildbsbuild ResConf 0 0 417 541 Form General QFormLayout::AllNonFixedFieldsGrow The Name will be used only in the Tray Monitor interface Name: The Name will be used only in the Tray Monitor interface 127 Description: 512 Password: 127 QLineEdit::PasswordEchoOnEdit Address: 1024 Port: 0 0 100 16777215 5 Timeout: 0 0 100 16777215 5 Use Client Initiated backup/restore feature Remote Use Client Initiated backup/restore feature Update the tray monitor icon with the status of this component Monitor: Update the tray monitor icon with the status of this component Use SetIp: TLS ... CA Certificate File: Enabled true ... Key File: ... Certificate File: CA Certificate Directory: ... 
cbTLSEnabled editCaCertificateFile label_5 label_6 editCaCertificateDir label_7 editCertificate label_8 editKey bpCaCertificateFile bpCaCertificateDir bpCertificate bpKey 64 16777215 64 0 :/images/purge.png:/images/purge.png false Qt::Horizontal 40 20 editName editDescription editPassword editAddress editPort editTimeout cbRemote cbTLSEnabled editCaCertificateFile bpCaCertificateFile editCaCertificateDir bpCaCertificateDir editCertificate bpCertificate editKey bpKey bpCaCertificateFile clicked() ResConf selectCaCertificateFile() 461 294 521 247 bpCaCertificateDir clicked() ResConf selectCaCertificateDir() 452 334 501 355 bpCertificate clicked() ResConf selectCertificate() 459 364 495 384 bpKey clicked() ResConf selectKey() 461 395 481 410 cbTLSEnabled toggled(bool) editCaCertificateFile setEnabled(bool) 132 271 249 291 cbTLSEnabled toggled(bool) editCaCertificateDir setEnabled(bool) 120 274 203 325 cbTLSEnabled toggled(bool) editCertificate setEnabled(bool) 68 271 220 360 cbTLSEnabled toggled(bool) editKey setEnabled(bool) 51 275 288 392 cbTLSEnabled toggled(bool) bpCaCertificateFile setEnabled(bool) 161 267 449 291 cbTLSEnabled toggled(bool) bpCaCertificateDir setEnabled(bool) 145 271 455 329 cbTLSEnabled toggled(bool) bpCertificate setEnabled(bool) 140 266 459 358 cbTLSEnabled toggled(bool) bpKey setEnabled(bool) 118 272 458 389 selectCaCertificateFile() selectCaCertificateDir() selectCertificate() selectKey() bacula-15.0.3/src/qt-console/tray-monitor/DesignDocument0000644000175000017500000000604314771010173023037 0ustar bsbuildbsbuild*/ /* * Tray Monitor Design Document * * Written by Henrique Faria, February MMXIX * */ This document aims to explain major design decisions related to the Tray Monitor. ###### Goals One of the major requirements of this project is that the Tray Monitor must run on Desktop and Mobile (Android / iOS) platforms, along with the requirement of reusing as much code as possible across all platforms. With that mind, the design must isolate as much platform specific code as possible, thus making it dependant on platform independent code. One such example is the Tray-Monitor User Interface, which contains specific code for Desktop platforms and specific code for Mobile platforms, both sharing the same platform independent controllers and models (See Architecture Design). ###### Interface Design #### User Interface The Tray Monitor UI is implemented with two different Qt libraries: Qt Widgets for Desktop, which is based on C++, and Qt QML for Mobile, which is based on QML / JS. 1-) Desktop http://doc.qt.io/qt-5/qtwidgets-index.html 2-) Mobile http://doc.qt.io/qt-5/qtquick-index.html http://doc.qt.io/qt-5/qmlapplications.html The UI code should depend on platform independent controllers and models. No other part of the Tray Monitor should have dependencies with the UI code. #### Bacula Interface The Tray Monitor may talk via sockets with Bacula's FD, SD or DIR to issue commands or gather information. (See "authenticate.cpp | authenticate.h" and "task.cpp | task.h") (TODO detail network interface or point to a proper document) #### Filesystem Interface The Tray Monitor stores configuration data on files. 
(See "config_storage.cpp | config_storage.h") ###### Architecture Design #### UI The UI is composed of C++ classes (or QML files, if on Mobile platform), with the following responsibilites: 1-) Setup the User Interface 2-) Handle the creation of other screens, windows, dialogs, etc 3-) Read, Write and Watch properties of Models, updating the UI if necessary 4-) Delegate User Input and anything else to the Controller Those classes (or QML files) should have a direct reference to: 1-) Zero or more Controllers (in the majority of the cases, one Controller per UI file should be enough) 2-) Zero or more Models (more than one Model per UI file should be common) #### Controllers The Controllers are C++ classes that extends the QObject class, with the following responsibilites: 1-) Handle actions requested by the User Interface 2-) Talk with external interfaces (See Bacula Interface, Filesystem Interface), if necessary 3-) Update Model properties, if necessary Controllers should have a direct reference to: 1-) Zero or more Models 2-) Objects that access External Interfaces (See Bacula Interface, Filesystem Interface) #### Models The Models are C++ classes that extends the QObject class, with the following responsibilites: 1-) Hold properties and Data Structures The Models should have a direct reference to: 1-) Properties and Data Structures ###### Data Design (TODO show data structures and write brief explanation about them) bacula-15.0.3/src/qt-console/tray-monitor/fdstatus.cpp0000644000175000017500000001222514771010173022544 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ #include "fdstatus.h" #include "../util/fmtwidgetitem.h" #include "jcr.h" void FDStatus::doUpdate() { if (count == 0) { // We do only doUpdate() count++; task *t = new task(); status.pushButton->setEnabled(false); connect(t, SIGNAL(done(task *)), this, SLOT(taskDone(task *)), Qt::QueuedConnection); t->init(res, TASK_STATUS); res->wrk->queue(t); status.statusBar->setText(QString("Trying to connect to FD...")); Dmsg1(50, "doUpdate(%p)\n", res); } } void FDStatus::taskDone(task *t) { int nbjobs=0; count--; if (!t->status) { status.statusBar->setText(QString(t->errmsg)); } else if (isVisible()) { status.statusBar->clear(); if (t->type == TASK_STATUS) { char ed1[50]; struct s_last_job *ljob; struct s_running_job *rjob; res->mutex->lock(); status.labelName->setText(QString(res->name)); status.labelVersion->setText(QString(res->version)); status.labelPlugins->setText(QString(res->plugins)); status.labelStarted->setText(QString(res->started)); status.labelBandwidth->setText(QString(edit_uint64(res->bwlimit, ed1))); /* Clear the table first */ Freeze(*status.tableRunning); Freeze(*status.tableTerminated); QStringList headerlistR = (QStringList() << tr("JobId") << tr("Job") << tr("Level") << tr("Files") << tr("Bytes") << tr("Errors") << tr("Current File")); status.tableRunning->clear(); status.tableRunning->setRowCount(0); status.tableRunning->setColumnCount(headerlistR.count()); status.tableRunning->setHorizontalHeaderLabels(headerlistR); status.tableRunning->setEditTriggers(QAbstractItemView::NoEditTriggers); status.tableRunning->verticalHeader()->hide(); status.tableRunning->setSortingEnabled(true); if (res->running_jobs) { status.tableRunning->setRowCount(res->running_jobs->size()); int row=0; foreach_alist(rjob, res->running_jobs) { int col=0; TableItemFormatter item(*status.tableRunning, row++); item.setNumericFld(col++, QString(edit_uint64(rjob->JobId, ed1))); item.setTextFld(col++, QString(rjob->Job)); item.setJobLevelFld(col++, QString(rjob->JobLevel)); item.setNumericFld(col++, QString(edit_uint64(rjob->JobFiles, ed1))); item.setBytesFld(col++, QString(edit_uint64(rjob->JobBytes, ed1))); item.setNumericFld(col++, QString(edit_uint64(rjob->Errors, ed1))); item.setNumericFld(col++, QString(rjob->CurrentFile)); nbjobs++; } } else { Dmsg0(0, "Strange, the list is NULL\n"); } QStringList headerlistT = (QStringList() << tr("JobId") << tr("Job") << tr("Level") << tr("Status") << tr("Files") << tr("Bytes") << tr("Errors")); status.tableTerminated->clear(); status.tableTerminated->setRowCount(0); status.tableTerminated->setColumnCount(headerlistT.count()); status.tableTerminated->setHorizontalHeaderLabels(headerlistT); status.tableTerminated->setEditTriggers(QAbstractItemView::NoEditTriggers); status.tableTerminated->verticalHeader()->hide(); status.tableTerminated->setSortingEnabled(true); if (res->terminated_jobs) { status.tableTerminated->setRowCount(res->terminated_jobs->size()); int row=0; foreach_dlist(ljob, res->terminated_jobs) { int col=0; TableItemFormatter item(*status.tableTerminated, row++); item.setNumericFld(col++, QString(edit_uint64(ljob->JobId, ed1))); item.setTextFld(col++, QString(ljob->Job)); item.setJobLevelFld(col++, QString(ljob->JobLevel)); item.setJobStatusFld(col++, QString(ljob->JobStatus)); item.setNumericFld(col++, QString(edit_uint64(ljob->JobFiles, ed1))); item.setBytesFld(col++, QString(edit_uint64(ljob->JobBytes, ed1))); item.setNumericFld(col++, QString(edit_uint64(ljob->Errors, ed1))); } } else { Dmsg0(0, "Strange, the list is NULL\n"); } res->mutex->unlock(); 
} Dmsg1(50, " Task %p OK\n", t); } t->deleteLater(); status.pushButton->setEnabled(true); } bacula-15.0.3/src/qt-console/tray-monitor/authenticate.cpp0000644000175000017500000001600114771010173023361 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * * Bacula authentication. Provides authentication with * File and Storage daemons. * * Nicolas Boichat, August MMIV * * This routine runs as a thread and must be thread reentrant. * * Basic tasks done here: * */ #include "tray-monitor.h" #ifndef COMMUNITY #define UA_VERSION 1 /* Enterprise */ #else #define UA_VERSION 100 /* Community */ #endif /* Commands sent to Director */ static char DIRhello[] = "Hello %s calling %d tlspsk=%d\n"; static char SDhello[] = "Hello SD: Bacula Director %s calling %d tlspsk=%d\n"; /* Response from Director */ static char DIROKhello[] = "1000 OK:"; /* Commands sent to File daemon and received * from the User Agent */ static char FDhello[] = "Hello Director %s calling %d tlspsk=%d\n"; /* Response from SD */ static char SDOKhello[] = "3000 OK Hello"; /* Response from FD */ static char FDOKhello[] = "2000 OK Hello"; /* Forward referenced functions */ class GUIAuthenticate: public AuthenticateBase { public: GUIAuthenticate(JCR *jcr, BSOCK *bsock, int rm_cls); virtual ~GUIAuthenticate() {}; int authenticate_daemon(MONITOR *mon, RESMON *res); }; //AuthenticateBase(jcr, jcr->dir_bsock, dtSrv, dcFD, dcDIR) GUIAuthenticate::GUIAuthenticate(JCR *jcr, BSOCK *bsock, int rm_cls): AuthenticateBase(jcr, bsock, dtCli, dcGUI, rm_cls) { } int authenticate_daemon(JCR *jcr, MONITOR *mon, RESMON *res) { int rm_cls=res->type==R_DIRECTOR?AuthenticateBase::dcDIR:(res->type==R_STORAGE?AuthenticateBase::dcSD:AuthenticateBase::dcFD); return GUIAuthenticate(jcr, res->bs, rm_cls).authenticate_daemon(mon, res); } int GUIAuthenticate::authenticate_daemon(MONITOR *mon, RESMON *res) { BSOCK *bs = bsock; char bashed_name[MAX_NAME_LENGTH]; char *p=NULL; /* void AuthenticateBase::CalcLocalTLSNeedFromRes(bool tls_enable, bool tls_require, bool atls_authenticate, bool atls_verify_peer, alist *atls_verify_list, TLS_CONTEXT *atls_ctx, bool tls_psk_enable, TLS_CONTEXT *apsk_ctx, const char *apassword) */ /* Calculate tls_local_need from the resource */ CalcLocalTLSNeedFromRes(res->tls_enable, res->tls_enable, false, false, NULL, res->tls_ctx, res->tls_psk_enable, res->psk_ctx, res->password); bstrncpy(bashed_name, mon->hdr.name, sizeof(bashed_name)); bash_spaces(bashed_name); /* Timeout Hello after 5 mins */ StartAuthTimeout(60 * 5); if (res->type == R_DIRECTOR) { bs->fsend(DIRhello, bashed_name, UA_VERSION, tlspsk_local_need); } else if (res->type == R_STORAGE) { bs->fsend(SDhello, bashed_name, UA_VERSION, tlspsk_local_need); } else { bs->fsend(FDhello, bashed_name, UA_VERSION, tlspsk_local_need); } /* Try to authenticate using cram-md5 */ if (!ClientCramMD5Authenticate(res->password)) { Jmsg(jcr, M_FATAL, 0, _("Authorization problem.\n" "Most likely the passwords do not agree.\n" "For help, 
please see " MANUAL_AUTH_URL "\n")); return 0; } if (!HandleTLS()) { return 0; } Dmsg1(6, "> %s", bs->msg); if (bs->recv() <= 0) { Jmsg1(jcr, M_FATAL, 0, _("Bad response to Hello command: ERR=%s\n"), bs->bstrerror()); return 0; } Dmsg1(10, "< %s", bs->msg); switch(res->type) { case R_DIRECTOR: p = DIROKhello; break; case R_CLIENT: p = FDOKhello; break; case R_STORAGE: p = SDOKhello; break; } if (strncmp(bs->msg, p, strlen(p)) != 0) { Jmsg(jcr, M_FATAL, 0, _("Daemon rejected Hello command\n")); return 0; } else { //Jmsg0(jcr, M_INFO, 0, dir->msg); } return 1; } int authenticate_daemon_old(JCR *jcr, MONITOR *mon, RESMON *res) { BSOCK *bs = res->bs; int tls_local_need = BNET_TLS_NONE; int tls_remote_need = BNET_TLS_NONE; int compatible = true; char bashed_name[MAX_NAME_LENGTH]; char *password, *p; int ret = 0; bstrncpy(bashed_name, mon->hdr.name, sizeof(bashed_name)); bash_spaces(bashed_name); password = res->password; /* TLS Requirement */ if (res->tls_enable) { tls_local_need = BNET_TLS_REQUIRED; } /* Timeout Hello after 5 mins */ btimer_t *tid = start_bsock_timer(bs, 60 * 5); if (res->type == R_DIRECTOR) { p = DIRhello; } else if (res->type == R_STORAGE) { p = SDhello; } else { p = FDhello; } bs->fsend(p, bashed_name); if (!cram_md5_respond(bs, password, &tls_remote_need, &compatible) || !cram_md5_challenge(bs, password, tls_local_need, compatible)) { Jmsg(jcr, M_FATAL, 0, _("Authorization problem.\n" "Most likely the passwords do not agree.\n" "For help, please see " MANUAL_AUTH_URL "\n")); goto bail_out; } /* Verify that the remote host is willing to meet our TLS requirements */ if (tls_remote_need < tls_local_need && tls_local_need != BNET_TLS_OK && tls_remote_need != BNET_TLS_OK) { Jmsg(jcr, M_FATAL, 0, _("Authorization problem:" " Remote server did not advertise required TLS support.\n")); goto bail_out; } /* Verify that we are willing to meet the remote host's requirements */ if (tls_remote_need > tls_local_need && tls_local_need != BNET_TLS_OK && tls_remote_need != BNET_TLS_OK) { Jmsg(jcr, M_FATAL, 0, ("Authorization problem:" " Remote server requires TLS.\n")); goto bail_out; } /* Is TLS Enabled? */ if (tls_local_need >= BNET_TLS_OK && tls_remote_need >= BNET_TLS_OK) { /* Engage TLS! Full Speed Ahead! */ if (!bnet_tls_client(res->tls_ctx, bs, NULL, NULL)) { Jmsg(jcr, M_FATAL, 0, _("TLS negotiation failed\n")); goto bail_out; } } Dmsg1(6, "> %s", bs->msg); if (bs->recv() <= 0) { Jmsg1(jcr, M_FATAL, 0, _("Bad response to Hello command: ERR=%s\n"), bs->bstrerror()); goto bail_out; } Dmsg1(10, "< %s", bs->msg); switch(res->type) { case R_DIRECTOR: p = DIROKhello; break; case R_CLIENT: p = FDOKhello; break; case R_STORAGE: p = SDOKhello; break; } if (strncmp(bs->msg, p, strlen(p)) != 0) { Jmsg(jcr, M_FATAL, 0, _("Daemon rejected Hello command\n")); goto bail_out; } else { //Jmsg0(jcr, M_INFO, 0, dir->msg); } ret = 1; bail_out: if (tid) { stop_bsock_timer(tid); } return ret; } bacula-15.0.3/src/qt-console/tray-monitor/app-boot-ui-controller.h0000644000175000017500000000217214771010173024671 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. 
This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #ifndef BOOT_UI_CONTROLLER_H #define BOOT_UI_CONTROLLER_H #include #include #include "tray_conf.h" #include "config-storage.h" /* BootUiController - Class responsible for performing any tasks that are necessary before the first screen of the App is shown */ class BootUiController : public QObject { Q_OBJECT public: explicit BootUiController(QObject *parent = nullptr); ~BootUiController(); public slots: bool shouldShowTutorial(); }; #endif // BOOT_UI_CONTROLLER_H bacula-15.0.3/src/qt-console/tray-monitor/win32/0000755000175000017500000000000014771010173021143 5ustar bsbuildbsbuildbacula-15.0.3/src/qt-console/tray-monitor/win32/qmake.conf0000644000175000017500000000713614771010173023117 0ustar bsbuildbsbuild# # qmake configuration for win32-g++ # # Written for MinGW # MAKEFILE_GENERATOR = MINGW TEMPLATE = app CONFIG += qt warn_on release link_prl copy_dir_files debug_and_release debug_and_release_target precompile_header cross-win32 QT += core gui DEFINES += UNICODE QMAKE_COMPILER_DEFINES += __GNUC__ WIN32 QMAKE_EXT_OBJ = .o QMAKE_EXT_RES = _res.o QMAKE_LEX = flex QMAKE_LEXFLAGS = QMAKE_YACC = byacc QMAKE_YACCFLAGS = -d QMAKE_CFLAGS = -DHAVE_MINGW -DHAVE_WIN32 -DHAVE_MINGW_W64 QMAKE_CFLAGS_DEPS = -M QMAKE_CFLAGS_WARN_ON = -Wall QMAKE_CFLAGS_WARN_OFF = -w QMAKE_CFLAGS_RELEASE = -O2 QMAKE_CFLAGS_DEBUG = -g -O2 QMAKE_CFLAGS_YACC = -Wno-unused -Wno-parentheses QMAKE_CXXFLAGS = $$QMAKE_CFLAGS -DHAVE_MINGW -DHAVE_WIN32 -DHAVE_ZLIB_H -DHAVE_LIBZ -DHAVE_CRYPTO -DHAVE_OPENSSL -DHAVE_TLS -DHAVE_MINGW_W64 QMAKE_CXXFLAGS_DEPS = $$QMAKE_CFLAGS_DEPS QMAKE_CXXFLAGS_WARN_ON = $$QMAKE_CFLAGS_WARN_ON QMAKE_CXXFLAGS_WARN_OFF = $$QMAKE_CFLAGS_WARN_OFF QMAKE_CXXFLAGS_RELEASE = $$QMAKE_CFLAGS_RELEASE QMAKE_CXXFLAGS_DEBUG = $$QMAKE_CFLAGS_DEBUG QMAKE_CXXFLAGS_YACC = $$QMAKE_CFLAGS_YACC QMAKE_CXXFLAGS_THREAD = $$QMAKE_CFLAGS_THREAD QMAKE_CXXFLAGS_RTTI_ON = -frtti QMAKE_CXXFLAGS_RTTI_OFF = -fno-rtti QMAKE_CXXFLAGS_EXCEPTIONS_ON = -fexceptions -mthreads QMAKE_CXXFLAGS_EXCEPTIONS_OFF = -fno-exceptions QMAKE_INCDIR = $(DEPKGS)/depkgs-mingw32/include/ ../win32/compat QMAKE_INCDIR_QT = $(DEPKGS)/depkgs-mingw32/include/qt QMAKE_LIBDIR_QT = $(DEPKGS)/depkgs-mingw32/lib/qt QMAKE_RUN_CC = $(CXX) -c $(CFLAGS) $(INCPATH) -o $obj $src QMAKE_RUN_CC_IMP = $(CXX) -c $(CFLAGS) $(INCPATH) -o $@ $< QMAKE_RUN_CXX = $(CXX) -c $(CXXFLAGS) $(INCPATH) -o $obj $src QMAKE_RUN_CXX_IMP = $(CXX) -c $(CXXFLAGS) $(INCPATH) -o $@ $< QMAKE_LINK = i686-w64-mingw32-g++ QMAKE_LFLAGS = -mthreads -Wl,-enable-stdcall-fixup -Wl,-enable-auto-import -m32 -fno-strict-aliasing -Wl,-enable-runtime-pseudo-reloc QMAKE_LFLAGS_EXCEPTIONS_ON = -mthreads -Wl QMAKE_LFLAGS_EXCEPTIONS_OFF = QMAKE_LFLAGS_RELEASE = -Wl,-s QMAKE_LFLAGS_DEBUG = QMAKE_LFLAGS_CONSOLE = -Wl,-subsystem,console QMAKE_LFLAGS_WINDOWS = -Wl,-subsystem,windows QMAKE_LFLAGS_DLL = -shared QMAKE_LINK_OBJECT_MAX = 10 QMAKE_LINK_OBJECT_SCRIPT= object_script QMAKE_LIBS = -lwsock32 -lstdc++ QMAKE_LIBS_CORE = -lkernel32 -luser32 -lshell32 -luuid -lole32 -ladvapi32 -lws2_32 QMAKE_LIBS_GUI = -lgdi32 -lcomdlg32 -loleaut32 -limm32 -lwinmm -lwinspool -lws2_32 -lole32 -luuid -luser32 -ladvapi32 QMAKE_LIBS_NETWORK = -lws2_32 QMAKE_LIBS_OPENGL = -lopengl32 -lglu32 -lgdi32 -luser32 QMAKE_LIBS_COMPAT = -ladvapi32 -lshell32 -lcomdlg32 -luser32 -lgdi32 -lws2_32 QMAKE_LIBS_QT_ENTRY = -lmingw32 -lqtmain MINGW_IN_SHELL = 1 QMAKE_DIR_SEP = / QMAKE_COPY = cp QMAKE_COPY_DIR = cp -r 
QMAKE_MOVE = mv QMAKE_DEL_FILE = rm -f QMAKE_MKDIR = mkdir -p QMAKE_DEL_DIR = rm -rf QMAKE_RCC = rcc QMAKE_CHK_DIR_EXISTS = test -d QMAKE_MOC = $$[QT_INSTALL_BINS]$${DIR_SEPARATOR}moc QMAKE_UIC = $$[QT_INSTALL_BINS]$${DIR_SEPARATOR}uic QMAKE_IDC = $$[QT_INSTALL_BINS]$${DIR_SEPARATOR}idc QMAKE_IDL = midl QMAKE_ZIP = zip -r -9 QMAKE_STRIP = i686-w64-mingw32-strip QMAKE_STRIPFLAGS_LIB += --strip-unneeded load(qt_config) bacula-15.0.3/src/qt-console/tray-monitor/win32/qplatformdefs.h0000644000175000017500000001202214771010173024160 0ustar bsbuildbsbuild/**************************************************************************** ** ** Copyright (C) 1992-2007 Trolltech ASA. All rights reserved. ** ** This file is part of the qmake spec of the Qt Toolkit. ** ** This file may be used under the terms of the GNU General Public ** License version 2.0 as published by the Free Software Foundation ** and appearing in the file LICENSE.GPL included in the packaging of ** this file. Please review the following information to ensure GNU ** General Public Licensing requirements will be met: ** http://trolltech.com/products/qt/licenses/licensing/opensource/ ** ** If you are unsure which license is appropriate for your use, please ** review the following information: ** http://trolltech.com/products/qt/licenses/licensing/licensingoverview ** or contact the sales department at sales@trolltech.com. ** ** In addition, as a special exception, Trolltech gives you certain ** additional rights. These rights are described in the Trolltech GPL ** Exception version 1.0, which can be found at ** http://www.trolltech.com/products/qt/gplexception/ and in the file ** GPL_EXCEPTION.txt in this package. ** ** In addition, as a special exception, Trolltech, as the sole copyright ** holder for Qt Designer, grants users of the Qt/Eclipse Integration ** plug-in the right for the Qt/Eclipse Integration to link to ** functionality provided by Qt Designer and its related libraries. ** ** Trolltech reserves all rights not expressly granted herein. ** ** Trolltech ASA (c) 2007 ** ** This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE ** WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
** ****************************************************************************/ #ifndef QPLATFORMDEFS_H #define QPLATFORMDEFS_H #ifdef UNICODE #ifndef _UNICODE #define _UNICODE #endif #endif // Get Qt defines/settings #include "qglobal.h" #include "winhdrs.h" #include #include #include #include #include #include #include #include #include #if !defined(_WIN32_WINNT) || (_WIN32_WINNT-0 < 0x0500) typedef enum { NameUnknown = 0, NameFullyQualifiedDN = 1, NameSamCompatible = 2, NameDisplay = 3, NameUniqueId = 6, NameCanonical = 7, NameUserPrincipal = 8, NameCanonicalEx = 9, NameServicePrincipal = 10, NameDnsDomain = 12 } EXTENDED_NAME_FORMAT, *PEXTENDED_NAME_FORMAT; #endif #define Q_FS_FAT #ifdef QT_LARGEFILE_SUPPORT #define QT_STATBUF struct _stati64 // non-ANSI defs #define QT_STATBUF4TSTAT struct _stati64 // non-ANSI defs #define QT_STAT ::_stati64 #define QT_FSTAT ::_fstati64 #else #define QT_STATBUF struct _stat // non-ANSI defs #define QT_STATBUF4TSTAT struct _stat // non-ANSI defs #define QT_STAT ::_stat #define QT_FSTAT ::_fstat #endif #define QT_STAT_REG _S_IFREG #define QT_STAT_DIR _S_IFDIR #define QT_STAT_MASK _S_IFMT #if defined(_S_IFLNK) # define QT_STAT_LNK _S_IFLNK #endif #define QT_FILENO _fileno #define QT_OPEN ::_open #define QT_CLOSE ::_close #ifdef QT_LARGEFILE_SUPPORT #define QT_LSEEK ::_lseeki64 #ifndef UNICODE #define QT_TSTAT ::_stati64 #else #define QT_TSTAT ::_wstati64 #endif #else #define QT_LSEEK ::_lseek #ifndef UNICODE #define QT_TSTAT ::_stat #else #define QT_TSTAT ::_wstat #endif #endif #define QT_READ ::_read #define QT_WRITE ::_write #define QT_ACCESS ::_access #define QT_GETCWD ::_getcwd #define QT_CHDIR ::_chdir #define QT_MKDIR ::_mkdir #define QT_RMDIR ::_rmdir #define QT_OPEN_LARGEFILE 0 #define QT_OPEN_RDONLY _O_RDONLY #define QT_OPEN_WRONLY _O_WRONLY #define QT_OPEN_RDWR _O_RDWR #define QT_OPEN_CREAT _O_CREAT #define QT_OPEN_TRUNC _O_TRUNC #define QT_OPEN_APPEND _O_APPEND #if defined(O_TEXT) # define QT_OPEN_TEXT _O_TEXT # define QT_OPEN_BINARY _O_BINARY #endif #define QT_FOPEN ::fopen #ifdef QT_LARGEFILE_SUPPORT #define QT_FSEEK ::fseeko64 #define QT_FTELL ::ftello64 #else #define QT_FSEEK ::fseek #define QT_FTELL ::ftell #endif #define QT_FGETPOS ::fgetpos #define QT_FSETPOS ::fsetpos #define QT_FPOS_T fpos_t #ifdef QT_LARGEFILE_SUPPORT #define QT_OFF_T off64_t #else #define QT_OFF_T long #endif #define QT_SIGNAL_ARGS int #define QT_VSNPRINTF ::_vsnprintf #define QT_SNPRINTF ::_snprintf # define F_OK 0 # define X_OK 1 # define W_OK 2 # define R_OK 4 #endif // QPLATFORMDEFS_H bacula-15.0.3/src/qt-console/tray-monitor/resmodel.cpp0000644000175000017500000000142714771010173022523 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ #include "resmodel.h" #include ResourceModel::ResourceModel(QObject *parent): QObject(parent) {} ResourceModel::~ResourceModel() {} bacula-15.0.3/src/qt-console/tray-monitor/ResourcePanel.qml0000644000175000017500000001363514771010173023473 0ustar bsbuildbsbuildimport QtQuick 2.10 import QtQuick.Window 2.10 import QtQuick.Layouts 1.3 import QtQuick.Controls 2.3 import QtQuick.Controls.Material 2.1 import QtQuick.Dialogs 1.2 import io.qt.bmob.respanelcontroller 1.0 Item { id: resPanel width: parent.width height: resPanel.visible ? childrenRect.height : 0 visible: false anchors.bottom: parent.bottom property alias resModel: resPanelController.resModel; property alias resType: resPanelController.resType; ResPanelUiController { id: resPanelController onSuccessMessageChanged: { dialog.text = resPanelController.successMsg dialog.open() } onErrorMsgChanged: { dialog.text = resPanelController.errorMsg dialog.open() } onResCodeChanged: { labelRemote.visible = isClient() checkbox.visible = isClient() } } Rectangle { id: panelDivider anchors.left: parent.left anchors.right: parent.right anchors.top: parent.top height: 1 color: "#d32f2f" } ToolButton { id: closePanelButton anchors.top: panelDivider.bottom anchors.right: parent.right anchors.rightMargin: 10 anchors.topMargin: 8 anchors { top: parent.top topMargin: 8 right: parent.right rightMargin: 8 } contentItem: Text { text: qsTr("x") font.pixelSize: 20 color: closePanelButton.down ? "#ffcdd2" : "#d32f2f" } background: Rectangle { color: "#ffffff" } onClicked: { resPanel.visible = false } } Text { id: nameLabel text: "Name:" anchors.top: panelDivider.bottom anchors.left: parent.left anchors.topMargin: 16 anchors.leftMargin: 16 } TextField { id: resName text: resPanelController.resourceName placeholderText: qsTr("Resource name") anchors.top: nameLabel.bottom anchors.left: parent.left anchors.right: parent.right anchors.topMargin: 8 anchors.leftMargin: 24 anchors.rightMargin: 48 onTextChanged: resPanelController.resourceName = text } Text { id: passwordLabel text: "Password:" anchors.top: resName.bottom anchors.left: parent.left anchors.topMargin: 16 anchors.leftMargin: 16 } TextField { id: resPassword echoMode: TextInput.PasswordEchoOnEdit text: resPanelController.resourcePassword placeholderText: qsTr("Resource password") anchors.top: passwordLabel.bottom anchors.left: parent.left anchors.right: parent.right anchors.topMargin: 8 anchors.leftMargin: 24 anchors.rightMargin: 48 onTextChanged: resPanelController.resourcePassword = text } Text { id: addressLabel text: "Address:" anchors.top: resPassword.bottom anchors.left: parent.left anchors.topMargin: 16 anchors.leftMargin: 16 } TextField { id: resAddress text: resPanelController.resourceAddress placeholderText: qsTr("Resource address") anchors.top: addressLabel.bottom anchors.left: resPassword.left anchors.right: parent.right anchors.topMargin: 8 anchors.rightMargin: 48 onTextChanged: resPanelController.resourceAddress = text } Text { id: portLabel text: "Port:" anchors.top: resAddress.bottom anchors.left: parent.left anchors.topMargin: 16 anchors.leftMargin: 16 } TextField { id: resPort text: resPanelController.resourcePort placeholderText: qsTr("Resource port") anchors.top: portLabel.bottom anchors.left: parent.left anchors.right: parent.right anchors.topMargin: 8 anchors.leftMargin: 24 anchors.rightMargin: 48 onTextChanged: resPanelController.resourcePort = text } Text { id: labelRemote text: "Remote:" color: "black" visible: resPanelController.isClient() anchors.left: parent.left 
anchors.leftMargin: 16 anchors.verticalCenter: checkbox.verticalCenter } CheckBox { id: checkbox checked: resPanelController.remoteClient visible: resPanelController.isClient() anchors.top: resPort.bottom anchors.topMargin: 16 anchors.left: labelRemote.right anchors.leftMargin: 8 indicator: Rectangle { implicitWidth: 20 implicitHeight: 20 x: checkbox.leftPadding y: parent.height / 2 - height / 2 radius: 3 border.color: checkbox.down ? "#ef9a9a" : "#d32f2f" Rectangle { width: 8 height: 8 x: 6 y: 6 radius: 2 color: checkbox.down ? "#ef9a9a" : "#d32f2f" visible: checkbox.checked } } onCheckedChanged: { resPanelController.remoteClient = checked } } Button { id: saveButton text: "Save" onClicked: resPanelController.saveChanges() anchors.top: labelRemote.visible? labelRemote.bottom : resPort.bottom anchors.topMargin: 16 anchors.horizontalCenter: parent.horizontalCenter contentItem: Text { text: saveButton.text font.pixelSize: 20 color: saveButton.down ? "#ef9a9a" : "#d32f2f" } background: Rectangle { color: "white" } } // Bottom Margin Item { height: 16 anchors.top: saveButton.bottom } MessageDialog { id: dialog modality: Qt.ApplicationModal standardButtons: StandardButton.Ok visible: false onAccepted: resPanel.visible = false } } bacula-15.0.3/src/qt-console/tray-monitor/restorewizard.ui0000644000175000017500000000641014771010173023445 0ustar bsbuildbsbuild RestoreWizard 0 0 853 444 Wizard false false 2 0 false Restore Select a Client. Restore Select Backup to restore. Restore Files Selection Plugins Restore pluging values Restore Options Select advanced options for restore ClientSelectWizardPage QWizardPage
clientselectwizardpage.h
1
JobSelectWizardPage QWizardPage
jobselectwizardpage.h
1
FileSelectWizardPage QWizardPage
fileselectwizardpage.h
1
RestoreOptionsWizardPage QWizardPage
restoreoptionswizardpage.h
1
PluginWizardPage QWizardPage
pluginwizardpage.h
1
RestWizClientPage completeChanged() RestoreWizard setFocus() 424 288 424 274
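<!--
  Illustrative note (not generated by Qt Designer): the customwidget
  entries above promote plain QWizardPage placeholders to the classes
  named there, e.g. ClientSelectWizardPage declared in
  clientselectwizardpage.h. A minimal header for such a promoted page
  could look like the sketch below; this is an assumption added for
  documentation purposes, and the real headers in this directory may
  differ.

  #ifndef CLIENTSELECTWIZARDPAGE_H
  #define CLIENTSELECTWIZARDPAGE_H

  #include <QWizardPage>

  class ClientSelectWizardPage : public QWizardPage
  {
     Q_OBJECT
  public:
     explicit ClientSelectWizardPage(QWidget *parent = 0);
     /* Overriding isComplete() lets QWizardPage emit completeChanged(),
        the signal used by the connection defined above. */
     virtual bool isComplete() const;
  };

  #endif
-->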
bacula-15.0.3/src/qt-console/tray-monitor/tray_conf.h0000644000175000017500000001214714771010173022343 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Tray Monitor specific configuration and defines * * Adapted from dird_conf.c * * Nicolas Boichat, August MMIV * */ #ifndef TRAY_CONF #define TRAY_CONF #include "common.h" /* * Resource codes -- they must be sequential for indexing */ enum rescode { R_MONITOR = 1001, R_DIRECTOR, R_CLIENT, R_STORAGE, R_CONSOLE_FONT, R_FIRST = R_MONITOR, R_LAST = R_CONSOLE_FONT /* keep this updated */ }; /* * Some resource attributes */ enum { R_NAME = 1020, R_ADDRESS, R_PASSWORD, R_TYPE, R_BACKUP }; struct s_running_job { int32_t Errors; /* FD/SD errors */ int32_t JobType; int32_t JobStatus; int32_t JobLevel; uint32_t JobId; uint32_t VolSessionId; uint32_t VolSessionTime; uint32_t JobFiles; uint64_t JobBytes; uint64_t ReadBytes; utime_t start_time; int64_t bytespersec; int SDtls; char Client[MAX_NAME_LENGTH]; char FileSet[MAX_NAME_LENGTH]; char Storage[MAX_NAME_LENGTH]; char RStorage[MAX_NAME_LENGTH]; char sched_time; char Job[MAX_NAME_LENGTH]; char CurrentFile[4096]; }; /* forward definition */ class worker; /* Director/Client/Storage */ struct RESMON { RES hdr; /* Keep First */ uint32_t type; /* Keep 2nd R_CLIENT, R_DIRECTOR, R_STORAGE */ rescode code; /* R_CLIENT, R_DIRECTOR, R_STORAGE */ bool managed; /* Managed (started, destroyed, etc...) 
by the Tray Monitor */ uint32_t port; /* UA server port */ char *address; /* UA server address */ utime_t connect_timeout; /* timeout for connect in seconds */ char *password; bool use_remote; /* Use Client Initiated backup feature */ bool use_monitor; /* update the status icon with this resource */ bool use_setip; /* Send setip command before a job */ bool tls_enable; /* Enable TLS on all connections */ bool tls_psk_enable; /* Enable TLS-PSK on all connections */ char *tls_ca_certfile; /* TLS CA Certificate File */ char *tls_ca_certdir; /* TLS CA Certificate Directory */ char *tls_certfile; /* TLS Client Certificate File */ char *tls_keyfile; /* TLS Client Key File */ /* ------------------------------------------------------------ */ TLS_CONTEXT *tls_ctx; /* Shared TLS Context */ TLS_CONTEXT *psk_ctx; /* Shared TLS-PSK Context */ worker *wrk; /* worker that will handle async op */ QMutex *mutex; BSOCK *bs; char name[MAX_NAME_LENGTH]; char version[MAX_NAME_LENGTH]; char plugins[MAX_NAME_LENGTH]; char started[32]; /* ISO date */ char reloaded[32]; /* ISO date */ int bwlimit; alist *running_jobs; dlist *terminated_jobs; btime_t last_update; bool new_resource; bool proxy_sent; /* List of resources available */ alist *jobs; alist *clients; alist *filesets; alist *pools; alist *storages; alist *catalogs; /* Default value */ struct { char *job; char *client; char *pool; char *storage; char *level; char *type; char *fileset; char *catalog; int priority; char *where; int replace; } defaults; /* Information about the job */ struct { uint64_t JobBytes; uint32_t JobFiles; int CorrJobBytes; int CorrJobFiles; int CorrNbJob; char JobLevel; } infos; }; Q_DECLARE_METATYPE(RESMON*) /* * Tray Monitor Resource * */ struct MONITOR { RES hdr; /* Keep first */ int32_t type; /* Keep second */ bool comm_compression; /* Enable comm line compression */ bool require_ssl; /* Require SSL for all connections */ bool display_advanced_options; /* Display advanced options (run options for example) */ MSGS *messages; /* Daemon message handler */ char *password; /* UA server password */ char *command_dir; /* Where to find Commands */ utime_t RefreshInterval; /* Status refresh interval */ }; /* Define the Union of all the above * resource structure definitions. */ union URES { MONITOR res_monitor; RESMON res_main; RES hdr; }; void error_handler(const char *file, int line, LEX *lc, const char *msg, ...); #endif bacula-15.0.3/src/qt-console/tray-monitor/jobselectwizardpage.ui0000644000175000017500000000262714771010173024577 0ustar bsbuildbsbuild JobSelectWizardPage 0 0 706 360 WizardPage QAbstractItemView::NoEditTriggers QAbstractItemView::SingleSelection QAbstractItemView::SelectRows true false false false true false bacula-15.0.3/src/qt-console/tray-monitor/runjob.cpp0000644000175000017500000003441314771010173022211 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ #include "runjob.h" #include static void fillcombo(QComboBox *cb, alist *lst, bool addempty=true) { if (lst && lst->size() > 0) { QStringList list; char *str; if (addempty) { list << QString(""); } foreach_alist(str, lst) { list << QString(str); } cb->addItems(list); } else { cb->setEnabled(false); } } RunJob::RunJob(RESMON *r): QDialog(), res(r), tabAdvanced(NULL) { int nbjob; if (res->jobs->size() == 0) { QMessageBox msgBox; msgBox.setText(_("This restricted console does not have access to Backup jobs")); msgBox.setIcon(QMessageBox::Warning); msgBox.exec(); deleteLater(); return; } ui.setupUi(this); setModal(true); connect(ui.cancelButton, SIGNAL(clicked()), this, SLOT(close_cb())); connect(ui.okButton, SIGNAL(clicked()), this, SLOT(runjob())); connect(ui.jobCombo, SIGNAL(currentIndexChanged(int)), this, SLOT(jobChanged(int))); connect(ui.levelCombo, SIGNAL(currentIndexChanged(int)), this, SLOT(levelChanged(int))); ui.dateTimeEdit->setMinimumDate(QDate::currentDate()); ui.dateTimeEdit->setMaximumDate(QDate::currentDate().addDays(7)); ui.dateTimeEdit->setDate(QDate::currentDate()); ui.dateTimeEdit->setTime(QTime::currentTime()); ui.boxEstimate->setVisible(false); res->mutex->lock(); nbjob = res->jobs->size(); fillcombo(ui.jobCombo, res->jobs, (nbjob > 1)); fillcombo(ui.clientCombo, res->clients); fillcombo(ui.filesetCombo,res->filesets); fillcombo(ui.poolCombo, res->pools); fillcombo(ui.storageCombo,res->storages); fillcombo(ui.catalogCombo,res->catalogs); res->mutex->unlock(); connect(ui.tabWidget, SIGNAL(currentChanged(int)), this, SLOT(tabChange(int))); QStringList levels; levels << "" << "Incremental" << "Differential" << "Full"; ui.levelCombo->addItems(levels); MONITOR *m = (MONITOR*) GetNextRes(R_MONITOR, NULL); if (!m->display_advanced_options) { tabAdvanced = ui.tabWidget->widget(1); ui.tabWidget->removeTab(1); } show(); } void RunJob::tabChange(int idx) { QString q = ui.tabWidget->tabText(idx); if (q.contains("Advanced")) { if (ui.jobCombo->currentText().compare("") == 0) { pm_strcpy(curjob, ""); ui.tab2->setEnabled(false); } else if (ui.jobCombo->currentText().compare(curjob.c_str()) != 0) { task *t = new task(); char *job = bstrdup(ui.jobCombo->currentText().toUtf8().data()); pm_strcpy(curjob, job); // Keep the job name to not refresh the Advanced tab the next time Dmsg1(10, "get defaults for %s\n", job); res->mutex->lock(); bfree_and_null(res->defaults.job); res->defaults.job = job; res->mutex->unlock(); ui.tab2->setEnabled(false); connect(t, SIGNAL(done(task *)), this, SLOT(fill_defaults(task *)), Qt::QueuedConnection); t->init(res, TASK_DEFAULTS); res->wrk->queue(t); } } } void RunJob::runjob() { POOL_MEM tmp; char *p; p = ui.jobCombo->currentText().toUtf8().data(); if (!p || !*p) { QMessageBox msgBox; msgBox.setText(_("Nothing selected")); msgBox.setIcon(QMessageBox::Warning); msgBox.exec(); return; } Mmsg(command, "run job=\"%s\" yes", p); if (strcmp(p, NPRTB(res->defaults.job)) == 0 || strcmp("", NPRTB(res->defaults.job)) == 0) { p = ui.storageCombo->currentText().toUtf8().data(); if (p && *p && strcmp(p, NPRTB(res->defaults.storage)) != 0) { Mmsg(tmp, " storage=\"%s\"", p); pm_strcat(command, tmp.c_str()); } p = ui.clientCombo->currentText().toUtf8().data(); if (p && *p && strcmp(p, NPRTB(res->defaults.client)) != 0) { Mmsg(tmp, " client=\"%s\"", p); pm_strcat(command, tmp.c_str()); } p = ui.levelCombo->currentText().toUtf8().data(); if (p && *p && strcmp(p, NPRTB(res->defaults.level)) != 0) { Mmsg(tmp, " level=\"%s\"", p); pm_strcat(command, tmp.c_str()); } p = 
ui.poolCombo->currentText().toUtf8().data(); if (p && *p && strcmp(p, NPRTB(res->defaults.pool)) != 0) { Mmsg(tmp, " pool=\"%s\"", p); pm_strcat(command, tmp.c_str()); } p = ui.filesetCombo->currentText().toUtf8().data(); if (p && *p && strcmp(p, NPRTB(res->defaults.fileset)) != 0) { Mmsg(tmp, " fileset=\"%s\"", p); pm_strcat(command, tmp.c_str()); } if (res->defaults.priority && res->defaults.priority != ui.prioritySpin->value()) { Mmsg(tmp, " priority=\"%d\"", res->defaults.priority); pm_strcat(command, tmp.c_str()); } } QDate dnow = QDate::currentDate(); QTime tnow = QTime::currentTime(); QDate dval = ui.dateTimeEdit->date(); QTime tval = ui.dateTimeEdit->time(); if (dval > dnow || (dval == dnow && tval > tnow)) { Mmsg(tmp, " when=\"%s %s\"", dval.toString("yyyy-MM-dd").toUtf8().data(), tval.toString("hh:mm:00").toUtf8().data()); pm_strcat(command, tmp.c_str()); } if (res->type == R_CLIENT) { pm_strcat(command, " fdcalled=1"); } // Build the command and run it! task *t = new task(); t->init(res, TASK_RUN); connect(t, SIGNAL(done(task *)), this, SLOT(jobStarted(task *)), Qt::QueuedConnection); t->arg = command.c_str(); res->wrk->queue(t); } void RunJob::jobStarted(task *t) { Dmsg1(10, "%s\n", command.c_str()); Dmsg1(10, "-> jobid=%d\n", t->result.i); deleteLater(); delete t; } void RunJob::close_cb(task *t) { deleteLater(); delete t; } void RunJob::close_cb() { task *t = new task(); connect(t, SIGNAL(done(task *)), this, SLOT(close_cb(task *)), Qt::QueuedConnection); t->init(res, TASK_DISCONNECT); res->wrk->queue(t); } void RunJob::jobChanged(int) { char *p; ui.levelCombo->setCurrentIndex(0); ui.storageCombo->setCurrentIndex(0); ui.filesetCombo->setCurrentIndex(0); ui.clientCombo->setCurrentIndex(0); ui.storageCombo->setCurrentIndex(0); ui.poolCombo->setCurrentIndex(0); ui.catalogCombo->setCurrentIndex(0); p = ui.jobCombo->currentText().toUtf8().data(); if (p && *p) { task *t = new task(); t->init(res, TASK_INFO); pm_strcpy(info, p); connect(t, SIGNAL(done(task *)), this, SLOT(jobInfo(task *)), Qt::QueuedConnection); t->arg = info.c_str(); // Jobname t->arg2 = NULL; // Level res->wrk->queue(t); } } void RunJob::levelChanged(int) { char *p; p = ui.jobCombo->currentText().toUtf8().data(); if (p && *p) { pm_strcpy(info, p); p = ui.levelCombo->currentText().toUtf8().data(); if (p && *p) { task *t = new task(); pm_strcpy(level, p); connect(t, SIGNAL(done(task *)), this, SLOT(jobInfo(task *)), Qt::QueuedConnection); t->init(res, TASK_INFO); t->arg = info.c_str(); // Jobname t->arg2 = level.c_str(); // Level res->wrk->queue(t); } } } void RunJob::jobInfo(task *t) { char ed1[50]; res->mutex->lock(); if (res->infos.CorrNbJob == 0) { ui.boxEstimate->setVisible(false); } else { QString t; edit_uint64_with_suffix(res->infos.JobBytes, ed1); bstrncat(ed1, "B", sizeof(ed1)); ui.labelJobBytes->setText(QString(ed1)); ui.labelJobFiles->setText(QString(edit_uint64_with_commas(res->infos.JobFiles, ed1))); ui.labelJobLevel->setText(QString(job_level_to_str(res->infos.JobLevel))); t = tr("Computed over %1 job%2, the correlation is %3/100.").arg(res->infos.CorrNbJob).arg(res->infos.CorrNbJob>1?"s":"").arg(res->infos.CorrJobBytes); ui.labelJobBytes_2->setToolTip(t); t = tr("Computed over %1 job%2, The correlation is %3/100.").arg(res->infos.CorrNbJob).arg(res->infos.CorrNbJob>1?"s":"").arg(res->infos.CorrJobFiles); ui.labelJobFiles_2->setToolTip(t); ui.boxEstimate->setVisible(true); } res->mutex->unlock(); t->deleteLater(); } static void set_combo(QComboBox *dest, char *str) { if (str) { int idx = 
dest->findText(QString(str), Qt::MatchExactly); if (idx >= 0) { dest->setCurrentIndex(idx); } } } void RunJob::fill_defaults(task *t) { if (t->status == true) { res->mutex->lock(); set_combo(ui.levelCombo, res->defaults.level); set_combo(ui.filesetCombo, res->defaults.fileset); set_combo(ui.clientCombo, res->defaults.client); set_combo(ui.storageCombo, res->defaults.storage); set_combo(ui.poolCombo, res->defaults.pool); set_combo(ui.catalogCombo, res->defaults.catalog); res->mutex->unlock(); } ui.tab2->setEnabled(true); t->deleteLater(); } RunJob::~RunJob() { Dmsg0(10, "~RunJob()\n"); if (tabAdvanced) { delete tabAdvanced; } } void TSched::init(const char *cmd_dir) { bool started = (timer >= 0); if (started) { stop(); } bfree_and_null(command_dir); command_dir = bstrdup(cmd_dir); if (started) { start(); } } TSched::TSched() { timer = -1; command_dir = NULL; } TSched::~TSched() { if (timer >= 0) { stop(); } bfree_and_null(command_dir); } #include int breaddir(DIR *dirp, POOLMEM *&dname); bool TSched::read_command_file(const char *file, alist *lst, btime_t mtime) { POOLMEM *line; bool ret=false; char *p; TSchedJob *s; Dmsg1(50, "open command file %s\n", file); FILE *fp = fopen(file, "r"); if (!fp) { return false; } line = get_pool_memory(PM_FNAME); /* Get the first line, client/component:command */ while (bfgets(line, fp) != NULL) { strip_trailing_junk(line); Dmsg1(50, "%s\n", line); if (line[0] == '#') { continue; } if ((p = strchr(line, ':')) != NULL) { *p=0; s = new TSchedJob(line, p+1, mtime); lst->append(s); ret = true; } } free_pool_memory(line); fclose(fp); return ret; } #include "lib/plugins.h" #include "lib/cmd_parser.h" void TSched::timerEvent(QTimerEvent *event) { Q_UNUSED(event) POOL_MEM tmp, command; TSchedJob *j; alist lst(10, not_owned_by_alist); arg_parser parser; int i; task *t; RESMON *res; scan_for_commands(&lst); foreach_alist(j, (&lst)) { if (parser.parse_cmd(j->command) == bRC_OK) { if ((i = parser.find_arg_with_value("job")) > 0) { QMessageBox msgbox; foreach_res(res, R_CLIENT) { if (strcmp(res->hdr.name, j->component) == 0) { break; } } if (!res) { foreach_res(res, R_DIRECTOR) { if (strcmp(res->hdr.name, j->component) == 0) { break; } } } if (!res) { msgbox.setIcon(QMessageBox::Information); msgbox.setText(QString("Unable to find the component \"%1\" to run the job \"%2\".").arg(j->component, j->command)); msgbox.setStandardButtons(QMessageBox::Ignore); } else { msgbox.setIcon(QMessageBox::Information); msgbox.setText(QString("The job \"%1\" will start automatically in few seconds...").arg(parser.argv[i])); msgbox.setStandardButtons(QMessageBox::Ok | QMessageBox::Ignore); msgbox.setDefaultButton(QMessageBox::Ok); msgbox.button(QMessageBox::Ok)->animateClick(6000); } switch(msgbox.exec()) { case QMessageBox::Ok: Mmsg(command, "%s yes", j->command); if (res->type == R_CLIENT) { pm_strcat(command, " fdcalled=1"); } // Build the command and run it! 
t = new task(); t->init(res, TASK_RUN); connect(t, SIGNAL(done(task *)), this, SLOT(jobStarted(task *)), Qt::QueuedConnection); t->arg = command.c_str(); res->wrk->queue(t); break; case QMessageBox::Cancel: case QMessageBox::Ignore: break; } } } delete j; } } void TSched::jobStarted(task *t) { Dmsg1(10, "-> jobid=%d\n", t->result.i); t->deleteLater(); } bool TSched::scan_for_commands(alist *commands) { int name_max, len; DIR* dp = NULL; POOL_MEM fname(PM_FNAME), fname2(PM_FNAME); POOL_MEM dir_entry; bool ret=false, found=false; struct stat statp; name_max = pathconf(".", _PC_NAME_MAX); if (name_max < 1024) { name_max = 1024; } if (!(dp = opendir(command_dir))) { berrno be; Dmsg2(0, "Failed to open directory %s: ERR=%s\n", command_dir, be.bstrerror()); goto bail_out; } for ( ;; ) { if (breaddir(dp, dir_entry.addr()) != 0) { if (!found) { goto bail_out; } break; } if (strcmp(dir_entry.c_str(), ".") == 0 || strcmp(dir_entry.c_str(), "..") == 0) { continue; } len = strlen(dir_entry.c_str()); if (len <= 5) { continue; } if (strcmp(dir_entry.c_str() + len - 5, ".bcmd") != 0) { continue; } Mmsg(fname, "%s/%s", command_dir, dir_entry.c_str()); if (lstat(fname.c_str(), &statp) != 0 || !S_ISREG(statp.st_mode)) { continue; /* ignore directories & special files */ } if (read_command_file(fname.c_str(), commands, statp.st_mtime)) { Mmsg(fname2, "%s.ok", fname.c_str()); unlink(fname2.c_str()); rename(fname.c_str(), fname2.c_str()); // TODO: We should probably unlink the file } } bail_out: if (dp) { closedir(dp); } return ret; } bacula-15.0.3/src/qt-console/tray-monitor/FileDaemonConfigPage.qml0000644000175000017500000000542614771010173024651 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2020 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ import QtQuick 2.10 import QtQuick.Window 2.10 import QtQuick.Layouts 1.3 import QtQuick.Controls 2.3 import QtQuick.Controls.Material 2.1 import QtQuick.Dialogs 1.2 import io.qt.bmob.fdconfigcontroller 1.0 Page { id: fdConfigPage visible: true height: parent.height width: parent.width FdConfigUiController { id: controller onFileDaemonConfigRead: { fdConfigFilePage.text = text } } header: ToolBar { id: toolbar height: 48 background: Rectangle { color: "#d32f2f" } RowLayout { anchors.fill: parent anchors.leftMargin: 8 anchors.rightMargin: 8 ToolButton { id: backButton onClicked: stackView.pop() anchors.left: parent.left contentItem: Text { text: qsTr("<") font.pixelSize: 24 opacity: enabled ? 1.0 : 0.3 color: "white" } background: Rectangle { color: backButton.down ? 
"#b71c1c" : "#d32f2f" } } Label { id: titleLabel text: "bacula-fd.conf" color: "white" font.pixelSize: 18 anchors.centerIn: parent } } } Flickable { id: fdConfigFilePage flickableDirection: Flickable.VerticalFlick anchors.fill: parent property alias text: textArea.text TextArea.flickable: TextArea { id: textArea text: controller.readFileDaemonConfig() textFormat: Qt.PlainText wrapMode: TextArea.Wrap readOnly: false persistentSelection: true leftPadding: 6 rightPadding: 6 topPadding: 0 bottomPadding: 0 background: null } ScrollBar.vertical: ScrollBar {} onVisibleChanged: { if (!visible) { controller.writeFileDaemonConfig(textArea.text) } } } } bacula-15.0.3/src/qt-console/tray-monitor/FeaturesTutorialPage.qml0000644000175000017500000000707514771010173025024 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2020 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ import QtQuick 2.10 import QtQuick.Window 2.10 import QtQuick.Layouts 1.3 import QtQuick.Controls 2.3 import QtQuick.Controls.Material 2.1 import QtQuick.Dialogs 1.2 Page { id: featuresTutorialPage visible: true background: Rectangle { color: "#d32f2f" } GridLayout { id: featuresGrid columns: 2 anchors.left: parent.left anchors.right: parent.right anchors.top: parent.top anchors.bottom: featureText.top anchors.topMargin: 24 anchors.bottomMargin: 24 anchors.leftMargin: 16 anchors.rightMargin: 16 Image { source: "images/ss_main.jpg" Layout.preferredWidth: featuresGrid.width / 2 Layout.preferredHeight: featuresGrid.height / 2 Rectangle { anchors.fill: parent anchors.margins: -border.width z: -1 border.width: 1 color: "#000" } } Image { source: "images/ss_dir.jpg" Layout.preferredWidth: featuresGrid.width / 2 Layout.preferredHeight: featuresGrid.height / 2 Rectangle { anchors.fill: parent anchors.margins: -border.width z: -1 border.width: 1 color: "#000" } } Image { source: "images/ss_backup.jpg" Layout.preferredWidth: featuresGrid.width / 2 Layout.preferredHeight: featuresGrid.height / 2 Rectangle { anchors.fill: parent anchors.margins: -border.width z: -1 border.width: 1 color: "#000" } } Image { source: "images/ss_restore.jpg" Layout.preferredWidth: featuresGrid.width / 2 Layout.preferredHeight: featuresGrid.height / 2 Rectangle { anchors.fill: parent anchors.margins: -border.width z: -1 border.width: 1 color: "#000" } } } Text { id: featureText text: "With the Bacula® Mobile app, users can backup their phones and manage Bacula resources." wrapMode: TextEdit.WordWrap font.pixelSize: 18 color: "white" anchors.bottom: nextButton.top anchors.left: parent.left anchors.right: parent.right anchors.bottomMargin: 16 anchors.leftMargin: 16 anchors.rightMargin: 16 } ToolButton { id: nextButton onClicked: { tutorialSwipe.currentIndex = 1 } anchors.right: parent.right anchors.bottom: parent.bottom anchors.rightMargin: 16 anchors.bottomMargin: 16 contentItem: Text { text: qsTr(">") font.pixelSize: 28 opacity: enabled ? 1.0 : 0.3 color: "white" } background: Rectangle { color: parent.down ? 
"#b71c1c" : "#d32f2f" } } } bacula-15.0.3/src/qt-console/tray-monitor/TrayUiPage.qml0000644000175000017500000001742514771010173022737 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2020 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ import QtQuick 2.10 import QtQuick.Window 2.10 import QtQuick.Layouts 1.3 import QtQuick.Controls 2.3 import QtQuick.Controls.Material 2.1 import QtQuick.Dialogs 1.2 import io.qt.bmob.traycontroller 1.0 Page { visible: true width: parent.width height: parent.height property variant tabTitles: [qsTr("Clients"), qsTr("Directors"), qsTr("Storages"), qsTr("Log")] // Our C++ component TrayUiController { id: controller // Events triggered by our c++ code onClientsChanged: { clientsPage.model = controller.clients } onDirectorsChanged: { directorsPage.model = controller.directors } onStoragesChanged: { storagesPage.model = controller.storages } onInfoDialogTextChanged: { infoDialog.text = controller.infoDialogText infoDialog.open() } onFdTraceChanged: { traymonLogView.text = controller.fetchFile(controller.trayTracePath) traymonLogView.scrollToBottom() } Component.onCompleted: { traymonLogView.text = controller.fetchFile(controller.trayTracePath) traymonLogView.scrollToBottom() } } onVisibleChanged: { if(visible) { controller.fetchClients() controller.fetchDirectors() controller.fetchStorages() } } header: ToolBar { id: toolbar height: 48 background: Rectangle { color: "#d32f2f" } RowLayout { anchors.fill: parent anchors.leftMargin: 8 anchors.rightMargin: 8 Label { id: title text: tabTitles[bar.currentIndex] color: "white" font.pixelSize: 18 anchors.centerIn: parent } ToolButton { id: newResButton anchors.right: parent.right property int resType: bar.currentIndex onClicked: { if (resType == 0) { clientsPage.resType = 0 clientsPage.panelVisible = true } else if (resType == 1) { directorsPage.resType = 1 directorsPage.panelVisible = true } else if (resType == 2) { storagesPage.resType = 2 storagesPage.panelVisible = true } } contentItem: Text { text: bar.currentIndex >= 3 ? qsTr("") : qsTr("+") font.pixelSize: 24 opacity: enabled ? 1.0 : 0.3 color: "white" } background: Rectangle { color: newResButton.down ? 
"#b71c1c" : "#d32f2f" } } } } StackLayout { anchors.fill: parent currentIndex: bar.currentIndex ResourceListPage { id: clientsPage model: controller.clients } ResourceListPage { id: directorsPage model: controller.directors } ResourceListPage { id: storagesPage model: controller.storages } Page { id: trayMonLogPage height: parent.height width: parent.width Text { id: labelLogLevel text: "Log Level:" color: "black" anchors.left: parent.left anchors.leftMargin: 16 anchors.verticalCenter: monLogLevel.verticalCenter } TextField { id: monLogLevel height: 44 anchors.top: parent.top anchors.right: parent.right anchors.left: labelLogLevel.right placeholderText: qsTr("value") text: controller.trayLogLevel inputMethodHints: Qt.ImhDigitsOnly maximumLength: 3 onTextChanged: controller.trayLogLevel = text background: Rectangle { implicitHeight: 48 implicitWidth: 100 color: "transparent" border.color: "transparent" } } Rectangle { id: divider width: parent.width height: 1 color: "#d32f2f" anchors.bottom: monLogLevel.bottom } ScrollView { id: traymonLogView anchors.top: divider.bottom anchors.bottom: parent.bottom width: parent.width contentWidth: parent.width ScrollBar.horizontal.policy: ScrollBar.AlwaysOff ScrollBar.horizontal.interactive: false clip: true property alias text: traymonLog.text Text { id: traymonLog width: traymonLogView.width leftPadding: 12 rightPadding: 12 topPadding: 8 bottomPadding: 12 wrapMode: Text.WrapAnywhere } onVisibleChanged: { if(visible) { traymonLogView.text = controller.fetchFile(controller.trayTracePath); } } function scrollToBottom() { if (traymonLog.height > contentItem.height) { contentItem.contentY = traymonLog.height - contentItem.height } } } } } footer: TabBar { id: bar width: parent.width background: Rectangle { color: "#d32f2f" } TabButton { text: tabTitles[0] background: Rectangle { anchors.fill: parent color: bar.currentIndex === 0 ? "#ffcdd2" : "#d32f2f" } } TabButton { text: tabTitles[1] background: Rectangle { anchors.fill: parent color: bar.currentIndex === 1 ? "#ffcdd2" : "#d32f2f" } } TabButton { text: tabTitles[2] background: Rectangle { anchors.fill: parent color: bar.currentIndex === 2 ? "#ffcdd2" : "#d32f2f" } } TabButton { text: tabTitles[3] background: Rectangle { anchors.fill: parent color: bar.currentIndex === 3 ? "#ffcdd2" : "#d32f2f" } } } MessageDialog { id: confirmDialog property var res modality: Qt.ApplicationModal standardButtons: StandardButton.No | StandardButton.Yes onYes: controller.deleteResource(res) visible: false } MessageDialog { id: infoDialog modality: Qt.ApplicationModal standardButtons: StandardButton.Ok visible: false } } bacula-15.0.3/src/qt-console/tray-monitor/desktop-gui/0000755000175000017500000000000014771010173022434 5ustar bsbuildbsbuildbacula-15.0.3/src/qt-console/tray-monitor/desktop-gui/cdp-main-ui.h0000644000175000017500000001534314771010173024716 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ #ifndef CDPUI_H #define CPDUI_H #include "../tray-monitor/common.h" #include "backupservice.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include class CdpUi: public QWidget { Q_OBJECT private: QListWidget *_lw; BackupService *_bservice; private slots: void handleAddFolder() { QFileDialog dialog(this); dialog.setLabelText(QFileDialog::Accept, "Watch"); dialog.setDirectory("/"); dialog.setFileMode(QFileDialog::Directory); dialog.setOption(QFileDialog::ShowDirsOnly); int rc = dialog.exec(); if (rc) { QString folder = dialog.selectedFiles()[0]; POOLMEM *err_msg = _bservice->watch(folder.toLatin1().constData()); if (err_msg == NULL) { _lw->addItem(folder); int r = _lw->count() - 1; _lw->setCurrentRow(r); } else { QMessageBox::critical(this, "Error", err_msg); free_and_null_pool_memory(err_msg); } } } void handleRemoveFolder() { int r = _lw->currentRow(); if (r != -1) { QListWidgetItem *item = _lw->takeItem(r); _bservice->stopWatching(item->text().toLatin1().constData()); delete item; } } void handleRestoreFile() { QFileDialog dialog(this); //dialog.setLabelText(QFileDialog::Accept, "Restore"); dialog.setDirectory(_bservice->_spoolPath); dialog.setFileMode(QFileDialog::ExistingFile); dialog.setViewMode(QFileDialog::Detail); dialog.setAcceptMode(QFileDialog::AcceptOpen); int rc = dialog.exec(); if (rc) { QString file = dialog.selectedFiles()[0]; QFileDialog dialog(this); dialog.setDirectory("/"); dialog.setFileMode(QFileDialog::Directory); dialog.setOption(QFileDialog::ShowDirsOnly); rc = dialog.exec(); if (rc) { QString folder = dialog.selectedFiles()[0]; if (QFile::copy(file, folder)) { Dmsg2(0, "Copied file %s into %s\n", file.toLatin1().constData(), folder.toLatin1().constData() ); } else { Dmsg2(0, "Could not copy file %s into %s\n", file.toLatin1().constData(), folder.toLatin1().constData() ); } } } } private: void setupButtons(QHBoxLayout *parentBox) { QPushButton *add = new QPushButton("Add", this); QPushButton *remove = new QPushButton("Remove", this); //QPushButton *restore = new QPushButton("Restore", this); QVBoxLayout *vbox = new QVBoxLayout(); vbox->setSpacing(10); vbox->setSpacing(3); vbox->addStretch(1); vbox->addWidget(add); vbox->addWidget(remove); //vbox->addWidget(restore); vbox->addStretch(1); connect(add, SIGNAL(clicked()), this, SLOT(handleAddFolder())); connect(remove, SIGNAL(clicked()), this, SLOT(handleRemoveFolder())); //connect(restore, &QPushButton::clicked, this, &CdpUi::handleRestoreFile); parentBox->addLayout(vbox); } /*Reads watched folders inside the Backup Service and show them to the user */ void setupFoldersList(QHBoxLayout *parentLayout) { _lw = new QListWidget(this); std::map::iterator it; for (it = _bservice->_watchers.begin(); it != _bservice->_watchers.end(); it++) { QString fpath = QString::fromStdString(it->first); _lw->addItem(fpath); } parentLayout->addWidget(_lw); parentLayout->addSpacing(15); } void setupSpooldirLink(QVBoxLayout *parentBox, const char *spoolPath) { POOLMEM *linkText = get_pool_memory(PM_FNAME); Mmsg(linkText, "Spool Directory = %s", spoolPath, spoolPath); QLabel *slink = new QLabel(); slink->setText(linkText); slink->setOpenExternalLinks(true); slink->setTextInteractionFlags(Qt::TextSelectableByMouse | Qt::LinksAccessibleByMouse); parentBox->addWidget(slink); } void setupJournalLink(QVBoxLayout *parentBox, const char *journalPath) { POOLMEM *linkText = get_pool_memory(PM_FNAME); Mmsg(linkText, "Journal File = %s", journalPath, 
journalPath); QLabel *jlink = new QLabel(); jlink->setText(linkText); jlink->setOpenExternalLinks(true); jlink->setTextInteractionFlags(Qt::TextSelectableByMouse | Qt::LinksAccessibleByMouse); parentBox->addWidget(jlink); } void setupTitle(QVBoxLayout *parentBox) { QLabel *title = new QLabel(); title->setText("You can select folders to be watched for file changes.\n" "The Tray Monitor will create copies of the modified files \n" "so that they can be backed up by Bacula."); parentBox->addWidget(title); } void setupContent(QVBoxLayout *parentBox) { QHBoxLayout *hbox = new QHBoxLayout(); this->setupButtons(hbox); this->setupFoldersList(hbox); parentBox->addLayout(hbox); } public: CdpUi(BackupService *bs, QWidget *parent = NULL): QWidget(parent) { this->_bservice = bs; this->setWindowTitle(QApplication::translate("CDPClient", "Watched Folders", 0)); this->setToolTip("Watched Folders"); QVBoxLayout *rootBox = new QVBoxLayout(this); setupSpooldirLink(rootBox, _bservice->_spoolPath); setupJournalLink(rootBox, _bservice->_journal->_jPath); setupTitle(rootBox); setupContent(rootBox); } }; #endif bacula-15.0.3/src/qt-console/tray-monitor/respanel-ui-controller.cpp0000644000175000017500000000341314771010173025313 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #include "respanel-ui-controller.h" ResPanelUiController::ResPanelUiController(QObject *parent): QObject(parent) { m_storage = &ConfigStorage::getInstance(); } void ResPanelUiController::saveChanges() { const char *error_msg = NULL; QByteArray resNameBytes = m_resourceName.toUtf8(); QByteArray resAddressBytes = m_resourceAddress.toUtf8(); QByteArray resPasswordBytes = m_resourcePassword.toUtf8(); RESMON *newRes = new RESMON(); newRes->code = m_resCode; newRes->hdr.name = resNameBytes.data(); newRes->address = resAddressBytes.data(); newRes->password = resPasswordBytes.data(); newRes->port = m_resourcePort.toUInt(); newRes->use_remote = m_remoteClient; if(m_res == NULL) { error_msg = m_storage->addResource(newRes, m_resCode); } else { error_msg = m_storage->editResource(m_res, newRes, m_resCode); } if(error_msg == NULL) { setSuccessMessage("Changes Saved"); } else { setErrorMessage(error_msg); } m_res = m_storage->getResourceByName(newRes->code, newRes->hdr.name); delete newRes; } ResPanelUiController::~ResPanelUiController() {} bacula-15.0.3/src/qt-console/make-win320000755000175000017500000000704214771010173017343 0ustar bsbuildbsbuild#!/bin/sh # # Used to build the Win32/Win64 version of bat # # # Copyright (C) 2000-2020 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # prepare_bat_build () { local version=$1 local dpkgs_mingw if test -d ../win32/release${version}; then if [ ${version} -eq 32 ] then depkgs_mingw='depkgs-mingw32' elif [ ${version} -eq 64 ] then depkgs_mingw='depkgs-mingw-w64' else echo "wrong version - '${version}'." 
exit 1 fi cp -f ${DEPKGS}/${depkgs_mingw}/bin/Qt5Gui.dll ../win32/release${version} cp -f ${DEPKGS}/${depkgs_mingw}/bin/Qt5Core.dll ../win32/release${version} cp -f ${DEPKGS}/${depkgs_mingw}/bin/Qt5Network.dll ../win32/release${version} cp -f ${DEPKGS}/${depkgs_mingw}/bin/Qt5Widgets.dll ../win32/release${version} cp -f ${DEPKGS}/${depkgs_mingw}/lib/libQt5Gui.a ../win32/release${version} cp -f ${DEPKGS}/${depkgs_mingw}/lib/libQt5Core.a ../win32/release${version} cp -f ${DEPKGS}/${depkgs_mingw}/lib/libQt5Network.a ../win32/release${version} cp -f ${DEPKGS}/${depkgs_mingw}/lib/libQt5Widgets.a ../win32/release${version} cp -f ${DEPKGS}/${depkgs_mingw}/lib/libqtmain.a ../win32/release${version} cp -f ${DEPKGS}/${depkgs_mingw}/bin/platforms/qwindows.dll ../win32/release${version}/ rm -rf ../win32/release${version}/help mkdir ../win32/release${version}/help cp -f help/*.html ../win32/release${version}/help/ cp -f images/status.png ../win32/release${version}/help/ cp -f images/mail-message-new.png ../win32/release${version}/help/ qmake -spec $PWD/win32 -unix -o Makefile.mingw${version} bat.pro.mingw${version} # Don't know why this libGL is added sed -i s/-lGL// Makefile.mingw${version} # Linux headers are also added automatically perl -plne "s:-I/usr/[^ ]+::g" Makefile.mingw${version} > 1 perl -plne "s:-L/usr/[^ ]+::g" 1 > Makefile.mingw${version} echo "Make Windows bat" make -j3 -f Makefile.mingw${version} $2 if test -f bat.exe; then cp -f bat.exe ../win32/release${version} elif test -f release/bat.exe; then cp -f release/bat.exe ../win32/release${version} else cp -f debug/bat.exe ../win32/release${version} fi rm -f bat.exe release/bat.exe debug/bat.exe fi } prepare_tray_monitor_build () { local version=$1 cd tray-monitor qmake -spec ../win32 -unix -o Makefile.mingw${version} tray-monitor.pro.mingw${version} # Don't know why this libGL is added sed -i s/-lGL// Makefile.mingw${version} # Linux headers are also added automatically perl -plne "s:-I/usr/[^ ]+::g" Makefile.mingw${version} > 1 perl -plne "s:-L/usr/[^ ]+::g" 1 > Makefile.mingw${version} echo "Make Windows tray-monitor" make -j3 -f Makefile.mingw${version} $2 if test -f bacula-tray-monitor.exe; then cp -f bacula-tray-monitor.exe ../../win32/release${version} elif test -f release/bacula-tray-monitor.exe; then cp -f release/bacula-tray-monitor.exe ../../win32/release${version} else cp -f debug/bacula-tray-monitor.exe ../../win32/release${version} fi rm -f bacula-tray-monitor.exe release/bacula-tray-monitor.exe debug/bacula-tray-monitor.exe cd .. 
} readonly BUILD_ARCH="$1" rm -f debug/bat.exe if test -f ../config.h ; then mv -f ../config.h ../config.h.orig fi prepare_bat_build ${BUILD_ARCH:-64} if test -e ../config.h.orig ; then mv -f ../config.h.orig ../config.h fi prepare_tray_monitor_build ${BUILD_ARCH:-64} bacula-15.0.3/src/qt-console/bat.conf.in0000644000175000017500000000040414771010173017555 0ustar bsbuildbsbuild# # Bacula Administration Tool (bat) configuration file # # Copyright (C) 2000-2023 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # Director { Name = @basename@-dir DIRport = @dir_port@ address = @hostname@ Password = "@dir_password@" } bacula-15.0.3/src/stored/0000755000175000017500000000000014771010173014751 5ustar bsbuildbsbuildbacula-15.0.3/src/stored/fd_cmds.c0000644000175000017500000004005714771010173016522 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * This file handles commands from the File daemon. * * Written by Kern Sibbald, MM * * We get here because the Director has initiated a Job with * the Storage daemon, then done the same with the File daemon, * then when the Storage daemon receives a proper connection from * the File daemon, control is passed here to handle the * subsequent File daemon commands. 
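 *
 *  An informal illustration of a typical append exchange, based on the
 *  command table and canned responses defined below (argument details
 *  omitted, see the individual handlers):
 *
 *     FD: append open ...     SD: 3000 OK open ticket = <VolSessionId>
 *     FD: append data ...     SD: (bulk data handled by do_append_data())
 *     FD: append end ...      SD: 3000 OK end
 *     FD: append close ...    SD: 3000 OK close Status = <JobStatus>
 *
 *  Errors are reported with the 39xx messages (3900-3904) declared below.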
* * */ #include "bacula.h" #include "stored.h" /* Forward referenced functions */ static bool response(JCR *jcr, BSOCK *bs, const char *resp, const char *cmd); /* Imported variables */ extern STORES *me; /* Static variables */ static char ferrmsg[] = "3900 Invalid command\n"; static char OK_data[] = "3000 OK data\n"; /* Imported functions */ extern bool do_append_data(JCR *jcr); extern bool do_read_data(JCR *jcr); extern bool do_backup_job(JCR *jcr); /* Forward referenced FD commands */ static bool append_open_session(JCR *jcr); static bool append_close_session(JCR *jcr); static bool append_data_cmd(JCR *jcr); static bool append_end_session(JCR *jcr); static bool read_open_session(JCR *jcr); static bool read_data_cmd(JCR *jcr); static bool read_close_session(JCR *jcr); static bool read_control_cmd(JCR *jcr); static bool sd_testnetwork_cmd(JCR *jcr); /* Exported function */ bool get_bootstrap_file(JCR *jcr, BSOCK *bs); struct fd_cmds { const char *cmd; bool (*func)(JCR *jcr); }; /* * The following are the recognized commands from the File daemon */ static struct fd_cmds fd_cmds[] = { {"append open", append_open_session}, {"append data", append_data_cmd}, {"append end", append_end_session}, {"append close", append_close_session}, {"read open", read_open_session}, {"read data", read_data_cmd}, {"read close", read_close_session}, {"read control", read_control_cmd}, {"testnetwork", sd_testnetwork_cmd}, {NULL, NULL} /* list terminator */ }; /* Commands from the File daemon that require additional scanning */ static char read_open[] = "read open session = %127s %ld %ld %ld %ld %ld %ld\n"; /* Responses sent to the File daemon */ static char NO_open[] = "3901 Error session already open\n"; static char NOT_opened[] = "3902 Error session not opened\n"; static char ERROR_open[] = "3904 Error open session, bad parameters\n"; static char OK_end[] = "3000 OK end\n"; static char OK_close[] = "3000 OK close Status = %d\n"; static char OK_open[] = "3000 OK open ticket = %d\n"; static char ERROR_append[] = "3903 Error append data: %s\n"; /* Information sent to the Director */ static char Job_start[] = "3010 Job %s start\n"; char Job_end[] = "3099 Job %s end JobStatus=%d JobFiles=%d JobBytes=%s JobErrors=%u ErrMsg=%s\n"; /* * Run a Client Job -- Client already authorized * Note: this can be either a backup or restore or * migrate/copy job. * * Basic task here is: * - Read a command from the Client -- FD or SD * - Execute it * */ void run_job(JCR *jcr) { BSOCK *dir = jcr->dir_bsock; char ec1[30]; dir->set_jcr(jcr); Dmsg1(120, "Start run Job=%s\n", jcr->Job); dir->fsend(Job_start, jcr->Job); jcr->start_time = time(NULL); jcr->run_time = jcr->start_time; jcr->sendJobStatus(JS_Running); /* TODO: Remove when the new match_all is well tested */ jcr->use_new_match_all = use_new_match_all; /* * A migrate or copy job does both a restore (read_data) and * a backup (append_data). * Otherwise we do the commands that the client sends * which are for normal backup or restore jobs. 
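 *
 * In outline, matching the if/else chain just below:
 *  - JT_BACKUP with sd_client set: wait for the peer's "3000 OK data",
 *    then run append_data_cmd() and append_end_session().
 *  - JT_MIGRATE or JT_COPY: send "3000 OK data" first (to avoid a
 *    deadlock), wait for the peer's "3000 OK data", then read_data_cmd().
 *  - anything else: hand control to do_client_commands(), which
 *    dispatches the normal FD command table.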
*/ Dmsg3(50, "==== JobType=%c run_job=%d sd_client=%d\n", jcr->getJobType(), jcr->JobId, jcr->sd_client); if (jcr->is_JobType(JT_BACKUP) && jcr->sd_client) { jcr->session_opened = true; Dmsg0(050, "Do: receive for 3000 OK data then append\n"); if (!response(jcr, jcr->file_bsock, "3000 OK data\n", "Append data")) { Dmsg1(050, "Expect: 3000 OK data, got: %s", jcr->file_bsock->msg); Jmsg0(jcr, M_FATAL, 0, "Append data not accepted\n"); goto bail_out; } if (!append_data_cmd(jcr)) { Jmsg0(jcr, M_FATAL, 0, "Appending data failed\n"); goto bail_out; } if (!append_end_session(jcr)) { Dmsg1(050, "append_end_session() failed for JobId: %d\n", jcr->JobId); goto bail_out; } } else if (jcr->is_JobType(JT_MIGRATE) || jcr->is_JobType(JT_COPY)) { jcr->session_opened = true; /* send "3000 OK data" now to avoid a dead lock, the other side is also * waiting for one. The old peace of code was reading the "3000 OK" reply * at the end of the backup (not really appropriate). * dedup need duplex communication with the other side and need the * "3000 OK" to be out of the socket, and be handle here by the right * peace of code */ Dmsg0(DT_DEDUP|215, "send OK_data\n"); jcr->file_bsock->fsend(OK_data); jcr->is_ok_data_sent = true; Dmsg1(050, "Do: read_data_cmd file_bsock=%p\n", jcr->file_bsock); Dmsg0(050, "Do: receive for 3000 OK data then read\n"); if (!response(jcr, jcr->file_bsock, "3000 OK data\n", "Data received")) { Dmsg1(050, "Expect 3000 OK data, got: %s", jcr->file_bsock->msg); Jmsg0(jcr, M_FATAL, 0, "Read data not accepted\n"); jcr->file_bsock->signal(BNET_EOD); goto bail_out; } if (!read_data_cmd(jcr)) { Jmsg0(jcr, M_FATAL, 0, "Reading data failed\n"); goto bail_out; } jcr->file_bsock->signal(BNET_EOD); } else { /* Either a Backup or Restore job */ Dmsg0(050, "Do: do_client_commands\n"); do_client_commands(jcr); } bail_out: jcr->end_time = time(NULL); flush_jobmedia_queue(jcr); dequeue_messages(jcr); /* send any queued messages */ jcr->setJobStatus(JS_Terminated); /* Keep track of the important events */ events_send_msg(jcr, "SJ0002", EVENTS_TYPE_JOB, jcr->director->hdr.name, (intptr_t)jcr, "Job End jobid=%i job=%s status=%c", jcr->JobId, jcr->Job, jcr->JobStatus); generate_daemon_event(jcr, "JobEnd"); generate_plugin_event(jcr, bsdEventJobEnd); bash_spaces(jcr->StatusErrMsg); dir->fsend(Job_end, jcr->Job, jcr->JobStatus, jcr->JobFiles, edit_uint64(jcr->JobBytes, ec1), jcr->JobErrors, jcr->StatusErrMsg); Dmsg1(100, "==== %s", dir->msg); unbash_spaces(jcr->StatusErrMsg); dequeue_daemon_messages(jcr); dir->signal(BNET_EOD); /* send EOD to Director daemon */ free_plugins(jcr); /* release instantiated plugins */ garbage_collect_memory_pool(); return; } /* * Now talk to the Client (FD/SD) and do what he says */ void do_client_commands(JCR *jcr) { int i; bool found, quit; BSOCK *fd = jcr->file_bsock; if (!fd) { return; } fd->set_jcr(jcr); for (quit=false; !quit;) { int stat; /* Read command coming from the File daemon */ stat = fd->recv(); if (fd->is_stop()) { /* hard eof or error */ break; /* connection terminated */ } if (stat <= 0) { continue; /* ignore signals and zero length msgs */ } Dmsg1(110, "msg); found = false; for (i=0; fd_cmds[i].cmd; i++) { if (strncmp(fd_cmds[i].cmd, fd->msg, strlen(fd_cmds[i].cmd)) == 0) { found = true; /* indicate command found */ jcr->errmsg[0] = 0; if (!fd_cmds[i].func(jcr)) { /* do command */ /* Note fd->msg command may be destroyed by comm activity */ if (!job_canceled(jcr) && !jcr->is_incomplete()) { strip_trailing_junk(fd->msg); char buf[64]; 
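               /* A command handler failed: log the offending FD message
                * (asciidump() renders any binary bytes printable) together
                * with whatever is left in jcr->errmsg, then mark the job
                * ErrorTerminated below. */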
strip_trailing_junk(jcr->errmsg); Dmsg2(50, _("Command error with FD msg=\"%s\", SD hanging up. ERR=%s\n"), asciidump(fd->msg, fd->msglen, buf, sizeof(buf)), jcr->errmsg); Jmsg1(jcr, M_FATAL, 0, _("Command error with FD, SD hanging up. ERR=%s\n"), jcr->errmsg); jcr->setJobStatus(JS_ErrorTerminated); } quit = true; } break; } } if (!found) { /* command not found */ if (!job_canceled(jcr)) { Jmsg0(jcr, M_FATAL, 0, _("FD command not found\n")); Dmsg1(110, "msg); } fd->fsend(ferrmsg); break; } } fd->signal(BNET_TERMINATE); /* signal to FD job is done */ } /* * Append Data command * Open Data Channel and receive Data for archiving * Write the Data to the archive device */ static bool append_data_cmd(JCR *jcr) { BSOCK *fd = jcr->file_bsock; Dmsg1(120, "Append data: %s", fd->msg); if (jcr->session_opened) { Dmsg1(110, "msg); jcr->setJobType(JT_BACKUP); jcr->errmsg[0] = 0; if (do_append_data(jcr)) { return true; } else { fd->suppress_error_messages(true); /* ignore errors at this point */ fd->fsend(ERROR_append, jcr->errmsg); } } else { pm_strcpy(jcr->errmsg, _("Attempt to append on non-open session.\n")); fd->fsend(NOT_opened); } return false; } static bool append_end_session(JCR *jcr) { BSOCK *fd = jcr->file_bsock; Dmsg1(120, "storemsg); if (!jcr->session_opened) { pm_strcpy(jcr->errmsg, _("Attempt to close non-open session.\n")); fd->fsend(NOT_opened); return false; } return fd->fsend(OK_end); } /* * Test the FD/SD connectivity */ static bool sd_testnetwork_cmd(JCR *jcr) { BSOCK *fd = jcr->file_bsock; int64_t nb=0, nbrtt=0, rtt=0, bandwidth=0; int32_t ok=1; bool can_compress; btime_t start, end; if (scan_string(fd->msg, "testnetwork bytes=%lld rtt=%lld bw=%lld", &nb, &nbrtt, &bandwidth) != 3) { if (scan_string(fd->msg, "testnetwork bytes=%lld", &nb) != 1) { Dmsg1(50, "Invalid command %s\n", fd->msg); return false; } } /* We disable the comline compression for this test */ can_compress = fd->can_compress(); fd->clear_compress(); if (nb > 0) { /* First, get data from the FD */ while (fd->recv() > 0) { } /* Then, send back data to the FD */ memset(fd->msg, 0xBB, sizeof_pool_memory(fd->msg)); fd->msglen = sizeof_pool_memory(fd->msg); while(nb > 0 && ok > 0) { if (nb < fd->msglen) { fd->msglen = nb; } ok = fd->send(); nb -= fd->msglen; } fd->signal(BNET_EOD); } /* Compute RTT */ ok = 1; if (nbrtt > 0) { while (ok > 0) { start = get_current_btime(); ok = fd->recv(); if (ok > 0) { ok = fd->send(); end = get_current_btime() + 1; rtt += end - start; } } if (nbrtt) { fd->set_bandwidth(bandwidth); fd->set_rtt(rtt / nbrtt); } } if (can_compress) { fd->set_compress(); } return true; } /* * Append Open session command * */ static bool append_open_session(JCR *jcr) { BSOCK *fd = jcr->file_bsock; Dmsg1(120, "Append open session: %s", fd->msg); if (jcr->session_opened) { pm_strcpy(jcr->errmsg, _("Attempt to open already open session.\n")); fd->fsend(NO_open); return false; } jcr->session_opened = true; /* Send "Ticket" to File Daemon */ fd->fsend(OK_open, jcr->VolSessionId); Dmsg1(110, ">filed: %s", fd->msg); return true; } /* * Append Close session command * Close the append session and send back Statistics * (need to fix statistics) */ static bool append_close_session(JCR *jcr) { BSOCK *fd = jcr->file_bsock; Dmsg1(120, "msg); if (!jcr->session_opened) { pm_strcpy(jcr->errmsg, _("Attempt to close non-open session.\n")); fd->fsend(NOT_opened); return false; } /* Send final statistics to File daemon */ fd->fsend(OK_close, jcr->JobStatus); Dmsg1(120, ">filed: %s", fd->msg); fd->signal(BNET_EOD); /* send EOD to 
File daemon */ jcr->session_opened = false; return true; } /* * Read Data command * Open Data Channel, read the data from * the archive device and send to File * daemon. */ static bool read_data_cmd(JCR *jcr) { BSOCK *fd = jcr->file_bsock; Dmsg1(120, "Read data: %s", fd->msg); if (jcr->session_opened) { Dmsg1(120, "msg); return do_read_data(jcr); } else { pm_strcpy(jcr->errmsg, _("Attempt to read on non-open session.\n")); fd->fsend(NOT_opened); return false; } } /* * Read Open session command * * We need to scan for the parameters of the job * to be restored. */ static bool read_open_session(JCR *jcr) { BSOCK *fd = jcr->file_bsock; Dmsg1(120, "%s", fd->msg); if (jcr->session_opened) { pm_strcpy(jcr->errmsg, _("Attempt to open an already open session.\n")); fd->fsend(NO_open); return false; } if (sscanf(fd->msg, read_open, jcr->read_dcr->VolumeName, &jcr->read_VolSessionId, &jcr->read_VolSessionTime, &jcr->read_StartFile, &jcr->read_EndFile, &jcr->read_StartBlock, &jcr->read_EndBlock) == 7) { Dmsg4(100, "read_open_session got: JobId=%d Vol=%s VolSessId=%ld VolSessT=%ld\n", jcr->JobId, jcr->read_dcr->VolumeName, jcr->read_VolSessionId, jcr->read_VolSessionTime); Dmsg4(100, " StartF=%ld EndF=%ld StartB=%ld EndB=%ld\n", jcr->read_StartFile, jcr->read_EndFile, jcr->read_StartBlock, jcr->read_EndBlock); } else { pm_strcpy(jcr->errmsg, _("Cannot open session, received bad parameters.\n")); fd->fsend(ERROR_open); return false; } jcr->session_opened = true; jcr->setJobType(JT_RESTORE); /* Send "Ticket" to File Daemon */ fd->fsend(OK_open, jcr->VolSessionId); Dmsg1(110, ">filed: %s", fd->msg); return true; } static bool read_control_cmd(JCR *jcr) { BSOCK *fd = jcr->file_bsock; Dmsg1(120, "Read control: %s\n", fd->msg); if (!jcr->session_opened) { fd->fsend(NOT_opened); return false; } jcr->interactive_session = true; return true; } /* * Read Close session command * Close the read session */ static bool read_close_session(JCR *jcr) { BSOCK *fd = jcr->file_bsock; Dmsg1(120, "Read close session: %s\n", fd->msg); if (!jcr->session_opened) { fd->fsend(NOT_opened); return false; } /* Send final close msg to File daemon */ fd->fsend(OK_close, jcr->JobStatus); Dmsg1(160, ">filed: %s\n", fd->msg); fd->signal(BNET_EOD); /* send EOD to File daemon */ jcr->session_opened = false; return true; } /* * Get response from FD or SD * sent. Check that the response agrees with what we expect. * * Returns: false on failure * true on success */ static bool response(JCR *jcr, BSOCK *bs, const char *resp, const char *cmd) { int n; if (bs->is_error()) { return false; } if ((n = bs->recv()) >= 0) { if (strcmp(bs->msg, resp) == 0) { return true; } Jmsg(jcr, M_FATAL, 0, _("Bad response to %s command: wanted %s, got %s\n"), cmd, resp, bs->msg); return false; } Jmsg(jcr, M_FATAL, 0, _("Socket error on %s command: ERR=%s\n"), cmd, bs->bstrerror()); return false; } bacula-15.0.3/src/stored/vtape_dev.h0000644000175000017500000000721314771010173017102 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. 
Bacula(R) is a registered trademark of Kern Sibbald. */ /* * vtape.h - Emulate the Linux st (scsi tape) driver on file. * for regression and bug hunting purpose * */ #ifndef VTAPE_H #define VTAPE_H #include #include #include "bacula.h" #include "tape_dev.h" void vtape_debug(int level); #ifdef USE_VTAPE #define FTAPE_MAX_DRIVE 50 #define VTAPE_MAX_BLOCK 20*1024*2048; /* 20GB */ typedef enum { VT_READ_EOF, /* Need to read the entire EOF struct */ VT_SKIP_EOF /* Have already read the EOF byte */ } VT_READ_FM_MODE; class vtape: public tape_dev { private: int fd; /* Our file descriptor */ int lockfd; /* File descriptor for the lock file */ boffset_t file_block; /* size */ boffset_t max_block; boffset_t last_FM; /* last file mark (last file) */ boffset_t next_FM; /* next file mark (next file) */ boffset_t cur_FM; /* current file mark */ bool atEOF; /* End of file */ bool atEOT; /* End of media */ bool atEOD; /* End of data */ bool atBOT; /* Begin of tape */ bool online; /* volume online */ bool needEOF; /* check if last operation need eof */ int32_t last_file; /* last file of the volume */ int32_t current_file; /* current position */ int32_t current_block; /* current position */ char * lockfile; /* Name of the lock file */ void destroy(); int truncate_file(); void check_eof() { if(needEOF) weof();}; void update_pos(); bool read_fm(VT_READ_FM_MODE readfirst); public: int fsf(); int fsr(int count); int weof(); int bsf(); int bsr(int count); vtape(); ~vtape(); int get_fd(); void dump(); int tape_op(struct mtop *mt_com); int tape_get(struct mtget *mt_com); int tape_pos(struct mtpos *mt_com); /* DEVICE virtual interfaces that we redefine */ int d_close(int); int d_open(const char *pathname, int flags); int d_ioctl(int fd, ioctl_req_t request, char *op=NULL); ssize_t d_read(int, void *buffer, size_t count); ssize_t d_write(int, const void *buffer, size_t count); bool offline(DCR *dcr); boffset_t lseek(DCR *dcr, off_t offset, int whence) { return -1; } boffset_t lseek(int fd, off_t offset, int whence) { return ::lseek(fd, offset, whence); } const char *print_type(); }; #else /*!USE_VTAPE */ class vtape: public DEVICE { public: vtape(); ~vtape(); int d_open(const char *pathname, int flags) { return -1; } ssize_t d_read(int fd, void *buffer, size_t count) { return -1; } ssize_t d_write(int fd, const void *buffer, size_t count) { return -1; } int d_close(int) { return -1; } int d_ioctl(int fd, ioctl_req_t request, char *mt=NULL) { return -1; } boffset_t lseek(DCR *dcr, off_t offset, int whence) { return -1; } bool open_device(DCR *dcr, int omode) { return true; } const char *print_type(); }; #endif /* USE_VTAPE */ #endif /* !VTAPE_H */ bacula-15.0.3/src/stored/vol_mgr.c0000644000175000017500000007300514771010173016567 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * Volume management functions for Storage Daemon * * Kern Sibbald, MM * * Split from reserve.c October 2008 * */ #include "bacula.h" #include "stored.h" const int dbglvl = 150; static dlist *vol_list = NULL; static brwlock_t vol_list_lock; static dlist *read_vol_list = NULL; static bthread_mutex_t read_vol_lock = BTHREAD_MUTEX_PRIORITY(PRIO_SD_READ_VOL_LIST); /* Forward referenced functions */ static void free_vol_item(VOLRES *vol); static VOLRES *new_vol_item(DCR *dcr, const char *VolumeName); static void debug_list_volumes(const char *imsg); /* * For append volumes the key is the VolumeName. */ static int name_compare(void *item1, void *item2) { return strcmp(((VOLRES *)item1)->vol_name, ((VOLRES *)item2)->vol_name); } /* * For read volumes the key is JobId, VolumeName. */ static int read_compare(void *item1, void *item2) { VOLRES *vol1 = (VOLRES *)item1; VOLRES *vol2 = (VOLRES *)item2; if (vol1->get_jobid() == vol2->get_jobid()) { return strcmp(vol1->vol_name, vol2->vol_name); } if (vol1->get_jobid() < vol2->get_jobid()) { return -1; } return 1; } bool is_vol_list_empty() { return vol_list->empty(); } int vol_list_lock_count = 0; /* * Initialized the main volume list. Note, we are using a recursive lock. */ void init_vol_list_lock() { int errstat; if ((errstat=rwl_init(&vol_list_lock, PRIO_SD_VOL_LIST)) != 0) { berrno be; Emsg1(M_ABORT, 0, _("Unable to initialize volume list lock. ERR=%s\n"), be.bstrerror(errstat)); } } void term_vol_list_lock() { rwl_destroy(&vol_list_lock); } /* * This allows a given thread to recursively call to lock_volumes() */ void _lock_volumes(const char *file, int line) { int errstat; vol_list_lock_count++; if ((errstat=rwl_writelock_p(&vol_list_lock, file, line)) != 0) { berrno be; Emsg2(M_ABORT, 0, "rwl_writelock failure. stat=%d: ERR=%s\n", errstat, be.bstrerror(errstat)); } } void _unlock_volumes() { int errstat; vol_list_lock_count--; if ((errstat=rwl_writeunlock(&vol_list_lock)) != 0) { berrno be; Emsg2(M_ABORT, 0, "rwl_writeunlock failure. stat=%d: ERR=%s\n", errstat, be.bstrerror(errstat)); } } #define lock_read_volumes() lock_read_volumes_p(__FILE__, __LINE__) static void lock_read_volumes_p(const char *file="**Unknown", int line=0) { bthread_mutex_lock_p(&read_vol_lock, file, line); } static void unlock_read_volumes() { bthread_mutex_unlock(&read_vol_lock); } /* * Add a volume to the read list. * Note, we use VOLRES because it simplifies the code * even though, the only part of VOLRES that we need is * the volume name. The same volume may be in the list * multiple times, but each one is distinguished by the * JobId. We use JobId, VolumeName as the key. * We can get called multiple times for the same volume because * when parsing the bsr, the volume name appears multiple times. */ void add_read_volume(JCR *jcr, const char *VolumeName) { VOLRES *nvol, *vol; nvol = new_vol_item(NULL, VolumeName); nvol->set_jobid(jcr->JobId); nvol->set_reading(); lock_read_volumes(); vol = (VOLRES *)read_vol_list->binary_insert(nvol, read_compare); if (vol != nvol) { free_vol_item(nvol); Dmsg2(dbglvl, "read_vol=%s JobId=%d already in list.\n", VolumeName, jcr->JobId); } else { Dmsg2(dbglvl, "add read_vol=%s JobId=%d\n", VolumeName, jcr->JobId); } unlock_read_volumes(); } /* * Check if volume name is in the read list. 
*/ bool is_read_volume(JCR *jcr, const char *VolumeName) { VOLRES vol, *fvol; lock_read_volumes(); vol.vol_name = bstrdup(VolumeName); fvol = (VOLRES *)read_vol_list->binary_search(&vol, name_compare); free(vol.vol_name); unlock_read_volumes(); return fvol != NULL; } /* * Check if volume name is in the read list. */ bool is_writing_volume(const char *VolumeName) { VOLRES vol, *fvol; lock_volumes(); vol.vol_name = bstrdup(VolumeName); fvol = (VOLRES *)vol_list->binary_search(&vol, name_compare); free(vol.vol_name); unlock_volumes(); return fvol && fvol->is_writing(); } /* * Remove a given volume name from the read list. */ void remove_read_volume(JCR *jcr, const char *VolumeName) { VOLRES vol, *fvol; lock_read_volumes(); vol.vol_name = bstrdup(VolumeName); vol.set_jobid(jcr->JobId); fvol = (VOLRES *)read_vol_list->binary_search(&vol, read_compare); free(vol.vol_name); if (fvol) { Dmsg3(dbglvl, "remove_read_vol=%s JobId=%d found=%d\n", VolumeName, jcr->JobId, fvol!=NULL); } if (fvol) { read_vol_list->remove(fvol); free_vol_item(fvol); } unlock_read_volumes(); // pthread_cond_broadcast(&wait_next_vol); } /* * List Volumes -- this should be moved to status.c */ enum { debug_lock = true, debug_nolock = false }; static void debug_list_volumes(const char *imsg) { VOLRES *vol; POOL_MEM msg(PM_MESSAGE); if (debug_level < dbglvl) { return; } foreach_vol(vol) { if (vol->dev) { Mmsg(msg, "List %s: %s in_use=%d swap=%d slot=%d on %s device %s\n", imsg, vol->vol_name, vol->is_in_use(), vol->is_swapping(), vol->get_slot(), vol->dev->print_type(), vol->dev->print_name()); } else { Mmsg(msg, "List %s: %s in_use=%d swap=%d slot=%d no dev\n", imsg, vol->vol_name, vol->is_in_use(), vol->is_swapping(), vol->get_slot()); } Dmsg1(dbglvl, "%s", msg.c_str()); } endeach_vol(vol); } /* * List Volumes -- this should be moved to status.c */ void list_volumes(void sendit(const char *msg, int len, void *sarg), void *arg) { VOLRES *vol; POOL_MEM msg(PM_MESSAGE); int len; foreach_vol(vol) { DEVICE *dev = vol->dev; if (dev) { len = Mmsg(msg, "Reserved volume: %s on %s device %s\n", vol->vol_name, dev->print_type(), dev->print_name()); sendit(msg.c_str(), len, arg); len = Mmsg(msg, " Reader=%d writers=%d reserves=%d volinuse=%d worm=%d\n", dev->can_read()?1:0, dev->num_writers, dev->num_reserved(), vol->is_in_use(), dev->is_worm()); sendit(msg.c_str(), len, arg); } else { len = Mmsg(msg, "Volume %s no device. volinuse=%d\n", vol->vol_name, vol->is_in_use()); sendit(msg.c_str(), len, arg); } } endeach_vol(vol); lock_read_volumes(); foreach_dlist(vol, read_vol_list) { DEVICE *dev = vol->dev; if (dev) { len = Mmsg(msg, "Read volume: %s on %s device %s\n", vol->vol_name, dev->print_type(), dev->print_name()); sendit(msg.c_str(), len, arg); len = Mmsg(msg, " Reader=%d writers=%d reserves=%d volinuse=%d JobId=%d\n", dev->can_read()?1:0, dev->num_writers, dev->num_reserved(), vol->is_in_use(), vol->get_jobid()); sendit(msg.c_str(), len, arg); } else { len = Mmsg(msg, "Volume: %s no device. volinuse=%d\n", vol->vol_name, vol->is_in_use()); sendit(msg.c_str(), len, arg); } } unlock_read_volumes(); } /* * Create a Volume item to put in the Volume list * Ensure that the device points to it. 
*/ static VOLRES *new_vol_item(DCR *dcr, const char *VolumeName) { VOLRES *vol; vol = (VOLRES *)malloc(sizeof(VOLRES)); memset(vol, 0, sizeof(VOLRES)); vol->vol_name = bstrdup(VolumeName); if (dcr) { vol->dev = dcr->dev; Dmsg4(dbglvl, "new Vol=%s slot=%d at %p dev=%s\n", VolumeName, vol->get_slot(), vol->vol_name, vol->dev->print_name()); } vol->init_mutex(); vol->inc_use_count(); return vol; } static void free_vol_item(VOLRES *vol) { DEVICE *dev = NULL; vol->dec_use_count(); vol->vLock(); if (vol->use_count() > 0) { vol->vUnlock(); return; } vol->vUnlock(); free(vol->vol_name); if (vol->dev) { dev = vol->dev; } vol->destroy_mutex(); free(vol); if (dev) { dev->vol = NULL; } } /* * Put a new Volume entry in the Volume list. This * effectively reserves the volume so that it will * not be mounted again. * * If the device has any current volume associated with it, * and it is a different Volume, and the device is not busy, * we release the old Volume item and insert the new one. * * It is assumed that the device is free and locked so that * we can change the device structure. * * Some details of the Volume list handling: * * 1. The Volume list entry is attached to the drive (rather than * attached to a job as it was previously. I.e. the drive that "owns" * the volume (in use, mounted) * must point to the volume (still to be maintained in a list). * * 2. The Volume is entered in the list when a drive is reserved. * * 3. When a drive is in use, the device code must appropriately update the * volume name as it changes. * This code keeps the same list entry as long as the drive * has any volume associated with it but the volume name in the list * must be updated when the drive has a different volume mounted. * * 4. A job that has reserved a volume, can un-reserve the volume, and if the * volume is not mounted, and not reserved, and not in use, it will be * removed from the list. * * 5. If a job wants to reserve a drive with a different Volume from the one on * the drive, it can re-use the drive for the new Volume. * 6. If a job wants a Volume that is in a different drive, it can either use the * other drive or take the volume, only if the other drive is not in use or * not reserved. * * One nice aspect of this is that the reserve use count and the writer use count * already exist and are correctly programmed and will need no changes -- use * counts are always very tricky. * * The old code had a concept of "reserving" a Volume, but was changed * to reserving and using a drive. A volume is must be attached to (owned by) a * drive and can move from drive to drive or be unused given certain specific * conditions of the drive. The key is that the drive must "own" the Volume. 
* * Return: VOLRES entry on success * NULL volume busy on another drive * jcr->errmsg has details */ VOLRES *reserve_volume(DCR *dcr, const char *VolumeName) { VOLRES *vol, *nvol; DEVICE * volatile dev = dcr->dev; JCR *jcr = dcr->jcr; jcr->errmsg[0] = 0; if (job_canceled(dcr->jcr) || jcr->is_incomplete()) { Mmsg1(jcr->errmsg, _("Could not reserve volume \"%s\", because job canceled.\n"), dev->VolHdr.VolumeName); return NULL; } ASSERT2(dev != NULL, "No device in reserve_volume!"); Dmsg2(dbglvl, "enter reserve_volume=%s drive=%s\n", VolumeName, dcr->dev->print_name()); /* If acquiring to write, don't accept a Volume in read list */ if (dcr->is_writing() && is_read_volume(dcr->jcr, VolumeName)) { Mmsg1(jcr->errmsg, _("Could not reserve volume \"%s\" for append, because it will be read.\n"), dev->VolHdr.VolumeName); return NULL; } /* * We lock the reservations system here to ensure * when adding a new volume that no newly scheduled * job can reserve it. */ lock_volumes(); debug_list_volumes("begin reserve_volume"); /* * First, remove any old volume attached to this device as it * is no longer used. */ if (dev->vol) { vol = dev->vol; Dmsg4(dbglvl, "Vol attached=%s, newvol=%s volinuse=%d on %s\n", vol->vol_name, VolumeName, vol->is_in_use(), dev->print_name()); /* * Make sure we don't remove the current volume we are inserting * because it was probably inserted by another job, or it * is not being used and is marked as not reserved. */ if (strcmp(vol->vol_name, VolumeName) == 0) { Dmsg3(dbglvl, "set reserved vol=%s slot=%d dev=%s\n", VolumeName, vol->get_slot(), vol->dev->print_name()); goto get_out; /* Volume already on this device */ } else { /* Don't release a volume if it was reserved by someone other than us */ if (vol->is_in_use() && !dcr->reserved_volume) { Dmsg2(dbglvl, "Set wait(). Cannot free vol=%s for %s. It is reserved.\n", vol->vol_name, VolumeName); Mmsg1(dcr->jcr->errmsg, _("Cannot free Volume \"%s\", because it is reserved by someone else.\n"), vol->vol_name); dev->set_wait(); vol = NULL; /* vol in use */ goto get_out; } Dmsg2(dbglvl, "reserve_vol free vol=%s at %p\n", vol->vol_name, vol->vol_name); /* If old Volume is still mounted, must unload it */ if (strcmp(vol->vol_name, dev->VolHdr.VolumeName) == 0) { Dmsg2(50, "set_unload vol=%s slot=%d\n", vol->vol_name, vol->get_slot()); dev->set_unload(); /* have to unload current volume */ } free_volume(dev); /* Release old volume entry */ debug_list_volumes("reserve_vol free"); } } /* Create a new Volume entry */ nvol = new_vol_item(dcr, VolumeName); /* * Handle request for read volume for file * device, for which we assume we can open multiple * devices to read the Volume. * * Note: when doing multiple simultaneous reads * of the same volume, the volume names are not * inserted into the write volume list. */ if (dcr->is_reading() && dev->is_file()) { nvol->set_jobid(dcr->jcr->JobId); nvol->set_reading(); vol = nvol; dev->vol = vol; goto get_out; } else { vol = (VOLRES *)vol_list->binary_insert(nvol, name_compare); } /* * This part handles any write volumes or read volumes that * cannot be simultaneously on multiple devices. */ if (vol != nvol) { /* * At this point, a Volume with this name already is in the list, * so we simply release our new Volume entry. Note, this should * only happen if we are moving the volume from one drive to another. 
*/ Dmsg2(dbglvl, "Found vol=%s dev-same=%d\n", vol->vol_name, dev==vol->dev); Dmsg2(dbglvl, "reserve_vol free-tmp vol=%s at %p\n", vol->vol_name, vol->vol_name); /* * Clear dev pointer so that free_vol_item() doesn't * take away our volume. */ nvol->dev = NULL; /* don't zap dev entry */ free_vol_item(nvol); if (vol->dev) { Dmsg2(dbglvl, "dev=%s vol->dev=%s\n", dev->print_name(), vol->dev->print_name()); } /* * Check if we are trying to use the Volume on a different drive * dev is our device * vol->dev is where the Volume we want is */ if (dev != vol->dev) { /* Caller wants to switch Volume to another device */ if (!vol->dev->is_busy() && !vol->is_swapping()) { int32_t slot; Dmsg3(dbglvl, "==== Swap vol=%s from dev=%s to %s\n", VolumeName, vol->dev->print_name(), dev->print_name()); free_volume(dev); /* free any volume attached to our drive */ Dmsg3(50, "set_unload vol=%s slot=%d dev=%s\n", vol->vol_name, vol->get_slot(), dev->print_name()); dev->set_unload(); /* Unload any volume that is on our drive */ dcr->set_dev(vol->dev); /* temp point to other dev */ slot = get_autochanger_loaded_slot(dcr); /* get slot on other drive */ dcr->set_dev(dev); /* restore dev */ vol->set_slot(slot); /* save slot */ vol->dev->set_unload(); /* unload the other drive */ vol->set_swapping(); /* swap from other drive */ dev->swap_dev = vol->dev; /* remember to get this vol */ dev->set_load(); /* then reload on our drive */ vol->dev->vol = NULL; /* remove volume from other drive */ vol->dev = dev; /* point the Volume at our drive */ dev->vol = vol; /* point our drive at the Volume */ } else { if (dev) { Jmsg8(jcr, M_WARNING, 0, "Need volume for %s from other drive, " "but swap not possible. Status: reader=%d writers=%d " "reserves=%d swap=%d vol=%s from dev=%s to %s\n", dcr->is_writing()?"write":"read", vol->dev->can_read(), vol->dev->num_writers, vol->dev->num_reserved(), vol->is_swapping(), VolumeName, vol->dev->print_name(), dev->print_name()); } if (vol->is_swapping()) { DEVICE *swapdev = dev->swap_dev; if (vol && dev && swapdev) { Mmsg3(jcr->errmsg, _("Volume %s is busy swapping from %s to %s\n"), NPRT(vol->vol_name), dev->print_name(), swapdev->print_name()); } else { Mmsg1(jcr->errmsg, _("Volume %s is busy swapping.\n"), NPRT(vol->vol_name)); } } else if (vol->dev) { Mmsg2(jcr->errmsg, _("%s device %s is busy.\n"), vol->dev->print_type(), vol->dev->print_name()); } else { Mmsg1(jcr->errmsg, _("Volume %s is busy swapping.\n"), NPRT(vol->vol_name)); } debug_list_volumes("failed swap"); vol = NULL; /* device busy */ goto get_out; } } else { dev->vol = vol; } } else { dev->vol = vol; /* point to newly inserted volume */ } get_out: if (vol) { Dmsg2(dbglvl, "set in_use. vol=%s dev=%s\n", vol->vol_name, vol->dev->print_name()); vol->set_in_use(); dcr->reserved_volume = true; bstrncpy(dcr->VolumeName, vol->vol_name, sizeof(dcr->VolumeName)); } debug_list_volumes("end new volume"); unlock_volumes(); return vol; } /* * Start walk of vol chain * The proper way to walk the vol chain is: * VOLRES *vol; * foreach_vol(vol) { * ... 
* } * endeach_vol(vol); * * It is possible to leave out the endeach_vol(vol), but * in that case, the last vol referenced must be explicitly * released with: * * free_vol_item(vol); * */ VOLRES *vol_walk_start() { VOLRES *vol; lock_volumes(); vol = (VOLRES *)vol_list->first(); if (vol) { vol->inc_use_count(); Dmsg2(dbglvl, "Inc walk_start use_count=%d volname=%s\n", vol->use_count(), vol->vol_name); } unlock_volumes(); return vol; } /* * Get next vol from chain, and release current one */ VOLRES *vol_walk_next(VOLRES *prev_vol) { VOLRES *vol; lock_volumes(); vol = (VOLRES *)vol_list->next(prev_vol); if (vol) { vol->inc_use_count(); Dmsg2(dbglvl, "Inc walk_next use_count=%d volname=%s\n", vol->use_count(), vol->vol_name); } if (prev_vol) { free_vol_item(prev_vol); } unlock_volumes(); return vol; } /* * Release last vol referenced */ void vol_walk_end(VOLRES *vol) { if (vol) { lock_volumes(); Dmsg2(dbglvl, "Free walk_end use_count=%d volname=%s\n", vol->use_count(), vol->vol_name); free_vol_item(vol); unlock_volumes(); } } /* * Search for a Volume name in the Volume list. * * Returns: VOLRES entry on success * NULL if the Volume is not in the list */ static VOLRES *find_volume(const char *VolumeName) { VOLRES vol, *fvol; if (vol_list->empty()) { return NULL; } /* Do not lock reservations here */ lock_volumes(); vol.vol_name = bstrdup(VolumeName); fvol = (VOLRES *)vol_list->binary_search(&vol, name_compare); free(vol.vol_name); Dmsg2(dbglvl, "find_vol=%s found=%d\n", VolumeName, fvol!=NULL); debug_list_volumes("find_volume"); unlock_volumes(); return fvol; } /* * Search for a Volume name in the read Volume list. * * Returns: VOLRES entry on success * NULL if the Volume is not in the list */ static VOLRES *find_read_volume(const char *VolumeName) { VOLRES vol, *fvol; if (read_vol_list->empty()) { Dmsg0(dbglvl, "find_read_vol: read_vol_list empty.\n"); return NULL; } /* Do not lock reservations here */ lock_read_volumes(); vol.vol_name = bstrdup(VolumeName); /* Note, we do want a simple name_compare on volume name only here */ fvol = (VOLRES *)read_vol_list->binary_search(&vol, name_compare); free(vol.vol_name); Dmsg2(dbglvl, "find_read_vol=%s found=%d\n", VolumeName, fvol!=NULL); unlock_read_volumes(); return fvol; } /* * Free a Volume from the Volume list if it is no longer used * Note, for tape drives we want to remember where the Volume * was when last used, so rather than free the volume entry, * we simply mark it "not reserved" so when the drive is really * needed for another volume, we can reuse it. * * Returns: true if the Volume found and "removed" from the list * false if the Volume is not in the list or is in use */ bool volume_unused(DCR *dcr) { DEVICE *dev = dcr->dev; if (!dev->vol) { Dmsg1(dbglvl, "vol_unused: no vol on %s\n", dev->print_name()); debug_list_volumes("null vol cannot unreserve_volume"); return false; } Dmsg2(dbglvl, "Clear in_use vol=%s slot=%d\n", dev->vol->vol_name, dev->vol->get_slot()); dev->vol->clear_in_use(); if (dev->vol->is_swapping()) { Dmsg1(dbglvl, "vol_unused: vol being swapped on %s\n", dev->print_name()); debug_list_volumes("swapping vol cannot free_volume"); return false; } /* * If this is a tape, we do not free the volume, rather we wait * until the autoloader unloads it, or until another tape is * explicitly read in this drive. This allows the SD to remember * where the tapes are or last were. 
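 * The same is true for an interactive restore session
 * (dcr->session_interactive), where the whole volume list is kept;
 * only for other non-tape devices do we actually call free_volume() here.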
*/ Dmsg5(dbglvl, "set not reserved vol=%s slot=%d writers=%d reserves=%d dev=%s\n", dev->vol->vol_name, dev->vol->get_slot(), dev->num_writers, dev->num_reserved(), dev->print_name()); if (dev->is_tape() || dev->is_autochanger()) { return true; } else if (dcr->session_interactive) { /* When the session is interactive, we want to keep the entire volume list */ return true; } else { /* * Note, this frees the volume reservation entry, but the * file descriptor remains open with the OS. */ return free_volume(dev); } } /* * Unconditionally release the volume entry * Note: read volumes are not in the list, so * do not attempt to remove them. */ bool free_volume(DEVICE *dev) { VOLRES *vol; lock_volumes(); vol = dev->vol; if (vol == NULL) { Dmsg1(dbglvl, "No vol on dev %s\n", dev->print_name()); unlock_volumes(); return false; } /* Don't free a volume while it is being swapped */ if (!vol->is_swapping()) { Dmsg2(dbglvl, "Clear in_use vol=%s slot=%d\n", vol->vol_name, vol->get_slot()); dev->vol = NULL; if (vol->is_writing()) { vol_list->remove(vol); } Dmsg3(dbglvl, "Remove volume %s slot=%d dev=%s\n", vol->vol_name, vol->get_slot(), dev->print_name()); free_vol_item(vol); debug_list_volumes("free_volume"); } else { Dmsg1(dbglvl, "=== Cannot clear. Swapping vol=%s\n", vol->vol_name); } unlock_volumes(); return true; } /* Create the Volume list */ void create_volume_lists() { VOLRES *vol = NULL; if (vol_list == NULL) { vol_list = New(dlist(vol, &vol->link)); } if (read_vol_list == NULL) { read_vol_list = New(dlist(vol, &vol->link)); } } /* * Free normal append volumes list */ static void free_volume_list() { VOLRES *vol; if (vol_list) { lock_volumes(); foreach_dlist(vol, vol_list) { if (vol->dev) { Dmsg2(dbglvl, "free vol_list Volume=%s dev=%s\n", vol->vol_name, vol->dev->print_name()); } else { Dmsg1(dbglvl, "free vol_list Volume=%s No dev\n", vol->vol_name); } free(vol->vol_name); vol->vol_name = NULL; vol->destroy_mutex(); } delete vol_list; vol_list = NULL; unlock_volumes(); } } /* Release all Volumes from the list */ void free_volume_lists() { VOLRES *vol; free_volume_list(); /* normal append list */ if (read_vol_list) { lock_read_volumes(); foreach_dlist(vol, read_vol_list) { if (vol->dev) { Dmsg2(dbglvl, "free read_vol_list Volume=%s dev=%s\n", vol->vol_name, vol->dev->print_name()); } else { Dmsg1(dbglvl, "free read_vol_list Volume=%s No dev\n", vol->vol_name); } free(vol->vol_name); vol->vol_name = NULL; vol->destroy_mutex(); } delete read_vol_list; read_vol_list = NULL; unlock_read_volumes(); } } /* * Determine if caller can write on volume. * If not, return reason in jcr->errmsg */ bool DCR::can_i_write_volume() { VOLRES *vol; vol = find_read_volume(VolumeName); if (vol) { Mmsg(jcr->errmsg, "Found in read list; cannot write vol=%s\n", VolumeName); Dmsg1(100, "Found in read list; cannot write vol=%s\n", VolumeName); return false; } if (dev->device->set_vol_immutable && dev->check_for_immutable(VolumeName) && (dev->check_volume_protection_time(VolumeName) == false)) { MmsgD1(dbglvl, jcr->errmsg, _("Skipping Volume %s, " "because Volume's Protection Period has not expired yet\n"), VolumeName); return false; } if (dev->device->set_vol_read_only && dev->check_for_read_only(-1, VolumeName) && dev->check_volume_protection_time(VolumeName) == false) { MmsgD1(dbglvl, jcr->errmsg, _("Skipping Volume %s, " "because Volume's Protection Period has not expired yet\n"), VolumeName); return false; } return can_i_use_volume(); } /* * Determine if caller can read or write volume. 
* If not, return reason in jcr->errmsg */ bool DCR::can_i_use_volume() { bool rtn = true; VOLRES *vol; if (job_canceled(jcr) || jcr->is_incomplete()) { Mmsg(jcr->errmsg, "Job is canceled\n"); return false; } lock_volumes(); vol = find_volume(VolumeName); if (!vol) { Dmsg1(dbglvl, "Vol=%s not in use.\n", VolumeName); goto get_out; /* vol not in list */ } ASSERT2(vol->dev != NULL, "No device in can_i_use_volume!"); if (dev == vol->dev) { /* same device OK */ Dmsg1(dbglvl, "Vol=%s on same dev.\n", VolumeName); goto get_out; } else { Dmsg3(dbglvl, "Vol=%s on %s we have %s\n", VolumeName, vol->dev->print_name(), dev->print_name()); } /* ***FIXME*** check this ... */ if (!vol->dev->is_busy()) { Dmsg2(dbglvl, "Vol=%s dev=%s not busy.\n", VolumeName, vol->dev->print_name()); goto get_out; } else { Dmsg2(dbglvl, "Vol=%s dev=%s busy.\n", VolumeName, vol->dev->print_name()); } Mmsg(jcr->errmsg, "Volume=%s in use on another device %s.\n", VolumeName, vol->dev->print_name()); Dmsg2(dbglvl, "Volume=%s in use on another device %s.\n", VolumeName, vol->dev->print_name()); rtn = false; get_out: unlock_volumes(); return rtn; } /* * Create a temporary copy of the volume list. We do this, * to avoid having the volume list locked during the * call to reserve_device(), which would cause a deadlock. * Note, we may want to add an update counter on the vol_list * so that if it is modified while we are traversing the copy * we can take note and act accordingly (probably redo the * search at least a few times). */ dlist *dup_vol_list(JCR *jcr) { dlist *temp_vol_list; VOLRES *vol = NULL; Dmsg0(dbglvl, "lock volumes\n"); Dmsg0(dbglvl, "duplicate vol list\n"); temp_vol_list = New(dlist(vol, &vol->link)); foreach_vol(vol) { VOLRES *nvol; VOLRES *tvol = (VOLRES *)malloc(sizeof(VOLRES)); memset(tvol, 0, sizeof(VOLRES)); tvol->vol_name = bstrdup(vol->vol_name); tvol->dev = vol->dev; tvol->init_mutex(); tvol->inc_use_count(); nvol = (VOLRES *)temp_vol_list->binary_insert(tvol, name_compare); if (tvol != nvol) { tvol->dev = NULL; /* don't zap dev entry */ free_vol_item(tvol); Pmsg0(000, "Logic error. Duplicating vol list hit duplicate.\n"); Jmsg(jcr, M_WARNING, 0, "Logic error. Duplicating vol list hit duplicate.\n"); } } endeach_vol(vol); Dmsg0(dbglvl, "unlock volumes\n"); return temp_vol_list; } /* * Free the specified temp list. */ void free_temp_vol_list(dlist *temp_vol_list) { dlist *save_vol_list; lock_volumes(); save_vol_list = vol_list; vol_list = temp_vol_list; free_volume_list(); /* release temp_vol_list */ vol_list = save_vol_list; Dmsg0(dbglvl, "deleted temp vol list\n"); Dmsg0(dbglvl, "unlock volumes\n"); unlock_volumes(); debug_list_volumes("after free temp table"); } bacula-15.0.3/src/stored/generic_driver.h0000644000175000017500000001122514771010173020112 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * Routines for writing Cloud drivers * * Written by Norbert Bizet, June MMXVIII * */ #ifndef GENERIC_CLOUD_DRIVER_H #define GENERIC_CLOUD_DRIVER_H #include "cloud_driver.h" class generic_driver: public cloud_driver { protected: uint32_t buf_len; char *host_name; char *bucket_name; char *access_key; char *secret_key; char *region; int32_t protocol; int32_t uri_style; char *driver_command; char *blob_endpoint; char *file_endpoint; char *queue_endpoint; char *table_endpoint; char *endpoint_suffix; uint32_t max_concurrent_uploads; uint32_t max_concurrent_downloads; uint32_t objects_default_tier; /* read_cb_type : returns the number of proceed bytes from char*. char* is the data buffer. size_t is the buffer size. void* is arg (opaque handler) */ typedef size_t (read_cb_type)(char*, size_t, void*); typedef struct { read_cb_type* fct; void *arg; } read_callback; /* write_cb_type : returns the number of proceed bytes from char*. char* is the data buffer. size_t is the buffer size. void* is arg (opaque handler) int& is the cb file status. */ #define WRITE_CB_EOF 1 #define WRITE_CB_ERR 2 typedef size_t (write_cb_type)(char*, size_t, void*, int&); typedef struct { write_cb_type* fct; void *arg; } write_callback; /* accessors with difference prototypes */ int call_fct( const char* fct_name, const char* volume_name, int part_number, read_callback *read_cb, write_callback *write_cb, cancel_callback *cancel_cb, POOLMEM *&err, const char* cache_path_name = NULL ); int call_fct(const char* fct_name, const char* volume_name, const char* part_name, read_callback *read_cb, write_callback *write_cb, cancel_callback *cancel_cb, POOLMEM *&err); /* actual working function */ int call_fct( char* cmd, read_callback *read_cb, write_callback *write_cb, cancel_callback *cancel_cb, POOLMEM *&err); public: generic_driver(); virtual ~generic_driver(); bool copy_cache_part_to_cloud(transfer *xfer); bool move_cloud_part(const char *VolumeName, uint32_t apart , const char *to, cancel_callback *cancel_cb, POOLMEM *&err, int& exists); int copy_cloud_part_to_cache(transfer *xfer); bool restore_cloud_object(transfer *xfer, const char *cloud_fname); bool is_waiting_on_server(transfer *xfer); bool truncate_cloud_volume(const char *volume_name, ilist *trunc_parts, cancel_callback *cancel_cb, POOLMEM *&err); bool clean_cloud_volume(const char *VolumeName, cleanup_cb_type *cb, cleanup_ctx_type *ctx, cancel_callback *cancel_cb, POOLMEM *&err); bool init(CLOUD *cloud, POOLMEM *&err); bool term(POOLMEM *&err); bool start_of_job(POOLMEM *&err); bool end_of_job(POOLMEM *&err); bool get_cloud_volume_parts_list(const char* volume_name, ilist *parts, cancel_callback *cancel_cb, POOLMEM *&err); bool get_cloud_volumes_list(alist *volumes, cancel_callback *cancel_cb, POOLMEM *&err); private: POOLMEM *access_key_env; POOLMEM *secret_key_env; POOLMEM *bucket_env; POOLMEM *host_env; POOLMEM *region_env; POOLMEM *protocol_env; POOLMEM *uri_style_env; POOLMEM *blob_endpoint_env; POOLMEM *file_endpoint_env; POOLMEM *queue_endpoint_env; POOLMEM *table_endpoint_env; POOLMEM *endpoint_suffix_env; POOLMEM *max_concurrent_uploads_env; POOLMEM *max_concurrent_downloads_env; POOLMEM *upload_limit_env; POOLMEM *download_limit_env; POOLMEM *transfer_priority_env; POOLMEM *transfer_retention_env; POOLMEM *unset_lctime_env; POOLMEM *debug_env; POOLMEM *working_path_env; POOLMEM *home_path_env; POOLMEM *objects_default_tier_env; char *envs[24]; }; #endif /* GENERIC_CLOUD_DRIVER_H*/ 
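
/*
 * Illustrative sketch (not used by the driver itself) of a callback matching
 * the write_cb_type typedef above.  The data direction is not spelled out in
 * this header; the example assumes, from the WRITE_CB_EOF status value, that
 * the callback acts as a data source which fills "buf" with up to "count"
 * bytes and signals end-of-data or errors through the status reference:
 *
 *    static size_t example_write_cb(char *buf, size_t count, void *arg, int &status)
 *    {
 *       FILE *fp = (FILE *)arg;            // opaque handler supplied by the caller
 *       size_t n = fread(buf, 1, count, fp);
 *       if (n < count) {
 *          status = ferror(fp) ? WRITE_CB_ERR : WRITE_CB_EOF;
 *       }
 *       return n;                          // number of bytes produced
 *    }
 */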
bacula-15.0.3/src/stored/org_stored_prepare.c0000644000175000017500000000154214771010173021004 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #include "bacula.h" #include "stored.h" #include "prepare.h" bool prepare(JCR *jcr, prepare_ctx &pctx, DEV_RECORD &rec) { return false; } bool prepare_sd_end(JCR *jcr, prepare_ctx &pctx, DEV_RECORD &rec) { return false; } bacula-15.0.3/src/stored/mount.c0000644000175000017500000007353514771010173016274 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * * Routines for handling mounting tapes for reading and for * writing. * * Kern Sibbald, August MMII * */ #include "bacula.h" /* pull in global headers */ #include "stored.h" /* pull in Storage Deamon headers */ /* Make sure some EROFS is defined */ #ifndef EROFS /* read-only file system */ #define EROFS -1 /* make impossible errno */ #endif static pthread_mutex_t mount_mutex = PTHREAD_MUTEX_INITIALIZER; enum { try_next_vol = 1, try_read_vol, try_error, try_default }; enum { check_next_vol = 1, check_ok, check_read_vol, check_error }; /* * If release is set, we rewind the current volume, * which we no longer want, and ask the user (console) * to mount the next volume. * * Continue trying until we get it, and then ensure * that we can write on it. * * This routine returns a 0 only if it is REALLY * impossible to get the requested Volume. * * This routine is entered with the device blocked, but not * locked. * */ bool DCR::mount_next_write_volume() { int retry = 0; bool ask = false, recycle, autochanger; DCR *dcr = this; Enter(200); set_ameta(); Dmsg2(100, "Enter mount_next_volume(release=%d) dev=%s\n", dev->must_unload(), dev->print_name()); init_device_wait_timers(dcr); P(mount_mutex); /* * Attempt to mount the next volume. If something non-fatal goes * wrong, we come back here to re-try (new op messages, re-read * Volume, ...) */ mount_next_vol: Dmsg1(100, "mount_next_vol retry=%d\n", retry); /* Ignore retry if this is poll request */ if (dev->is_nospace() || retry++ > 4) { /* Last ditch effort before giving up, force operator to respond */ VolCatInfo.Slot = 0; V(mount_mutex); if (!dir_ask_sysop_to_mount_volume(dcr, SD_APPEND)) { Jmsg(jcr, M_FATAL, 0, _("Too many errors trying to mount %s device %s.\n"), dev->print_type(), dev->print_name()); goto no_lock_bail_out; } P(mount_mutex); Dmsg1(90, "Continue after dir_ask_sysop_to_mount. 
must_load=%d\n", dev->must_load()); } if (job_canceled(jcr) || jcr->is_incomplete()) { Jmsg(jcr, M_FATAL, 0, _("Job %d canceled.\n"), jcr->JobId); goto bail_out; } recycle = false; if (dev->must_unload()) { ask = true; /* ask operator to mount tape */ } do_unload(); do_swapping(SD_APPEND); do_load(SD_APPEND); if (!find_a_volume()) { goto bail_out; } if (job_canceled(jcr) || jcr->is_incomplete()) { goto bail_out; } Dmsg3(100, "After find_a_volume. Vol=%s Slot=%d VolType=%d\n", getVolCatName(), VolCatInfo.Slot, VolCatInfo.VolCatType); dev->notify_newvol_in_attached_dcrs(getVolCatName()); /* * Get next volume and ready it for append * This code ensures that the device is ready for * writing. We start from the assumption that there * may not be a tape mounted. * * If the device is a file, we create the output * file. If it is a tape, we check the volume name * and move the tape to the end of data. * */ dcr->setVolCatInfo(false); /* out of date when Vols unlocked */ if (autoload_device(dcr, SD_APPEND, NULL) > 0) { autochanger = true; ask = false; } else { autochanger = false; VolCatInfo.Slot = 0; if (dev->is_autochanger() && !VolCatInfo.InChanger) { ask = true; /* not in changer, do not retry */ } else { ask = retry >= 2; } } Dmsg1(100, "autoload_dev returns %d\n", autochanger); /* * If we autochanged to correct Volume or (we have not just * released the Volume AND we can automount) we go ahead * and read the label. If there is no tape in the drive, * we will fail, recurse and ask the operator the next time. */ if (!dev->must_unload() && dev->is_tape() && dev->has_cap(CAP_AUTOMOUNT)) { Dmsg0(250, "(1)Ask=0\n"); ask = false; /* don't ask SYSOP this time */ } /* Don't ask if not removable */ if (!dev->is_removable()) { Dmsg0(250, "(2)Ask=0\n"); ask = false; } Dmsg2(100, "Ask=%d autochanger=%d\n", ask, autochanger); if (ask) { V(mount_mutex); dcr->setVolCatInfo(false); /* out of date when Vols unlocked */ if (!dir_ask_sysop_to_mount_volume(dcr, SD_APPEND)) { Dmsg0(150, "Error return ask_sysop ...\n"); goto no_lock_bail_out; } P(mount_mutex); } if (job_canceled(jcr) || jcr->is_incomplete()) { goto bail_out; } Dmsg3(100, "want vol=%s devvol=%s dev=%s\n", VolumeName, dev->VolHdr.VolumeName, dev->print_name()); if (dev->poll && dev->has_cap(CAP_CLOSEONPOLL)) { if (!dev->close(this)) { Jmsg(jcr, M_ERROR, 0, "%s", dev->errmsg); } free_volume(dev); } /* Try autolabel if enabled */ Dmsg1(100, "Try open Vol=%s\n", getVolCatName()); if (!dev->open_device(dcr, OPEN_READ_WRITE)) { Dmsg1(100, "Try autolabel Vol=%s\n", getVolCatName()); if (!dev->poll) { try_autolabel(false); /* try to create a new volume label */ } } while (!dev->open_device(dcr, OPEN_READ_WRITE)) { Dmsg1(100, "open_device failed: ERR=%s", dev->bstrerror()); if (dev->is_file() && dev->is_removable()) { bool ok = true; Dmsg0(150, "call scan_dir_for_vol\n"); if (ok && dev->scan_dir_for_volume(dcr)) { if (dev->open_device(dcr, OPEN_READ_WRITE)) { break; /* got a valid volume */ } } } if (try_autolabel(false) == try_read_vol) { break; /* created a new volume label */ } /* ***FIXME*** if autochanger, before giving up try unload and load */ Jmsg4(jcr, M_WARNING, 0, _("Open of %s device %s Volume \"%s\" failed: ERR=%s\n"), dev->print_type(), dev->print_name(), dcr->VolumeName, dev->bstrerror()); /* If not removable, Volume is broken. This is a serious issue here. 
*/ if (dev->is_file() && !dev->is_removable()) { Dmsg3(40, "Volume \"%s\" not loaded on %s device %s.\n", dcr->VolumeName, dev->print_type(), dev->print_name()); if (dev->dev_errno == EACCES || dev->dev_errno == EROFS) { mark_volume_read_only(); } else { mark_volume_in_error(); } } else { Dmsg0(100, "set_unload\n"); if (dev->dev_errno == EACCES || dev->dev_errno == EROFS) { mark_volume_read_only(); } dev->set_unload(); /* force ask sysop */ ask = true; } Dmsg0(100, "goto mount_next_vol\n"); goto mount_next_vol; } /* * Now check the volume label to make sure we have the right tape mounted */ read_volume: switch (check_volume_label(ask, autochanger)) { case check_next_vol: Dmsg0(50, "set_unload\n"); dev->set_unload(); /* want a different Volume */ Dmsg0(100, "goto mount_next_vol\n"); goto mount_next_vol; case check_read_vol: goto read_volume; case check_error: goto bail_out; case check_ok: break; } /* * Check that volcatinfo is good */ if (!dev->haveVolCatInfo()) { Dmsg0(100, "Do not have volcatinfo\n"); if (!find_a_volume()) { goto mount_next_vol; } if (strcmp(dev->VolHdr.VolumeName, dcr->VolCatInfo.VolCatName) == 0) { dev->set_volcatinfo_from_dcr(this); } else { /* We have a volume, but not the current one */ Dmsg2(5, "Will need to re-mount volumes %s %s\n", dev->VolHdr.VolumeName, dcr->VolCatInfo.VolCatName); goto mount_next_vol; } } /* * See if we have a fresh tape or a tape with data. * * Note, if the LabelType is PRE_LABEL, it was labeled * but never written. If so, rewrite the label but set as * VOL_LABEL. We rewind and return the label (reconstructed) * in the block so that in the case of a new tape, data can * be appended just after the block label. If we are writing * a second volume, the calling routine will write the label * before writing the overflow block. * * If the tape is marked as Recycle, we rewrite the label. */ recycle = strcmp(dev->VolCatInfo.VolCatStatus, "Recycle") == 0; if (dev->VolHdr.LabelType == PRE_LABEL || recycle) { dcr->WroteVol = false; if (!dev->rewrite_volume_label(dcr, recycle)) { mark_volume_in_error(); goto mount_next_vol; } /* Set the append flag on the volume */ if (dev->device->set_vol_append_only) { if (!dev->set_append_only(getVolCatName(), &dev->errmsg)) { Jmsg(jcr, M_WARNING, 0, _("Unable to set the APPEND flag on the volume: %s, err: %s\n"), getVolCatName(), dev->bstrerror()); goto mount_next_vol; } } } else { /* * OK, at this point, we have a valid Bacula label, but * we need to position to the end of the volume, since we are * just now putting it into append mode. */ Dmsg1(100, "Device previously written, moving to end of data. 
Expect %lld bytes\n", dev->VolCatInfo.VolCatBytes); Jmsg(jcr, M_INFO, 0, _("Volume \"%s\" previously written, moving to end of data.\n"), VolumeName); if (!dev->eod(dcr)) { Dmsg3(050, "Unable to position to end of data on %s device %s: ERR=%s\n", dev->print_type(), dev->print_name(), dev->bstrerror()); Jmsg(jcr, M_ERROR, 0, _("Unable to position to end of data on %s device %s: ERR=%s\n"), dev->print_type(), dev->print_name(), dev->bstrerror()); mark_volume_in_error(); goto mount_next_vol; } if (!dev->is_eod_valid(dcr)) { Dmsg0(100, "goto mount_next_vol\n"); goto mount_next_vol; } dev->VolCatInfo.VolCatMounts++; /* Update mounts */ Dmsg1(150, "update volinfo mounts=%d\n", dev->VolCatInfo.VolCatMounts); if (!dir_update_volume_info(dcr, false, false)) { goto bail_out; } /* Return an empty block */ empty_block(block); /* we used it for reading so set for write */ } dev->set_append(); Dmsg1(150, "set APPEND, normal return from mount_next_write_volume. dev=%s\n", dev->print_name()); V(mount_mutex); Leave(200); return true; bail_out: V(mount_mutex); no_lock_bail_out: Leave(200); return false; } /* * This routine is meant to be called once the first pass * to ensure that we have a candidate volume to mount. * Otherwise, we ask the sysop to created one. * Note, mount_mutex is already locked on entry and thus * must remain locked on exit from this function. */ bool DCR::find_a_volume() { DCR *dcr = this; bool ok; if (!is_suitable_volume_mounted()) { bool have_vol = false; /* Do we have a candidate volume? */ if (dev->vol) { bstrncpy(VolumeName, dev->vol->vol_name, sizeof(VolumeName)); have_vol = dir_get_volume_info(this, VolumeName, GET_VOL_INFO_FOR_WRITE); } /* * Get Director's idea of what tape we should have mounted. * in dcr->VolCatInfo */ if (!have_vol) { Dmsg0(200, "Before dir_find_next_appendable_volume.\n"); while (!dir_find_next_appendable_volume(dcr)) { Dmsg0(200, "not dir_find_next\n"); if (job_canceled(jcr) || jcr->is_incomplete()) { return false; } /* * Unlock the mount mutex while waiting or * the Director for a new volume */ V(mount_mutex); if (dev->must_wait()) { int retries = 5; Dmsg0(40, "No appendable volume. Calling wait_for_device\n"); wait_for_device(dcr, retries); ok = true; } else { ok = dir_ask_sysop_to_create_appendable_volume(dcr); } P(mount_mutex); if (!ok || job_canceled(jcr) || jcr->is_incomplete()) { return false; } Dmsg0(150, "Again dir_find_next_append...\n"); } dev->clear_wait(); } } if (dcr->haveVolCatInfo()) { return true; } return dir_get_volume_info(dcr, VolumeName, GET_VOL_INFO_FOR_WRITE); } int DCR::check_volume_label(bool &ask, bool &autochanger) { int vol_label_status; Enter(200); set_ameta(); /* * If we are writing to a stream device, ASSUME the volume label * is correct. */ if (dev->has_cap(CAP_STREAM)) { vol_label_status = VOL_OK; create_volume_header(dev, VolumeName, "Default", false); dev->VolHdr.LabelType = PRE_LABEL; } else { vol_label_status = dev->read_dev_volume_label(this); } if (job_canceled(jcr)) { goto check_bail_out; } Dmsg2(150, "Want dirVol=%s dirStat=%s\n", VolumeName, VolCatInfo.VolCatStatus); /* * At this point, dev->VolCatInfo has what is in the drive, if anything, * and dcr->VolCatInfo has what the Director wants. 
*/ switch (vol_label_status) { case VOL_OK: Dmsg1(150, "Vol OK name=%s\n", dev->VolHdr.VolumeName); dev->VolCatInfo = VolCatInfo; /* structure assignment */ break; /* got a Volume */ case VOL_NAME_ERROR: VOLUME_CAT_INFO dcrVolCatInfo, devVolCatInfo; char saveVolumeName[MAX_NAME_LENGTH]; Dmsg2(40, "Vol NAME Error Have=%s, want=%s\n", dev->VolHdr.VolumeName, VolumeName); if (dev->is_volume_to_unload()) { ask = true; goto check_next_volume; } #ifdef xxx /* If not removable, Volume is broken */ if (!dev->is_removable()) { Jmsg3(jcr, M_WARNING, 0, _("Volume \"%s\" not loaded on %s device %s.\n"), VolumeName, dev->print_type(), dev->print_name()); Dmsg3(40, "Volume \"%s\" not loaded on %s device %s.\n", VolumeName, dev->print_type(), dev->print_name()); mark_volume_in_error(); goto check_next_volume; } #endif /* * OK, we got a different volume mounted. First save the * requested Volume info (dcr) structure, then query if * this volume is really OK. If not, put back the desired * volume name, mark it not in changer and continue. */ dcrVolCatInfo = VolCatInfo; /* structure assignment */ devVolCatInfo = dev->VolCatInfo; /* structure assignment */ /* Check if this is a valid Volume in the pool */ bstrncpy(saveVolumeName, VolumeName, sizeof(saveVolumeName)); bstrncpy(VolumeName, dev->VolHdr.VolumeName, sizeof(VolumeName)); if (!dir_get_volume_info(this, VolumeName, GET_VOL_INFO_FOR_WRITE)) { POOL_MEM vol_info_msg; pm_strcpy(vol_info_msg, jcr->dir_bsock->msg); /* save error message */ /* Restore desired volume name, note device info out of sync */ /* This gets the info regardless of the Pool */ bstrncpy(VolumeName, dev->VolHdr.VolumeName, sizeof(VolumeName)); if (autochanger && !dir_get_volume_info(this, VolumeName, GET_VOL_INFO_FOR_READ)) { /* * If we get here, we know we cannot write on the Volume, * and we know that we cannot read it either, so it * is not in the autochanger. */ mark_volume_not_inchanger(); } dev->VolCatInfo = devVolCatInfo; /* structure assignment */ dev->set_unload(); /* unload this volume */ Jmsg(jcr, M_WARNING, 0, _("Director wanted Volume \"%s\".\n" " Current Volume \"%s\" not acceptable because:\n" " %s"), dcrVolCatInfo.VolCatName, dev->VolHdr.VolumeName, vol_info_msg.c_str()); ask = true; /* Restore saved DCR before continuing */ bstrncpy(VolumeName, saveVolumeName, sizeof(VolumeName)); VolCatInfo = dcrVolCatInfo; /* structure assignment */ goto check_next_volume; } /* * This was not the volume we expected, but it is OK with * the Director, so use it. */ Dmsg1(150, "Got new Volume name=%s\n", VolumeName); dev->VolCatInfo = VolCatInfo; /* structure assignment */ Dmsg1(100, "Call reserve_volume=%s\n", dev->VolHdr.VolumeName); if (reserve_volume(this, dev->VolHdr.VolumeName) == NULL) { if (!jcr->errmsg[0]) { Jmsg3(jcr, M_WARNING, 0, _("Could not reserve volume %s on %s device %s\n"), dev->VolHdr.VolumeName, dev->print_type(), dev->print_name()); } else { Jmsg(jcr, M_WARNING, 0, "%s", jcr->errmsg); } ask = true; dev->setVolCatInfo(false); setVolCatInfo(false); goto check_next_volume; } break; /* got a Volume */ case VOL_ENC_ERROR: volume_is_unavailable(); goto check_next_volume; break; /* * At this point, we assume we have a blank tape mounted. */ case VOL_IO_ERROR: /* Fall through wanted */ case VOL_NO_LABEL: switch (try_autolabel(true)) { case try_next_vol: goto check_next_volume; case try_read_vol: goto check_read_volume; case try_error: goto check_bail_out; case try_default: break; } /* NOTE! Fall-through wanted. 
*/ case VOL_NO_MEDIA: default: Dmsg0(200, "VOL_NO_MEDIA or default.\n"); /* Send error message */ if (!dev->poll) { } else { Dmsg1(200, "Msg suppressed by poll: %s\n", jcr->errmsg); } ask = true; /* Needed, so the medium can be changed */ if (dev->requires_mount()) { if (!dev->close(this)) { Jmsg(jcr, M_ERROR, 0, "%s", dev->errmsg); } free_volume(dev); } goto check_next_volume; } Leave(200); return check_ok; check_next_volume: dev->setVolCatInfo(false); setVolCatInfo(false); Leave(200); return check_next_vol; check_bail_out: volume_unused(this); Leave(200); return check_error; check_read_volume: Leave(200); return check_read_vol; } bool DCR::is_suitable_volume_mounted() { bool ok; /* Volume mounted? */ if (dev->VolHdr.VolumeName[0] == 0 || dev->swap_dev || dev->must_unload()) { return false; /* no */ } bstrncpy(VolumeName, dev->VolHdr.VolumeName, sizeof(VolumeName)); ok = dir_get_volume_info(this, VolumeName, GET_VOL_INFO_FOR_WRITE); if (!ok) { Dmsg1(40, "dir_get_volume_info failed: %s", jcr->errmsg); dev->set_wait(); } return ok; } bool DCR::do_unload() { if (dev->must_unload()) { Dmsg1(100, "must_unload release %s\n", dev->print_name()); release_volume(); } return false; } bool DCR::do_load(bool is_writing) { if (dev->must_load()) { Dmsg1(100, "Must load dev=%s\n", dev->print_name()); if (autoload_device(this, is_writing, NULL) > 0) { dev->clear_load(); return true; } return false; } return true; } void DCR::do_swapping(bool is_writing) { /* * See if we are asked to swap the Volume from another device * if so, unload the other device here, and attach the * volume to our drive. */ if (dev->swap_dev) { if (dev->swap_dev->must_unload()) { if (dev->vol) { dev->swap_dev->set_slot(dev->vol->get_slot()); } Dmsg2(100, "Swap unloading slot=%d %s\n", dev->swap_dev->get_slot(), dev->swap_dev->print_name()); unload_dev(this, dev->swap_dev); } if (dev->vol) { dev->vol->clear_swapping(); Dmsg1(100, "=== set in_use vol=%s\n", dev->vol->vol_name); dev->vol->clear_in_use(); dev->VolHdr.VolumeName[0] = 0; /* don't yet have right Volume */ } else { Dmsg1(100, "No vol on dev=%s\n", dev->print_name()); } if (dev->swap_dev->vol) { Dmsg2(100, "Vol=%s on dev=%s\n", dev->swap_dev->vol->vol_name, dev->swap_dev->print_name()); } Dmsg2(100, "Set swap_dev=NULL for dev=%s swap_dev=%s\n", dev->print_name(), dev->swap_dev->print_name()); dev->swap_dev = NULL; } else { if (dev->vol) { Dmsg1(100, "No swap_dev set. dev->vol=%p\n", dev->vol); } else { Dmsg1(100, "No swap_dev set. dev->vol=%p\n", dev->vol); } } } /* * If permitted, we label the device, make sure we can do * it by checking that the VolCatBytes is zero => not labeled, * once the Volume is labeled we don't want to label another * blank tape with the same name. For disk, we go ahead and * label it anyway, because the OS insures that there is only * one Volume with that name. * As noted above, at this point dcr->VolCatInfo has what * the Director wants and dev->VolCatInfo has info on the * previous tape (or nothing). 
* * Return codes are: * try_next_vol label failed, look for another volume * try_read_vol labeled volume, now re-read the label * try_error hard error (catalog update) * try_default I couldn't do anything */ int DCR::try_autolabel(bool opened) { DCR *dcr = this; if (dev->poll && !dev->is_tape()) { Dmsg0(100, "No autolabel because polling.\n"); return try_default; /* if polling, don't try to create new labels */ } /* For a tape require it to be opened and read before labeling */ if (!opened && (dev->is_tape() || dev->is_null())) { return try_default; } if (dev->has_cap(CAP_LABEL) && (VolCatInfo.VolCatBytes == 0 || (!dev->is_tape() && strcmp(VolCatInfo.VolCatStatus, "Recycle") == 0))) { Dmsg1(40, "Create new volume label vol=%s\n", VolumeName); /* Create a new Volume label and write it to the device */ if (!dev->write_volume_label(dcr, VolumeName, pool_name, false, /* no relabel */ false /* defer label */)) { Dmsg2(100, "write_vol_label failed. vol=%s, pool=%s\n", VolumeName, pool_name); if (opened) { mark_volume_in_error(); } return try_next_vol; } Dmsg0(150, "dir_update_vol_info. Set Append\n"); /* Copy Director's info into the device info */ dev->VolCatInfo = VolCatInfo; /* structure assignment */ if (!dir_update_volume_info(dcr, true, true)) { /* indicate tape labeled */ Dmsg3(100, "Update_vol_info failed no autolabel Volume \"%s\" on %s device %s.\n", VolumeName, dev->print_type(), dev->print_name()); return try_error; } Jmsg(dcr->jcr, M_INFO, 0, _("Labeled new Volume \"%s\" on %s device %s.\n"), VolumeName, dev->print_type(), dev->print_name()); Dmsg3(100, "Labeled new Volume \"%s\" on %s device %s.\n", VolumeName, dev->print_type(), dev->print_name()); return try_read_vol; /* read label we just wrote */ } else { Dmsg4(40, "=== Cannot autolabel: cap_label=%d VolCatBytes=%lld is_tape=%d VolCatStatus=%s\n", dev->has_cap(CAP_LABEL), VolCatInfo.VolCatBytes, dev->is_tape(), VolCatInfo.VolCatStatus); } if (!dev->has_cap(CAP_LABEL) && VolCatInfo.VolCatBytes == 0) { Jmsg(jcr, M_WARNING, 0, _("%s device %s not configured to autolabel Volumes.\n"), dev->print_type(), dev->print_name()); } #ifdef xxx /* If not removable, Volume is broken */ if (!dev->is_removable()) { Jmsg3(jcr, M_WARNING, 0, _("Volume \"%s\" not loaded on %s device %s.\n"), VolumeName, dev->print_type(), dev->print_name()); Dmsg3(40, "Volume \"%s\" not loaded on %s device %s.\n", VolumeName, dev->print_type(), dev->print_name()); mark_volume_in_error(); return try_next_vol; } #endif return try_default; } /* * The volume is unavailable (temporarily?) don't used it this time */ void DCR::volume_is_unavailable() { Jmsg(jcr, M_INFO, 0, _("The Volume \"%s\" is unavailable now.\n"), VolumeName); volume_unused(this); Dmsg0(50, "set_unload\n"); dev->set_unload(); /* must get a new volume */ } /* * Mark volume in error in catalog */ void DCR::mark_volume_in_error() { Jmsg(jcr, M_INFO, 0, _("Marking Volume \"%s\" in Error in Catalog.\n"), VolumeName); dev->VolCatInfo = VolCatInfo; /* structure assignment */ dev->setVolCatStatus("Error"); Dmsg0(150, "dir_update_vol_info. Set Error.\n"); dir_update_volume_info(this, false, false); volume_unused(this); Dmsg0(50, "set_unload\n"); dev->set_unload(); /* must get a new volume */ } /* * Mark volume read_only in catalog */ void DCR::mark_volume_read_only() { Jmsg(jcr, M_INFO, 0, _("Marking Volume \"%s\" Read-Only in Catalog.\n"), VolumeName); dev->VolCatInfo = VolCatInfo; /* structure assignment */ dev->setVolCatStatus("Read-Only"); Dmsg0(150, "dir_update_vol_info. 
Set Read-Only.\n"); dir_update_volume_info(this, false, false); volume_unused(this); Dmsg0(50, "set_unload\n"); dev->set_unload(); /* must get a new volume */ } /* * The Volume is not in the correct slot, so mark this * Volume as not being in the Changer. */ void DCR::mark_volume_not_inchanger() { Jmsg(jcr, M_ERROR, 0, _("Autochanger Volume \"%s\" not found in slot %d.\n" " Setting InChanger to zero in catalog.\n"), getVolCatName(), VolCatInfo.Slot); dev->VolCatInfo = VolCatInfo; /* structure assignment */ VolCatInfo.InChanger = false; dev->VolCatInfo.InChanger = false; Dmsg0(400, "update vol info in mount\n"); dir_update_volume_info(this, true, false); /* set new status */ } /* * Either because we are going to hang a new volume, or because * of explicit user request, we release the current volume. */ void DCR::release_volume() { unload_autochanger(this, -1); if (WroteVol) { Jmsg0(jcr, M_ERROR, 0, _("Hey!!!!! WroteVol non-zero !!!!!\n")); Pmsg0(190, "Hey!!!!! WroteVol non-zero !!!!!\n"); } if (dev->is_open() && (!dev->is_tape() || !dev->has_cap(CAP_ALWAYSOPEN))) { generate_plugin_event(jcr, bsdEventDeviceClose, this); if (!dev->close(this)) { Jmsg(jcr, M_ERROR, 0, "%s", dev->errmsg); } } /* If we have not closed the device, then at least rewind the tape */ if (dev->is_open()) { dev->offline_or_rewind(this); } /* * Erase all memory of the current volume */ free_volume(dev); dev->block_num = dev->file = 0; dev->EndBlock = dev->EndFile = 0; memset(&dev->VolCatInfo, 0, sizeof(dev->VolCatInfo)); dev->clear_volhdr(); /* Force re-read of label */ dev->clear_labeled(); dev->clear_read(); dev->clear_append(); dev->label_type = B_BACULA_LABEL; VolumeName[0] = 0; Dmsg0(190, "release_volume\n"); } /* * Insanity check * * Check to see if the tape position as defined by the OS is * the same as our concept. If it is not, * it means the user has probably manually rewound the tape. * Note, we check only if num_writers == 0, but this code will * also work fine for any number of writers. If num_writers > 0, * we probably should cancel all jobs using this device, or * perhaps even abort the SD, or at a minimum, mark the tape * in error. Another strategy with num_writers == 0, would be * to rewind the tape and do a new eod() request. */ bool DCR::is_tape_position_ok() { if (dev->is_tape() && dev->num_writers == 0) { int32_t file = dev->get_os_tape_file(); if (file >= 0 && file != (int32_t)dev->get_file()) { Jmsg(jcr, M_ERROR, 0, _("Invalid tape position on volume \"%s\"" " on device %s. Expected %d, got %d\n"), dev->VolHdr.VolumeName, dev->print_name(), dev->get_file(), file); /* * If the current file is greater than zero, it means we probably * have some bad count of EOF marks, so mark tape in error. Otherwise * the operator might have moved the tape, so we just release it * and try again. */ if (file > 0) { mark_volume_in_error(); } release_volume(); return false; } } return true; } /* * If we are reading, we come here at the end of the tape * and see if there are more volumes to be mounted. 
*/ bool mount_next_read_volume(DCR *dcr) { DEVICE *dev = dcr->dev; JCR *jcr = dcr->jcr; Dmsg2(90, "NumReadVolumes=%d CurReadVolume=%d\n", jcr->NumReadVolumes, jcr->CurReadVolume); volume_unused(dcr); /* release current volume */ /* * End Of Tape -- mount next Volume (if another specified) */ if (jcr->NumReadVolumes > 1 && jcr->CurReadVolume < jcr->NumReadVolumes) { dev->Lock(); if (!dev->close(dcr)) { Jmsg(jcr, M_ERROR, 0, "%s", dev->errmsg); } dev->set_read(); dcr->set_reserved_for_read(); dev->Unlock(); if (!acquire_device_for_read(dcr)) { Jmsg3(jcr, M_FATAL, 0, _("Cannot open %s Dev=%s, Vol=%s for reading.\n"), dev->print_type(), dev->print_name(), dcr->VolumeName); jcr->setJobStatus(JS_FatalError); /* Jmsg is not working for *SystemJob* */ return false; } return true; /* next volume mounted */ } Dmsg0(90, "End of Device reached.\n"); return false; } bacula-15.0.3/src/stored/protos.h0000644000175000017500000003255014771010173016455 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Protypes for stored * * Written by Kern Sibbald, MM */ /* From stored.c */ uint32_t new_VolSessionId(); /* From acquire.c */ DCR *acquire_device_for_append(DCR *dcr); bool acquire_device_for_read(DCR *dcr, uint32_t retry_count=10); bool release_device(DCR *dcr); bool clean_device(DCR *dcr); DCR *new_dcr(JCR *jcr, DCR *dcr, DEVICE *dev, bool writing=true); void free_dcr(DCR *dcr); /* From append.c */ bool send_attrs_to_dir(JCR *jcr, DEV_RECORD *rec); /* From askdir.c */ enum get_vol_info_rw { GET_VOL_INFO_FOR_WRITE, GET_VOL_INFO_FOR_READ }; bool dir_get_volume_info(DCR *dcr, const char *VolumeName, enum get_vol_info_rw); bool dir_find_next_appendable_volume(DCR *dcr); bool dir_update_volume_info(DCR *dcr, bool label, bool update_LastWritten, bool use_dcr=false); bool dir_ask_sysop_to_create_appendable_volume(DCR *dcr); bool dir_ask_sysop_to_mount_volume(DCR *dcr, bool read_access); bool dir_update_file_attributes(DCR *dcr, DEV_RECORD *rec); bool dir_create_jobmedia_record(DCR *dcr, bool zero=false); void create_jobmedia_queue(JCR *jcr); bool flush_jobmedia_queue(JCR *jcr); bool dir_update_device(JCR *jcr, DEVICE *dev); bool dir_update_changer(JCR *jcr, AUTOCHANGER *changer); bool dir_create_filemedia_record(DCR *dcr); bool dir_get_pool_info(DCR *dcr, VOLUME_CAT_INFO *volcatinfo); bool flush_filemedia_queue(JCR*); /* class AskDirHandler allows btool's utilities to overwrite the functions above * and even to overwrite the functions in their own sub-class like in btape */ class AskDirHandler { /* this class should be an abstract class */ public: AskDirHandler() {} virtual ~AskDirHandler() {} virtual bool dir_find_next_appendable_volume(DCR *dcr) { return 1;} virtual bool dir_update_volume_info(DCR *dcr, bool relabel, bool update_LastWritten, bool use_dcr) { return 1; } virtual bool dir_create_jobmedia_record(DCR *dcr, bool zero) { return 1; } virtual bool dir_create_filemedia_record(DCR *dcr) { return 1; } virtual bool flush_jobmedia_queue(JCR *jcr) { 
return true; } virtual bool dir_ask_sysop_to_create_appendable_volume(DCR *dcr) { return 1; } virtual bool dir_update_file_attributes(DCR *dcr, DEV_RECORD *rec) { return 1;} virtual bool dir_ask_sysop_to_mount_volume(DCR *dcr, bool writing); virtual bool dir_get_volume_info(DCR *dcr, const char *VolumeName, enum get_vol_info_rw writing); }; class BtoolsAskDirHandler: public AskDirHandler { /* if AskDirHandler was an abstract class, implement the method here */ public: BtoolsAskDirHandler() {} virtual ~BtoolsAskDirHandler() {} }; /* activate the functions provided by the btool's handler */ AskDirHandler *init_askdir_handler(AskDirHandler *new_askdir_handler); /* Get the current handler defined */ AskDirHandler *get_askdir_handler(); /* authenticate.c */ class SDAuthenticateDIR: public AuthenticateBase { public: SDAuthenticateDIR(JCR *jcr); virtual ~SDAuthenticateDIR() {}; bool authenticate_director(); }; int authenticate_filed(JCR *jcr, BSOCK *fd, int FDVersion); bool send_hello_and_authenticate_sd(JCR *jcr, char *Job); /* From autochanger.c */ bool init_autochangers(); int autoload_device(DCR *dcr, bool writing, BSOCK *dir); bool autochanger_cmd(DCR *dcr, BSOCK *dir, const char *cmd); bool unload_autochanger(DCR *dcr, int loaded); bool unload_dev(DCR *dcr, DEVICE *dev); void edit_device_codes(DCR *dcr, POOLMEM **omsg, const char *imsg, const char *cmd); int get_autochanger_loaded_slot(DCR *dcr); /* From block.c */ void dump_block(DEVICE *dev, DEV_BLOCK *b, const char *msg, bool force=false); DEV_BLOCK *dup_block(DEV_BLOCK *eblock); void init_block_write(DEV_BLOCK *block); void empty_block(DEV_BLOCK *block); void free_block(DEV_BLOCK *block); void print_block_read_errors(JCR *jcr, DEV_BLOCK *block); bool is_block_empty(DEV_BLOCK *block); bool terminate_writing_volume(DCR *dcr); /* From block_util.c */ bool terminate_writing_volume(DCR *dcr); bool is_user_volume_size_reached(DCR *dcr, bool quiet); bool check_for_newvol_or_newfile(DCR *dcr); bool do_new_file_bookkeeping(DCR *dcr); void reread_last_block(DCR *dcr); bool is_pool_size_reached(DCR *dcr, bool quiet); /* From butil.c -- utilities for SD tool programs */ void setup_me(); void print_ls_output(const char *fname, const char *link, int type, struct stat *statp); JCR *setup_jcr(const char *name, char *dev_name, BSR *bsr, const char *VolumeName, bool writing, bool read_dedup_data=true, uint32_t retry_count=10); void display_tape_error_status(JCR *jcr, DEVICE *dev); /* From dev.c */ void sd_list_loaded_drivers(alist *list); DEVICE *init_dev(JCR *jcr, DEVRES *device, bool adata=false, bstatcollect *statcollector=NULL, bool clone=false); bool can_open_mounted_dev(DEVICE *dev); bool load_dev(DEVICE *dev); int write_block(DEVICE *dev); uint32_t status_dev(DEVICE *dev); void attach_jcr_to_device(DEVICE *dev, JCR *jcr); void detach_jcr_from_device(DEVICE *dev, JCR *jcr); JCR *next_attached_jcr(DEVICE *dev, JCR *jcr); void init_device_wait_timers(DCR *dcr); void init_jcr_device_wait_timers(JCR *jcr); bool double_dev_wait_time(DEVICE *dev); /* From device.c */ bool first_open_device(DCR *dcr); bool fixup_device_block_write_error(DCR *dcr, int retries=4); void set_start_vol_position(DCR *dcr); void set_new_volume_parameters(DCR *dcr); void set_new_file_parameters(DCR *dcr); /* From dircmd.c */ void *handle_connection_request(void *arg); extern int use_new_match_all; /* Temp variable, to be removed when the new code is tested */ /* From fd_cmds.c */ void run_job(JCR *jcr); void do_fd_commands(JCR *jcr); bool do_handle_command(JCR *jcr); void 
do_client_commands(JCR *jcr); /* From job.c */ void stored_free_jcr(JCR *jcr); /* From hello.c */ bool validate_dir_hello(JCR* jcr); bool send_hello_ok(BSOCK *bs); bool send_sorry(BSOCK *bs); bool send_hello_sd(JCR *jcr, char *Job, int tlspsk); bool send_hello_client(JCR *jcr, char *Job); bool read_client_hello(JCR *jcr); bool is_client_connection(BSOCK *bs); void handle_client_connection(BSOCK *fd); /* From label.c */ void create_session_label(DCR *dcr, DEV_RECORD *rec, int label); void create_volume_header(DEVICE *dev, const char *VolName, const char *PoolName, bool no_prelabel); #define ANSI_VOL_LABEL 0 #define ANSI_EOF_LABEL 1 #define ANSI_EOV_LABEL 2 bool write_ansi_ibm_labels(DCR *dcr, int type, const char *VolName); int read_ansi_ibm_label(DCR *dcr); bool write_session_label(DCR *dcr, int label); int dump_label_record(DEVICE *dev, DEV_RECORD *rec, int verbose, bool check_err); bool unser_volume_label(DEVICE *dev, DEV_RECORD *rec); bool unser_session_label(SESSION_LABEL *label, DEV_RECORD *rec); /* From locks.c */ void _lock_device(const char *file, int line, DEVICE *dev); void _unlock_device(const char *file, int line, DEVICE *dev); void _block_device(const char *file, int line, DEVICE *dev, int state); void _unblock_device(const char *file, int line, DEVICE *dev); void _give_back_device_block(const char *file, int line, DEVICE *dev, bsteal_lock_t *hold); /* From match_bsr.c */ int match_bsr(BSR *bsr, DEV_RECORD *rec, VOLUME_LABEL *volrec, SESSION_LABEL *sesrec, JCR *jcr); int match_bsr_block(BSR *bsr, DEV_BLOCK *block); void position_bsr_block(BSR *bsr, DEV_BLOCK *block); BSR *find_next_bsr(BSR *root_bsr, DEVICE *dev); bool is_this_bsr_done(JCR *jcr, BSR *bsr, DEV_RECORD *rec); uint64_t get_bsr_start_addr(BSR *bsr); /* From mount.c */ bool mount_next_read_volume(DCR *dcr); /* From parse_bsr.c */ BSR *new_bsr(); BSR *parse_bsr(JCR *jcr, char *lf); void dump_bsr(DEVICE *dev, BSR *bsr, bool recurse); void free_bsr(BSR *bsr); void free_restore_volume_list(JCR *jcr); void create_restore_volume_list(JCR *jcr, bool add_to_read_list); /* From record.c */ void create_filemedia(DCR *dcr, DEV_BLOCK *block, DEV_RECORD *rec); const char *FI_to_ascii(char *buf, int fi); const char *stream_to_ascii(char *buf, int stream, int fi); const char *stream_to_ascii_ex(char *buf, int stream, int fi); bool write_record_to_block(DCR *dcr, DEV_RECORD *rec); bool can_write_record_to_block(DEV_BLOCK *block, DEV_RECORD *rec); bool read_record_from_block(DCR *dcr, DEV_RECORD *rec); DEV_RECORD *new_record(); void free_record(DEV_RECORD *rec); void empty_record(DEV_RECORD *rec); uint64_t get_record_address(DEV_RECORD *rec); uint64_t get_record_start_address(DEV_RECORD *rec); bool flush_adata_to_device(DCR *dcr); /* From record_util.c */ void dump_record(DEV_RECORD *rec); /* From read_record.c */ bool mount_next_vol(JCR *jcr, DCR *dcr, BSR *bsr, SESSION_LABEL *sessrec, bool *should_stop, bool record_cb(DCR *dcr, DEV_RECORD *rec), bool mount_cb(DCR *dcr)); BSR *position_to_first_file(JCR *jcr, DCR *dcr, BSR *bsr); bool read_records(DCR *dcr, bool record_cb(DCR *dcr, DEV_RECORD *rec), bool mount_cb(DCR *dcr)); /* From reserve.c */ void init_reservations_lock(); void term_reservations_lock(); void send_drive_reserve_messages(JCR *jcr, void sendit(const char *msg, int len, void *sarg), void *arg); bool find_suitable_device_for_job(JCR *jcr, RCTX &rctx); int search_res_for_device(RCTX &rctx); void release_reserve_messages(JCR *jcr); extern int reservations_lock_count; /* From status.c */ bool 
send_shstore_blocked_status(DEVICE *dev, POOLMEM **msg, int *len); void list_shstore(DEVICE *dev, OutputWriter *ow); bool list_shstore(DEVICE *dev, POOLMEM **msg, int *len); void _dbg_list_one_device(DEVICE *dev, const char *f, int l); #define dbg_list_one_device(x, dev) if (chk_dbglvl(x)) \ _dbg_list_one_device(dev, __FILE__, __LINE__) /* From vol_mgr.c */ bool is_writing_volume(const char *VolumeName); void init_vol_list_lock(); void term_vol_list_lock(); VOLRES *reserve_volume(DCR *dcr, const char *VolumeName); bool free_volume(DEVICE *dev); bool is_vol_list_empty(); dlist *dup_vol_list(JCR *jcr); void free_temp_vol_list(dlist *temp_vol_list); bool volume_unused(DCR *dcr); void create_volume_lists(); void free_volume_lists(); void list_volumes(void sendit(const char *msg, int len, void *sarg), void *arg); extern int vol_list_lock_count; void add_read_volume(JCR *jcr, const char *VolumeName); void remove_read_volume(JCR *jcr, const char *VolumeName); /* From spool.c */ bool begin_data_spool (DCR *dcr); bool discard_data_spool (DCR *dcr); bool commit_data_spool (DCR *dcr); bool are_attributes_spooled (JCR *jcr); bool begin_attribute_spool (JCR *jcr); bool discard_attribute_spool (JCR *jcr); bool commit_attribute_spool (JCR *jcr); bool write_block_to_spool_file (DCR *dcr); void list_spool_stats (void sendit(const char *msg, int len, void *sarg), void *arg); /* From tape_alert.c */ extern void alert_callback(void *ctx, const char *short_msg, const char *long_msg, char *Volume, int severity, int flags, int alertno, utime_t alert_time); /* From wait.c */ int wait_for_sysop(DCR *dcr); bool wait_for_any_device(JCR *jcr, int &retries); bool wait_for_device(DCR *dcr, int &retries); /* stored_conf.c */ void store_protocol(LEX *lc, RES_ITEM *item, int index, int pass); void store_uri_style(LEX *lc, RES_ITEM *item, int index, int pass); void store_truncate(LEX *lc, RES_ITEM *item, int index, int pass); bool find_truncate_option(const char* truncate, uint32_t& truncate_option); void store_upload(LEX *lc, RES_ITEM *item, int index, int pass); void store_devtype(LEX *lc, RES_ITEM *item, int index, int pass); void store_enctype(LEX *lc, RES_ITEM *item, int index, int pass); void store_cloud_driver(LEX *lc, RES_ITEM *item, int index, int pass); void store_maxblocksize(LEX *lc, RES_ITEM *item, int index, int pass); void store_transfer_priority(LEX *lc, RES_ITEM *item, int index, int pass); void store_objects_default_tier(LEX *lc, RES_ITEM *item, int index, int pass); /* from sdcollect.c */ bool update_permanent_stats(void *data); void initialize_statcollector(); void start_collector_threads(); void terminate_collector_threads(); #define SIR_OK 0 #define SIR_BREAK 1 #define SIR_CONTINUE 2 int sir_init_loop(DCR *dcr, DEVICE **dev, DEV_BLOCK **block, bool record_cb(DCR *dcr, DEV_RECORD *rec), bool mount_cb(DCR *dcr)); bool sir_init(DCR *dcr); /* from BEE */ #if BEEF void store_dedup_driver(LEX *lc, RES_ITEM *item, int index, int pass); void bee_setdebug_cmd_parse_options(JCR *, char *); void reset_bsr(BSR *root); #else #define bee_setdebug_cmd_parse_options(a, b) #endif bacula-15.0.3/src/stored/read_records.c0000644000175000017500000004735614771010173017570 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. 
You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * * This routine provides a routine that will handle all * the gory little details of reading a record from a Bacula * archive. It uses a callback to pass you each record in turn, * as well as a callback for mounting the next tape. It takes * care of reading blocks, applying the bsr, ... * Note, this routine is really the heart of the restore routines, * and we are *really* bit pushing here so be careful about making * any modifications. * * Kern E. Sibbald, August MMII * */ #include "bacula.h" #include "stored.h" /* Forward referenced functions */ static void handle_session_record(DEVICE *dev, DEV_RECORD *rec, SESSION_LABEL *sessrec); static bool try_repositioning(JCR *jcr, DEV_RECORD *rec, DCR *dcr); #ifdef DEBUG static char *rec_state_bits_to_str(DEV_RECORD *rec); #endif static const int dbglvl = 150; static const int no_FileIndex = -999999; bool mount_next_vol(JCR *jcr, DCR *dcr, BSR *bsr, SESSION_LABEL *sessrec, bool *should_stop, bool record_cb(DCR *dcr, DEV_RECORD *rec), bool mount_cb(DCR *dcr)) { bool ok = true; DEVICE *dev = dcr->dev; *should_stop = false; /* We need an other volume */ volume_unused(dcr); /* mark volume unused */ if (!mount_cb(dcr)) { *should_stop = true; /* * Create EOT Label so that Media record may * be properly updated because this is the last * tape. */ DEV_RECORD *trec = new_record(); trec->FileIndex = EOT_LABEL; trec->Addr = dev->get_full_addr(); ok = record_cb(dcr, trec); free_record(trec); if (jcr->mount_next_volume) { jcr->mount_next_volume = false; dev->clear_eot(); } return ok; } jcr->mount_next_volume = false; /* * The Device can change at the end of a tape, so refresh it * from the dcr. */ dev = dcr->dev; /* * We just have a new tape up, now read the label (first record) * and pass it off to the callback routine, then continue * most likely reading the previous record. */ dcr->read_block_from_device(NO_BLOCK_NUMBER_CHECK); DEV_RECORD *trec = new_record(); read_record_from_block(dcr, trec); handle_session_record(dev, trec, sessrec); ok = record_cb(dcr, trec); free_record(trec); position_to_first_file(jcr, dcr, bsr); /* We jump to the specified position */ return ok; } /* * This subroutine reads all the records and passes them back to your * callback routine (also mount routine at EOM). * You must not change any values in the DEV_RECORD packet */ bool read_records(DCR *dcr, bool record_cb(DCR *dcr, DEV_RECORD *rec), bool mount_cb(DCR *dcr)) { JCR *jcr = dcr->jcr; DEVICE *dev = dcr->dev; DEV_BLOCK *block = dcr->block; DEV_RECORD *rec = NULL; uint32_t record; int32_t lastFileIndex; bool ok = true; bool done = false; bool should_stop; SESSION_LABEL sessrec; dlist *recs; /* linked list of rec packets open */ char ed1[50]; bool first_block = true; int ret; recs = New(dlist(rec, &rec->link)); if (!sir_init(dcr)) { /* We go to the first_file unless we need to reposition during an * interactive restore session (the reposition will be done with a different * BSR in the for loop */ position_to_first_file(jcr, dcr, jcr->bsr); } jcr->mount_next_volume = false; for ( ; ok && !done; ) { if (job_canceled(jcr)) { ok = false; break; } ASSERT2(!dcr->dev->adata, "Called with adata block. 
Wrong!"); ret = sir_init_loop(dcr, &dev, &block, record_cb, mount_cb); if (ret == SIR_CONTINUE) { continue; } else if (ret == SIR_BREAK) { break; } if (! first_block || dev->dev_type != B_FIFO_DEV ) { if (dev->at_eot() || !dcr->read_block_from_device(CHECK_BLOCK_NUMBERS)) { if (dev->at_eot()) { Jmsg(jcr, M_INFO, 0, _("End of Volume \"%s\" at addr=%s on device %s.\n"), dcr->VolumeName, dev->print_addr(ed1, sizeof(ed1), dev->EndAddr), dev->print_name()); ok = mount_next_vol(jcr, dcr, jcr->bsr, &sessrec, &should_stop, record_cb, mount_cb); /* Might have changed after the mount request */ dev = dcr->dev; block = dcr->block; if (should_stop) { break; } continue; } else if (dev->at_eof()) { Dmsg3(200, "EOF at addr=%s on device %s, Volume \"%s\"\n", dev->print_addr(ed1, sizeof(ed1), dev->EndAddr), dev->print_name(), dcr->VolumeName); continue; } else if (dev->is_short_block()) { Jmsg1(jcr, M_ERROR, 0, "%s", dev->errmsg); continue; } else { /* I/O error or strange end of tape */ display_tape_error_status(jcr, dev); if (forge_on || jcr->ignore_label_errors) { dev->fsr(1); /* try skipping bad record */ Pmsg0(000, _("Did fsr in attempt to skip bad record.\n")); continue; /* try to continue */ } ok = false; /* stop everything */ break; } } Dmsg1(dbglvl, "Read new block at pos=%s\n", dev->print_addr(ed1, sizeof(ed1))); } first_block = false; #ifdef if_and_when_FAST_BLOCK_REJECTION_is_working /* this does not stop when file/block are too big */ if (!match_bsr_block(jcr->bsr, block)) { if (try_repositioning(jcr, rec, dcr)) { break; /* get next volume */ } continue; /* skip this record */ } #endif /* * Get a new record for each Job as defined by * VolSessionId and VolSessionTime */ bool found = false; foreach_dlist(rec, recs) { if (rec->VolSessionId == block->VolSessionId && rec->VolSessionTime == block->VolSessionTime) { /* When the previous Block of the current record is not correctly ordered, * if we concat the previous record to the next one, the result is probably * incorrect. At least the vacuum command should not use this kind of record */ if (rec->remainder) { if (rec->BlockNumber != (block->BlockNumber - 1) && rec->BlockNumber != block->BlockNumber) { Dmsg6(10, "invalid: rec=%ld block=%ld state=%s in %s VolSessionId=%ld VolSessionTime=%ld\n", rec->BlockNumber, block->BlockNumber, rec_state_bits_to_str(rec), dcr->VolumeName, block->VolSessionId, block->VolSessionTime); rec->invalid = true; /* We can discard the current data if needed. The code is very * tricky in the read_records loop, so it's better to not * introduce new subtle errors. */ if (dcr->discard_invalid_records) { empty_record(rec); } } } found = true; break; } } if (!found) { rec = new_record(); recs->prepend(rec); Dmsg3(dbglvl, "New record for state=%s SI=%d ST=%d\n", rec_state_bits_to_str(rec), block->VolSessionId, block->VolSessionTime); } Dmsg4(dbglvl, "Before read rec loop. stat=%s blk=%d rem=%d invalid=%d\n", rec_state_bits_to_str(rec), block->BlockNumber, rec->remainder, rec->invalid); record = 0; rec->state_bits = 0; rec->BlockNumber = block->BlockNumber; lastFileIndex = no_FileIndex; Dmsg1(dbglvl, "Block %s empty\n", is_block_marked_empty(rec)?"is":"NOT"); for (rec->state_bits=0; ok && !is_block_marked_empty(rec); ) { /* Stop to read records if we need to seek */ if (dcr->need_to_reposition()) { Dmsg3(200, "!read-break. state_bits=%s blk=%d rem=%d\n", rec_state_bits_to_str(rec), block->BlockNumber, rec->remainder); break; } if (!read_record_from_block(dcr, rec)) { Dmsg3(200, "!read-break. 
state_bits=%s blk=%d rem=%d\n", rec_state_bits_to_str(rec), block->BlockNumber, rec->remainder); break; } Dmsg5(dbglvl, "read-OK. state_bits=%s blk=%d rem=%d volume:addr=%s:%llu\n", rec_state_bits_to_str(rec), block->BlockNumber, rec->remainder, NPRT(rec->VolumeName), rec->Addr); /* * At this point, we have at least a record header. * Now decide if we want this record or not, but remember * before accessing the record, we may need to read again to * get all the data. */ record++; Dmsg6(dbglvl, "recno=%d state_bits=%s blk=%d SI=%d ST=%d FI=%d\n", record, rec_state_bits_to_str(rec), block->BlockNumber, rec->VolSessionId, rec->VolSessionTime, rec->FileIndex); if (rec->FileIndex == EOM_LABEL) { /* end of tape? */ Dmsg0(40, "Get EOM LABEL\n"); break; /* yes, get out */ } /* Some sort of label? */ if (rec->FileIndex < 0) { handle_session_record(dev, rec, &sessrec); if (jcr->bsr) { /* We just check block FI and FT not FileIndex */ rec->match_stat = match_bsr_block(jcr->bsr, block); } else { rec->match_stat = 0; } if (rec->invalid) { Dmsg5(0, "The record %d in block %ld SI=%ld ST=%ld FI=%ld was marked as invalid\n", rec->RecNum, rec->BlockNumber, rec->VolSessionId, rec->VolSessionTime, rec->FileIndex); } //ASSERTD(!rec->invalid || dcr->discard_invalid_records, "Got an invalid record"); /* * Note, we pass *all* labels to the callback routine. If * he wants to know if they matched the bsr, then he must * check the match_stat in the record */ ok = record_cb(dcr, rec); rec->invalid = false; /* The record was maybe invalid, but the next one is probably good */ #ifdef xxx /* * If this is the end of the Session (EOS) for this record * we can remove the record. Note, there is a separate * record to read each session. If a new session is seen * a new record will be created at approx line 157 above. * However, it seg faults in the for line at lineno 196. */ if (rec->FileIndex == EOS_LABEL) { Dmsg2(dbglvl, "Remove EOS rec. SI=%d ST=%d\n", rec->VolSessionId, rec->VolSessionTime); recs->remove(rec); free_record(rec); } #endif continue; } /* end if label record */ /* * Apply BSR filter */ if (jcr->bsr) { rec->match_stat = match_bsr(jcr->bsr, rec, &dev->VolHdr, &sessrec, jcr); Dmsg2(dbglvl, "match_bsr=%d bsr->reposition=%d\n", rec->match_stat, jcr->bsr->reposition); if (rec->match_stat == -1) { /* no more possible matches */ done = true; /* all items found, stop */ Dmsg1(dbglvl, "All done Addr=%s\n", dev->print_addr(ed1, sizeof(ed1))); break; } else if (rec->match_stat == 0) { /* no match */ Dmsg3(dbglvl, "BSR no match: clear rem=%d FI=%d before set_eof pos %s\n", rec->remainder, rec->FileIndex, dev->print_addr(ed1, sizeof(ed1))); rec->remainder = 0; rec->state_bits &= ~REC_PARTIAL_RECORD; if (try_repositioning(jcr, rec, dcr)) { break; /* We moved on the volume, read next block */ } continue; /* we don't want record, read next one */ } } dcr->VolLastIndex = rec->FileIndex; /* let caller know where we are */ if (is_partial_record(rec)) { Dmsg6(dbglvl, "Partial, break. recno=%d state_bits=%s blk=%d SI=%d ST=%d FI=%d\n", record, rec_state_bits_to_str(rec), block->BlockNumber, rec->VolSessionId, rec->VolSessionTime, rec->FileIndex); break; /* read second part of record */ } Dmsg6(dbglvl, "OK callback. 
recno=%d state_bits=%s blk=%d SI=%d ST=%d FI=%d\n", record, rec_state_bits_to_str(rec), block->BlockNumber, rec->VolSessionId, rec->VolSessionTime, rec->FileIndex); if (lastFileIndex != no_FileIndex && lastFileIndex != rec->FileIndex) { if (is_this_bsr_done(jcr, jcr->bsr, rec) && try_repositioning(jcr, rec, dcr)) { Dmsg1(dbglvl, "This bsr done, break pos %s\n", dev->print_addr(ed1, sizeof(ed1))); break; } Dmsg2(dbglvl, "==== inside LastIndex=%d FileIndex=%d\n", lastFileIndex, rec->FileIndex); } Dmsg2(dbglvl, "==== LastIndex=%d FileIndex=%d\n", lastFileIndex, rec->FileIndex); lastFileIndex = rec->FileIndex; if (rec->invalid) { Dmsg5(0, "The record %d in block %ld SI=%ld ST=%ld FI=%ld was marked as invalid\n", rec->RecNum, rec->BlockNumber, rec->VolSessionId, rec->VolSessionTime, rec->FileIndex); } //ASSERTD(!rec->invalid || dcr->discard_invalid_records, "Got an invalid record"); ok = record_cb(dcr, rec); rec->invalid = false; /* The record was maybe invalid, but the next one is probably good */ #if 0 /* * If we have a digest stream, we check to see if we have * finished the current bsr, and if so, repositioning will * be turned on. */ if (crypto_digest_stream_type(rec->Stream) != CRYPTO_DIGEST_NONE) { Dmsg3(dbglvl, "=== Have digest FI=%u before bsr check pos %s\n", rec->FileIndex, dev->print_addr(ed1, sizeof(ed1)); if (is_this_bsr_done(jcr, jcr->bsr, rec) && try_repositioning(jcr, rec, dcr)) { Dmsg1(dbglvl, "==== BSR done at FI=%d\n", rec->FileIndex); Dmsg1(dbglvl, "This bsr done, break pos=%s\n", dev->print_addr(ed1, sizeof(ed1))); break; } Dmsg2(900, "After is_bsr_done pos %s\n", dev->print_addr(ed1, sizeof(ed1)); } #endif } /* end for loop over records */ Dmsg1(dbglvl, "After end recs in block. pos=%s\n", dev->print_addr(ed1, sizeof(ed1))); } /* end for loop over blocks */ /* Walk down list and free all remaining allocated recs */ while (!recs->empty()) { rec = (DEV_RECORD *)recs->first(); recs->remove(rec); free_record(rec); } delete recs; print_block_read_errors(jcr, block); return ok; } /* * See if we can reposition. * Returns: true if at end of volume * false otherwise */ static bool try_repositioning(JCR *jcr, DEV_RECORD *rec, DCR *dcr) { BSR *bsr; DEVICE *dev = dcr->dev; char ed1[50]; bsr = find_next_bsr(jcr->bsr, dev); Dmsg2(dbglvl, "nextbsr=%p mount_next_volume=%d\n", bsr, jcr->bsr->mount_next_volume); if (bsr == NULL && jcr->bsr->mount_next_volume) { Dmsg0(dbglvl, "Would mount next volume here\n"); Dmsg1(dbglvl, "Current position Addr=%s\n", dev->print_addr(ed1, sizeof(ed1))); jcr->bsr->mount_next_volume = false; if (!dev->at_eot()) { /* Set EOT flag to force mount of next Volume */ jcr->mount_next_volume = true; dev->set_eot(); } rec->Addr = 0; return true; } if (bsr) { /* * ***FIXME*** gross kludge to make disk seeking work. Remove * when find_next_bsr() is fixed not to return a bsr already * completed. */ uint64_t dev_addr = dev->get_full_addr(); uint64_t bsr_addr = get_bsr_start_addr(bsr); /* Do not position backwards */ if (dev_addr > bsr_addr) { return false; } Dmsg2(dbglvl, "Try_Reposition from addr=%llu to %llu\n", dev_addr, bsr_addr); dev->reposition(dcr, bsr_addr); rec->Addr = 0; return true; /* We want the next block */ } return false; } /* * Position to the first file on this volume */ BSR *position_to_first_file(JCR *jcr, DCR *dcr, BSR *bsr) { DEVICE *dev = dcr->dev; uint64_t bsr_addr; char ed1[50], ed2[50]; Enter(150); /* * Now find and position to first file and block * on this tape. 
*/ if (bsr) { bsr->reposition = true; /* force repositioning */ bsr = find_next_bsr(bsr, dev); if ((bsr_addr=get_bsr_start_addr(bsr)) > 0) { Jmsg(jcr, M_INFO, 0, _("Forward spacing Volume \"%s\" to addr=%s\n"), dev->VolHdr.VolumeName, dev->print_addr(ed1, sizeof(ed1), bsr_addr)); dev->clear_eot(); /* TODO: See where to put this clear() exactly */ Dmsg2(dbglvl, "pos_to_first_file from addr=%s to %s\n", dev->print_addr(ed1, sizeof(ed1)), dev->print_addr(ed2, sizeof(ed2), bsr_addr)); dev->reposition(dcr, bsr_addr); } } Leave(150); return bsr; } static void handle_session_record(DEVICE *dev, DEV_RECORD *rec, SESSION_LABEL *sessrec) { const char *rtype; char buf[100]; memset(sessrec, 0, sizeof(SESSION_LABEL)); switch (rec->FileIndex) { case PRE_LABEL: rtype = _("Fresh Volume Label"); break; case VOL_LABEL: rtype = _("Volume Label"); unser_volume_label(dev, rec); break; case SOS_LABEL: rtype = _("Begin Session"); unser_session_label(sessrec, rec); break; case EOS_LABEL: rtype = _("End Session"); break; case EOM_LABEL: rtype = _("End of Media"); break; default: bsnprintf(buf, sizeof(buf), _("Unknown code %d\n"), rec->FileIndex); rtype = buf; break; } Dmsg5(dbglvl, _("%s Record: VolSessionId=%d VolSessionTime=%d JobId=%d DataLen=%d\n"), rtype, rec->VolSessionId, rec->VolSessionTime, rec->Stream, rec->data_len); } #ifdef DEBUG static char *rec_state_bits_to_str(DEV_RECORD *rec) { static char buf[200]; bsnprintf(buf, sizeof(buf), "%d ", rec->state_bits); if (rec->state_bits & REC_NO_HEADER) { bstrncat(buf, "Nohdr,", sizeof(buf)); } if (is_partial_record(rec)) { bstrncat(buf, "partial,", sizeof(buf)); } if (rec->state_bits & REC_BLOCK_EMPTY) { bstrncat(buf, "empty,", sizeof(buf)); } if (rec->state_bits & REC_NO_MATCH) { bstrncat(buf, "Nomatch,", sizeof(buf)); } if (rec->state_bits & REC_CONTINUATION) { bstrncat(buf, "cont,", sizeof(buf)); } if (buf[0]) { buf[strlen(buf)-1] = 0; } return buf; } #endif bacula-15.0.3/src/stored/org_libsd_filemedia.c0000644000175000017500000000177514771010173021072 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #include "bacula.h" #include "stored.h" bool flush_filemedia_queue(JCR *jcr) { return true; } void create_filemedia(DCR *dcr, DEV_BLOCK *block, DEV_RECORD *rec) { } bool dir_create_filemedia_record(DCR *dcr) { AskDirHandler *askdir_handler = get_askdir_handler(); if (askdir_handler) { return askdir_handler->dir_create_filemedia_record(dcr); } return true; } bacula-15.0.3/src/stored/bsr.h0000644000175000017500000001127614771010173015717 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. 
You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * BootStrap record definition -- for restoring files. * * Kern Sibbald, June 2002 * */ #ifndef __BSR_H #define __BSR_H 1 #ifndef HAVE_REGEX_H #include "lib/bregex.h" #else #include #endif /* * List of Volume names to be read by Storage daemon. * Formed by Storage daemon from BSR */ struct VOL_LIST { VOL_LIST *next; char VolumeName[MAX_NAME_LENGTH]; char MediaType[MAX_NAME_LENGTH]; char device[MAX_NAME_LENGTH]; /* ***FIXME*** use alist here */ int Slot; uint32_t start_file; }; /* * !!!!!!!!!!!!!!!!!! NOTE !!!!!!!!!!!!!!!!!!!!!!!!!!!!! * !!! !!! * !!! All records must have a pointer to !!! * !!! the next item as the first item defined. !!! * !!! !!! * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! */ struct BSR_VOLUME { BSR_VOLUME *next; char VolumeName[MAX_NAME_LENGTH]; char MediaType[MAX_NAME_LENGTH]; char device[MAX_NAME_LENGTH]; /* ***FIXME*** use alist here */ int32_t Slot; /* Slot */ }; struct BSR_CLIENT { BSR_CLIENT *next; char ClientName[MAX_NAME_LENGTH]; }; struct BSR_SESSID { BSR_SESSID *next; uint32_t sessid; uint32_t sessid2; }; struct BSR_SESSTIME { BSR_SESSTIME *next; uint32_t sesstime; }; struct BSR_VOLFILE { BSR_VOLFILE *next; uint32_t sfile; /* start file */ uint32_t efile; /* end file */ bool done; /* local done */ }; struct BSR_VOLBLOCK { BSR_VOLBLOCK *next; uint32_t sblock; /* start block */ uint32_t eblock; /* end block */ bool done; /* local done */ }; struct BSR_VOLADDR { BSR_VOLADDR *next; uint64_t saddr; /* start address */ uint64_t eaddr; /* end address */ bool done; /* local done */ }; struct BSR_FINDEX { BSR_FINDEX *next; int32_t findex; /* start file index */ int32_t findex2; /* end file index */ bool done; /* local done */ }; struct BSR_JOBID { BSR_JOBID *next; uint32_t JobId; uint32_t JobId2; }; struct BSR_JOBTYPE { BSR_JOBTYPE *next; uint32_t JobType; }; struct BSR_JOBLEVEL { BSR_JOBLEVEL *next; uint32_t JobLevel; }; struct BSR_JOB { BSR_JOB *next; char Job[MAX_NAME_LENGTH]; bool done; /* local done */ }; struct BSR_STREAM { BSR_STREAM *next; int32_t stream; /* stream desired */ }; struct BSR { /* NOTE!!! 
next must be the first item */ BSR *next; /* pointer to next one */ BSR *prev; /* pointer to previous one */ BSR *root; /* root bsr */ BSR *cur_bsr; bool reposition; /* set when any bsr is marked done */ bool mount_next_volume; /* set when next volume should be mounted */ bool done; /* set when everything found for this bsr */ bool use_fast_rejection; /* set if fast rejection can be used */ bool use_positioning; /* set if we can position the archive */ bool skip_file; /* skip all records for current file */ BSR_VOLUME *volume; uint32_t count; /* count of files to restore this bsr */ uint32_t found; /* count of restored files this bsr */ int32_t LastFI; /* LastFI seen by this bsr */ BSR_VOLFILE *volfile; BSR_VOLBLOCK *volblock; BSR_VOLADDR *voladdr; BSR_SESSTIME *sesstime; BSR_SESSID *sessid; BSR_JOBID *JobId; BSR_JOB *job; BSR_CLIENT *client; BSR_FINDEX *FileIndex; BSR_JOBTYPE *JobType; BSR_JOBLEVEL *JobLevel; BSR_STREAM *stream; char *fileregex; /* set if restore is filtered on filename */ regex_t *fileregex_re; ATTR *attr; /* scratch space for unpacking */ }; #endif bacula-15.0.3/src/stored/label.c0000644000175000017500000013664014771010173016206 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * * label.c Bacula routines to handle labels * * Kern Sibbald, MM * */ #include "bacula.h" /* pull in global headers */ #include "stored.h" /* pull in Storage Deamon headers */ static const int dbglvl = 100; /* Forward referenced functions */ static void create_volume_label_record(DCR *dcr, DEVICE *dev, DEV_RECORD *rec, bool adata); /* * Read the volume label * * If dcr->VolumeName == NULL, we accept any Bacula Volume * If dcr->VolumeName[0] == 0, we accept any Bacula Volume * otherwise dcr->VolumeName must match the Volume. * * If VolName given, ensure that it matches * * Returns VOL_ code as defined in record.h * VOL_NOT_READ * VOL_OK good label found * VOL_NO_LABEL volume not labeled * VOL_IO_ERROR I/O error reading tape * VOL_NAME_ERROR label has wrong name * VOL_CREATE_ERROR Error creating label * VOL_VERSION_ERROR label has wrong version * VOL_LABEL_ERROR bad label type * VOL_NO_MEDIA no media in drive * VOL_TYPE_ERROR aligned/non-aligned/dedup error * * The dcr block is emptied on return, and the Volume is * rewound. * * Handle both the ameta and adata volumes. 
*/ int DEVICE::read_dev_volume_label(DCR *dcr) { JCR *jcr = dcr->jcr; char *VolName = dcr->VolumeName; DEV_RECORD *record; bool ok = false; int stat; bool want_ansi_label; bool have_ansi_label = false; Enter(dbglvl); Dmsg5(dbglvl, "Enter read_volume_label adata=%d res=%d device=%s vol=%s dev_Vol=%s\n", dcr->block->adata, num_reserved(), print_name(), VolName, VolHdr.VolumeName[0]?VolHdr.VolumeName:"*NULL*"); if (!is_open()) { if (!open_device(dcr, OPEN_READ_ONLY)) { Leave(dbglvl); return VOL_IO_ERROR; } } clear_labeled(); clear_append(); clear_read(); label_type = B_BACULA_LABEL; set_worm(get_tape_worm(dcr)); Dmsg1(dbglvl, "==== worm=%d ====\n", is_worm()); if (!rewind(dcr)) { Mmsg(jcr->errmsg, _("Couldn't rewind %s device %s: ERR=%s\n"), print_type(), print_name(), print_errmsg()); Dmsg1(dbglvl, "return VOL_NO_MEDIA: %s", jcr->errmsg); Leave(dbglvl); return VOL_NO_MEDIA; } bstrncpy(VolHdr.Id, "**error**", sizeof(VolHdr.Id)); /* Read ANSI/IBM label if so requested */ want_ansi_label = dcr->VolCatInfo.LabelType != B_BACULA_LABEL || dcr->device->label_type != B_BACULA_LABEL; if (want_ansi_label || has_cap(CAP_CHECKLABELS)) { stat = read_ansi_ibm_label(dcr); /* If we want a label and didn't find it, return error */ if (want_ansi_label && stat != VOL_OK) { goto bail_out; } if (stat == VOL_NAME_ERROR || stat == VOL_LABEL_ERROR) { Mmsg(jcr->errmsg, _("Wrong Volume mounted on %s device %s: Wanted %s have %s\n"), print_type(), print_name(), VolName, VolHdr.VolumeName); if (!poll && jcr->label_errors++ > 100) { Jmsg(jcr, M_FATAL, 0, _("Too many tries: %s"), jcr->errmsg); } goto bail_out; } if (stat != VOL_OK) { /* Not an ANSI/IBM label, so re-read */ rewind(dcr); } else { have_ansi_label = true; } } /* Read the Bacula Volume label block */ record = new_record(); empty_block(dcr->block); Dmsg0(130, "Big if statement in read_volume_label\n"); dcr->reading_label = true; if (!dcr->read_block_from_dev(NO_BLOCK_NUMBER_CHECK)) { Mmsg(jcr->errmsg, _("Read label block failed: requested Volume \"%s\" on %s device %s is not a Bacula " "labeled Volume, because: ERR=%s"), NPRT(VolName), print_type(), print_name(), print_errmsg()); Dmsg1(dbglvl, "%s", jcr->errmsg); } else if (!read_record_from_block(dcr, record)) { Mmsg(jcr->errmsg, _("Could not read Volume label from block.\n")); Dmsg1(dbglvl, "%s", jcr->errmsg); } else if (!unser_volume_label(this, record)) { Mmsg(jcr->errmsg, _("Could not unserialize Volume label: ERR=%s\n"), print_errmsg()); Dmsg1(dbglvl, "%s", jcr->errmsg); } else if (strcmp(VolHdr.Id, BaculaId) != 0 && strcmp(VolHdr.Id, OldBaculaId) != 0 && strcmp(VolHdr.Id, BaculaMetaDataId) != 0 && strcmp(VolHdr.Id, BaculaAlignedDataId) != 0 && strcmp(VolHdr.Id, BaculaDedupMetaDataId) != 0 && strcmp(VolHdr.Id, BaculaS3CloudId) != 0) { Mmsg(jcr->errmsg, _("Volume Header Id bad: %s\n"), VolHdr.Id); Dmsg1(dbglvl, "%s", jcr->errmsg); } else { ok = true; Dmsg1(dbglvl, "VolHdr.Id OK: %s\n", VolHdr.Id); } dcr->reading_label = false; free_record(record); /* finished reading Volume record */ if (!is_volume_to_unload()) { clear_unload(); } if (!ok) { if (jcr->ignore_label_errors) { set_labeled(); /* set has Bacula label */ if (jcr->errmsg[0]) { Jmsg(jcr, M_ERROR, 0, "%s", jcr->errmsg); } empty_block(dcr->block); Leave(dbglvl); return VOL_OK; } Dmsg0(dbglvl, "No volume label - bailing out\n"); stat = VOL_NO_LABEL; goto bail_out; } /* At this point, we have read the first Bacula block, and * then read the Bacula Volume label. Now we need to * make sure we have the right Volume. 
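 *
 * (For orientation: the test below accepts BaculaTapeVersion (11), the two
 *  old compatible tape versions (10 and 9), and the BaculaMetaData,
 *  BaculaDedupMetaData and BaculaS3Cloud versions defined in block.h.)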
*/ if (VolHdr.VerNum != BaculaTapeVersion && VolHdr.VerNum != BaculaMetaDataVersion && VolHdr.VerNum != BaculaDedupMetaDataVersion && VolHdr.VerNum != BaculaS3CloudVersion && VolHdr.VerNum != OldCompatibleBaculaTapeVersion1 && VolHdr.VerNum != OldCompatibleBaculaTapeVersion2) { Mmsg(jcr->errmsg, _("Volume on %s device %s has wrong Bacula version. Wanted %d got %d\n"), print_type(), print_name(), BaculaTapeVersion, VolHdr.VerNum); Dmsg1(dbglvl, "VOL_VERSION_ERROR: %s", jcr->errmsg); stat = VOL_VERSION_ERROR; goto bail_out; } Dmsg1(dbglvl, "VolHdr.VerNum=%ld OK.\n", VolHdr.VerNum); /* We are looking for either an unused Bacula tape (PRE_LABEL) or * a Bacula volume label (VOL_LABEL) */ if (VolHdr.LabelType != PRE_LABEL && VolHdr.LabelType != VOL_LABEL) { Mmsg(jcr->errmsg, _("Volume on %s device %s has bad Bacula label type: %ld\n"), print_type(), print_name(), VolHdr.LabelType); Dmsg1(dbglvl, "%s", jcr->errmsg); if (!poll && jcr->label_errors++ > 100) { Jmsg(jcr, M_FATAL, 0, _("Too many tries: %s"), jcr->errmsg); } Dmsg0(dbglvl, "return VOL_LABEL_ERROR\n"); stat = VOL_LABEL_ERROR; goto bail_out; } set_labeled(); /* set has Bacula label */ /* Compare Volume Names */ Dmsg2(130, "Compare Vol names: VolName=%s hdr=%s\n", VolName?VolName:"*", VolHdr.VolumeName); if (VolName && *VolName && *VolName != '*' && strcmp(VolHdr.VolumeName, VolName) != 0) { Mmsg(jcr->errmsg, _("Wrong Volume mounted on %s device %s: Wanted %s have %s\n"), print_type(), print_name(), VolName, VolHdr.VolumeName); Dmsg1(dbglvl, "%s", jcr->errmsg); /* * Cancel Job if too many label errors * => we are in a loop */ if (!poll && jcr->label_errors++ > 100) { Jmsg(jcr, M_FATAL, 0, "Too many tries: %s", jcr->errmsg); } Dmsg0(dbglvl, "return VOL_NAME_ERROR\n"); stat = VOL_NAME_ERROR; goto bail_out; } /* Compare VolType to Device Type */ switch (dev_type) { case B_DEDUP_DEV: if (strcmp(VolHdr.Id, BaculaDedupMetaDataId) != 0) { Mmsg(jcr->errmsg, _("Wrong Volume Type. Wanted a Dedup Volume %s on device %s, but got: %s\n"), VolHdr.VolumeName, print_name(), VolHdr.Id); stat = VOL_TYPE_ERROR; goto bail_out; } break; case B_FILE_DEV: if (strcmp(VolHdr.Id, BaculaId) != 0) { Mmsg(jcr->errmsg, _("Wrong Volume Type. Wanted a File or Tape Volume %s on device %s, but got: %s\n"), VolHdr.VolumeName, print_name(), VolHdr.Id); stat = VOL_TYPE_ERROR; goto bail_out; } break; case B_ALIGNED_DEV: case B_ADATA_DEV: if (strcmp(VolHdr.Id, BaculaMetaDataId) != 0) { Mmsg(jcr->errmsg, _("Wrong Volume Type. Wanted an Aligned Volume %s on device %s, but got: %s\n"), VolHdr.VolumeName, print_name(), VolHdr.Id); stat = VOL_TYPE_ERROR; goto bail_out; } break; case B_CLOUD_DEV: if (strcmp(VolHdr.Id, BaculaS3CloudId) != 0) { Mmsg(jcr->errmsg, _("Wrong Volume Type. 
Wanted a Cloud Volume %s on device %s, but got: %s\n"), VolHdr.VolumeName, print_name(), VolHdr.Id); stat = VOL_TYPE_ERROR; goto bail_out; } default: break; } if (chk_dbglvl(100)) { dump_volume_label(); } Dmsg0(dbglvl, "Leave read_volume_label() VOL_OK\n"); /* If we are a streaming device, we only get one chance to read */ if (!has_cap(CAP_STREAM)) { rewind(dcr); if (have_ansi_label) { stat = read_ansi_ibm_label(dcr); /* If we want a label and didn't find it, return error */ if (stat != VOL_OK) { goto bail_out; } } } if (!load_encryption_key(dcr, "READ", VolHdr.VolumeName, &VolHdr.EncCypherKeySize, VolHdr.EncCypherKey, &VolHdr.MasterKeyIdSize, VolHdr.MasterKeyId)) { stat = VOL_ENC_ERROR; goto bail_out; /* a message is already loaded into jcr->errmsg */ } Dmsg1(100, "Call reserve_volume=%s\n", VolHdr.VolumeName); if (reserve_volume(dcr, VolHdr.VolumeName) == NULL) { if (!jcr->errmsg[0]) { Mmsg3(jcr->errmsg, _("Could not reserve volume %s on %s device %s\n"), VolHdr.VolumeName, print_type(), print_name()); } Dmsg2(dbglvl, "Could not reserve volume %s on %s\n", VolHdr.VolumeName, print_name()); stat = VOL_NAME_ERROR; goto bail_out; } if (dcr->is_writing()) { empty_block(dcr->block); } Leave(dbglvl); return VOL_OK; bail_out: empty_block(dcr->block); rewind(dcr); Dmsg2(dbglvl, "return stat=%d %s", stat, jcr->errmsg); Leave(dbglvl); return stat; } /* * Create and put a volume label into the block * * Returns: false on failure * true on success * * Handle both the ameta and adata volumes. */ bool DEVICE::write_volume_label_to_block(DCR *dcr) { DEVICE *dev; DEV_RECORD rec; JCR *jcr = dcr->jcr; bool ok = true; Enter(100); dev = dcr->dev; memset(&rec, 0, sizeof(rec)); rec.data = get_memory(SER_LENGTH_Volume_Label); memset(rec.data, 0, SER_LENGTH_Volume_Label); empty_block(dcr->block); /* Volume label always at beginning */ create_volume_label_record(dcr, dcr->dev, &rec, dcr->block->adata); dcr->block->BlockNumber = 0; /* Note for adata this also writes to disk */ Dmsg1(100, "write_record_to_block adata=%d\n", dcr->dev->adata); if (!write_record_to_block(dcr, &rec)) { free_pool_memory(rec.data); Jmsg2(jcr, M_FATAL, 0, _("Cannot write Volume label to block for %s device %s\n"), dev->print_type(), dev->print_name()); ok = false; goto get_out; } else { Dmsg4(100, "Wrote fd=%d adata=%d label of %d bytes to block. Vol=%s\n", dev->fd(), dcr->block->adata, rec.data_len, dcr->VolumeName); } free_pool_memory(rec.data); get_out: Leave(100); return ok; } /* * Write a Volume Label * !!! Note, this is ONLY used for writing * a fresh volume label. Any data * after the label will be destroyed, * in fact, we write the label 5 times !!!! * * This routine should be used only when labeling a blank tape or * when recylcing a volume. * * Handle both the ameta and adata volumes. 
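 *
 * Illustrative call (editor's sketch; the Volume and Pool names are made up):
 *
 *    // relabel=false, no_prelabel=false
 *    if (!dev->write_volume_label(dcr, "Vol0001", "Default", false, false)) {
 *       // the error has already been reported via Jmsg/Mmsg,
 *       // give up on this Volume
 *    }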
*/ bool DEVICE::write_volume_label(DCR *dcr, const char *VolName, const char *PoolName, bool relabel, bool no_prelabel) { DEVICE *dev; Enter(100); Dmsg4(230, "Write: block=%p ameta=%p dev=%p ameta_dev=%p\n", dcr->block, dcr->ameta_block, dcr->dev, dcr->ameta_dev); dcr->set_ameta(); dev = dcr->dev; Dmsg0(150, "write_volume_label()\n"); if (*VolName == 0) { if (dcr->jcr) { Mmsg(dcr->jcr->errmsg, "ERROR: new_volume_label_to_dev called with NULL VolName\n"); } Pmsg0(0, "=== ERROR: write_volume_label called with NULL VolName\n"); goto bail_out; } if (relabel) { volume_unused(dcr); /* mark current volume unused */ /* Truncate device */ if (!dev->truncate(dcr)) { goto bail_out; } dev->close_part(dcr); /* make sure closed for rename */ } /* Set the new filename for open, ... */ dev->setVolCatName(VolName); dcr->setVolCatName(VolName); dev->clearVolCatBytes(); Dmsg1(100, "New VolName=%s\n", VolName); if (!dev->open_device(dcr, OPEN_READ_WRITE)) { /* If device is not tape, attempt to create it */ if (dev->is_tape() || !dev->open_device(dcr, CREATE_READ_WRITE)) { Jmsg4(dcr->jcr, M_WARNING, 0, _("Open %s device %s Volume \"%s\" failed: ERR=%s"), dev->print_type(), dev->print_name(), dcr->VolumeName, dev->bstrerror()); goto bail_out; } } Dmsg1(150, "Label type=%d\n", dev->label_type); if (!load_encryption_key(dcr, "LABEL", VolName, &VolHdr.EncCypherKeySize, VolHdr.EncCypherKey, &VolHdr.MasterKeyIdSize, VolHdr.MasterKeyId)) { close(dcr); goto bail_out; } if (!write_volume_label_to_dev(dcr, VolName, PoolName, relabel, no_prelabel)) { /* close(dcr); uncomment if you get mismatched label like in #10453 */ goto bail_out; } if (!dev->is_aligned()) { /* Not aligned data */ if (dev->weof(dcr, 1)) { dev->set_labeled(); } if (chk_dbglvl(100)) { dev->dump_volume_label(); } Dmsg0(50, "Call reserve_volume\n"); /**** ***FIXME*** if dev changes, dcr must be updated */ if (reserve_volume(dcr, VolName) == NULL) { if (!dcr->jcr->errmsg[0]) { Mmsg3(dcr->jcr->errmsg, _("Could not reserve volume %s on %s device %s\n"), dev->VolHdr.VolumeName, dev->print_type(), dev->print_name()); } Dmsg1(50, "%s", dcr->jcr->errmsg); goto bail_out; } dev = dcr->dev; /* may have changed in reserve_volume */ } dev->clear_append(); /* remove append since this is PRE_LABEL */ Leave(100); return true; bail_out: dcr->adata_label = false; dcr->set_ameta(); volume_unused(dcr); dcr->dev->clear_append(); /* remove append since this is PRE_LABEL */ Leave(100); return false; } bool DEVICE::write_volume_label_to_dev(DCR *dcr, const char *VolName, const char *PoolName, bool relabel, bool no_prelabel) { DEVICE *dev, *ameta_dev; DEV_RECORD *rec = new_record(); bool rtn = false; Enter(100); dev = dcr->dev; ameta_dev = dcr->ameta_dev; empty_block(dcr->block); if (!dev->rewind(dcr)) { Dmsg2(130, "Bad status on %s from rewind: ERR=%s\n", dev->print_name(), dev->print_errmsg()); goto bail_out; } /* Temporarily mark in append state to enable writing */ dev->set_append(); /* Create PRE_LABEL or VOL_LABEL */ create_volume_header(dev, VolName, PoolName, no_prelabel); /* * If we have already detected an ANSI label, re-read it * to skip past it. Otherwise, we write a new one if * so requested. 
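 *
 * (In short: this applies only to the ameta block; when label_type is not
 *  B_BACULA_LABEL the existing ANSI/IBM label is skipped with
 *  read_ansi_ibm_label(), otherwise write_ansi_ibm_labels() writes a new
 *  one if so requested.)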
*/ if (!dcr->block->adata) { if (dev->label_type != B_BACULA_LABEL) { if (read_ansi_ibm_label(dcr) != VOL_OK) { dev->rewind(dcr); goto bail_out; } } else if (!write_ansi_ibm_labels(dcr, ANSI_VOL_LABEL, VolName)) { goto bail_out; } } create_volume_label_record(dcr, dev, rec, dcr->block->adata); rec->Stream = 0; rec->maskedStream = 0; Dmsg2(100, "write_record_to_block adata=%d FI=%d\n", dcr->dev->adata, rec->FileIndex); /* For adata label this also writes to disk */ if (!write_record_to_block(dcr, rec)) { Dmsg2(40, "Bad Label write on %s: ERR=%s\n", dev->print_name(), dev->print_errmsg()); goto bail_out; } else { Dmsg3(100, "Wrote label=%d bytes adata=%d block: %s\n", rec->data_len, dcr->block->adata, dev->print_name()); } Dmsg3(100, "New label adata=%d VolCatBytes=%lld VolCatStatus=%s\n", dev->adata, ameta_dev->VolCatInfo.VolCatBytes, ameta_dev->VolCatInfo.VolCatStatus); if (dcr->block->adata) { /* Empty block and set data start address */ empty_block(dcr->adata_block); } else { Dmsg4(130, "Call write_block_to_dev() fd=%d adata=%d block=%p Addr=%lld\n", dcr->dev->fd(), dcr->block->adata, dcr->block, dcr->block->dev->lseek(dcr, 0, SEEK_CUR)); Dmsg1(100, "write_record_to_dev adata=%d\n", dcr->dev->adata); /* Write ameta block to device */ if (!dcr->write_block_to_dev()) { Dmsg2(40, "Bad Label write on %s: ERR=%s\n", dev->print_name(), dev->print_errmsg()); goto bail_out; } } Dmsg3(100, "Wrote new Vol label adata=%d VolCatBytes=%lld VolCatStatus=%s\n", dev->adata, ameta_dev->VolCatInfo.VolCatBytes, ameta_dev->VolCatInfo.VolCatStatus); rtn = true; bail_out: free_record(rec); Leave(100); return rtn; } /* * Write a volume label. This is ONLY called if we have a valid Bacula * label of type PRE_LABEL or we are recyling an existing Volume. * * By calling write_volume_label_to_block, both ameta and adata * are updated. * * Returns: true if OK * false if unable to write it * N.B. 2022: Function is called in a loop and should avoid FATAL errors as much as possible. * In order to fix MT9419 (corner case with cloud device), I moved the !truncate(dcr) error handling * from FATAL to ERROR so the truncated volumes can be effectively set in Error state. */ bool DEVICE::rewrite_volume_label(DCR *dcr, bool recycle) { char ed1[50]; JCR *jcr = dcr->jcr; Enter(100); ASSERT2(dcr->VolumeName[0], "Empty Volume name"); ASSERT(!dcr->block->adata); if (is_worm()) { Jmsg3(jcr, M_FATAL, 0, _("Cannot relabel worm %s device %s Volume \"%s\"\n"), print_type(), print_name(), dcr->VolumeName); Leave(100); return false; } if (!open_device(dcr, OPEN_READ_WRITE)) { Jmsg4(jcr, M_WARNING, 0, _("Open %s device %s Volume \"%s\" failed: ERR=%s\n"), print_type(), print_name(), dcr->VolumeName, bstrerror()); Leave(100); return false; } Dmsg2(190, "set append found freshly labeled volume. fd=%d dev=%x\n", fd(), this); VolHdr.LabelType = VOL_LABEL; /* set Volume label */ set_append(); Dmsg0(100, "Rewrite_volume_label set volcatbytes=0\n"); clearVolCatBytes(); /* resets both ameta and adata byte counts */ setVolCatStatus("Append"); /* set append status */ if (!has_cap(CAP_STREAM)) { if (!rewind(dcr)) { Jmsg3(jcr, M_FATAL, 0, _("Rewind error on %s device %s: ERR=%s\n"), print_type(), print_name(), print_errmsg()); Leave(100); return false; } if (recycle) { Dmsg1(150, "Doing recycle. 
Vol=%s\n", dcr->VolumeName); if (!truncate(dcr)) { Jmsg3(jcr, M_ERROR, 0, _("Truncate error on %s device %s: ERR=%s\n"), print_type(), print_name(), print_errmsg()); Leave(100); return false; } if (!open_device(dcr, OPEN_READ_WRITE)) { Jmsg3(jcr, M_FATAL, 0, _("Failed to re-open device after truncate on %s device %s: ERR=%s"), print_type(), print_name(), print_errmsg()); Leave(100); return false; } } } if (!load_encryption_key(dcr, "LABEL", dcr->VolumeName, &VolHdr.EncCypherKeySize, VolHdr.EncCypherKey, &VolHdr.MasterKeyIdSize, VolHdr.MasterKeyId)) { Leave(100); return false; } if (!write_volume_label_to_block(dcr)) { Dmsg0(150, "Error from write volume label.\n"); Leave(100); return false; } Dmsg2(100, "wrote vol label to block. adata=%d Vol=%s\n", dcr->block->adata, dcr->VolumeName); ASSERT2(dcr->VolumeName[0], "Empty Volume name"); setVolCatInfo(false); /* * If we are not dealing with a streaming device, * write the block now to ensure we have write permission. * It is better to find out now rather than later. * We do not write the block now if this is an ANSI label. This * avoids re-writing the ANSI label, which we do not want to do. */ if (!has_cap(CAP_STREAM)) { /* * If we have already detected an ANSI label, re-read it * to skip past it. Otherwise, we write a new one if * so requested. */ if (label_type != B_BACULA_LABEL) { if (read_ansi_ibm_label(dcr) != VOL_OK) { rewind(dcr); Leave(100); return false; } } else if (!write_ansi_ibm_labels(dcr, ANSI_VOL_LABEL, VolHdr.VolumeName)) { Leave(100); return false; } /* Attempt write to check write permission */ Dmsg1(200, "Attempt to write to device fd=%d.\n", fd()); if (!dcr->write_block_to_dev()) { Jmsg3(jcr, M_ERROR, 0, _("Unable to write %s device %s: ERR=%s\n"), print_type(), print_name(), print_errmsg()); Dmsg0(200, "===ERROR write block to dev\n"); Leave(100); return false; } } ASSERT2(dcr->VolumeName[0], "Empty Volume name"); setVolCatName(dcr->VolumeName); if (!dir_get_volume_info(dcr, dcr->VolumeName, GET_VOL_INFO_FOR_WRITE)) { Leave(100); return false; } set_labeled(); /* Set or reset Volume statistics */ VolCatInfo.VolCatJobs = 0; VolCatInfo.VolCatFiles = 0; VolCatInfo.VolCatErrors = 0; VolCatInfo.VolCatBlocks = 0; VolCatInfo.VolCatRBytes = 0; VolCatInfo.VolCatCloudParts = 0; VolCatInfo.VolLastPartBytes = 0; VolCatInfo.VolCatType = 0; /* Will be set by dir_update_volume_info() */ VolCatInfo.UseProtect = use_protect(); VolCatInfo.VolEncrypted = use_volume_encryption(); if (recycle) { VolCatInfo.VolCatMounts++; VolCatInfo.VolCatRecycles++; } else { VolCatInfo.VolCatMounts = 1; VolCatInfo.VolCatRecycles = 0; VolCatInfo.VolCatWrites = 1; VolCatInfo.VolCatReads = 1; } dcr->VolMediaId = dcr->VolCatInfo.VolMediaId; /* make create_jobmedia work */ dir_create_jobmedia_record(dcr, true); Dmsg1(100, "dir_update_vol_info. 
Set Append vol=%s\n", dcr->VolumeName); VolCatInfo.VolFirstWritten = time(NULL); setVolCatStatus("Append"); if (!dir_update_volume_info(dcr, true, true)) { /* indicate relabel */ Leave(100); return false; } events_send_msg(dcr->jcr, "SJ0004", EVENTS_TYPE_VOLUME, me->hdr.name, (intptr_t)dcr->jcr, "Wrote label on %s volume=\"%s\"", recycle?"recycled":"prelabeled", dcr->VolumeName); if (recycle) { Jmsg(jcr, M_INFO, 0, _("Recycled volume \"%s\" on %s device %s, all previous data lost.\n"), dcr->VolumeName, print_type(), print_name()); } else { Jmsg(jcr, M_INFO, 0, _("Wrote label to prelabeled Volume \"%s\" on %s device %s\n"), dcr->VolumeName, print_type(), print_name()); } /* * End writing real Volume label (from pre-labeled tape), or recycling * the volume. */ Dmsg4(100, "OK rewrite vol label. Addr=%s adata=%d slot=%d Vol=%s\n", print_addr(ed1, sizeof(ed1)), dcr->block->adata, VolCatInfo.Slot, dcr->VolumeName); Leave(100); return true; } /* * create_volume_label_record * Note: it is assumed that you have created the volume_header * (label) prior to calling this subroutine. * Serialize label (from dev->VolHdr structure) into device record. * Assumes that the dev->VolHdr structure is properly * initialized. */ static void create_volume_label_record(DCR *dcr, DEVICE *dev, DEV_RECORD *rec, bool adata) { ser_declare; struct date_time dt; JCR *jcr = dcr->jcr; char buf[100]; /* Serialize the label into the device record. */ Enter(100); rec->data = check_pool_memory_size(rec->data, SER_LENGTH_Volume_Label); memset(rec->data, 0, SER_LENGTH_Volume_Label); ser_begin(rec->data, SER_LENGTH_Volume_Label); ser_string(dev->VolHdr.Id); ser_uint32(dev->VolHdr.VerNum); if (dev->VolHdr.VerNum >= 11) { ser_btime(dev->VolHdr.label_btime); dev->VolHdr.write_btime = get_current_btime(); ser_btime(dev->VolHdr.write_btime); dev->VolHdr.write_date = 0; dev->VolHdr.write_time = 0; } else { /* OLD WAY DEPRECATED */ ser_float64(dev->VolHdr.label_date); ser_float64(dev->VolHdr.label_time); get_current_time(&dt); dev->VolHdr.write_date = dt.julian_day_number; dev->VolHdr.write_time = dt.julian_day_fraction; } ser_float64(dev->VolHdr.write_date); /* 0 if VerNum >= 11 */ ser_float64(dev->VolHdr.write_time); /* 0 if VerNum >= 11 */ ser_string(dev->VolHdr.VolumeName); ser_string(dev->VolHdr.PrevVolumeName); ser_string(dev->VolHdr.PoolName); ser_string(dev->VolHdr.PoolType); ser_string(dev->VolHdr.MediaType); if (dev->device->volume_encryption == ET_STRONG) { ser_string("OBFUSCATED"); } else { ser_string(dev->VolHdr.HostName); } ser_string(dev->VolHdr.LabelProg); ser_string(dev->VolHdr.ProgVersion); ser_string(dev->VolHdr.ProgDate); /* ***FIXME*** */ dev->VolHdr.AlignedVolumeName[0] = 0; ser_string(dev->VolHdr.AlignedVolumeName); /* This is adata Volume information */ ser_uint64(dev->VolHdr.FirstData); ser_uint32(dev->VolHdr.FileAlignment); ser_uint32(dev->VolHdr.PaddingSize); /* adata and dedup volumes */ ser_uint32(dev->VolHdr.BlockSize); /* new in for BB03 */ ser_uint32(dev->VolHdr.EncCypherKeySize); ser_bytes(dev->VolHdr.EncCypherKey, dev->VolHdr.EncCypherKeySize); ser_uint32(dev->VolHdr.MasterKeyIdSize); ser_bytes(dev->VolHdr.MasterKeyId, dev->VolHdr.MasterKeyIdSize); ser_uint32(0); /* This for a Signature Key identifier a upcoming feature */ ser_end(rec->data, SER_LENGTH_Volume_Label); if (!adata) { bstrncpy(dcr->VolumeName, dev->VolHdr.VolumeName, sizeof(dcr->VolumeName)); } dcr->block->first_block = true; ASSERT2(dcr->VolumeName[0], "Empty Volume name"); rec->data_len = ser_length(rec->data); rec->FileIndex = 
dev->VolHdr.LabelType; Dmsg2(100, "LabelType=%d adata=%d\n", dev->VolHdr.LabelType, dev->adata); rec->VolSessionId = jcr->VolSessionId; rec->VolSessionTime = jcr->VolSessionTime; rec->Stream = jcr->NumWriteVolumes; rec->maskedStream = jcr->NumWriteVolumes; Dmsg3(100, "Created adata=%d Vol label rec: FI=%s len=%d\n", adata, FI_to_ascii(buf, rec->FileIndex), rec->data_len); Dmsg2(100, "reclen=%d recdata=%s", rec->data_len, rec->data); Leave(100); } /* * Create a volume header (label) in memory * The volume record is created after this header (label) * is created. */ void create_volume_header(DEVICE *dev, const char *VolName, const char *PoolName, bool no_prelabel) { DEVRES *device = (DEVRES *)dev->device; Enter(130); ASSERT2(dev != NULL, "dev ptr is NULL"); dev->VolHdr.BlockVer = BLOCK_VER; if (dev->is_aligned()) { bstrncpy(dev->VolHdr.Id, BaculaMetaDataId, sizeof(dev->VolHdr.Id)); dev->VolHdr.VerNum = BaculaMetaDataVersion; dev->VolHdr.FirstData = dev->file_alignment; dev->VolHdr.FileAlignment = dev->file_alignment; dev->VolHdr.PaddingSize = dev->padding_size; dev->VolHdr.BlockSize = dev->adata_size; } else if (dev->is_adata()) { bstrncpy(dev->VolHdr.Id, BaculaAlignedDataId, sizeof(dev->VolHdr.Id)); dev->VolHdr.VerNum = BaculaAlignedDataVersion; dev->VolHdr.FirstData = dev->file_alignment; dev->VolHdr.FileAlignment = dev->file_alignment; dev->VolHdr.PaddingSize = dev->padding_size; dev->VolHdr.BlockSize = dev->adata_size; } else if (dev->is_dedup()) { bstrncpy(dev->VolHdr.Id, BaculaDedupMetaDataId, sizeof(dev->VolHdr.Id)); dev->VolHdr.VerNum = BaculaDedupMetaDataVersion; dev->VolHdr.BlockSize = dev->max_block_size; } else if (dev->is_cloud()) { bstrncpy(dev->VolHdr.Id, BaculaS3CloudId, sizeof(dev->VolHdr.Id)); dev->VolHdr.VerNum = BaculaS3CloudVersion; dev->VolHdr.BlockSize = dev->max_block_size; dev->VolHdr.MaxPartSize = dev->max_part_size; dev->VolHdr.MaxVolPartsNum = dev->max_vol_parts_num; } else { bstrncpy(dev->VolHdr.Id, BaculaId, sizeof(dev->VolHdr.Id)); dev->VolHdr.VerNum = BaculaTapeVersion; dev->VolHdr.BlockSize = dev->max_block_size; } if ((dev->has_cap(CAP_STREAM) && no_prelabel) || dev->is_worm()) { /* We do not want to re-label so write VOL_LABEL now */ dev->VolHdr.LabelType = VOL_LABEL; } else { dev->VolHdr.LabelType = PRE_LABEL; /* Mark Volume as unused */ } bstrncpy(dev->VolHdr.VolumeName, VolName, sizeof(dev->VolHdr.VolumeName)); bstrncpy(dev->VolHdr.PoolName, PoolName, sizeof(dev->VolHdr.PoolName)); bstrncpy(dev->VolHdr.MediaType, device->media_type, sizeof(dev->VolHdr.MediaType)); bstrncpy(dev->VolHdr.PoolType, "Backup", sizeof(dev->VolHdr.PoolType)); dev->VolHdr.label_btime = get_current_btime(); dev->VolHdr.label_date = 0; dev->VolHdr.label_time = 0; if (gethostname(dev->VolHdr.HostName, sizeof(dev->VolHdr.HostName)) != 0) { dev->VolHdr.HostName[0] = 0; } bstrncpy(dev->VolHdr.LabelProg, my_name, sizeof(dev->VolHdr.LabelProg)); sprintf(dev->VolHdr.ProgVersion, "Ver. 
%s %s ", VERSION, BDATE); sprintf(dev->VolHdr.ProgDate, "Build %s %s ", __DATE__, __TIME__); /* Warning in BB03 EncCypherKey* & MasterKeyId* have already been set, don't reset */ dev->set_labeled(); /* set has Bacula label */ if (chk_dbglvl(100)) { dev->dump_volume_label(); } Leave(130); } /* * Create session (Job) label * The pool memory must be released by the calling program */ void create_session_label(DCR *dcr, DEV_RECORD *rec, int label) { JCR *jcr = dcr->jcr; ser_declare; Enter(100); rec->VolSessionId = jcr->VolSessionId; rec->VolSessionTime = jcr->VolSessionTime; rec->Stream = jcr->JobId; rec->maskedStream = jcr->JobId; rec->data = check_pool_memory_size(rec->data, SER_LENGTH_Session_Label); ser_begin(rec->data, SER_LENGTH_Session_Label); ser_string(BaculaId); ser_uint32(BaculaTapeVersion); ser_uint32(jcr->JobId); /* Changed in VerNum 11 */ ser_btime(get_current_btime()); ser_float64(0); ser_string(dcr->pool_name); ser_string(dcr->pool_type); ser_string(jcr->job_name); /* base Job name */ ser_string(jcr->client_name); /* Added in VerNum 10 */ ser_string(jcr->Job); /* Unique name of this Job */ ser_string(jcr->fileset_name); ser_uint32(jcr->getJobType()); ser_uint32(jcr->getJobLevel()); /* Added in VerNum 11 */ ser_string(jcr->fileset_md5); if (label == EOS_LABEL) { ser_uint32(jcr->JobFiles); ser_uint64(jcr->JobBytes); ser_uint32((uint32_t)dcr->StartAddr); /* Start Block */ ser_uint32((uint32_t)dcr->EndAddr); /* End Block */ ser_uint32((uint32_t)(dcr->StartAddr>>32)); /* Start File */ ser_uint32((uint32_t)(dcr->EndAddr>>32)); /* End File */ ser_uint32(jcr->JobErrors); /* Added in VerNum 11 */ ser_uint32(jcr->JobStatus); } ser_end(rec->data, SER_LENGTH_Session_Label); rec->data_len = ser_length(rec->data); Leave(100); } /* Write session (Job) label * Returns: false on failure * true on success */ bool write_session_label(DCR *dcr, int label) { JCR *jcr = dcr->jcr; DEVICE *dev = dcr->dev; DEV_RECORD *rec; DEV_BLOCK *block = dcr->block; char buf1[100], buf2[100]; Enter(100); dev->Lock(); Dmsg2(140, "=== write_session_label label=%d Vol=%s.\n", label, dev->getVolCatName()); if (!check_for_newvol_or_newfile(dcr)) { Pmsg0(000, "ERR: !check_for_new_vol_or_newfile\n"); dev->Unlock(); Leave(100); return false; } rec = new_record(); Dmsg1(130, "session_label record=%x\n", rec); switch (label) { case SOS_LABEL: set_start_vol_position(dcr); break; case EOS_LABEL: dcr->EndAddr = dev->get_full_addr(); break; default: Jmsg1(jcr, M_ABORT, 0, _("Bad Volume session label request=%d\n"), label); break; } create_session_label(dcr, rec, label); rec->FileIndex = label; dev->Unlock(); /* * We guarantee that the session record can totally fit * into a block. If not, write the block, and put it in * the next block. Having the sesssion record totally in * one block makes reading them much easier (no need to * read the next block). */ if (!can_write_record_to_block(block, rec)) { Dmsg0(150, "Cannot write session label to block.\n"); if (!dcr->write_block_to_device()) { Dmsg0(130, "Got session label write_block_to_dev error.\n"); free_record(rec); Leave(100); return false; } } /* * We use write_record() because it handles the case that * the maximum user size has been reached. 
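 *
 * Illustrative use (editor's sketch): a job session on a Volume is normally
 * bracketed by
 *
 *    write_session_label(dcr, SOS_LABEL);   // before the first data record
 *    ...                                    // job data records
 *    write_session_label(dcr, EOS_LABEL);   // after the last data record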
*/ if (!dcr->write_record(rec)) { Dmsg0(150, "Bad return from write_record\n"); free_record(rec); Leave(100); return false; } Dmsg6(150, "Write sesson_label record JobId=%d FI=%s SessId=%d Strm=%s len=%d " "remainder=%d\n", jcr->JobId, FI_to_ascii(buf1, rec->FileIndex), rec->VolSessionId, stream_to_ascii(buf2, rec->Stream, rec->FileIndex), rec->data_len, rec->remainder); free_record(rec); Dmsg2(150, "Leave write_session_label Block=%u File=%u\n", dev->get_block_num(), dev->get_file()); Leave(100); return true; } /* unser_volume_label * * Unserialize the Bacula Volume label into the device Volume_Label * structure. * * Assumes that the record is already read. * * Returns: false on error * true on success */ bool unser_volume_label(DEVICE *dev, DEV_RECORD *rec) { ser_declare; char buf1[100], buf2[100]; Enter(100); if (rec->FileIndex != VOL_LABEL && rec->FileIndex != PRE_LABEL) { Mmsg3(dev->errmsg, _("Expecting Volume Label, got FI=%s Stream=%s len=%d\n"), FI_to_ascii(buf1, rec->FileIndex), stream_to_ascii(buf2, rec->Stream, rec->FileIndex), rec->data_len); if (!forge_on) { Leave(100); return false; } } dev->VolHdr.LabelType = rec->FileIndex; dev->VolHdr.LabelSize = rec->data_len; dev->VolHdr.BlockVer = rec->BlockVer; dev->VolHdr.blkh_options = rec->blkh_options; /* Unserialize the record into the Volume Header */ Dmsg2(100, "reclen=%d recdata=%s", rec->data_len, rec->data); rec->data = check_pool_memory_size(rec->data, SER_LENGTH_Volume_Label); Dmsg2(100, "reclen=%d recdata=%s", rec->data_len, rec->data); ser_begin(rec->data, SER_LENGTH_Volume_Label); unser_string(dev->VolHdr.Id); unser_uint32(dev->VolHdr.VerNum); if (dev->VolHdr.VerNum >= 11) { unser_btime(dev->VolHdr.label_btime); unser_btime(dev->VolHdr.write_btime); } else { /* old way */ unser_float64(dev->VolHdr.label_date); unser_float64(dev->VolHdr.label_time); } unser_float64(dev->VolHdr.write_date); /* Unused with VerNum >= 11 */ unser_float64(dev->VolHdr.write_time); /* Unused with VerNum >= 11 */ unser_string(dev->VolHdr.VolumeName); unser_string(dev->VolHdr.PrevVolumeName); unser_string(dev->VolHdr.PoolName); unser_string(dev->VolHdr.PoolType); unser_string(dev->VolHdr.MediaType); unser_string(dev->VolHdr.HostName); unser_string(dev->VolHdr.LabelProg); unser_string(dev->VolHdr.ProgVersion); unser_string(dev->VolHdr.ProgDate); unser_string(dev->VolHdr.AlignedVolumeName); dev->VolHdr.AlignedVolumeName[0] = 0; unser_uint64(dev->VolHdr.FirstData); unser_uint32(dev->VolHdr.FileAlignment); unser_uint32(dev->VolHdr.PaddingSize); unser_uint32(dev->VolHdr.BlockSize); if (rec->BlockVer == 3) { unser_uint32(dev->VolHdr.EncCypherKeySize); unser_bytes(dev->VolHdr.EncCypherKey, dev->VolHdr.EncCypherKeySize); unser_uint32(dev->VolHdr.MasterKeyIdSize); unser_bytes(dev->VolHdr.MasterKeyId, dev->VolHdr.MasterKeyIdSize); uint32_t SignatureKeyIdSize; unser_uint32(SignatureKeyIdSize); (void)SignatureKeyIdSize; /* don't complain about unused variable */ } else { dev->VolHdr.EncCypherKeySize = 0; dev->VolHdr.MasterKeyIdSize = 0; } ser_end(rec->data, SER_LENGTH_Volume_Label); Dmsg0(190, "unser_vol_label\n"); if (chk_dbglvl(100)) { dev->dump_volume_label(); } Leave(100); return true; } bool unser_session_label(SESSION_LABEL *label, DEV_RECORD *rec) { ser_declare; Enter(100); rec->data = check_pool_memory_size(rec->data, SER_LENGTH_Session_Label); unser_begin(rec->data, SER_LENGTH_Session_Label); unser_string(label->Id); unser_uint32(label->VerNum); unser_uint32(label->JobId); if (label->VerNum >= 11) { unser_btime(label->write_btime); } else { 
unser_float64(label->write_date); } unser_float64(label->write_time); unser_string(label->PoolName); unser_string(label->PoolType); unser_string(label->JobName); unser_string(label->ClientName); if (label->VerNum >= 10) { unser_string(label->Job); /* Unique name of this Job */ unser_string(label->FileSetName); unser_uint32(label->JobType); unser_uint32(label->JobLevel); } if (label->VerNum >= 11) { unser_string(label->FileSetMD5); } else { label->FileSetMD5[0] = 0; } if (rec->FileIndex == EOS_LABEL) { unser_uint32(label->JobFiles); unser_uint64(label->JobBytes); unser_uint32(label->StartBlock); unser_uint32(label->EndBlock); unser_uint32(label->StartFile); unser_uint32(label->EndFile); unser_uint32(label->JobErrors); if (label->VerNum >= 11) { unser_uint32(label->JobStatus); } else { label->JobStatus = JS_Terminated; /* kludge */ } } Leave(100); return true; } void DEVICE::dump_volume_label() { int64_t dbl = debug_level; uint32_t File; const char *LabelType; char buf[30]; struct tm tm; struct date_time dt; debug_level = 1; File = file; switch (VolHdr.LabelType) { case PRE_LABEL: LabelType = "PRE_LABEL"; break; case VOL_LABEL: LabelType = "VOL_LABEL"; break; case EOM_LABEL: LabelType = "EOM_LABEL"; break; case SOS_LABEL: LabelType = "SOS_LABEL"; break; case EOS_LABEL: LabelType = "EOS_LABEL"; break; case EOT_LABEL: goto bail_out; default: LabelType = buf; sprintf(buf, _("Unknown %d"), VolHdr.LabelType); break; } Pmsg15(-1, _("\nVolume Label:\n" "Adata : %d\n" "Id : %s" "VerNo : %d\n" "VolName : %s\n" "PrevVolName : %s\n" "VolFile : %d\n" "LabelType : %s\n" "LabelSize : %d\n" "PoolName : %s\n" "MediaType : %s\n" "PoolType : %s\n" "HostName : %s\n" "BlockVer : BB%02d\n" "EncCypherKeySize : %ld\n" "MasterKeyIdSize : %ld\n" ""), adata, VolHdr.Id, VolHdr.VerNum, VolHdr.VolumeName, VolHdr.PrevVolumeName, File, LabelType, VolHdr.LabelSize, VolHdr.PoolName, VolHdr.MediaType, VolHdr.PoolType, VolHdr.HostName, VolHdr.BlockVer, VolHdr.EncCypherKeySize, VolHdr.MasterKeyIdSize); if (VolHdr.VerNum >= 11) { char dt[50]; bstrftime(dt, sizeof(dt), btime_to_utime(VolHdr.label_btime)); Pmsg1(-1, _("Date label written: %s\n"), dt); } else { dt.julian_day_number = VolHdr.label_date; dt.julian_day_fraction = VolHdr.label_time; tm_decode(&dt, &tm); Pmsg5(-1, _("Date label written: %04d-%02d-%02d at %02d:%02d\n"), tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday, tm.tm_hour, tm.tm_min); } bail_out: debug_level = dbl; } static void dump_session_label(DEV_RECORD *rec, const char *type) { int64_t dbl; struct date_time dt; struct tm tm; SESSION_LABEL label; char ec1[30], ec2[30], ec3[30], ec4[30], ec5[30], ec6[30], ec7[30]; unser_session_label(&label, rec); dbl = debug_level; debug_level = 1; Pmsg7(-1, _("\n%s Record:\n" "JobId : %d\n" "VerNum : %d\n" "PoolName : %s\n" "PoolType : %s\n" "JobName : %s\n" "ClientName : %s\n" ""), type, label.JobId, label.VerNum, label.PoolName, label.PoolType, label.JobName, label.ClientName); if (label.VerNum >= 10) { Pmsg4(-1, _( "Job (unique name) : %s\n" "FileSet : %s\n" "JobType : %c\n" "JobLevel : %c\n" ""), label.Job, label.FileSetName, label.JobType, label.JobLevel); } if (rec->FileIndex == EOS_LABEL) { Pmsg8(-1, _( "JobFiles : %s\n" "JobBytes : %s\n" "StartBlock : %s\n" "EndBlock : %s\n" "StartFile : %s\n" "EndFile : %s\n" "JobErrors : %s\n" "JobStatus : %c\n" ""), edit_uint64_with_commas(label.JobFiles, ec1), edit_uint64_with_commas(label.JobBytes, ec2), edit_uint64_with_commas(label.StartBlock, ec3), edit_uint64_with_commas(label.EndBlock, ec4), 
edit_uint64_with_commas(label.StartFile, ec5), edit_uint64_with_commas(label.EndFile, ec6), edit_uint64_with_commas(label.JobErrors, ec7), label.JobStatus); } if (label.VerNum >= 11) { char dt[50]; bstrftime(dt, sizeof(dt), btime_to_utime(label.write_btime)); Pmsg1(-1, _("Date written : %s\n"), dt); } else { dt.julian_day_number = label.write_date; dt.julian_day_fraction = label.write_time; tm_decode(&dt, &tm); Pmsg5(-1, _("Date written : %04d-%02d-%02d at %02d:%02d\n"), tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday, tm.tm_hour, tm.tm_min); } debug_level = dbl; } static int check_label(SESSION_LABEL *label) { int errors = 0; if (label->JobId > 10000000 || label->JobId < 0) { Pmsg0(-1, _("***** ERROR ****** : Found error with the JobId\n")); errors++; } if (!errors) { switch (label->JobLevel) { case L_FULL: case L_INCREMENTAL: case L_DIFFERENTIAL: case L_SINCE: case L_VERIFY_CATALOG: case L_VERIFY_INIT: case L_VERIFY_VOLUME_TO_CATALOG: case L_VERIFY_DISK_TO_CATALOG: case L_VERIFY_DATA: case L_BASE: case L_NONE: case L_VIRTUAL_FULL: break; default: Pmsg0(-1, _("***** ERROR ****** : Found error with the JobLevel\n")); errors++; } } if (!errors) { switch (label->JobType) { case JT_BACKUP: case JT_MIGRATED_JOB: case JT_VERIFY: case JT_RESTORE: case JT_CONSOLE: case JT_SYSTEM: case JT_ADMIN: case JT_ARCHIVE: case JT_JOB_COPY: case JT_COPY: case JT_MIGRATE: case JT_SCAN: break; default: Pmsg0(-1, _("***** ERROR ****** : Found error with the JobType\n")); errors++; } } if (!errors) { POOLMEM *err = get_pool_memory(PM_EMSG); if (!is_name_valid(label->Job, &err)) { Pmsg1(-1, _("***** ERROR ****** : Found error with the Job name %s\n"), err); errors++; } free_pool_memory(err); } return errors; } int dump_label_record(DEVICE *dev, DEV_RECORD *rec, int verbose, bool check_err) { const char *type; int64_t dbl; int errors = 0; if (rec->FileIndex == 0 && rec->VolSessionId == 0 && rec->VolSessionTime == 0) { return 0; } dbl = debug_level; debug_level = 1; switch (rec->FileIndex) { case PRE_LABEL: type = _("Fresh Volume"); break; case VOL_LABEL: type = _("Volume"); break; case SOS_LABEL: type = _("Begin Job Session"); break; case EOS_LABEL: type = _("End Job Session"); break; case EOM_LABEL: type = _("End of Media"); break; case EOT_LABEL: type = _("End of Tape"); break; default: type = _("Unknown"); break; } if (verbose) { switch (rec->FileIndex) { case PRE_LABEL: case VOL_LABEL: unser_volume_label(dev, rec); dev->dump_volume_label(); break; case EOS_LABEL: case SOS_LABEL: dump_session_label(rec, type); break; case EOM_LABEL: Pmsg7(-1, _("%s Record: File:blk=%u:%u SessId=%d SessTime=%d JobId=%d DataLen=%d\n"), type, dev->file, dev->block_num, rec->VolSessionId, rec->VolSessionTime, rec->Stream, rec->data_len); break; case EOT_LABEL: Pmsg0(-1, _("Bacula \"End of Tape\" label found.\n")); break; default: Pmsg7(-1, _("%s Record: File:blk=%u:%u SessId=%d SessTime=%d JobId=%d DataLen=%d\n"), type, dev->file, dev->block_num, rec->VolSessionId, rec->VolSessionTime, rec->Stream, rec->data_len); break; } } else { SESSION_LABEL label; char dt[50]; switch (rec->FileIndex) { case SOS_LABEL: unser_session_label(&label, rec); bstrftimes(dt, sizeof(dt), btime_to_utime(label.write_btime)); Pmsg6(-1, _("%s Record: File:blk=%u:%u SessId=%d SessTime=%d JobId=%d\n"), type, dev->file, dev->block_num, rec->VolSessionId, rec->VolSessionTime, label.JobId); Pmsg4(-1, _(" Job=%s Date=%s Level=%c Type=%c\n"), label.Job, dt, label.JobLevel, label.JobType); if (check_err) { errors += check_label(&label); } break; case EOS_LABEL: char 
ed1[30], ed2[30]; unser_session_label(&label, rec); bstrftimes(dt, sizeof(dt), btime_to_utime(label.write_btime)); Pmsg6(-1, _("%s Record: File:blk=%u:%u SessId=%d SessTime=%d JobId=%d\n"), type, dev->file, dev->block_num, rec->VolSessionId, rec->VolSessionTime, label.JobId); Pmsg7(-1, _(" Date=%s Level=%c Type=%c Files=%s Bytes=%s Errors=%d Status=%c\n"), dt, label.JobLevel, label.JobType, edit_uint64_with_commas(label.JobFiles, ed1), edit_uint64_with_commas(label.JobBytes, ed2), label.JobErrors, (char)label.JobStatus); if (check_err) { errors += check_label(&label); } break; case EOM_LABEL: case PRE_LABEL: case VOL_LABEL: default: Pmsg7(-1, _("%s Record: File:blk=%u:%u SessId=%d SessTime=%d JobId=%d DataLen=%d\n"), type, dev->file, dev->block_num, rec->VolSessionId, rec->VolSessionTime, rec->Stream, rec->data_len); break; case EOT_LABEL: break; } } debug_level = dbl; return errors; } bacula-15.0.3/src/stored/stored.conf.in0000644000175000017500000000276414771010173017536 0ustar bsbuildbsbuild# # Kern's Bacula Storage Daemon Configuration file # # For Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ @DISTVER@ # # Copyright (C) 2000-2023 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # # # "Global" Storage daemon configuration specifications # Storage { Name = "Storage daemon" Address = @hostname@ SDPort = @sd_port@ # Directors port WorkingDirectory = "@working_dir@" Pid Directory = "@piddir@" Subsys Directory = "@subsysdir@" Encryption Command = "@scriptdir@/key-manager.py getkey" } # # List Directors who are permitted to contact Storage daemon # Director { Name = @hostname@-dir Password = local_storage_password } # # Devices supported by this Storage daemon # To connect, the Director must have the same Name and MediaType, # which are sent to the File daemon # Device { Name = "HP DLT 80" Media Type = DLT8000 Archive Device = /dev/nst0 # LabelMedia = yes; # lets Bacula label unlabelled media AutomaticMount = yes; # when device opened, read it AlwaysOpen = yes; RemovableMedia = yes; } #Device { # Name = "Exabyte 8mm" # Media Type = "8mm" # Archive Device = /dev/nst1 # Hardware end of medium = No; ## LabelMedia = yes; # lets Bacula label unlabelled media # AutomaticMount = yes; # when device opened, read it # AlwaysOpen = Yes; # RemovableMedia = yes; #} Messages { Name = Standard director = @hostname@-dir = all operator = @dump_email@ = mount } bacula-15.0.3/src/stored/store_mngr.h0000644000175000017500000000220014771010173017273 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Storage Daemon's part of Storage Manager - responsible for retrieving needed * information in response to Director's query. 
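 *
 * (Editor's sketch of the idea, not taken from the original sources: the
 *  free-space query builds one "query_size" entry per unique devno so that
 *  several Device resources backed by the same filesystem are counted only
 *  once, and query_store() then reports the collected sizes back to the
 *  Director.)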
*/ #ifndef STORE_MNGR_H #define STORE_MNGR_H 1 #include "bacula.h" /* FreeSpace policy struct to easily get rid of device duplicates in the list */ struct query_size { struct dlink link; uint64_t devno; /* Unique device number */ uint64_t size; /* Device's freespace in bytes */ }; /* Main FreeSpace policy handler */ bool query_store(JCR *jcr); #endif // STORE_MNGR_H bacula-15.0.3/src/stored/block.h0000644000175000017500000001714414771010173016223 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Block definitions for Bacula media data format. * * Kern Sibbald, MM * */ #ifndef __BLOCK_H #define __BLOCK_H 1 #define MAX_BLOCK_LENGTH 20000000 /* this is a sort of sanity check */ #define DEFAULT_BLOCK_SIZE (512 * 126) /* 64,512 N.B. do not use 65,636 here */ #define MIN_DEDUP_BLOCK_SIZE (512 * 2) /* Minimum block (bucket) size */ #define DEDUP_BLOCK_SIZE (512 * 128) /* For now use a fixed dedup block size */ /* Block Header definitions. */ #define BLKHDR1_ID "BB01" #define BLKHDR2_ID "BB02" #define BLKHDR3_ID "BB03" #define BLKHDR_ID_LENGTH 4 #define BLKHDR_CS_LENGTH 4 /* checksum length */ #define BLKHDR1_LENGTH 16 /* Total length */ #define BLKHDR2_LENGTH 24 /* Total length */ #define BLKHDR3_LENGTH 32 /* Total length including the checksum) */ #define BLKHDR_CS64_OFFSET 24 /* 64bits checksum offset */ #define BLKHDR_CS64_LENGTH 8 /* 64bits checksum length */ #define WRITE_BLKHDR_ID BLKHDR3_ID #define WRITE_BLKHDR_LENGTH BLKHDR3_LENGTH #define WRITE_ADATA_BLKHDR_LENGTH (6*sizeof(int32_t)+sizeof(uint64_t)) #define BLOCK_VER 3 enum BLKH_OPTIONS { BLKHOPT_NONE = 0x0, BLKHOPT_CHKSUM = 0x1, // set if the checksum is enable BLKHOPT_ENCRYPT_VOL = 0x2, // set if the volume is encrypted BLKHOPT_ENCRYPT_BLOCK = 0x4, // set if this block is encrypted (volume label are not) BLKHOPT_MASK = 0x7, // Update this one after adding some values above BLKHOPT_LAST = 0x8, // Update this one too }; /* Record header definitions */ #define RECHDR1_LENGTH 20 /* * Record header consists of: * int32_t FileIndex * int32_t Stream * uint32_t data_length */ #define RECHDR2_LENGTH (3*sizeof(int32_t)) #define RECHDR3_LENGTH RECHDR2_LENGTH #define WRITE_RECHDR_LENGTH RECHDR2_LENGTH /* * An adata record header includes: * int32_t FileIndex * int32_t Stream STREAM_ADATA_RECORD_HEADER * uint32_t data_length * uint32_t block length (binbuf to that point in time) * int32_t Stream (original stream) * uint64_t FileOffset (if offset_stream) */ #define WRITE_ADATA_RECHDR_LENGTH (5*sizeof(int32_t)) #define WRITE_ADATA_RECHDR_LENGTH_MAX (WRITE_ADATA_RECHDR_LENGTH + sizeof(uint64_t)) /* Tape label and version definitions */ #define BaculaId "Bacula 1.0 immortal\n" #define OldBaculaId "Bacula 0.9 mortal\n" #define BaculaTapeVersion 11 #define OldCompatibleBaculaTapeVersion1 10 #define OldCompatibleBaculaTapeVersion2 9 #define BaculaMetaDataId "Bacula 1.0 Metadata\n" #define BaculaAlignedDataId "Bacula 1.0 Aligned Data\n" #define BaculaMetaDataVersion 10000 #define 
BaculaAlignedDataVersion 20000 #define BaculaDedupMetaDataId "Bacula 1.0 Dedup Metadata\n" #define BaculaDedupMetaDataVersion 30000 #define BaculaS3CloudId "Bacula 1.0 S3 Cloud Data\n" #define BaculaS3CloudVersion 40000 /* * This is the Media structure for a block header * Note, when written, it is serialized. 16 bytes uint32_t CheckSum; uint32_t block_len; uint32_t BlockNumber; char Id[BLKHDR_ID_LENGTH]; * for BB02 block, we have 24 bytes uint32_t CheckSum; uint32_t block_len; uint32_t BlockNumber; char Id[BLKHDR_ID_LENGTH]; uint32_t VolSessionId; uint32_t VolSessionTime; * for an adata block header (in ameta file), we have 32 bytes uint32_t BlockNumber; int32_t Stream; STREAM_ADATA_BLOCK_HEADER uint32_t block_len; uint32_t CheckSum; uint32_t VolSessionId; uint32_t VolSessionTime; uint64_t BlockAddr; * for BB03 block, we have variable header length, minimum is 24 bytes uint32_t blkh_options; // bit field describing the extra fields uint32_t block_len; uint32_t BlockNumber; char Id[BLKHDR_ID_LENGTH]; uint32_t VolSessionId; uint32_t VolSessionTime; ---- no come extra field ---- uint64_t xxhash64; if (blkh_options&BLKHOPT_CHKSUM) */ class DEVICE; /* for forward reference */ /* * DEV_BLOCK for reading and writing blocks. * This is the basic unit that is written to the device, and * it contains a Block Header followd by Records. Note, * at times (when reading a file), this block may contain * multiple blocks. * * This is the memory structure for a device block. */ struct DEV_BLOCK { DEV_BLOCK *next; /* pointer to next one */ DEVICE *dev; /* pointer to device */ /* binbuf is the number of bytes remaining in the buffer. * For writes, it is bytes not yet written. * For reads, it is remaining bytes not yet read. */ uint64_t BlockAddr; /* Block address */ uint32_t binbuf; /* bytes in buffer */ uint32_t block_len; /* length of current block read */ uint32_t buf_len; /* max/default block length */ uint32_t reclen; /* Last record length put in adata block */ uint32_t BlockNumber; /* sequential Bacula block number */ uint32_t read_len; /* bytes read into buffer, if zero, block empty */ uint32_t VolSessionId; /* */ uint32_t VolSessionTime; /* */ uint32_t read_errors; /* block errors (checksum, header, ...) 
*/ uint64_t CheckSum64; /* Block checksum */ uint32_t RecNum; /* Number of records read from the current block */ uint32_t extra_bytes; /* the extra size that must be accounted in VolABytes */ int BlockVer; /* block version 1 or 2 */ bool write_failed; /* set if write failed */ bool block_read; /* set when block read */ bool needs_write; /* block must be written */ bool adata; /* adata block */ bool no_header; /* Set if no block header */ bool new_fi; /* New FI arrived */ bool first_block; /* Is the block holding a volume label */ int32_t FirstIndex; /* first index this block */ int32_t LastIndex; /* last index this block */ int32_t rechdr_items; /* number of items in rechdr queue */ char *bufp; /* pointer into buffer */ POOLMEM *rechdr_queue; /* record header queue */ POOLMEM *buf; /* actual data buffer */ POOLMEM *buf_enc; /* buffer with the encrypted data */ char *buf_out; /* point to the data that must be written to the media */ alist *filemedia; /* Filemedia attached to the current block */ uint32_t blkh_options; /* see BLKH_OPTIONS */ }; #define block_is_empty(block) ((block)->read_len == 0) /* Information associated with a file OFFSET */ struct FILEMEDIA_ITEM { dlink link; uint32_t RecordNo; uint32_t FileIndex; uint64_t BlockAddr; uint64_t FileOffset; int64_t VolMediaId; }; #endif bacula-15.0.3/src/stored/aligned_read.c0000644000175000017500000000143414771010173017515 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * * record_read.c -- Volume (tape/disk) record read functions * * Kern Sibbald, April MMI * added BB02 format October MMII */ bacula-15.0.3/src/stored/vol_mgr.h0000644000175000017500000000616314771010173016575 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Pulled out of dev.h * * Kern Sibbald, MMXIII * */ /* * Some details of how volume reservations work * * class VOLRES: * set_in_use() volume being used on current drive * clear_in_use() no longer being used. Can be re-used or moved. 
* set_swapping() set volume being moved to another drive * is_swapping() volume is being moved to another drive * clear_swapping() volume normal * */ #ifndef __VOL_MGR_H #define __VOL_MGR_H 1 class VOLRES; VOLRES *vol_walk_start(); VOLRES *vol_walk_next(VOLRES *prev_vol); void vol_walk_end(VOLRES *vol); /* * Volume reservation class -- see vol_mgr.c and reserve.c */ class VOLRES { bool m_swapping; /* set when swapping to another drive */ bool m_in_use; /* set when volume reserved or in use */ bool m_reading; /* set when reading */ int32_t m_slot; /* slot of swapping volume */ uint32_t m_JobId; /* JobId for read volumes */ volatile int32_t m_use_count; /* Use count */ pthread_mutex_t m_mutex; /* Vol muntex */ public: dlink link; char *vol_name; /* Volume name */ DEVICE *dev; /* Pointer to device to which we are attached */ void init_mutex() { pthread_mutex_init(&m_mutex, NULL); }; void destroy_mutex() { pthread_mutex_destroy(&m_mutex); }; void vLock() { P(m_mutex); }; void vUnlock() { V(m_mutex); }; void inc_use_count(void) {P(m_mutex); m_use_count++; V(m_mutex); }; void dec_use_count(void) {P(m_mutex); m_use_count--; V(m_mutex); }; int32_t use_count() const { return m_use_count; }; bool is_swapping() const { return m_swapping; }; bool is_reading() const { return m_reading; }; bool is_writing() const { return !m_reading; }; void set_reading() { m_reading = true; }; void clear_reading() { m_reading = false; }; void set_swapping() { m_swapping = true; }; void clear_swapping() { m_swapping = false; }; bool is_in_use() const { return m_in_use; }; void set_in_use() { m_in_use = true; }; void clear_in_use() { m_in_use = false; }; void set_slot(int32_t slot) { m_slot = slot; }; void clear_slot() { m_slot = -1; }; int32_t get_slot() const { return m_slot; }; uint32_t get_jobid() const { return m_JobId; }; void set_jobid(uint32_t JobId) { m_JobId = JobId; }; }; #define foreach_vol(vol) \ for (vol=vol_walk_start(); vol; (vol=vol_walk_next(vol)) ) #define endeach_vol(vol) vol_walk_end(vol) #endif bacula-15.0.3/src/stored/match_bsr.c0000644000175000017500000005247714771010173017076 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Match Bootstrap Records (used for restores) against * Volume Records * * Kern Sibbald, June MMII * */ /* * ***FIXME*** * Also for efficiency, once a bsr is done, it really should be * delinked from the bsr chain. This will avoid the above * problem and make traversal of the bsr chain more efficient. * * To be done ... */ #include "bacula.h" #include "stored.h" #ifdef HAVE_FNMATCH #include #else #include "lib/fnmatch.h" #endif const int dbglevel = 200; #if BEEF int new_match_all(BSR *bsr, DEV_RECORD *rec, VOLUME_LABEL *volrec, SESSION_LABEL *sessrec, bool done, JCR *jcr); /* Code to test the new match_all code. 
Can be disabled with a setdebug command */ int use_new_match_all = 1; /* Make all functions non static for linking with bee module */ #define static #else #define new_match_all match_all /* Code to test the new match_all code. Can be disabled with a setdebug command */ int use_new_match_all = 0; #endif /* Forward references */ static int match_volume(BSR *bsr, BSR_VOLUME *volume, VOLUME_LABEL *volrec, bool done); static int match_sesstime(BSR *bsr, BSR_SESSTIME *sesstime, DEV_RECORD *rec, bool done); static int match_sessid(BSR *bsr, BSR_SESSID *sessid, DEV_RECORD *rec); static int match_client(BSR *bsr, BSR_CLIENT *client, SESSION_LABEL *sessrec, bool done); static int match_job(BSR *bsr, BSR_JOB *job, SESSION_LABEL *sessrec, bool done); static int match_job_type(BSR *bsr, BSR_JOBTYPE *job_type, SESSION_LABEL *sessrec, bool done); static int match_job_level(BSR *bsr, BSR_JOBLEVEL *job_level, SESSION_LABEL *sessrec, bool done); static int match_jobid(BSR *bsr, BSR_JOBID *jobid, SESSION_LABEL *sessrec, bool done); static int match_findex(BSR *bsr, DEV_RECORD *rec, bool done); static int match_voladdr(BSR *bsr, BSR_VOLADDR *voladdr, DEV_RECORD *rec, bool done); static int match_stream(BSR *bsr, BSR_STREAM *stream, DEV_RECORD *rec, bool done); static int match_all(BSR *bsr, DEV_RECORD *rec, VOLUME_LABEL *volrec, SESSION_LABEL *sessrec, bool done, JCR *jcr); static int match_block_sesstime(BSR *bsr, BSR_SESSTIME *sesstime, DEV_BLOCK *block); static int match_block_sessid(BSR *bsr, BSR_SESSID *sessid, DEV_BLOCK *block); static BSR *find_smallest_volfile(BSR *fbsr, BSR *bsr); /********************************************************************* * * If possible, position the archive device (tape) to read the * next block. */ void position_bsr_block(BSR *bsr, DEV_BLOCK *block) { /* To be implemented */ } /********************************************************************* * * Do fast block rejection based on bootstrap records. * use_fast_rejection will be set if we have VolSessionId and VolSessTime * in each record. When BlockVer is >= 2, we have those in the block header * so can do fast rejection. * * returns: 1 if block may contain valid records * 0 if block may be skipped (i.e. it contains no records of * that can match the bsr). 
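 *
 * Illustrative use (editor's sketch): a read loop can reject whole blocks
 * before unpacking any record, e.g.
 *
 *    if (!match_bsr_block(bsr, block)) {
 *       continue;            // no record in this block can match the bsr
 *    }
 *    // ... otherwise unpack the records and run match_bsr() on each ...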
* */ int match_bsr_block(BSR *bsr, DEV_BLOCK *block) { if (!bsr || !bsr->use_fast_rejection || (block->BlockVer < 2)) { return 1; /* cannot fast reject */ } for ( ; bsr; bsr=bsr->next) { if (!match_block_sesstime(bsr, bsr->sesstime, block)) { continue; } if (!match_block_sessid(bsr, bsr->sessid, block)) { continue; } return 1; } return 0; } static int match_block_sesstime(BSR *bsr, BSR_SESSTIME *sesstime, DEV_BLOCK *block) { if (!sesstime) { return 1; /* no specification matches all */ } if (sesstime->sesstime == block->VolSessionTime) { return 1; } if (sesstime->next) { return match_block_sesstime(bsr, sesstime->next, block); } return 0; } static int match_block_sessid(BSR *bsr, BSR_SESSID *sessid, DEV_BLOCK *block) { if (!sessid) { return 1; /* no specification matches all */ } if (sessid->sessid <= block->VolSessionId && sessid->sessid2 >= block->VolSessionId) { return 1; } if (sessid->next) { return match_block_sessid(bsr, sessid->next, block); } return 0; } static int match_fileregex(BSR *bsr, DEV_RECORD *rec, JCR *jcr) { if (bsr->fileregex_re == NULL) return 1; if (bsr->attr == NULL) { bsr->attr = new_attr(jcr); } /* * The code breaks if the first record associated with a file is * not of this type */ if (rec->maskedStream == STREAM_UNIX_ATTRIBUTES || rec->maskedStream == STREAM_UNIX_ATTRIBUTES_EX) { bsr->skip_file = false; if (unpack_attributes_record(jcr, rec->Stream, rec->data, rec->data_len, bsr->attr)) { if (regexec(bsr->fileregex_re, bsr->attr->fname, 0, NULL, 0) == 0) { Dmsg2(dbglevel, "Matched pattern, fname=%s FI=%d\n", bsr->attr->fname, rec->FileIndex); } else { Dmsg2(dbglevel, "Didn't match, skipping fname=%s FI=%d\n", bsr->attr->fname, rec->FileIndex); bsr->skip_file = true; } } } return 1; } /********************************************************************* * * Match Bootstrap records * returns 1 on match * returns 0 no match and reposition is set if we should * reposition the tape * returns -1 no additional matches possible */ int match_bsr(BSR *bsr, DEV_RECORD *rec, VOLUME_LABEL *volrec, SESSION_LABEL *sessrec, JCR *jcr) { int stat; /* * The bsr->reposition flag is set any time a bsr is done. * In this case, we can probably reposition the * tape to the next available bsr position. */ if (jcr->use_new_match_all) { if (bsr->cur_bsr) { bsr = bsr->cur_bsr; } } if (bsr) { bsr->reposition = false; /* Temp code to test the new match_all */ if (jcr->use_new_match_all) { stat = new_match_all(bsr, rec, volrec, sessrec, true, jcr); } else { stat = match_all(bsr, rec, volrec, sessrec, true, jcr); } /* * Note, bsr->reposition is set by match_all when * a bsr is done. We turn it off if a match was * found or if we cannot use positioning */ if (stat != 0 || !bsr->use_positioning) { bsr->reposition = false; } } else { stat = 1; /* no bsr => match all */ } return stat; } /* * Find the next bsr that applies to the current tape. * It is the one with the smallest VolFile position. 
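 *
 * (Editor's note, summarizing the code below: the search is only attempted
 *  when root_bsr->use_positioning and root_bsr->reposition are set and the
 *  device has CAP_POSITIONBLOCKS; if no usable bsr remains on this Volume,
 *  root_bsr->mount_next_volume is set so the caller can request the next
 *  Volume.)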
*/ BSR *find_next_bsr(BSR *root_bsr, DEVICE *dev) { BSR *bsr; BSR *found_bsr = NULL; /* Do tape/disk seeking only if CAP_POSITIONBLOCKS is on */ if (!root_bsr) { Dmsg0(dbglevel, "NULL root bsr pointer passed to find_next_bsr.\n"); return NULL; } if (!root_bsr->use_positioning || !root_bsr->reposition || !dev->has_cap(CAP_POSITIONBLOCKS)) { Dmsg2(dbglevel, "No nxt_bsr use_pos=%d repos=%d\n", root_bsr->use_positioning, root_bsr->reposition); return NULL; } Dmsg2(dbglevel, "use_pos=%d repos=%d\n", root_bsr->use_positioning, root_bsr->reposition); root_bsr->mount_next_volume = false; /* Walk through all bsrs to find the next one to use => smallest file,block */ for (bsr=root_bsr; bsr; bsr=bsr->next) { if (bsr->done || !match_volume(bsr, bsr->volume, &dev->VolHdr, 1)) { continue; } if (found_bsr == NULL) { found_bsr = bsr; } else { found_bsr = find_smallest_volfile(found_bsr, bsr); } } /* * If we get to this point and found no bsr, it means * that any additional bsr's must apply to the next * tape, so set a flag. */ if (found_bsr == NULL) { root_bsr->mount_next_volume = true; } return found_bsr; } /* * Get the smallest address from this voladdr part * Don't use "done" elements */ static bool get_smallest_voladdr(BSR_VOLADDR *va, uint64_t *ret) { bool ok=false; uint64_t min_val=0; for (; va ; va = va->next) { if (!va->done) { if (ok) { min_val = MIN(min_val, va->saddr); } else { min_val = va->saddr; ok=true; } } } *ret = min_val; return ok; } /* FIXME * This routine needs to be fixed to only look at items that * are not marked as done. Otherwise, it can find a bsr * that has already been consumed, and this will cause the * bsr to be used, thus we may seek back and re-read the * same records, causing an error. This deficiency must * be fixed. For the moment, it has been kludged in * read_record.c to avoid seeking back if find_next_bsr * returns a bsr pointing to a smaller address (file/block). * */ static BSR *find_smallest_volfile(BSR *found_bsr, BSR *bsr) { BSR *return_bsr = found_bsr; uint64_t found_bsr_saddr, bsr_saddr; /* if we have VolAddr, use it, else try with File and Block */ if (get_smallest_voladdr(found_bsr->voladdr, &found_bsr_saddr)) { if (get_smallest_voladdr(bsr->voladdr, &bsr_saddr)) { if (found_bsr_saddr > bsr_saddr) { return bsr; } else { return found_bsr; } } } return return_bsr; } /* * Called after the signature record so that * we can see if the current bsr has been * fully processed (i.e. is done). * The bsr argument is not used, but is included * for consistency with the other match calls. * * Returns: true if we should reposition * : false otherwise. */ bool is_this_bsr_done(JCR *jcr, BSR *bsr, DEV_RECORD *rec) { BSR *rbsr = rec->bsr; Dmsg1(dbglevel, "match_set %d\n", rbsr != NULL); if (!rbsr) { return false; } rec->bsr = NULL; /* TODO: When the new code is stable, drop the else part */ if (jcr->use_new_match_all) { if (!rbsr->next) { rbsr->found++; } /* Normally the loop must stop only *after* the last record has been read, * and we are about to read the next record. 
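 * In other words, with use_new_match_all the counter is advanced only on the
 * last bsr of the chain (no next pointer) and the loop stops once
 * found > count, whereas the old code below advances the counter on every
 * call and stops as soon as found >= count.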
*/ if (rbsr->count && rbsr->found > rbsr->count) { rbsr->done = true; rbsr->root->reposition = true; Dmsg2(dbglevel, "is_end_this_bsr set reposition=1 count=%d found=%d\n", rbsr->count, rbsr->found); return true; } } else { /* Old code that is stable */ rbsr->found++; if (rbsr->count && rbsr->found >= rbsr->count) { rbsr->done = true; rbsr->root->reposition = true; Dmsg2(dbglevel, "is_end_this_bsr set reposition=1 count=%d found=%d\n", rbsr->count, rbsr->found); return true; } } Dmsg2(dbglevel, "is_end_this_bsr not done count=%d found=%d\n", rbsr->count, rbsr->found); return false; } /* * Match all the components of current record * returns 1 on match * returns 0 no match * returns -1 no additional matches possible */ static int match_all(BSR *bsr, DEV_RECORD *rec, VOLUME_LABEL *volrec, SESSION_LABEL *sessrec, bool done, JCR *jcr) { Dmsg0(dbglevel, "Enter match_all\n"); if (bsr->done) { goto no_match; } if (!match_volume(bsr, bsr->volume, volrec, 1)) { Dmsg2(dbglevel, "bsr fail bsr_vol=%s != rec read_vol=%s\n", bsr->volume->VolumeName, volrec->VolumeName); goto no_match; } Dmsg2(dbglevel, "OK bsr match bsr_vol=%s read_vol=%s\n", bsr->volume->VolumeName, volrec->VolumeName); if (!match_voladdr(bsr, bsr->voladdr, rec, 1)) { if (bsr->voladdr) { Dmsg3(dbglevel, "Fail on Addr=%llu. bsr=%llu,%llu\n", get_record_address(rec), bsr->voladdr->saddr, bsr->voladdr->eaddr); dump_record(rec); } goto no_match; } if (!match_sesstime(bsr, bsr->sesstime, rec, 1)) { Dmsg2(dbglevel, "Fail on sesstime. bsr=%u rec=%u\n", bsr->sesstime->sesstime, rec->VolSessionTime); goto no_match; } /* NOTE!! This test MUST come after the sesstime test */ if (!match_sessid(bsr, bsr->sessid, rec)) { Dmsg2(dbglevel, "Fail on sessid. bsr=%u rec=%u\n", bsr->sessid->sessid, rec->VolSessionId); goto no_match; } /* NOTE!! This test MUST come after sesstime and sessid tests */ if (!match_findex(bsr, rec, 1)) { Dmsg3(dbglevel, "Fail on findex=%d. bsr=%d,%d\n", rec->FileIndex, bsr->FileIndex->findex, bsr->FileIndex->findex2); goto no_match; } if (bsr->FileIndex) { Dmsg3(dbglevel, "match on findex=%d. bsr=%d,%d\n", rec->FileIndex, bsr->FileIndex->findex, bsr->FileIndex->findex2); } if (!match_fileregex(bsr, rec, jcr)) { Dmsg1(dbglevel, "Fail on fileregex='%s'\n", NPRT(bsr->fileregex)); goto no_match; } /* This flag is set by match_fileregex (and perhaps other tests) */ if (bsr->skip_file) { Dmsg1(dbglevel, "Skipping findex=%d\n", rec->FileIndex); goto no_match; } /* * If a count was specified and we have a FileIndex, assume * it is a Bacula created bsr (or the equivalent). We * then save the bsr where the match occurred so that * after processing the record or records, we can update * the found count. I.e. rec->bsr points to the bsr that * satisfied the match. */ if (bsr->count && bsr->FileIndex) { rec->bsr = bsr; Dmsg0(dbglevel, "Leave match_all 1\n"); return 1; /* this is a complete match */ } /* * The selections below are not used by Bacula's * restore command, and don't work because of * the rec->bsr = bsr optimization above. 
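 * That is, when both a count and a FileIndex are present the function has
 * already returned above, so the JobId/Job/Client/JobType/JobLevel/Stream
 * tests below are only reached for bsr files (typically hand-written ones)
 * that do not use that form.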
*/ if (!match_jobid(bsr, bsr->JobId, sessrec, 1)) { Dmsg0(dbglevel, "fail on JobId\n"); goto no_match; } if (!match_job(bsr, bsr->job, sessrec, 1)) { Dmsg0(dbglevel, "fail on Job\n"); goto no_match; } if (!match_client(bsr, bsr->client, sessrec, 1)) { Dmsg0(dbglevel, "fail on Client\n"); goto no_match; } if (!match_job_type(bsr, bsr->JobType, sessrec, 1)) { Dmsg0(dbglevel, "fail on Job type\n"); goto no_match; } if (!match_job_level(bsr, bsr->JobLevel, sessrec, 1)) { Dmsg0(dbglevel, "fail on Job level\n"); goto no_match; } if (!match_stream(bsr, bsr->stream, rec, 1)) { Dmsg0(dbglevel, "fail on stream\n"); goto no_match; } return 1; no_match: if (bsr->next) { return match_all(bsr->next, rec, volrec, sessrec, bsr->done && done, jcr); } if (bsr->done && done) { Dmsg0(dbglevel, "Leave match all -1\n"); return -1; } Dmsg0(dbglevel, "Leave match all 0\n"); return 0; } static int match_volume(BSR *bsr, BSR_VOLUME *volume, VOLUME_LABEL *volrec, bool done) { if (!volume) { return 0; /* Volume must match */ } if (strcmp(volume->VolumeName, volrec->VolumeName) == 0) { Dmsg2(dbglevel, "OK match volume=%s volrec=%s\n", volume->VolumeName, volrec->VolumeName); return 1; } Dmsg3(dbglevel, "NO match volume=%s volrec=%s next=%p\n", volume->VolumeName, volrec->VolumeName, volume->next); if (volume->next) { return match_volume(bsr, volume->next, volrec, 1); } return 0; } static int match_client(BSR *bsr, BSR_CLIENT *client, SESSION_LABEL *sessrec, bool done) { if (!client) { return 1; /* no specification matches all */ } if (strcmp(client->ClientName, sessrec->ClientName) == 0) { return 1; } if (client->next) { return match_client(bsr, client->next, sessrec, 1); } return 0; } static int match_job(BSR *bsr, BSR_JOB *job, SESSION_LABEL *sessrec, bool done) { if (!job) { return 1; /* no specification matches all */ } if (strcmp(job->Job, sessrec->Job) == 0) { return 1; } if (job->next) { return match_job(bsr, job->next, sessrec, 1); } return 0; } static int match_job_type(BSR *bsr, BSR_JOBTYPE *job_type, SESSION_LABEL *sessrec, bool done) { if (!job_type) { return 1; /* no specification matches all */ } if (job_type->JobType == sessrec->JobType) { return 1; } if (job_type->next) { return match_job_type(bsr, job_type->next, sessrec, 1); } return 0; } static int match_job_level(BSR *bsr, BSR_JOBLEVEL *job_level, SESSION_LABEL *sessrec, bool done) { if (!job_level) { return 1; /* no specification matches all */ } if (job_level->JobLevel == sessrec->JobLevel) { return 1; } if (job_level->next) { return match_job_level(bsr, job_level->next, sessrec, 1); } return 0; } static int match_jobid(BSR *bsr, BSR_JOBID *jobid, SESSION_LABEL *sessrec, bool done) { if (!jobid) { return 1; /* no specification matches all */ } if (jobid->JobId <= sessrec->JobId && jobid->JobId2 >= sessrec->JobId) { return 1; } if (jobid->next) { return match_jobid(bsr, jobid->next, sessrec, 1); } return 0; } static int match_voladdr(BSR *bsr, BSR_VOLADDR *voladdr, DEV_RECORD *rec, bool done) { if (!voladdr) { return 1; /* no specification matches all */ } uint64_t addr = get_record_address(rec); Dmsg6(dbglevel, "match_voladdr: saddr=%llu eaddr=%llu recaddr=%llu sfile=%u efile=%u recfile=%u\n", voladdr->saddr, voladdr->eaddr, addr, (uint32_t)(voladdr->saddr>>32), (uint32_t)(voladdr->eaddr>>32), (uint32_t)(addr>>32)); if (voladdr->saddr <= addr && voladdr->eaddr >= addr) { Dmsg1(dbglevel, "OK match voladdr=%lld\n", addr); return 1; } /* Once we get past last eblock, we are done */ if (addr > voladdr->eaddr) { voladdr->done = true; /* set local done 
*/ if (!voladdr->next) { /* done with everything? */ bsr->done = true; /* yes */ } } if (voladdr->next) { return match_voladdr(bsr, voladdr->next, rec, voladdr->done && done); } /* If we are done and all prior matches are done, this bsr is finished */ if (voladdr->done && done) { bsr->done = true; bsr->root->reposition = true; Dmsg2(dbglevel, "bsr done from voladdr rec=%llu voleaddr=%llu\n", addr, voladdr->eaddr); } return 0; } static int match_stream(BSR *bsr, BSR_STREAM *stream, DEV_RECORD *rec, bool done) { if (!stream) { return 1; /* no specification matches all */ } if (stream->stream == rec->Stream) { return 1; } if (stream->next) { return match_stream(bsr, stream->next, rec, 1); } return 0; } static int match_sesstime(BSR *bsr, BSR_SESSTIME *sesstime, DEV_RECORD *rec, bool done) { /* ATTN: incomplete jobs can make sesstime goes backward inside a volume */ if (!sesstime) { return 1; /* no specification matches all */ } if (sesstime->sesstime == rec->VolSessionTime) { return 1; } if (sesstime->next) { return match_sesstime(bsr, sesstime->next, rec, done); } return 0; } /* * Note, we cannot mark bsr done based on session id because we may * have interleaved records, and there may be more of what we want * later. */ static int match_sessid(BSR *bsr, BSR_SESSID *sessid, DEV_RECORD *rec) { if (!sessid) { return 1; /* no specification matches all */ } if (sessid->sessid <= rec->VolSessionId && sessid->sessid2 >= rec->VolSessionId) { return 1; } if (sessid->next) { return match_sessid(bsr, sessid->next, rec); } return 0; } /* * When reading the Volume, the Volume Findex (rec->FileIndex) always * are found in sequential order. Thus we can make optimizations. * */ static int match_findex(BSR *bsr, DEV_RECORD *rec, bool done) { BSR_FINDEX *findex = bsr->FileIndex; BSR_FINDEX *next; if (!findex) { return 1; /* no specification matches all */ } for ( ;; ) { if (findex->findex <= rec->FileIndex && findex->findex2 >= rec->FileIndex) { Dmsg3(dbglevel, "Match on recFindex=%d. bsrFIs=%d,%d\n", rec->FileIndex, findex->findex, findex->findex2); return 1; } if (rec->FileIndex > findex->findex2) { /* TODO: See if we really want to modify the findex when we will try * to seek backward */ if (findex->next) { next = findex->next; Dmsg3(dbglevel, "No match recFindex=%d. bsrFIs=%d,%d\n", rec->FileIndex, findex->findex, findex->findex2); free(findex); findex = next; bsr->FileIndex = findex; continue; } else { bsr->done = true; bsr->root->reposition = true; } } return 0; } } uint64_t get_bsr_start_addr(BSR *bsr) { uint64_t bsr_addr = 0; if (bsr) { if (bsr->voladdr) { bsr_addr = bsr->voladdr->saddr; } } return bsr_addr; } bacula-15.0.3/src/stored/file_driver.c0000644000175000017500000004547514771010173017426 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Routines for writing to a file from the Cloud device. * * This is for testing purposes only. # * NOTE!!! This cloud driver is not compatible with * any disk-changer script for changing Volumes. 
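 *
 * (For reference: this driver keeps each Volume as a directory named after
 *  the Volume under the configured host_name path, with every cloud part
 *  stored in it as a plain file, roughly:
 *      <host_name>/<VolumeName>/part.1
 *      <host_name>/<VolumeName>/part.2   ... and so on,
 *  so the "cloud" here is really just the local filesystem.)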
* It does however work with Bacula Virtual autochangers. * * Written by Kern Sibbald, May MMXVI * */ #include "file_driver.h" static const int64_t dbglvl = DT_CLOUD|50; #include /* Imported functions */ const char *mode_to_str(int mode); int breaddir(DIR *dirp, POOLMEM *&dname); /* Forward referenced functions */ /* Const and Static definitions */ /* * Put a cache object into the cloud (i.e. local disk) * or visa-versa. */ bool file_driver::put_object(transfer *xfer, const char *in_fname, const char *out_fname, bwlimit *limit) { struct stat statbuf; char *p, *f; char save_separator; ssize_t rbytes, wbytes; uint32_t read_len; int64_t obj_len; FILE *infile=NULL, *outfile=NULL; POOLMEM *buf = get_memory(buf_len); Enter(dbglvl); Dmsg2(dbglvl, "Put from: %s to %s\n", in_fname, out_fname); /* * First work on output file */ /* Split out_fname into path + file */ for (p=f=const_cast(out_fname); *p; p++) { if (IsPathSeparator(*p)) { f = p; /* set pos of last slash */ } } if (!IsPathSeparator(*f)) { /* did we find a slash? */ Mmsg1(xfer->m_message, "Could not find path name for output file: %s\n", out_fname); goto get_out; } save_separator = *f; *f = 0; /* terminate path */ /* const_cast should not be necessary here but is due the makedir interface */ if (!makedir(NULL, const_cast(out_fname), 0740)) { Mmsg1(xfer->m_message, "Could not makedir output directory: %s\n", out_fname); *f = save_separator; goto get_out; } *f = save_separator; if (lstat(out_fname, &statbuf) == -1) { outfile = bfopen(out_fname, "w"); } else { /* append to existing file */ outfile = bfopen(out_fname, "r+"); } if (!outfile) { berrno be; Mmsg2(xfer->m_message, "Could not open output file %s. ERR=%s\n", out_fname, be.bstrerror()); goto get_out; } /* * Now work on input file */ if (lstat(in_fname, &statbuf) == -1) { berrno be; Mmsg2(xfer->m_message, "Failed to stat input file %s. ERR=%s\n", in_fname, be.bstrerror()); goto get_out; } obj_len = statbuf.st_size; Dmsg1(dbglvl, "Object length to copy is: %lld bytes.\n", obj_len); if (obj_len == 0) { /* Not yet created nothing to do */ goto get_out; } infile = bfopen(in_fname, "r"); if (!infile) { berrno be; Mmsg2(xfer->m_message, "Failed to open input file %s. ERR=%s\n", in_fname, be.bstrerror()); goto get_out; } while (obj_len > 0) { if (xfer->is_canceled()) { Mmsg(xfer->m_message, "Job is canceled.\n"); goto get_out; } read_len = (obj_len > buf_len) ? buf_len : obj_len; Dmsg3(dbglvl+10, "obj_len=%d buf_len=%d read_len=%d\n", obj_len, buf_len, read_len); rbytes = fread(buf, 1, read_len, infile); Dmsg1(dbglvl+10, "Read %d bytes.\n", rbytes); if (rbytes <= 0) { berrno be; Mmsg2(xfer->m_message, "Error reading input file %s. ERR=%s\n", in_fname, be.bstrerror()); goto get_out; } wbytes = fwrite(buf, 1, rbytes, outfile); Dmsg2(dbglvl+10, "Wrote: %d bytes wanted %d bytes.\n", wbytes, rbytes); if (wbytes < 0) { berrno be; Mmsg2(xfer->m_message, "Error writing output file %s. 
ERR=%s\n", out_fname, be.bstrerror()); goto get_out; } obj_len -= rbytes; xfer->increment_processed_size(rbytes); if (limit->use_bwlimit()) { limit->control_bwlimit(rbytes); } } get_out: free_memory(buf); if (infile) { fclose(infile); } if (outfile) { if (fclose(outfile) != 0) { berrno be; Mmsg2(xfer->m_message, "Failed to close file %s: %s\n", out_fname, be.bstrerror()); } /* Get stats on the result part and fill the xfer res */ if (lstat(out_fname, &statbuf) == -1) { berrno be; Mmsg2(xfer->m_message, "Failed to stat file %s: %s\n", out_fname, be.bstrerror()); } else { xfer->m_res_size = statbuf.st_size; xfer->m_res_mtime = statbuf.st_mtime; } } Leave(dbglvl); return (xfer->m_message[0] == 0); } bool file_driver::get_cloud_object(transfer *xfer, const char *cloud_fname, const char *cache_fname) { return put_object(xfer, cloud_fname, cache_fname, &download_limit); } bool file_driver::truncate_cloud_volume(const char *VolumeName, ilist *trunc_parts, cancel_callback *cancel_cb, POOLMEM *&err) { bool rtn = true; int i; POOLMEM *filename = get_pool_memory(PM_FNAME); for (i=1; (i <= (int)trunc_parts->last_index()); i++) { if (!trunc_parts->get(i)) { continue; } make_cloud_filename(filename, VolumeName, "part", i); if (unlink(filename) != 0 && errno != ENOENT) { berrno be; Mmsg3(err, "truncate_cloud_volume for %s: Unable to delete %s. ERR=%s\n", VolumeName, filename, be.bstrerror()); rtn = false; } else { Mmsg2(err, "truncate_cloud_volume for %s: Unlink file %s.\n", VolumeName, filename); } } free_pool_memory(filename); return rtn; } bool file_driver::clean_cloud_volume(const char *VolumeName, cleanup_cb_type *cb, cleanup_ctx_type *ctx, cancel_callback *cancel_cb, POOLMEM *&err) { Enter(dbglvl); if (cb == NULL || ctx == NULL || strlen(VolumeName) == 0) { pm_strcpy(err, "Invalid argument"); return false; } POOLMEM *vol_dir = get_pool_memory(PM_NAME); pm_strcpy(vol_dir, hostName); if (!IsPathSeparator(vol_dir[strlen(vol_dir)-1])) { pm_strcat(vol_dir, "/"); } pm_strcat(vol_dir, VolumeName); DIR* dp = NULL; struct dirent *entry = NULL; struct stat statbuf; int name_max; bool ok = false; POOL_MEM dname(PM_FNAME); int status = 0; Dmsg1(dbglvl, "Searching for parts in: %s\n", vol_dir); if (!(dp = opendir(vol_dir))) { berrno be; Mmsg2(err, "Cannot opendir to get parts list. Volume %s does not exist. ERR=%s", VolumeName, be.bstrerror()); Dmsg1(dbglvl, "%s\n", err); if (errno == ENOENT) { ok=true; /* No volume, so no part */ } goto get_out; } name_max = pathconf(".", _PC_NAME_MAX); if (name_max < 1024) { name_max = 1024; } entry = (struct dirent *)malloc(sizeof(struct dirent) + name_max + 1000); for ( ;; ) { if (cancel_cb && cancel_cb->fct && cancel_cb->fct(cancel_cb->arg)) { pm_strcpy(err, "Job canceled"); goto get_out; } errno = 0; status = breaddir(dp, dname.addr()); if (status != 0) { if (status > 0) { Mmsg1(err, "breaddir failed: status=%d", status); Dmsg1(dbglvl, "%s\n", err); } break; } /* Always ignore . and .. 
*/ if (strcmp(".", dname.c_str()) == 0 || strcmp("..", dname.c_str()) == 0) { continue; } POOLMEM *part_path = get_pool_memory(PM_NAME); pm_strcpy(part_path,vol_dir); if (!IsPathSeparator(part_path[strlen(vol_dir)-1])) { pm_strcat(part_path, "/"); } pm_strcat(part_path, dname.c_str()); /* Get size of part */ if (lstat(part_path, &statbuf) == -1) { berrno be; Mmsg(err, "Failed to stat file %s: %s", part_path, be.bstrerror()); Dmsg1(dbglvl, "%s\n", err); free_pool_memory(part_path); goto get_out; } POOLMEM *clean_part_path = get_pool_memory(PM_NAME); pm_strcpy(clean_part_path,VolumeName); if (!IsPathSeparator(clean_part_path[strlen(VolumeName)-1])) { pm_strcat(clean_part_path, "/"); } pm_strcat(clean_part_path, dname.c_str()); /* Look only for part files */ if (!cb(clean_part_path, ctx)) { free_pool_memory(clean_part_path); free_pool_memory(part_path); continue; } free_pool_memory(clean_part_path); if (unlink(part_path) != 0 && errno != ENOENT) { berrno be; Mmsg3(err, "truncate_cloud_volume for %s: Unable to delete %s. ERR=%s\n", VolumeName, part_path, be.bstrerror()); free_pool_memory(part_path); goto get_out; } else { Dmsg2(dbglvl, "clean_cloud_volume for %s: Unlink file %s.\n", VolumeName, part_path); } free_pool_memory(part_path); } ok = true; get_out: if (dp) { closedir(dp); } if (entry) { free(entry); } free_pool_memory(vol_dir); Leave(dbglvl); return ok; } void file_driver::make_cloud_filename(POOLMEM *&filename, const char *VolumeName, const char *file, uint32_t part) { Enter(dbglvl); pm_strcpy(filename, hostName); cloud_driver::add_vol_and_part(filename, VolumeName, file, part); Dmsg1(dbglvl, "make_cloud_filename: %s\n", filename); Leave(dbglvl); } void file_driver::make_cloud_filename(POOLMEM *&filename, const char *VolumeName, const char *file) { Enter(dbglvl); pm_strcpy(filename, hostName); cloud_driver::add_vol_and_part(filename, VolumeName, file); Dmsg1(dbglvl, "make_cloud_filename: %s\n", filename); Leave(dbglvl); } /* * Copy a single cache part to the cloud (local disk) */ bool file_driver::copy_cache_part_to_cloud(transfer *xfer) { Enter(dbglvl); POOLMEM *cloud_fname = get_pool_memory(PM_FNAME); make_cloud_filename(cloud_fname, xfer->m_volume_name, "part", xfer->m_part); Dmsg2(dbglvl, "Call put_object: %s, %s\n", xfer->m_cache_fname, cloud_fname); Dmsg1(dbglvl, "objects_default_tier: %d\n", objects_default_tier); bool rtn = put_object(xfer, xfer->m_cache_fname, cloud_fname, &upload_limit); free_pool_memory(cloud_fname); Leave(dbglvl); return rtn; } bool file_driver::move_cloud_part(const char *VolumeName, uint32_t apart , const char *to, cancel_callback *cancel_cb, POOLMEM *&err, int& exists) { Enter(dbglvl); bool rtn = false; POOLMEM *cloud_source_name = get_pool_memory(PM_FNAME); POOLMEM *cloud_dest_name = get_pool_memory(PM_FNAME); make_cloud_filename(cloud_source_name, VolumeName, "part", apart); make_cloud_filename(cloud_dest_name, VolumeName, to); struct stat statbuf; /* Get size of part */ if (lstat(cloud_source_name, &statbuf) != 0) { exists = 0; rtn = true; } else { exists = 1; transfer xfer(statbuf.st_size, NULL, cloud_source_name, VolumeName, dev->name(), apart, NULL, 0, NULL, NULL); rtn = put_object(&xfer, cloud_source_name, cloud_dest_name, &upload_limit); Mmsg(err,"%s",rtn ? 
to:xfer.m_message); } free_pool_memory(cloud_dest_name); free_pool_memory(cloud_source_name); Leave(dbglvl); return rtn; } /* * Copy a single object (part) from the cloud to the cache */ int file_driver::copy_cloud_part_to_cache(transfer *xfer) { Enter(dbglvl); POOL_MEM cloud_fname(PM_FNAME); make_cloud_filename(cloud_fname.addr(), xfer->m_volume_name, "part", xfer->m_part); if (getenv("CLOUD_FILE_DRIVER_SIMULATE_DELAYED_TRANSFER") && xfer->m_debug_retry) { restore_cloud_object(xfer, cloud_fname.c_str()); Leave(dbglvl); return CLOUD_DRIVER_COPY_PART_TO_CACHE_RETRY; } else { int ret = put_object(xfer, cloud_fname.c_str(), xfer->m_cache_fname, &download_limit); if (ret) { Leave(dbglvl); return CLOUD_DRIVER_COPY_PART_TO_CACHE_OK; } else { Leave(dbglvl); return CLOUD_DRIVER_COPY_PART_TO_CACHE_ERROR; } } Leave(dbglvl); return CLOUD_DRIVER_COPY_PART_TO_CACHE_OK; } bool file_driver::restore_cloud_object(transfer *xfer, const char *cloud_fname) { wait_timeout = time(NULL) + atoi(getenv("CLOUD_FILE_DRIVER_SIMULATE_DELAYED_TRANSFER")); /* retry only once for DELAYED_TRANSFER time */ xfer->m_debug_retry = false; return true; } bool file_driver::is_waiting_on_server(transfer *xfer) { (void) (xfer); return (time(NULL)host_name == NULL) { Mmsg1(err, "Failed to initialize File Cloud. ERR=Hostname not set in cloud resource %s\n", cloud->hdr.name); return false; } /* File I/O buffer */ buf_len = DEFAULT_BLOCK_SIZE; hostName = cloud->host_name; bucketName = cloud->bucket_name; protocol = cloud->protocol; uriStyle = cloud->uri_style; accessKeyId = cloud->access_key; secretAccessKey = cloud->secret_key; objects_default_tier = cloud->objects_default_tier; return true; } bool file_driver::start_of_job(POOLMEM *&msg) { Mmsg(msg, _("Using File cloud driver Host=%s Bucket=%s"), hostName, bucketName); return true; } bool file_driver::end_of_job(POOLMEM *&msg) { return true; } bool file_driver::term(POOLMEM *&msg) { return true; } bool file_driver::get_cloud_volume_parts_list(const char* VolumeName, ilist *parts, cancel_callback *cancel_cb, POOLMEM *&err) { Enter(dbglvl); if (parts == NULL || strlen(VolumeName) == 0) { pm_strcpy(err, "Invalid argument"); Leave(dbglvl); return false; } POOLMEM *vol_dir = get_pool_memory(PM_NAME); pm_strcpy(vol_dir, hostName); if (!IsPathSeparator(vol_dir[strlen(vol_dir)-1])) { pm_strcat(vol_dir, "/"); } pm_strcat(vol_dir, VolumeName); DIR* dp = NULL; struct dirent *entry = NULL; struct stat statbuf; int name_max; bool ok = false; POOL_MEM dname(PM_FNAME); int status = 0; Dmsg1(dbglvl, "Searching for parts in: %s\n", vol_dir); if (!(dp = opendir(vol_dir))) { berrno be; Mmsg2(err, "Cannot opendir to get parts list. Volume %s does not exist. ERR=%s", VolumeName, be.bstrerror()); Dmsg1(dbglvl, "%s\n", err); if (errno == ENOENT) { ok=true; /* No volume, so no part */ } goto get_out; } name_max = pathconf(".", _PC_NAME_MAX); if (name_max < 1024) { name_max = 1024; } entry = (struct dirent *)malloc(sizeof(struct dirent) + name_max + 1000); for ( ;; ) { if (cancel_cb && cancel_cb->fct && cancel_cb->fct(cancel_cb->arg)) { pm_strcpy(err, "Job canceled"); goto get_out; } errno = 0; status = breaddir(dp, dname.addr()); if (status != 0) { if (status > 0) { Mmsg1(err, "breaddir failed: status=%d", status); Dmsg1(dbglvl, "%s\n", err); } break; } /* Always ignore . and .. 
*/ if (strcmp(".", dname.c_str()) == 0 || strcmp("..", dname.c_str()) == 0) { continue; } /* Look only for part files */ if (strncmp("part.", dname.c_str(), 5) != 0) { continue; } char *ext = strrchr (dname.c_str(), '.'); if (!ext || strlen(ext) < 2) { continue; } cloud_part *part = (cloud_part*) malloc(sizeof(cloud_part)); /* save extension (part number) to cloud_part struct index*/ part->index = atoi(&ext[1]); POOLMEM *part_path = get_pool_memory(PM_NAME); pm_strcpy(part_path,vol_dir); if (!IsPathSeparator(part_path[strlen(vol_dir)-1])) { pm_strcat(part_path, "/"); } pm_strcat(part_path, dname.c_str()); /* Get size of part */ if (lstat(part_path, &statbuf) == -1) { berrno be; Mmsg(err, "Failed to stat file %s: %s", part_path, be.bstrerror()); Dmsg1(dbglvl, "%s\n", err); free_pool_memory(part_path); free(part); goto get_out; } free_pool_memory(part_path); part->size = statbuf.st_size; part->mtime = statbuf.st_mtime; bmemzero(part->hash64, 64); parts->put(part->index, part); } ok = true; get_out: if (dp) { closedir(dp); } if (entry) { free(entry); } free_pool_memory(vol_dir); Leave(dbglvl); return ok; } bool file_driver::get_cloud_volumes_list(alist *volumes, cancel_callback *cancel_cb, POOLMEM *&err) { if (!volumes) { pm_strcpy(err, "Invalid argument"); return false; } Enter(dbglvl); DIR* dp = NULL; struct dirent *entry = NULL; struct stat statbuf; int name_max; bool ok = false; POOLMEM *fullpath = get_pool_memory(PM_NAME); POOL_MEM dname(PM_FNAME); int status = 0; if (!(dp = opendir(hostName))) { berrno be; Mmsg2(err, "Cannot opendir to get volumes list. host_name %s does not exist. ERR=%s", hostName, be.bstrerror()); Dmsg1(dbglvl, "%s\n", err); if (errno == ENOENT) { ok=true; /* No volume, so no part */ } goto get_out; } name_max = pathconf(".", _PC_NAME_MAX); if (name_max < 1024) { name_max = 1024; } entry = (struct dirent *)malloc(sizeof(struct dirent) + name_max + 1000); for ( ;; ) { if (cancel_cb && cancel_cb->fct && cancel_cb->fct(cancel_cb->arg)) { goto get_out; } errno = 0; status = breaddir(dp, dname.addr()); if (status != 0) { if (status > 0) { Mmsg1(err, "breaddir failed: status=%d", status); Dmsg1(dbglvl, "%s\n", err); } break; } /* Always ignore . and .. */ if (strcmp(".", dname.c_str()) == 0 || strcmp("..", dname.c_str()) == 0) { continue; } pm_strcpy(fullpath, hostName); if (!IsPathSeparator(fullpath[strlen(fullpath)-1])) { pm_strcat(fullpath, "/"); } pm_strcat(fullpath, dname.c_str()); if (lstat(fullpath, &statbuf) != 0) { berrno be; Dmsg2(dbglvl, "Failed to stat file %s: %s\n", fullpath, be.bstrerror()); continue; } if (S_ISDIR(statbuf.st_mode)) { volumes->append(bstrdup(dname.c_str())); } } ok = true; get_out: if (dp) { closedir(dp); } if (entry) { free(entry); } free_pool_memory(fullpath); Leave(dbglvl); return ok; } bacula-15.0.3/src/stored/lock.h0000644000175000017500000001050714771010173016055 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * Definitions for locking and blocking functions in the SD * * Kern Sibbald, pulled out of dev.h June 2007 * */ #ifndef __LOCK_H #define __LOCK_H 1 void _lock_reservations(const char *file="**Unknown**", int line=0); void _unlock_reservations(); void _lock_volumes(const char *file="**Unknown**", int line=0); void _unlock_volumes(); #ifdef SD_DEBUG_LOCK #define lock_reservations() \ do { Dmsg3(sd_dbglvl, "lock_reservations at %s:%d precnt=%d\n", \ __FILE__, __LINE__, \ reservations_lock_count); \ _lock_reservations(__FILE__, __LINE__); \ Dmsg0(sd_dbglvl, "lock_reservations: got lock\n"); \ } while (0) #define unlock_reservations() \ do { Dmsg3(sd_dbglvl, "unlock_reservations at %s:%d precnt=%d\n", \ __FILE__, __LINE__, \ reservations_lock_count); \ _unlock_reservations(); } while (0) #define lock_volumes() \ do { Dmsg3(sd_dbglvl, "lock_volumes at %s:%d precnt=%d\n", \ __FILE__, __LINE__, \ vol_list_lock_count); \ _lock_volumes(__FILE__, __LINE__); \ Dmsg0(sd_dbglvl, "lock_volumes: got lock\n"); \ } while (0) #define unlock_volumes() \ do { Dmsg3(sd_dbglvl, "unlock_volumes at %s:%d precnt=%d\n", \ __FILE__, __LINE__, \ vol_list_lock_count); \ _unlock_volumes(); } while (0) #else #define lock_reservations() _lock_reservations(__FILE__, __LINE__) #define unlock_reservations() _unlock_reservations() #define lock_volumes() _lock_volumes(__FILE__, __LINE__) #define unlock_volumes() _unlock_volumes() #endif #ifdef DEV_DEBUG_LOCK #define Lock() dbg_Lock(__FILE__, __LINE__) #define Unlock() dbg_Unlock(__FILE__, __LINE__) #define rLock(locked) dbg_rLock(__FILE__, __LINE__, locked) #define rUnlock() dbg_rUnlock(__FILE__, __LINE__) #endif #ifdef SD_DEBUG_LOCK #define Lock_acquire() dbg_Lock_acquire(__FILE__, __LINE__) #define Unlock_acquire() dbg_Unlock_acquire(__FILE__, __LINE__) #define Lock_read_acquire() dbg_Lock_read_acquire(__FILE__, __LINE__) #define Unlock_read_acquire() dbg_Unlock_read_acquire(__FILE__, __LINE__) #define Lock_VolCatInfo() dbg_Lock_VolCatInfo(__FILE__, __LINE__) #define Unlock_VolCatInfo() dbg_Unlock_VolCatInfo(__FILE__, __LINE__) #endif #define block_device(d, s) _block_device(__FILE__, __LINE__, (d), s) #define unblock_device(d) _unblock_device(__FILE__, __LINE__, (d)) #define obtain_device_block(d, p, r, s) (d)->_obtain_device_block(__FILE__, __LINE__, (p), (r), (s)) #define give_back_device_block(d, p) _give_back_device_block(__FILE__, __LINE__, (d), (p)) /* m_blocked states (mutually exclusive) */ enum { BST_NOT_BLOCKED = 0, /* not blocked */ BST_UNMOUNTED, /* User unmounted device */ BST_WAITING_FOR_SYSOP, /* Waiting for operator to mount tape */ BST_DOING_ACQUIRE, /* Opening/validating/moving tape */ BST_WRITING_LABEL, /* Labeling a tape */ BST_UNMOUNTED_WAITING_FOR_SYSOP, /* User unmounted during wait for op */ BST_MOUNT, /* Mount request */ BST_DESPOOLING, /* Despooling -- i.e. multiple writes */ BST_RELEASING /* Releasing the device */ }; typedef struct s_steal_lock { pthread_t no_wait_id; /* id of no wait thread */ int dev_blocked; /* state */ int dev_prev_blocked; /* previous blocked state */ uint32_t blocked_by; /* previous blocker */ } bsteal_lock_t; /* * Used in unblock() call */ enum { DEV_LOCKED = true, DEV_UNLOCKED = false }; #endif bacula-15.0.3/src/stored/acquire.c0000644000175000017500000006763614771010173016570 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. 
You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Routines to acquire and release a device for read/write * * Written by Kern Sibbald, August MMII */ #include "bacula.h" /* pull in global headers */ #include "stored.h" /* pull in Storage Deamon headers */ static int const rdbglvl = 100; /* Forward referenced functions */ static void set_dcr_from_vol(DCR *dcr, VOL_LIST *vol); /********************************************************************* * Acquire device for reading. * The drive should have previously been reserved by calling * reserve_device_for_read(). We read the Volume label from the block and * leave the block pointers just after the label. * * Returns: false if failed for any reason * true if successful */ bool acquire_device_for_read(DCR *dcr, uint32_t retry_count) { DEVICE *dev; JCR *jcr = dcr->jcr; bool ok = false; bool tape_previously_mounted; VOL_LIST *vol; bool try_autochanger = true; int i; int vol_label_status; uint32_t retry = 0; Enter(rdbglvl); dev = dcr->dev; ASSERT2(!dev->adata, "Called with adata dev. Wrong!"); dev->Lock_read_acquire(); Dmsg2(rdbglvl, "dcr=%p dev=%p\n", dcr, dcr->dev); Dmsg2(rdbglvl, "MediaType dcr=%s dev=%s\n", dcr->media_type, dev->device->media_type); dev->dblock(BST_DOING_ACQUIRE); if (dev->num_writers > 0) { Jmsg2(jcr, M_FATAL, 0, _("Acquire read: num_writers=%d not zero. Job %d canceled.\n"), dev->num_writers, jcr->JobId); goto get_out; } /* Find next Volume, if any */ vol = jcr->VolList; if (!vol) { char ed1[50]; Jmsg(jcr, M_FATAL, 0, _("No volumes specified for reading. Job %s canceled.\n"), edit_int64(jcr->JobId, ed1)); goto get_out; } jcr->CurReadVolume++; for (i=1; iCurReadVolume; i++) { vol = vol->next; } if (!vol) { Jmsg(jcr, M_FATAL, 0, _("Logic error: no next volume to read. Numvol=%d Curvol=%d\n"), jcr->NumReadVolumes, jcr->CurReadVolume); goto get_out; /* should not happen */ } set_dcr_from_vol(dcr, vol); if (generate_plugin_event(jcr, bsdEventDeviceOpen, dcr) != bRC_OK) { Jmsg(jcr, M_FATAL, 0, _("generate_plugin_event(bsdEventDeviceOpen) Failed\n")); goto get_out; } Dmsg2(rdbglvl, "Want Vol=%s Slot=%d\n", vol->VolumeName, vol->Slot); /* * If the MediaType requested for this volume is not the * same as the current drive, we attempt to find the same * device that was used to write the orginal volume. If * found, we switch to using that device. * * N.B. A lot of routines rely on the dcr pointer not changing * read_records.c even has multiple dcrs cached, so we take care * here to release all important parts of the dcr and re-acquire * them such as the block pointer (size may change), but we do * not release the dcr. */ Dmsg2(rdbglvl, "MediaType dcr=%s dev=%s\n", dcr->media_type, dev->device->media_type); if (dcr->media_type[0] && strcmp(dcr->media_type, dev->device->media_type) != 0) { RCTX rctx; DIRSTORE *store; int stat; Jmsg4(jcr, M_INFO, 0, _("Changing read device. Want Media Type=\"%s\" have=\"%s\"\n" " %s device=%s\n"), dcr->media_type, dev->device->media_type, dev->print_type(), dev->print_name()); Dmsg4(rdbglvl, "Changing read device. 
Want Media Type=\"%s\" have=\"%s\"\n" " %s device=%s\n", dcr->media_type, dev->device->media_type, dev->print_type(), dev->print_name()); generate_plugin_event(jcr, bsdEventDeviceClose, dcr); dev->dunblock(DEV_UNLOCKED); lock_reservations(); memset(&rctx, 0, sizeof(RCTX)); rctx.jcr = jcr; jcr->read_dcr = dcr; jcr->reserve_msgs = New(alist(10, not_owned_by_alist)); rctx.any_drive = true; rctx.device_name = vol->device; store = new DIRSTORE; memset(store, 0, sizeof(DIRSTORE)); store->name[0] = 0; /* No storage name */ bstrncpy(store->media_type, vol->MediaType, sizeof(store->media_type)); bstrncpy(store->pool_name, dcr->pool_name, sizeof(store->pool_name)); bstrncpy(store->pool_type, dcr->pool_type, sizeof(store->pool_type)); store->append = false; rctx.store = store; clean_device(dcr); /* clean up the dcr */ /* * Search for a new device */ stat = search_res_for_device(rctx); release_reserve_messages(jcr); /* release queued messages */ unlock_reservations(); if (stat == 1) { /* found new device to use */ /* * Switching devices, so acquire lock on new device, * then release the old one. */ dcr->dev->Lock_read_acquire(); /* lock new one */ dev->Unlock_read_acquire(); /* release old one */ dev = dcr->dev; /* get new device pointer */ dev->dblock(BST_DOING_ACQUIRE); dcr->VolumeName[0] = 0; Jmsg(jcr, M_INFO, 0, _("Media Type change. New read %s device %s chosen.\n"), dev->print_type(), dev->print_name()); Dmsg2(50, "Media Type change. New read %s device %s chosen.\n", dev->print_type(), dev->print_name()); if (generate_plugin_event(jcr, bsdEventDeviceOpen, dcr) != bRC_OK) { Jmsg(jcr, M_FATAL, 0, _("generate_plugin_event(bsdEventDeviceOpen) Failed\n")); goto get_out; } bstrncpy(dcr->VolumeName, vol->VolumeName, sizeof(dcr->VolumeName)); dcr->setVolCatName(vol->VolumeName); bstrncpy(dcr->media_type, vol->MediaType, sizeof(dcr->media_type)); dcr->VolCatInfo.Slot = vol->Slot; dcr->VolCatInfo.InChanger = vol->Slot > 0; bstrncpy(dcr->pool_name, store->pool_name, sizeof(dcr->pool_name)); bstrncpy(dcr->pool_type, store->pool_type, sizeof(dcr->pool_type)); } else { /* error */ Jmsg1(jcr, M_FATAL, 0, _("No suitable device found to read Volume \"%s\"\n"), vol->VolumeName); Dmsg1(rdbglvl, "No suitable device found to read Volume \"%s\"\n", vol->VolumeName); goto get_out; } } Dmsg2(rdbglvl, "MediaType dcr=%s dev=%s\n", dcr->media_type, dev->device->media_type); dev->clear_unload(); if (dev->vol && dev->vol->is_swapping()) { dev->vol->set_slot(vol->Slot); Dmsg3(rdbglvl, "swapping: slot=%d Vol=%s dev=%s\n", dev->vol->get_slot(), dev->vol->vol_name, dev->print_name()); } init_device_wait_timers(dcr); tape_previously_mounted = dev->can_read() || dev->can_append() || dev->is_labeled(); // tape_initially_mounted = tape_previously_mounted; /* Volume info is always needed because of VolType */ Dmsg1(rdbglvl, "dir_get_volume_info vol=%s\n", dcr->VolumeName); if (!dir_get_volume_info(dcr, dcr->VolumeName, GET_VOL_INFO_FOR_READ)) { Dmsg2(rdbglvl, "dir_get_vol_info failed for vol=%s: %s\n", dcr->VolumeName, jcr->errmsg); Jmsg1(jcr, M_WARNING, 0, "Read acquire: %s", jcr->errmsg); } dev->set_load(); /* set to load volume */ for ( ;; ) { /* If not polling limit retries */ if (!dev->poll && retry++ >= retry_count) { break; } dev->clear_labeled(); /* force reread of label */ if (job_canceled(jcr)) { char ed1[50]; Mmsg1(dev->errmsg, _("Job %s canceled.\n"), edit_int64(jcr->JobId, ed1)); Jmsg(jcr, M_INFO, 0, dev->errmsg); goto get_out; /* error return */ } dcr->do_unload(); dcr->do_swapping(SD_READ); dcr->do_load(SD_READ); 
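      /*
       * The unload/swap/load calls above only act when the corresponding
       * flags are set on the device; after them the dcr is refreshed from
       * the VOL_LIST entry and the code below opens the device and reads
       * the Volume label.
       */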
set_dcr_from_vol(dcr, vol); /* refresh dcr with desired volume info */ /* * This code ensures that the device is ready for * reading. If it is a file, it opens it. * If it is a tape, it checks the volume name */ Dmsg1(rdbglvl, "open vol=%s\n", dcr->VolumeName); if (!dev->open_device(dcr, OPEN_READ_ONLY)) { if (!dev->poll) { Jmsg4(jcr, M_WARNING, 0, _("Read open %s device %s Volume \"%s\" failed: ERR=%s\n"), dev->print_type(), dev->print_name(), dcr->VolumeName, dev->bstrerror()); } goto default_path; } Dmsg1(rdbglvl, "opened dev %s OK\n", dev->print_name()); /* Read Volume Label */ Dmsg0(rdbglvl, "calling read-vol-label\n"); vol_label_status = dev->read_dev_volume_label(dcr); switch (vol_label_status) { case VOL_OK: Dmsg1(rdbglvl, "Got correct volume. VOL_OK: %s\n", dcr->VolCatInfo.VolCatName); ok = true; dev->VolCatInfo = dcr->VolCatInfo; /* structure assignment */ break; /* got it */ case VOL_IO_ERROR: Dmsg0(rdbglvl, "IO Error\n"); /* * Send error message generated by dev->read_dev_volume_label() * only we really had a tape mounted. This supresses superfluous * error messages when nothing is mounted. */ if (tape_previously_mounted) { Jmsg(jcr, M_WARNING, 0, "Read acquire: %s", jcr->errmsg); } goto default_path; case VOL_TYPE_ERROR: Jmsg(jcr, M_FATAL, 0, "%s", jcr->errmsg); goto get_out; case VOL_NAME_ERROR: Dmsg3(rdbglvl, "Vol name=%s want=%s drv=%s.\n", dev->VolHdr.VolumeName, dcr->VolumeName, dev->print_name()); if (dev->is_volume_to_unload()) { goto default_path; } dev->set_unload(); /* force unload of unwanted tape */ if (!unload_autochanger(dcr, -1)) { /* at least free the device so we can re-open with correct volume */ dev->close(dcr); free_volume(dev); } dev->set_load(); /* Fall through */ default: Jmsg1(jcr, M_WARNING, 0, "Read acquire: %s", jcr->errmsg); default_path: Dmsg0(rdbglvl, "default path\n"); tape_previously_mounted = true; /* * If the device requires mount, close it, so the device can be ejected. 
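       * Closing here also releases the volume reservation (free_volume), so
       * the operator or autochanger can mount a different tape before we
       * retry the open/label sequence.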
*/ if (dev->requires_mount()) { dev->close(dcr); free_volume(dev); } /* Call autochanger only once unless ask_sysop called */ if (try_autochanger) { int stat; Dmsg2(rdbglvl, "calling autoload Vol=%s Slot=%d\n", dcr->VolumeName, dcr->VolCatInfo.Slot); stat = autoload_device(dcr, SD_READ, NULL); if (stat > 0) { try_autochanger = false; continue; /* try reading volume mounted */ } } /* Mount a specific volume and no other */ Dmsg0(rdbglvl, "calling dir_ask_sysop\n"); if (!dir_ask_sysop_to_mount_volume(dcr, SD_READ)) { goto get_out; /* error return */ } /* Volume info is always needed because of VolType */ Dmsg1(150, "dir_get_volume_info vol=%s\n", dcr->VolumeName); if (!dir_get_volume_info(dcr, dcr->VolumeName, GET_VOL_INFO_FOR_READ)) { Dmsg2(150, "dir_get_vol_info failed for vol=%s: %s\n", dcr->VolumeName, jcr->errmsg); Jmsg1(jcr, M_WARNING, 0, "Read acquire: %s", jcr->errmsg); } dev->set_load(); /* set to load volume */ try_autochanger = true; /* permit trying the autochanger again */ continue; /* try reading again */ } /* end switch */ break; } /* end for loop */ if (!ok) { Jmsg2(jcr, M_FATAL, 0, _("Too many errors trying to mount %s device %s for reading.\n"), dev->print_type(), dev->print_name()); goto get_out; } dev->clear_append(); dev->set_read(); jcr->sendJobStatus(JS_Running); Jmsg(jcr, M_INFO, 0, _("Ready to read from volume \"%s\" on %s device %s.\n"), dcr->VolumeName, dev->print_type(), dev->print_name()); get_out: dev->Lock(); /* If failed and not writing plugin close device */ if (!ok && dev->num_writers == 0 && dev->num_reserved() == 0) { generate_plugin_event(jcr, bsdEventDeviceClose, dcr); } /* * Normally we are blocked, but in at least one error case above * we are not blocked because we unsuccessfully tried changing * devices. */ if (dev->is_blocked()) { dev->dunblock(DEV_LOCKED); } else { dev->Unlock(); /* dunblock() unlock the device too */ } Dmsg2(rdbglvl, "dcr=%p dev=%p\n", dcr, dcr->dev); Dmsg2(rdbglvl, "MediaType dcr=%s dev=%s\n", dcr->media_type, dev->device->media_type); dev->Unlock_read_acquire(); Leave(rdbglvl); return ok; } /* * Acquire device for writing. We permit multiple writers. * If this is the first one, we read the label. * * Returns: NULL if failed for any reason * dcr if successful. * Note, normally reserve_device_for_append() is called * before this routine. */ DCR *acquire_device_for_append(DCR *dcr) { DEVICE *dev = dcr->dev; JCR *jcr = dcr->jcr; bool ok = false; bool have_vol = false; Enter(200); dcr->set_ameta(); init_device_wait_timers(dcr); dev->Lock_acquire(); /* only one job at a time */ dev->Lock(); Dmsg1(100, "acquire_append device is %s\n", dev->print_type()); /* * With the reservation system, this should not happen */ if (dev->can_read()) { Mmsg2(jcr->errmsg, "Want to append but %s device %s is busy reading.\n", dev->print_type(), dev->print_name()); Jmsg(jcr, M_FATAL, 0, "%s", jcr->errmsg); Dmsg1(50, "%s", jcr->errmsg); goto get_out; } dev->clear_unload(); /* * have_vol defines whether or not mount_next_write_volume should * ask the Director again about what Volume to use. */ if (dev->can_append() && dcr->is_suitable_volume_mounted() && strcmp(dcr->VolCatInfo.VolCatStatus, "Recycle") != 0) { Dmsg0(190, "device already in append.\n"); /* * At this point, the correct tape is already mounted, so * we do not need to do mount_next_write_volume(), unless * we need to recycle the tape. 
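       * (A Volume whose catalog status is still "Recycle" is excluded by the
       * test above, which forces the mount_next_write_volume() path below to
       * handle the recycling before we append.)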
*/ if (dev->num_writers == 0) { dev->VolCatInfo = dcr->VolCatInfo; /* structure assignment */ } have_vol = dcr->is_tape_position_ok(); } if (!have_vol) { dev->rLock(true); block_device(dev, BST_DOING_ACQUIRE); dev->Unlock(); Dmsg1(190, "jid=%u Do mount_next_write_vol\n", (uint32_t)jcr->JobId); if (!dcr->mount_next_write_volume()) { if (!job_canceled(jcr) && !jcr->is_incomplete()) { /* Reduce "noise" -- don't print if job canceled */ Mmsg2(jcr->errmsg, _("Could not ready %s device %s for append.\n"), dev->print_type(), dev->print_name()); Jmsg(jcr, M_FATAL, 0, "%s", jcr->errmsg); Dmsg1(50, "%s", jcr->errmsg); } dev->Lock(); unblock_device(dev); goto get_out; } Dmsg2(190, "Output pos=%u:%u\n", dcr->dev->file, dcr->dev->block_num); dev->Lock(); unblock_device(dev); } if (generate_plugin_event(jcr, bsdEventDeviceOpen, dcr) != bRC_OK) { Mmsg0(jcr->errmsg, _("generate_plugin_event(bsdEventDeviceOpen) Failed\n")); Jmsg(jcr, M_FATAL, 0, "%s", jcr->errmsg); Dmsg1(50, "%s", jcr->errmsg); goto get_out; } dev->num_writers++; /* we are now a writer */ if (jcr->NumWriteVolumes == 0) { jcr->NumWriteVolumes = 1; } dev->VolCatInfo.VolCatJobs++; /* increment number of jobs on vol */ ok = dir_update_volume_info(dcr, false, false); /* send Volume info to Director */ if (!ok) { /* We cannot use this volume/device */ Jmsg(jcr, M_WARNING, 0, _("Warning cannot use Volume \"%s\", update_volume_info failed.\n"), dev->VolCatInfo.VolCatName); dev->num_writers--; /* on fail update_volume do not update num_writers */ /* TODO: See if we revert the NumWriteVolumes as well */ } Dmsg4(100, "=== nwriters=%d nres=%d vcatjob=%d dev=%s\n", dev->num_writers, dev->num_reserved(), dev->VolCatInfo.VolCatJobs, dev->print_name()); get_out: /* Don't plugin close here, we might have multiple writers */ dcr->clear_reserved(); dev->Unlock(); dev->Unlock_acquire(); Leave(200); return ok ? dcr : NULL; } /* * This job is done, so release the device. From a Unix standpoint, * the device remains open. * * Note, if we were spooling, we may enter with the device blocked. * We unblock at the end, only if it was us who blocked the * device. * */ bool release_device(DCR *dcr) { JCR *jcr = dcr->jcr; DEVICE *dev = dcr->dev; bool ok = true; char tbuf[100]; bsteal_lock_t holder; dev->Lock(); if (!obtain_device_block(dev, &holder, 0, /* infinite wait */ BST_RELEASING)) { ASSERT2(0, "unable to obtain device block"); } lock_volumes(); Dmsg2(100, "release_device device %s is %s\n", dev->print_name(), dev->is_tape()?"tape":"disk"); /* if device is reserved, job never started, so release the reserve here */ dcr->clear_reserved(); if (dev->can_read()) { VOLUME_CAT_INFO *vol = &dev->VolCatInfo; generate_plugin_event(jcr, bsdEventDeviceClose, dcr); dev->clear_read(); /* clear read bit */ Dmsg2(150, "dir_update_vol_info. label=%d Vol=%s\n", dev->is_labeled(), vol->VolCatName); if (dev->is_labeled() && vol->VolCatName[0] != 0) { dir_update_volume_info(dcr, false, false); /* send Volume info to Director */ remove_read_volume(jcr, dcr->VolumeName); volume_unused(dcr); } } else if (dev->num_writers > 0) { /* * Note if WEOT is set, we are at the end of the tape * and may not be positioned correctly, so the * job_media_record and update_vol_info have already been * done, which means we skip them here. */ dev->num_writers--; Dmsg1(100, "There are %d writers in release_device\n", dev->num_writers); if (dev->is_labeled()) { if (!dev->at_weot()) { Dmsg2(200, "dir_create_jobmedia. 
Release vol=%s dev=%s\n", dev->getVolCatName(), dev->print_name()); } if (!dev->at_weot() && !dir_create_jobmedia_record(dcr)) { Jmsg2(jcr, M_FATAL, 0, _("Could not create JobMedia record for Volume=\"%s\" Job=%s\n"), dcr->getVolCatName(), jcr->Job); } /* If no more writers, and no errors, and wrote something, write an EOF */ if (!dev->num_writers && dev->can_write() && dev->block_num > 0) { dev->weof(dcr, 1); write_ansi_ibm_labels(dcr, ANSI_EOF_LABEL, dev->VolHdr.VolumeName); } if (!dev->at_weot()) { dev->VolCatInfo.VolCatFiles = dev->get_file(); /* set number of files */ /* Note! do volume update before close, which zaps VolCatInfo */ dir_update_volume_info(dcr, false, false); /* send Volume info to Director */ Dmsg2(200, "dir_update_vol_info. Release vol=%s dev=%s\n", dev->getVolCatName(), dev->print_name()); } if (dev->num_writers == 0) { /* if not being used */ volume_unused(dcr); /* we obviously are not using the volume */ generate_plugin_event(jcr, bsdEventDeviceClose, dcr); } else { /* Other jobs are writing, make sure our data is safely written */ if (!dev->sync_data(dcr)) { Jmsg(jcr, M_ERROR, 0, ("%s\n"), dev->errmsg); } } } } else { /* * If we reach here, it is most likely because the job * has failed, since the device is not in read mode and * there are no writers. It was probably reserved. */ volume_unused(dcr); generate_plugin_event(jcr, bsdEventDeviceClose, dcr); } Dmsg3(100, "%d writers, %d reserve, dev=%s\n", dev->num_writers, dev->num_reserved(), dev->print_name()); /* If no writers, close if file or !CAP_ALWAYS_OPEN */ if (dev->num_writers == 0 && (!dev->is_tape() || !dev->has_cap(CAP_ALWAYSOPEN))) { generate_plugin_event(jcr, bsdEventDeviceClose, dcr); /* Sync the data before close() if needed for this device */ if (!dev->sync_data(dcr)) { Jmsg(jcr, M_ERROR, 0, ("%s\n"), dev->errmsg); } if (!dev->close(dcr) && dev->errmsg[0]) { Jmsg(jcr, M_ERROR, 0, "%s", dev->errmsg); } free_volume(dev); } unlock_volumes(); /* Do new tape alert code */ dev->get_tape_alerts(dcr); /* alert_callback is in tape_alert.c -- show only most recent (last) alert */ dev->show_tape_alerts(dcr, list_long, list_last, alert_callback); pthread_cond_broadcast(&dev->wait_next_vol); Dmsg2(100, "JobId=%u broadcast wait_device_release at %s\n", (uint32_t)jcr->JobId, bstrftimes(tbuf, sizeof(tbuf), (utime_t)time(NULL))); pthread_cond_broadcast(&wait_device_release); give_back_device_block(dev, &holder); /* * If we are the thread that blocked the device, then unblock it */ if (pthread_equal(dev->no_wait_id, pthread_self())) { dev->dunblock(true); } else { dev->Unlock(); } dev->end_of_job(dcr, TRUNC_CONF_DEFAULT); if (dcr->keep_dcr) { dev->detach_dcr_from_dev(dcr); } else { free_dcr(dcr); } Dmsg2(100, "Device %s released by JobId=%u\n", dev->print_name(), (uint32_t)jcr->JobId); return ok; } /* * Clean up the device for reuse without freeing the memory */ bool clean_device(DCR *dcr) { bool ok; dcr->keep_dcr = true; /* do not free the dcr */ ok = release_device(dcr); dcr->keep_dcr = false; return ok; } /* * Create a new Device Control Record and attach * it to the device (if this is a real job). * Note, this has been updated so that it can be called first * without a DEVICE, then a second or third time with a DEVICE, * and each time, it should cleanup and point to the new device. * This should facilitate switching devices. * Note, each dcr must point to the controlling job (jcr). However, * a job can have multiple dcrs, so we must not store in the jcr's * structure as previously. 
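 * (On the first call the DCR itself is allocated and zeroed; on later calls
 * only the device-dependent parts -- the block buffers, the spare record and
 * the spool size -- are torn down and rebuilt for the new device.)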
The higher level routine must store * this dcr in the right place * */ DCR *new_dcr(JCR *jcr, DCR *dcr, DEVICE *dev, bool writing) { DEVICE *odev; if (!dcr) { dcr = (DCR *)malloc(sizeof(DCR)); memset(dcr, 0, sizeof(DCR)); dcr->tid = pthread_self(); dcr->uploads = New(alist(100, false)); dcr->downloads = New(alist(100, false)); dcr->spool_fd = -1; } dcr->jcr = jcr; /* point back to jcr */ odev = dcr->dev; if (dcr->attached_to_dev && odev) { Dmsg2(100, "Detach 0x%x from olddev %s\n", dcr, odev->print_name()); odev->detach_dcr_from_dev(dcr); } ASSERT2(!dcr->attached_to_dev, "DCR is attached. Wrong!"); /* Set device information, possibly change device */ if (dev) { ASSERT2(!dev->adata, "Called with adata dev. Wrong!"); dev->free_dcr_blocks(dcr); dev->new_dcr_blocks(dcr); if (dcr->rec) { free_record(dcr->rec); } dcr->rec = new_record(); /* Use job spoolsize prior to device spoolsize */ if (jcr && jcr->spool_size) { dcr->max_job_spool_size = jcr->spool_size; } else { dcr->max_job_spool_size = dev->device->max_job_spool_size; } dcr->device = dev->device; dcr->set_dev(dev); Dmsg2(100, "Attach 0x%x to dev %s\n", dcr, dev->print_name()); dev->attach_dcr_to_dev(dcr); } if (writing) { dcr->set_writing(); } else { dcr->clear_writing(); } return dcr; } /* * Search the dcrs list for the given dcr. If it is found, * as it should be, then remove it. Also zap the jcr pointer * to the dcr if it is the same one. * * Note, this code will be turned on when we can write to multiple * dcrs at the same time. */ #ifdef needed static void remove_dcr_from_dcrs(DCR *dcr) { JCR *jcr = dcr->jcr; if (jcr->dcrs) { int i = 0; DCR *ldcr; int num = jcr->dcrs->size(); for (i=0; i < num; i++) { ldcr = (DCR *)jcr->dcrs->get(i); if (ldcr == dcr) { jcr->dcrs->remove(i); if (jcr->dcr == dcr) { jcr->dcr = NULL; } } } } } #endif void DEVICE::attach_dcr_to_dev(DCR *dcr) { JCR *jcr = dcr->jcr; Lock_dcrs(); jcr = dcr->jcr; if (jcr) Dmsg1(500, "JobId=%u enter attach_dcr_to_dev\n", (uint32_t)jcr->JobId); /* ***FIXME*** return error if dev not initiated */ if (!dcr->attached_to_dev && initiated && jcr && jcr->getJobType() != JT_SYSTEM) { ASSERT2(!adata, "Called on adata dev. Wrong!"); Dmsg4(200, "Attach Jid=%d dcr=%p size=%d dev=%s\n", (uint32_t)jcr->JobId, dcr, attached_dcrs->size(), print_name()); attached_dcrs->append(dcr); /* attach dcr to device */ dcr->attached_to_dev = true; } Unlock_dcrs(); } /* * Note!! Do not enter with dev->Lock() since unreserve_device() * is going to lock it too. */ void DEVICE::detach_dcr_from_dev(DCR *dcr) { Dmsg0(500, "Enter detach_dcr_from_dev\n"); /* jcr is NULL in some cases */ Lock(); Lock_dcrs(); /* Detach this dcr only if attached */ if (dcr->attached_to_dev) { ASSERT2(!adata, "Called with adata dev. Wrong!"); dcr->unreserve_device(true); Dmsg4(200, "Detach Jid=%d dcr=%p size=%d to dev=%s\n", (uint32_t)dcr->jcr->JobId, dcr, attached_dcrs->size(), print_name()); dcr->attached_to_dev = false; if (attached_dcrs->size()) { attached_dcrs->remove(dcr); /* detach dcr from device */ } } /* Check if someone accidentally left a drive reserved, and clear it */ if (attached_dcrs->size() == 0 && num_reserved() > 0) { Pmsg3(000, "Warning!!! Detach %s DCR: dcrs=0 reserved=%d setting reserved==0. dev=%s\n", dcr->is_writing() ? "writing" : "reading", num_reserved(), print_name()); m_num_reserved = 0; } dcr->attached_to_dev = false; Unlock_dcrs(); Unlock(); } /* * Free up all aspects of the given dcr -- i.e. dechain it, * release allocated memory, zap pointers, ... 
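 * Note that the device itself is left open; only the dcr's attachment to it
 * (detach_dcr_from_dev), its block buffers, its spare record, the upload and
 * download lists and the jcr back-pointers are released here.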
*/ void free_dcr(DCR *dcr) { JCR *jcr; jcr = dcr->jcr; if (dcr->dev) { dcr->dev->detach_dcr_from_dev(dcr); } if (dcr->dev) { dcr->dev->free_dcr_blocks(dcr); } else { dcr->ameta_block = NULL; free_block(dcr->block); } if (dcr->rec) { free_record(dcr->rec); } if (jcr && jcr->dcr == dcr) { jcr->dcr = NULL; } if (jcr && jcr->read_dcr == dcr) { jcr->read_dcr = NULL; } delete dcr->uploads; delete dcr->downloads; free(dcr); } static void set_dcr_from_vol(DCR *dcr, VOL_LIST *vol) { /* * Note, if we want to be able to work from a .bsr file only * for disaster recovery, we must "simulate" reading the catalog */ bstrncpy(dcr->VolumeName, vol->VolumeName, sizeof(dcr->VolumeName)); dcr->setVolCatName(vol->VolumeName); bstrncpy(dcr->media_type, vol->MediaType, sizeof(dcr->media_type)); dcr->VolCatInfo.Slot = vol->Slot; dcr->VolCatInfo.InChanger = vol->Slot > 0; dcr->CurrentVol = vol; /* Keep VOL_LIST pointer (freed at the end of the job) */ } bacula-15.0.3/src/stored/org_stored_dedup.c0000644000175000017500000000207014771010173020444 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #include "bacula.h" #include "stored.h" bool is_dedup_server_side(DEVICE *dev, int32_t stream, uint64_t stream_len) { return false; } bool is_dedup_ref(DEV_RECORD *rec, bool lazy) { return false; } void dedup_get_limits(int64_t *nofile, int64_t *memlock) { } /* dump the status of all dedupengines */ void list_dedupengines(char *cmd, STATUS_PKT *sp, const char *target) { } bool dedup_parse_filter(char *fltr) { return false; } bacula-15.0.3/src/stored/fifo_dev.c0000644000175000017500000000265614771010173016707 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * * fifo_dev.c -- low level operations on fifo devices * * written by, Kern Sibbald, MM * separated from file_dev.c January 2016f4 * */ #include "bacula.h" #include "stored.h" bool fifo_dev::open_device(DCR *dcr, int omode) { return tape_dev::open_device(dcr, omode); } boffset_t fifo_dev::lseek(DCR *dcr, boffset_t offset, int whence) { /* Cannot seek */ return 0; } bool fifo_dev::truncate(DCR *dcr) { /* Cannot truncate */ return true; } const char *fifo_dev::print_type() { return "FIFO"; } int fifo_dev::device_specific_init(JCR *jcr, DEVRES *device) { set_cap(CAP_STREAM); /* Apparently it is not possible to sync the file descriptor on /dev/null */ if (strcmp(device->device_name, "/dev/null") == 0) { clear_cap(CAP_SYNCONCLOSE); } return 0; } bacula-15.0.3/src/stored/status.c0000644000175000017500000012362214771010173016446 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * This file handles the status command * * Kern Sibbald, May MMIII * * */ #include "bacula.h" #include "stored.h" #define USE_LIST_TERMINATED_JOBS #define USE_LIST_RESOURCE_LIMITS #include "lib/status.h" #include "sd_plugins.h" /* Imported functions */ extern void dbg_print_plugin(FILE *fp); /* Imported variables */ extern BSOCK *filed_chan; /* Static variables */ static char OKqstatus[] = "3000 OK .status\n"; static char DotStatusJob[] = "JobId=%d JobStatus=%c JobErrors=%d\n"; /* Forward referenced functions */ static void sendit(POOL_MEM &msg, int len, STATUS_PKT *sp); static void sendit(const char *msg, int len, void *arg); static void send_blocked_status(DEVICE *dev, STATUS_PKT *sp); static void send_device_status(DEVICE *dev, STATUS_PKT *sp); static void list_running_jobs(STATUS_PKT *sp); static void list_jobs_waiting_on_reservation(STATUS_PKT *sp); static void list_status_header(STATUS_PKT *sp); static void list_devices(STATUS_PKT *sp, char *name=NULL); static void list_plugins(STATUS_PKT *sp); static void list_cloud_transfers(STATUS_PKT *sp, bool verbose); static void list_collectors_status(STATUS_PKT *sp, char *collname); static void api_collectors_status(STATUS_PKT *sp, char *collname); void status_alert_callback(void *ctx, const char *short_msg, const char *long_msg, char *Volume, int severity, int flags, int alertno, utime_t alert_time) { STATUS_PKT *sp = (STATUS_PKT *)ctx; const char *type = "Unknown"; POOL_MEM send_msg(PM_MESSAGE); char edt[50]; int len; switch (severity) { case 'C': type = "Critical"; break; case 'W': type = "Warning"; break; case 'I': type = "Info"; break; } bstrftimes(edt, sizeof(edt), alert_time); if (chk_dbglvl(10)) { len = Mmsg(send_msg, _(" %s Alert: at %s Volume=\"%s\" flags=0x%x alert=%s\n"), type, edt, Volume, flags, long_msg); } else { len = Mmsg(send_msg, _(" %s Alert: at %s Volume=\"%s\" alert=%s\n"), type, edt, Volume, short_msg); } sendit(send_msg, len, sp); } /* * Status command from Director */ void output_status(STATUS_PKT *sp) { POOL_MEM msg(PM_MESSAGE); int len; list_status_header(sp); /* * List running jobs 
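 * (when api > 1, list_running_jobs() hands off to api_list_running_jobs() below)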
*/ list_running_jobs(sp); /* * List jobs stuck in reservation system */ list_jobs_waiting_on_reservation(sp); /* * List terminated jobs (defined in lib/status.h) */ list_terminated_jobs(sp); /* * List devices */ list_devices(sp); /* * List cloud transfers */ list_cloud_transfers(sp, false); len = Mmsg(msg, _("Used Volume status:\n")); if (!sp->api) sendit(msg, len, sp); list_volumes(sendit, (void *)sp); if (!sp->api) sendit("====\n\n", 6, sp); list_spool_stats(sendit, (void *)sp); if (!sp->api) sendit("====\n\n", 6, sp); if (chk_dbglvl(10)) { dbg_print_plugin(stdout); } } static void show_config(STATUS_PKT *sp) { LockRes(); STORES *store = (STORES *)GetNextRes(R_STORAGE, NULL); UnlockRes(); dump_resource(R_STORAGE, (RES *)store, sendit, sp); } #ifdef xxxx static find_device(char *devname) { foreach_res(device, R_DEVICE) { if (strcasecmp(device->hdr.name, devname) == 0) { found = true; break; } } if (!found) { foreach_res(changer, R_AUTOCHANGER) { if (strcasecmp(changer->hdr.name, devname) == 0) { break; } } } } #endif static void list_shared_storage(STATUS_PKT *sp, const char *cmd) { POOL_MEM msg(PM_MESSAGE); char list_cmd[100]; char device[128]; int len; Dmsg0(100, "in shared storage\n"); if (sscanf(cmd, ".status shstore %99s %127s", list_cmd, device) != 2) { len = Mmsg(msg, _("3900 missing args in .status command: %s\n"), cmd); sendit(msg, len, sp); return; } unbash_spaces(device); len = Mmsg(msg, _("\nSD Shared Storage:\n")); if (!sp->api) sendit(msg, len, sp); len = Mmsg(msg, "cmd=%s device=\"%s\"\n", list_cmd, device); sendit(msg, len, sp); if (!sp->api) sendit("====\n\n", 6, sp); } #ifdef COMMUNITY bool send_shstore_blocked_status(DEVICE *dev, POOLMEM **msg, int *len) {return false;} void list_shstore(DEVICE *dev, OutputWriter *ow) {} bool list_shstore(DEVICE *dev, POOLMEM **msg, int *len) { return false;} #endif static void api_list_one_device(char *name, DEVICE *dev, STATUS_PKT *sp, OutputWriter *ow) { int zero=0; int blocked=0; uint64_t f, t; const char *p=NULL; if (!dev) { return; } dev->get_freespace(&f, &t); ow->get_output(OT_START_OBJ, OT_STRING, "name", dev->device->hdr.name, OT_STRING, "archive_device", dev->archive_name(), OT_STRING, "type", dev->print_type(), OT_STRING, "driver", dev->print_driver_type(), OT_STRING, "media_type", dev->device->media_type, OT_INT, "open", (int)dev->is_open(), OT_INT, "writers", dev->num_writers, OT_INT32, "maximum_concurrent_jobs", dev->max_concurrent_jobs, OT_INT64, "maximum_volume_size", dev->max_volume_size, OT_INT, "read_only", dev->device->read_only, OT_INT, "autoselect", dev->device->autoselect, OT_INT, "enabled", dev->enabled, OT_INT64, "free_space", f, OT_INT64, "total_space", t, OT_INT64, "devno", dev->devno, OT_END); if (dev->is_open()) { if (dev->is_labeled()) { ow->get_output(OT_STRING, "mounted", dev->blocked()?"0":"1", OT_STRING, "waiting", dev->blocked()?"1":"0", OT_STRING, "volume", dev->VolHdr.VolumeName, OT_STRING, "pool", NPRTB(dev->pool_name), OT_END); } else { ow->get_output(OT_INT, "mounted", zero, OT_INT, "waiting", zero, OT_STRING, "volume", "", OT_STRING, "pool", "", OT_END); } blocked = 1; switch(dev->blocked()) { case BST_UNMOUNTED: p = "User unmounted"; break; case BST_UNMOUNTED_WAITING_FOR_SYSOP: p = "User unmounted during wait for media/mount"; break; case BST_DOING_ACQUIRE: p = "Device is being initialized"; break; case BST_WAITING_FOR_SYSOP: p = "Waiting for mount or create a volume"; break; case BST_WRITING_LABEL: p = "Labeling a Volume"; break; default: blocked=0; p = NULL; } /* TODO: give more information 
about blocked status * and the volume needed if WAITING for SYSOP */ ow->get_output(OT_STRING, "blocked_desc", NPRTB(p), OT_INT, "blocked", blocked, OT_END); ow->get_output(OT_INT, "append", (int)dev->can_append(), OT_END); if (dev->can_append()) { ow->get_output(OT_INT64, "bytes", dev->VolCatInfo.VolCatBytes, OT_INT32, "blocks", dev->VolCatInfo.VolCatBlocks, OT_END); } else { /* reading */ ow->get_output(OT_INT64, "bytes", dev->VolCatInfo.VolCatRBytes, OT_INT32, "blocks", dev->VolCatInfo.VolCatReads, /* might not be blocks */ OT_END); } ow->get_output(OT_INT, "file", dev->file, OT_INT, "block", dev->block_num, OT_END); } else { ow->get_output(OT_INT, "mounted", zero, OT_INT, "waiting", zero, OT_STRING, "volume", "", OT_STRING, "pool", "", OT_STRING, "blocked_desc", "", OT_INT, "blocked", zero, OT_INT, "append", zero, OT_INT, "bytes", zero, OT_INT, "blocks", zero, OT_INT, "file", zero, OT_INT, "block", zero, OT_END); } list_shstore(dev, ow); p = ow->get_output(OT_END_OBJ, OT_END); sendit(p, strlen(p), sp); } static void list_one_device(char *name, DEVICE *dev, STATUS_PKT *sp, OutputWriter *ow) { char b1[35], b2[35], b3[35]; POOL_MEM msg(PM_MESSAGE); int len; int bpb; if (sp->api > 1) { api_list_one_device(name, dev, sp, ow); return; } if (!dev) { len = Mmsg(msg, _("\nDevice \"%s\" is not open or does not exist.\n"), name); sendit(msg, len, sp); if (!sp->api) sendit("==\n", 3, sp); return; } if (dev->is_open()) { if (dev->is_labeled()) { len = Mmsg(msg, _("\nDevice %s is %s %s:\n" " Volume: %s\n" " Pool: %s\n" " Media type: %s\n"), dev->print_full_type(), dev->print_name(), dev->blocked()?_("waiting for"):_("mounted with"), dev->VolHdr.VolumeName, dev->pool_name[0]?dev->pool_name:_("*unknown*"), dev->device->media_type); sendit(msg, len, sp); } else { len = Mmsg(msg, _("\nDevice %s: %s open but no Bacula volume is currently mounted.\n"), dev->print_full_type(), dev->print_name()); sendit(msg, len, sp); } if (dev->can_append()) { bpb = dev->VolCatInfo.VolCatBlocks; if (bpb <= 0) { bpb = 1; } bpb = dev->VolCatInfo.VolCatBytes / bpb; len = Mmsg(msg, _(" Total Bytes=%s Blocks=%s Bytes/block=%s\n"), edit_uint64_with_commas(dev->VolCatInfo.VolCatBytes, b1), edit_uint64_with_commas(dev->VolCatInfo.VolCatBlocks, b2), edit_uint64_with_commas(bpb, b3)); sendit(msg, len, sp); } else { /* reading */ bpb = dev->VolCatInfo.VolCatReads; if (bpb <= 0) { bpb = 1; } if (dev->VolCatInfo.VolCatRBytes > 0) { bpb = dev->VolCatInfo.VolCatRBytes / bpb; } else { bpb = 0; } len = Mmsg(msg, _(" Total Bytes Read=%s Blocks Read=%s Bytes/block=%s\n"), edit_uint64_with_commas(dev->VolCatInfo.VolCatRBytes, b1), edit_uint64_with_commas(dev->VolCatInfo.VolCatReads, b2), edit_uint64_with_commas(bpb, b3)); sendit(msg, len, sp); } len = Mmsg(msg, _(" Positioned at File=%s Block=%s\n"), edit_uint64_with_commas(dev->file, b1), edit_uint64_with_commas(dev->block_num, b2)); sendit(msg, len, sp); } else { len = Mmsg(msg, _("\nDevice %s: %s is not open.\n"), dev->print_full_type(), dev->print_name()); sendit(msg, len, sp); } send_blocked_status(dev, sp); /* TODO: We need to check with Mount command, maybe we can * display this number only when the device is open. */ if (dev->is_file()) { char ed1[50]; uint64_t f, t; dev->get_freespace(&f, &t); if (t > 0) { /* We might not have access to numbers */ len = Mmsg(msg, _(" Available %sSpace=%sB\n"), dev->is_cloud() ? 
_("Cache ") : "", edit_uint64_with_suffix(f, ed1)); sendit(msg, len, sp); } } if (list_shstore(dev, msg.handle(), &len)) { sendit(msg, len, sp); } dev->show_tape_alerts((DCR *)sp, list_short, list_all, status_alert_callback); if (!sp->api) sendit("==\n", 3, sp); } static void list_one_autochanger(char *name, AUTOCHANGER *changer, STATUS_PKT *sp, OutputWriter *ow) { int len; char *p; DEVRES *device; POOL_MEM msg(PM_MESSAGE); if (sp->api > 1) { ow->get_output(OT_START_OBJ, OT_STRING, "autochanger", changer->hdr.name, OT_END); ow->start_list("devices"); foreach_alist(device, changer->device) { // We build a list here with a CLEAR ow->get_output(OT_START_OBJ, OT_STRING, "name", device->hdr.name, OT_STRING, "device",device->device_name, OT_END_OBJ, OT_END); } ow->end_list(); p = ow->get_output(OT_END_OBJ, OT_END); sendit(p, strlen(p), sp); ow->get_output(OT_CLEAR, OT_END); } else { len = Mmsg(msg, _("Autochanger \"%s\" with devices:\n"), changer->hdr.name); sendit(msg, len, sp); foreach_alist(device, changer->device) { if (device->dev) { len = Mmsg(msg, " %s\n", device->dev->print_name()); sendit(msg, len, sp); } else { len = Mmsg(msg, " %s\n", device->hdr.name); sendit(msg, len, sp); } } } } static void list_devices(STATUS_PKT *sp, char *name) { int len; DEVRES *device; AUTOCHANGER *changer; OutputWriter ow(sp->api_opts); char *buf=NULL; bool first; if (!sp->api) { POOL_MEM msg(PM_MESSAGE); len = Mmsg(msg, _("\nDevice status:\n")); sendit(msg, len, sp); } else if (sp->api > 1) { ow.start_group("devices"); ow.get_output(OT_START_OBJ, OT_END); buf = ow.start_list("autochanger"); sendit(buf, strlen(buf), sp); } first = true; foreach_res(changer, R_AUTOCHANGER) { if (!name || strcmp(changer->hdr.name, name) == 0) { ow.get_output(OT_CLEAR, first? OT_NOP : OT_SEP, OT_END); list_one_autochanger(changer->hdr.name, changer, sp, &ow); first = false; } } if (sp->api > 1) { ow.get_output(OT_CLEAR, OT_END); ow.end_list(); buf = ow.start_list("device"); sendit(buf, strlen(buf), sp); } first = true; foreach_res(device, R_DEVICE) { if (!name || strcmp(device->hdr.name, name) == 0) { ow.get_output(OT_CLEAR, first? OT_NOP : OT_SEP, OT_END); list_one_device(device->hdr.name, device->dev, sp, &ow); first = false; } } if (sp->api > 1) { ow.get_output(OT_CLEAR, OT_END); ow.end_list(); ow.get_output(OT_END_OBJ, OT_END); buf = ow.end_group(); sendit(buf, strlen(buf), sp); } if (!sp->api) sendit("====\n\n", 6, sp); } static void list_cloud_transfers(STATUS_PKT *sp, bool verbose) { if (sp->api) { DEVRES *device; foreach_res(device, R_DEVICE) { if (device->dev && device->dev->is_cloud()) { char *p; OutputWriter ow(sp->api_opts); cloud_dev *cdev = (cloud_dev*)device->dev; ow.start_group("cloud"); ow.get_output(OT_START_OBJ, OT_END); cdev->get_api_cloud_upload_transfer_status(ow, verbose); cdev->get_api_cloud_download_transfer_status(ow, verbose); ow.get_output(OT_END_OBJ, OT_END); p = ow.end_group(); sendit(p, strlen(p), sp); break; // One transfer manager? 
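            /* Apparently yes: the non-API branch below notes that the
             * transfer managers are shared ("only once, transfer mgr are
             * shared"), so the first cloud device found is enough here
             * as well. */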
} } } else { bool first=true; int len; DEVRES *device; POOL_MEM msg(PM_MESSAGE); foreach_res(device, R_DEVICE) { if (device->dev && device->dev->is_cloud()) { if (first) { len = Mmsg(msg, _("Cloud transfer status:\n")); sendit(msg, len, sp); first = false; } cloud_dev *cdev = (cloud_dev*)device->dev; len = cdev->get_cloud_upload_transfer_status(msg, verbose); sendit(msg, len, sp); len = cdev->get_cloud_download_transfer_status(msg, verbose); sendit(msg, len, sp); break; /* only once, transfer mgr are shared */ } } if (!first) sendit("====\n\n", 6, sp); } } static void api_list_sd_status_header(STATUS_PKT *sp) { char *p; alist drivers(10, not_owned_by_alist); alist tlist(10, not_owned_by_alist); OutputWriter wt(sp->api_opts); sd_list_loaded_drivers(&drivers); wt.start_group("header"); wt.get_output( OT_START_OBJ, OT_STRING, "name", my_name, OT_STRING, "version", VERSION " (" BDATE ")", OT_STRING, "uname", HOST_OS " " DISTNAME " " DISTVER, OT_UTIME, "started", daemon_start_time, OT_INT64, "pid", (int64_t)getpid(), OT_INT, "jobs_run", num_jobs_run, OT_INT, "jobs_running",job_count(), OT_INT, "ndevices", ((rblist *)res_head[R_DEVICE-r_first]->res_list)->size(), OT_INT, "nautochgr", ((rblist *)res_head[R_AUTOCHANGER-r_first]->res_list)->size(), OT_PLUGINS,"plugins", b_plugin_list, OT_ALIST_STR, "drivers", &drivers, OT_INT32, "fips", (int32_t)crypto_get_fips(), OT_STRING, "openssl", crypto_get_version(), OT_INT64, "debug", debug_level, OT_INT, "trace", get_trace(), OT_ALIST_STR, "tags", debug_get_tags_list(&tlist, debug_level_tags), OT_END_OBJ, OT_END); p = wt.end_group(); sendit(p, strlen(p), sp); } static void list_status_header(STATUS_PKT *sp) { char dt[MAX_TIME_LENGTH]; char b1[35], b2[35], b3[35], b4[35], b5[35]; POOL_MEM msg(PM_MESSAGE); int len; if (sp->api) { api_list_sd_status_header(sp); return; } len = Mmsg(msg, _("%s %sVersion: %s (%s) %s %s %s\n"), my_name, BDEMO, VERSION, BDATE, HOST_OS, DISTNAME, DISTVER); sendit(msg, len, sp); bstrftime_nc(dt, sizeof(dt), daemon_start_time); LockRes(); STORES *store = (STORES *)GetNextRes(R_STORAGE, NULL); UnlockRes(); len = Mmsg(msg, _("Daemon started %s. 
Jobs: run=%d, running=%d max=%ld.\n"), dt, num_jobs_run, job_count(), store->max_concurrent_jobs); sendit(msg, len, sp); int64_t nofile_l = 1000 + 5 * me->max_concurrent_jobs; int64_t memlock_l = 0; dedup_get_limits(&nofile_l, &memlock_l); list_resource_limits(sp, nofile_l, memlock_l); len = Mmsg(msg, _(" Heap: heap=%s smbytes=%s max_bytes=%s bufs=%s max_bufs=%s\n"), edit_uint64_with_commas(heap_used(), b1), edit_uint64_with_commas(sm_bytes, b2), edit_uint64_with_commas(sm_max_bytes, b3), edit_uint64_with_commas(sm_buffers, b4), edit_uint64_with_commas(sm_max_buffers, b5)); sendit(msg, len, sp); len = Mmsg(msg, " Sizes: boffset_t=%d size_t=%d int32_t=%d int64_t=%d " "mode=%d,%d newbsr=%d\n", (int)sizeof(boffset_t), (int)sizeof(size_t), (int)sizeof(int32_t), (int)sizeof(int64_t), (int)DEVELOPER_MODE, (int)BEEF, use_new_match_all); sendit(msg, len, sp); len = Mmsg(msg, " Crypto: fips=%s crypto=%s\n", crypto_get_fips_enabled(), crypto_get_version()); sendit(msg, len, sp); len = Mmsg(msg, _(" Res: ndevices=%d nautochgr=%d\n"), ((rblist *)res_head[R_DEVICE-r_first]->res_list)->size(), ((rblist *)res_head[R_AUTOCHANGER-r_first]->res_list)->size()); sendit(msg, len, sp); list_plugins(sp); bool append_only, immutable; #if defined(HAVE_APPEND_FL) append_only = true; #else append_only = false; #endif // HAVE_APPEND_FL #if defined(HAVE_IMMUTABLE_FL) immutable = true; #else immutable = false; #endif // HAVE_IMMUTABLE_FL len = Mmsg(msg, " Caps: %s, %s\n", append_only ? "APPEND_ONLY" : "!APPEND_ONLY", immutable ? "IMMUTABLE" : "!IMMUTABLE"); sendit(msg, len, sp); } static void send_blocked_status(DEVICE *dev, STATUS_PKT *sp) { POOL_MEM msg(PM_MESSAGE); int len; if (!dev) { len = Mmsg(msg, _("No DEVICE structure.\n\n")); sendit(msg, len, sp); return; } if (send_shstore_blocked_status(dev, msg.handle(), &len)) { sendit(msg, len, sp); } if (!dev->enabled) { len = Mmsg(msg, _(" Device is disabled. User command.\n")); sendit(msg, len, sp); } switch (dev->blocked()) { case BST_UNMOUNTED: len = Mmsg(msg, _(" Device is BLOCKED. User unmounted.\n")); sendit(msg, len, sp); break; case BST_UNMOUNTED_WAITING_FOR_SYSOP: len = Mmsg(msg, _(" Device is BLOCKED. 
User unmounted during wait for media/mount.\n")); sendit(msg, len, sp); break; case BST_WAITING_FOR_SYSOP: { DCR *dcr; bool found_jcr = false; dev->Lock(); dev->Lock_dcrs(); foreach_dlist(dcr, dev->attached_dcrs) { if (dcr->jcr->JobStatus == JS_WaitMount) { len = Mmsg(msg, _(" Device is BLOCKED waiting for mount of volume \"%s\",\n" " Pool: %s\n" " Media type: %s\n"), dcr->VolumeName, dcr->pool_name, dcr->media_type); sendit(msg, len, sp); found_jcr = true; } else if (dcr->jcr->JobStatus == JS_WaitMedia) { len = Mmsg(msg, _(" Device is BLOCKED waiting to create a volume for:\n" " Pool: %s\n" " Media type: %s\n"), dcr->pool_name, dcr->media_type); sendit(msg, len, sp); found_jcr = true; } } dev->Unlock_dcrs(); dev->Unlock(); if (!found_jcr) { len = Mmsg(msg, _(" Device is BLOCKED waiting for media.\n")); sendit(msg, len, sp); } } break; case BST_DOING_ACQUIRE: len = Mmsg(msg, _(" Device is being initialized.\n")); sendit(msg, len, sp); break; case BST_WRITING_LABEL: len = Mmsg(msg, _(" Device is blocked labeling a Volume.\n")); sendit(msg, len, sp); break; default: break; } /* Send autochanger slot status */ if (dev->is_autochanger() && !dev->is_virtual_autochanger()) { if (dev->get_slot() > 0) { len = Mmsg(msg, _(" Slot %d %s loaded in drive %d.\n"), dev->get_slot(), dev->is_open()?"is": "was last", dev->drive_index); sendit(msg, len, sp); } else if (dev->get_slot() <= 0) { len = Mmsg(msg, _(" Drive %d is not loaded.\n"), dev->drive_index); sendit(msg, len, sp); } } if (chk_dbglvl(1)) { send_device_status(dev, sp); } } void send_device_status(DEVICE *dev, STATUS_PKT *sp) { POOL_MEM msg(PM_MESSAGE); int len; DCR *dcr = NULL; bool found = false; char b1[35]; if (chk_dbglvl(5)) { len = Mmsg(msg, _("Configured device capabilities:\n")); sendit(msg, len, sp); len = Mmsg(msg, " %sEOF %sBSR %sBSF %sFSR %sFSF %sEOM %sREM %sRACCESS %sAUTOMOUNT %sLABEL %sANONVOLS %sALWAYSOPEN %sSYNCONCLOSE\n", dev->capabilities & CAP_EOF ? "" : "!", dev->capabilities & CAP_BSR ? "" : "!", dev->capabilities & CAP_BSF ? "" : "!", dev->capabilities & CAP_FSR ? "" : "!", dev->capabilities & CAP_FSF ? "" : "!", dev->capabilities & CAP_EOM ? "" : "!", dev->capabilities & CAP_REM ? "" : "!", dev->capabilities & CAP_RACCESS ? "" : "!", dev->capabilities & CAP_AUTOMOUNT ? "" : "!", dev->capabilities & CAP_LABEL ? "" : "!", dev->capabilities & CAP_ANONVOLS ? "" : "!", dev->capabilities & CAP_ALWAYSOPEN ? "" : "!", dev->capabilities & CAP_SYNCONCLOSE ? "" : "!"); sendit(msg, len, sp); } len = Mmsg(msg, _("Device state:\n")); sendit(msg, len, sp); len = Mmsg(msg, " %sOPENED %sTAPE %sLABEL %sAPPEND %sREAD %sEOT %sWEOT %sEOF %sWORM %sNEXTVOL %sSHORT %sMOUNTED %sMALLOC\n", dev->is_open() ? "" : "!", dev->is_tape() ? "" : "!", dev->is_labeled() ? "" : "!", dev->can_append() ? "" : "!", dev->can_read() ? "" : "!", dev->at_eot() ? "" : "!", dev->state & ST_WEOT ? "" : "!", dev->at_eof() ? "" : "!", dev->is_worm() ? "" : "!", dev->state & ST_NEXTVOL ? "" : "!", dev->state & ST_SHORT ? "" : "!", dev->state & ST_MOUNTED ? "" : "!", dev->state & ST_MALLOC ? 
"" : "!"); sendit(msg, len, sp); len = Mmsg(msg, _(" Writers=%d reserves=%d blocked=%d enabled=%d usage=%s\n"), dev->num_writers, dev->num_reserved(), dev->blocked(), dev->enabled, edit_uint64_with_commas(dev->usage, b1)); sendit(msg, len, sp); len = Mmsg(msg, _("Attached JobIds: ")); sendit(msg, len, sp); dev->Lock(); dev->Lock_dcrs(); foreach_dlist(dcr, dev->attached_dcrs) { if (dcr->jcr) { if (found) { sendit(",", 1, sp); } len = Mmsg(msg, "%d", (int)dcr->jcr->JobId); sendit(msg, len, sp); found = true; } } dev->Unlock_dcrs(); dev->Unlock(); sendit("\n", 1, sp); len = Mmsg(msg, _("Device parameters:\n")); sendit(msg, len, sp); len = Mmsg(msg, _(" Archive name: %s Device name: %s\n"), dev->archive_name(), dev->name()); sendit(msg, len, sp); len = Mmsg(msg, _(" File=%u block=%u\n"), dev->file, dev->block_num); sendit(msg, len, sp); len = Mmsg(msg, _(" Min block=%u Max block=%u\n"), dev->min_block_size, dev->max_block_size); sendit(msg, len, sp); } static void api_list_running_jobs(STATUS_PKT *sp) { char *p1, *p2, *p3; int i1, i2, i3; OutputWriter ow(sp->api_opts); uint64_t inst_bps, total_bps; int inst_sec, total_sec; JCR *jcr; DCR *dcr, *rdcr; time_t now = time(NULL); bool add_sep=false; char *p; ow.start_group("running"); p = ow.get_output(OT_START_ARRAY, OT_END); sendit(p, strlen(p), sp); foreach_jcr(jcr) { p = ow.get_output(OT_CLEAR, add_sep ? OT_SEP : OT_NOP, OT_START_OBJ, OT_END); add_sep = true; if (jcr->JobId == 0 && jcr->dir_bsock) { int val = (jcr->dir_bsock && jcr->dir_bsock->tls)?1:0; p1 = ow.get_output(OT_UTIME, "DirectorConnected", (utime_t)jcr->start_time, OT_INT, "DirTLS", val, OT_END_OBJ, OT_END); sendit(p1, strlen(p1), sp); continue; } if (jcr->getJobType() == JT_SYSTEM) { add_sep = false; // Skip this one, it has been printed already continue; } ow.get_output(OT_INT32, "jobid", jcr->JobId, OT_STRING, "job", jcr->Job, OT_JOBLEVEL,"level", jcr->getJobLevel(), OT_JOBTYPE, "type", jcr->getJobType(), OT_JOBSTATUS,"status", jcr->JobStatus, OT_PINT64, "jobbytes", jcr->JobBytes, OT_INT32, "jobfiles", jcr->JobFiles, OT_UTIME, "starttime", jcr->start_time, OT_INT32, "errors", jcr->JobErrors, OT_INT32, "newbsr", (int32_t)jcr->use_new_match_all, OT_END); dcr = jcr->dcr; rdcr = jcr->read_dcr; p1 = p2 = p3 = NULL; if (rdcr && rdcr->device) { p1 = rdcr->VolumeName; p2 = rdcr->pool_name; p3 = rdcr->device->hdr.name; } ow.get_output(OT_STRING, "read_volume", NPRTB(p1), OT_STRING, "read_pool", NPRTB(p2), OT_STRING, "read_device", NPRTB(p3), OT_END); p1 = p2 = p3 = NULL; i1 = i2 = i3 = 0; if (dcr && dcr->device) { p1 = dcr->VolumeName; p2 = dcr->pool_name; p3 = dcr->device->hdr.name; i1 = dcr->spooling; i2 = dcr->despooling; i3 = dcr->despool_wait; } ow.get_output(OT_STRING, "write_volume", NPRTB(p1), OT_STRING, "write_pool", NPRTB(p2), OT_STRING, "write_device", NPRTB(p3), OT_INT, "spooling", i1, OT_INT, "despooling", i2, OT_INT, "despool_wait", i3, OT_END); if (jcr->last_time == 0) { jcr->last_time = jcr->run_time; } total_sec = now - jcr->run_time; inst_sec = now - jcr->last_time; if (total_sec <= 0) { total_sec = 1; } if (inst_sec <= 0) { inst_sec = 1; } /* Instanteous bps not smoothed */ inst_bps = (jcr->JobBytes - jcr->LastJobBytes) / inst_sec; if (jcr->LastRate == 0) { jcr->LastRate = inst_bps; } /* Smooth the instantaneous bps a bit */ inst_bps = (2 * jcr->LastRate + inst_bps) / 3; /* total bps (AveBytes/sec) since start of job */ total_bps = jcr->JobBytes / total_sec; p1 = ow.get_output(OT_PINT64, "avebytes_sec", total_bps, OT_PINT64, "lastbytes_sec", inst_bps, OT_END_OBJ, 
OT_END); sendit(p1, strlen(p1), sp); /* Update only every 10 seconds */ if (now - jcr->last_time > 10) { jcr->LastRate = inst_bps; jcr->LastJobBytes = jcr->JobBytes; jcr->last_time = now; } } endeach_jcr(jcr); // Finish the JSON output if needed ow.get_output(OT_CLEAR, OT_END_ARRAY, OT_END); p = ow.end_group(); sendit(p, strlen(p), sp); } static void list_running_jobs(STATUS_PKT *sp) { bool found = false; uint64_t inst_bps, total_bps; int inst_sec, total_sec; JCR *jcr; DCR *dcr, *rdcr; char JobName[MAX_NAME_LENGTH]; char b1[50], b2[50], b3[50], b4[50]; int len; POOL_MEM msg(PM_MESSAGE); time_t now = time(NULL); if (sp->api > 1) { api_list_running_jobs(sp); return; } len = Mmsg(msg, _("\nRunning Jobs:\n")); if (!sp->api) sendit(msg, len, sp); foreach_jcr(jcr) { if (jcr->JobStatus == JS_WaitFD) { len = Mmsg(msg, _("%s Job %s waiting for Client connection.\n"), job_type_to_str(jcr->getJobType()), jcr->Job); sendit(msg, len, sp); } dcr = jcr->dcr; rdcr = jcr->read_dcr; if ((dcr && dcr->device) || (rdcr && rdcr->device)) { bstrncpy(JobName, jcr->Job, sizeof(JobName)); /* There are three periods after the Job name */ char *p; for (int i=0; i<3; i++) { if ((p=strrchr(JobName, '.')) != NULL) { *p = 0; } } if (rdcr && rdcr->device) { len = Mmsg(msg, _("Reading: %s %s job %s JobId=%d Volume=\"%s\"\n" " pool=\"%s\" device=%s newbsr=%d\n"), job_level_to_str(jcr->getJobLevel()), job_type_to_str(jcr->getJobType()), JobName, jcr->JobId, rdcr->VolumeName, rdcr->pool_name, rdcr->dev?rdcr->dev->print_name(): rdcr->device->device_name, jcr->use_new_match_all ); sendit(msg, len, sp); } else if (dcr && dcr->device) { len = Mmsg(msg, _("Writing: %s %s job %s JobId=%d Volume=\"%s\"\n" " pool=\"%s\" device=%s\n"), job_level_to_str(jcr->getJobLevel()), job_type_to_str(jcr->getJobType()), JobName, jcr->JobId, dcr->VolumeName, dcr->pool_name, dcr->dev?dcr->dev->print_name(): dcr->device->device_name); sendit(msg, len, sp); len= Mmsg(msg, _(" spooling=%d despooling=%d despool_wait=%d\n"), dcr->spooling, dcr->despooling, dcr->despool_wait); sendit(msg, len, sp); } if (jcr->last_time == 0) { jcr->last_time = jcr->run_time; } total_sec = now - jcr->run_time; inst_sec = now - jcr->last_time; if (total_sec <= 0) { total_sec = 1; } if (inst_sec <= 0) { inst_sec = 1; } /* Instanteous bps not smoothed */ inst_bps = (jcr->JobBytes - jcr->LastJobBytes) / inst_sec; if (jcr->LastRate == 0) { jcr->LastRate = inst_bps; } /* Smooth the instantaneous bps a bit */ inst_bps = (2 * jcr->LastRate + inst_bps) / 3; /* total bps (AveBytes/sec) since start of job */ total_bps = jcr->JobBytes / total_sec; len = Mmsg(msg, _(" Files=%s Bytes=%s AveBytes/sec=%s LastBytes/sec=%s\n"), edit_uint64_with_commas(jcr->JobFiles, b1), edit_uint64_with_commas(jcr->JobBytes, b2), edit_uint64_with_commas(total_bps, b3), edit_uint64_with_commas(inst_bps, b4)); sendit(msg, len, sp); /* Update only every 10 seconds */ if (now - jcr->last_time > 10) { jcr->LastRate = inst_bps; jcr->LastJobBytes = jcr->JobBytes; jcr->last_time = now; } found = true; #ifdef DEBUG if (jcr->file_bsock) { len = Mmsg(msg, _(" FDReadSeqNo=%s in_msg=%u out_msg=%d fd=%d\n"), edit_uint64_with_commas(jcr->file_bsock->read_seqno, b1), jcr->file_bsock->in_msg_no, jcr->file_bsock->out_msg_no, jcr->file_bsock->m_fd); sendit(msg, len, sp); } else { len = Mmsg(msg, _(" FDSocket closed\n")); sendit(msg, len, sp); } #endif } else if (jcr->JobId == 0 && jcr->dir_bsock) { char dt[MAX_TIME_LENGTH]; bstrftime_nc(dt, sizeof(dt), jcr->start_time); len = Mmsg(msg, _("Director connected %sat: %s\n"), 
(jcr->dir_bsock && jcr->dir_bsock->tls)?_("using TLS "):"", dt); sendit(msg, len, sp); } } endeach_jcr(jcr); if (!found) { len = Mmsg(msg, _("No Jobs running.\n")); if (!sp->api) sendit(msg, len, sp); } if (!sp->api) sendit("====\n", 5, sp); } static void list_jobs_waiting_on_reservation(STATUS_PKT *sp) { JCR *jcr; POOL_MEM msg(PM_MESSAGE); int len; len = Mmsg(msg, _("\nJobs waiting to reserve a drive:\n")); if (!sp->api) sendit(msg, len, sp); foreach_jcr(jcr) { if (!jcr->reserve_msgs) { continue; } send_drive_reserve_messages(jcr, sendit, sp); } endeach_jcr(jcr); if (!sp->api) sendit("====\n", 5, sp); } static void sendit(const char *msg, int len, void *sp) { sendit(msg, len, (STATUS_PKT *)sp); } static void sendit(POOL_MEM &msg, int len, STATUS_PKT *sp) { BSOCK *bs = sp->bs; if (bs) { bs->msg = check_pool_memory_size(bs->msg, len+1); memcpy(bs->msg, msg.c_str(), len+1); bs->msglen = len+1; bs->send(); } else { sp->callback(msg.c_str(), len, sp->context); } } /* * Status command from Director */ bool status_cmd(JCR *jcr) { BSOCK *dir = jcr->dir_bsock; STATUS_PKT sp; dir->fsend("\n"); sp.bs = dir; output_status(&sp); dir->signal(BNET_EOD); return true; } /* * .status command from Director */ bool qstatus_cmd(JCR *jcr) { BSOCK *dir = jcr->dir_bsock; JCR *njcr; s_last_job* job; STATUS_PKT sp; POOLMEM *args = get_pool_memory(PM_MESSAGE); char *argk[MAX_CMD_ARGS]; /* argument keywords */ char *argv[MAX_CMD_ARGS]; /* argument values */ int argc; /* number of arguments */ bool ret=true; char *cmd; char *device=NULL; char *collname=NULL; char *name=NULL; int api = true; sp.bs = dir; parse_args(dir->msg, &args, &argc, argk, argv, MAX_CMD_ARGS); /* .status xxxx at the minimum */ if (argc < 2 || strcmp(argk[0], ".status") != 0) { pm_strcpy(jcr->errmsg, dir->msg); dir->fsend(_("3900 No arg in .status command: %s\n"), jcr->errmsg); dir->signal(BNET_EOD); return false; } cmd = argk[1]; unbash_spaces(cmd); /* The status command can contain some arguments * i=0 => .status * i=1 => [running | current | last | ... 
] */ for (int i=0 ; i < argc ; i++) { if (!strcmp(argk[i], "device") && argv[i]) { device = argv[i]; unbash_spaces(device); } else if (!strcmp(argk[i], "api") && argv[i]) { api = atoi(argv[i]); } else if (!strcmp(argk[i], "statistics") && argv[i]) { collname = argv[i]; unbash_spaces(collname); } else if (!strcmp(argk[i], "api_opts") && argv[i]) { bstrncpy(sp.api_opts, argv[i], sizeof(sp.api_opts)); } else if (!strcmp(argk[i], "name") && argv[i]) { name = argv[i]; unbash_spaces(name); } } Dmsg1(100, "cmd=%s\n", cmd); if (strcasecmp(cmd, "current") == 0) { dir->fsend(OKqstatus, cmd); foreach_jcr(njcr) { if (njcr->JobId != 0) { dir->fsend(DotStatusJob, njcr->JobId, njcr->JobStatus, njcr->JobErrors); } } endeach_jcr(njcr); } else if (strcasecmp(cmd, "last") == 0) { dir->fsend(OKqstatus, cmd); if ((last_jobs) && (last_jobs->size() > 0)) { job = (s_last_job*)last_jobs->last(); dir->fsend(DotStatusJob, job->JobId, job->JobStatus, job->Errors); } } else if (strcasecmp(cmd, "header") == 0) { sp.api = api; list_status_header(&sp); } else if (strcasecmp(cmd, "running") == 0) { sp.api = api; list_running_jobs(&sp); } else if (strcasecmp(cmd, "waitreservation") == 0) { sp.api = api; list_jobs_waiting_on_reservation(&sp); } else if (strcasecmp(cmd, "devices") == 0) { sp.api = api; list_devices(&sp, device); } else if (strcasecmp(cmd, "volumes") == 0) { sp.api = api; list_volumes(sendit, &sp); } else if (strcasecmp(cmd, "spooling") == 0) { sp.api = api; list_spool_stats(sendit, &sp); } else if (strcasecmp(cmd, "terminated") == 0) { sp.api = api; list_terminated_jobs(&sp); /* defined in lib/status.h */ } else if (strcasecmp(cmd, "resources") == 0) { sp.api = api; show_config(&sp); /* ***BEEF*** */ } else if (strcasecmp(cmd, "dedupengine") == 0 || strcasecmp(cmd, "dedupengineandzerostats") == 0) { sp.api = api; list_dedupengines(cmd, &sp, name); } else if (strcasecmp(cmd, "shstore") == 0) { list_shared_storage(&sp, dir->msg); } else if (strcasecmp(cmd, "cloud") == 0) { sp.api = api; list_cloud_transfers(&sp, true); } else if (strcasecmp(cmd, "statistics") == 0) { sp.api = api; list_collectors_status(&sp, collname); } else { pm_strcpy(jcr->errmsg, dir->msg); dir->fsend(_("3900 Unknown arg in .status command: %s\n"), jcr->errmsg); dir->signal(BNET_EOD); ret = false; } dir->signal(BNET_EOD); free_pool_memory(args); return ret; } /* List plugins and drivers */ static void list_plugins(STATUS_PKT *sp) { POOL_MEM msg(PM_MESSAGE); alist drivers(10, not_owned_by_alist); int len; if (b_plugin_list && b_plugin_list->size() > 0) { Plugin *plugin; pm_strcpy(msg, " Plugin: "); foreach_alist(plugin, b_plugin_list) { len = pm_strcat(msg, plugin->name); if (plugin->pinfo) { pm_strcat(msg, "("); pm_strcat(msg, NPRT(sdplug_info(plugin)->plugin_version)); len = pm_strcat(msg, ")"); } if (len > 80) { pm_strcat(msg, "\n "); } else { pm_strcat(msg, " "); } } len = pm_strcat(msg, "\n"); sendit(msg.c_str(), len, sp); } sd_list_loaded_drivers(&drivers); if (drivers.size() > 0) { char *drv; pm_strcpy(msg, " Drivers: "); foreach_alist(drv, (&drivers)) { len = pm_strcat(msg, drv); if (len > 80) { pm_strcat(msg, "\n "); } else { pm_strcat(msg, " "); } } len = pm_strcat(msg, "\n"); sendit(msg.c_str(), len, sp); } } static void list_collectors_status(STATUS_PKT *sp, char *collname) { URES *res; int len; POOL_MEM buf(PM_MESSAGE); Dmsg2(200, "enter list_collectors_status() api=%i coll=%s\n", sp->api, NPRTB(collname)); if (sp->api > 1) { api_collectors_status(sp, collname); return; } LockRes(); foreach_res(res, R_COLLECTOR) { if (collname 
&& !bstrcmp(collname, res->res_collector.hdr.name)){ continue; } Dmsg1(500, "processing: %s\n", res->res_collector.hdr.name); len = render_collector_status(res->res_collector, buf); sendit(buf.c_str(), len, sp); }; UnlockRes(); if (!collname){ len = render_updcollector_status(buf); sendit(buf.c_str(), len, sp); } Dmsg0(200, "leave list_collectors_status()\n"); } static void api_collectors_status(STATUS_PKT *sp, char *collname) { URES *res; OutputWriter ow(sp->api_opts); POOLMEM *buf; Dmsg1(200, "enter api_collectors_status() %s\n", NPRTB(collname)); ow.start_group("collector"); ow.get_output(OT_START_OBJ, OT_END); ow.start_list("collector_backends"); LockRes(); foreach_res(res, R_COLLECTOR) { if (collname && !bstrcmp(collname, res->res_collector.hdr.name)){ continue; } Dmsg1(500, "processing: %s\n", res->res_collector.hdr.name); api_render_collector_status(res->res_collector, ow); }; UnlockRes(); ow.end_list(); if (!collname) { ow.get_output(OT_SEP, OT_LABEL, "collector_update", OT_END); api_render_updcollector_status(ow); } ow.get_output(OT_END_OBJ, OT_END); buf = ow.end_group(); sendit(buf, strlen(buf), sp); Dmsg0(200, "leave api_collectors_status()\n"); }; bacula-15.0.3/src/stored/cloud_driver.h0000644000175000017500000001306514771010173017610 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * Routines for writing Cloud drivers * * Written by Kern Sibbald, May MMXVI * */ #include "bacula.h" #include "stored.h" #include "cloud_parts.h" #include "cloud_transfer_mgr.h" #include "lib/bwlimit.h" #ifndef _CLOUD_DRIVER_H_ #define _CLOUD_DRIVER_H_ #define NUM_UPLOAD_RETRIES 10 class cloud_dev; /* define the cancel callback type */ typedef bool (cancel_cb_type)(void*); typedef struct { cancel_cb_type* fct; void *arg; } cancel_callback; typedef struct { const char *pattern; int pattern_num; } cleanup_ctx_type; typedef bool (cleanup_cb_type)(const char*, cleanup_ctx_type*); /* Abstract class cannot be instantiated */ class cloud_driver: public SMARTALLOC { public: cloud_driver() : max_upload_retries(NUM_UPLOAD_RETRIES) {}; virtual ~cloud_driver() {}; virtual bool copy_cache_part_to_cloud(transfer *xfer) = 0; virtual bool move_cloud_part(const char *VolumeName, uint32_t apart , const char *to, cancel_callback *cancel_cb, POOLMEM *&err, int& exists) = 0; enum { CLOUD_DRIVER_COPY_PART_TO_CACHE_OK = 0, CLOUD_DRIVER_COPY_PART_TO_CACHE_ERROR = 1, CLOUD_DRIVER_COPY_PART_TO_CACHE_RETRY = 2 }; virtual int copy_cloud_part_to_cache(transfer *xfer) = 0; virtual bool restore_cloud_object(transfer *xfer, const char *cloud_fname) = 0; virtual bool is_waiting_on_server(transfer *xfer) = 0; virtual bool truncate_cloud_volume(const char *VolumeName, ilist *trunc_parts, cancel_callback *cancel_cb, POOLMEM *&err) = 0; virtual bool clean_cloud_volume(const char *VolumeName, cleanup_cb_type *cb, cleanup_ctx_type *ctx, cancel_callback *cancel_cb, POOLMEM *&err) = 0; virtual bool init(CLOUD *cloud, POOLMEM *&err) = 0; virtual bool term(POOLMEM *&err) = 0; virtual bool start_of_job(POOLMEM *&err) = 0; virtual bool end_of_job(POOLMEM *&err) = 0; virtual bool get_cloud_volume_parts_list(const char* VolumeName, ilist *parts, cancel_callback *cancel_cb, POOLMEM *&err) = 0; virtual bool get_cloud_volumes_list(alist *volumes, cancel_callback *cancel_cb, POOLMEM *&err) = 0; static void add_vol_and_part(POOLMEM *&filename, const char *VolumeName, const char *name, uint32_t apart) { POOL_MEM partname; Mmsg(partname, "%s.%d", name, apart); add_vol_and_part(filename, VolumeName, partname.c_str()); } static void add_vol_and_part(POOLMEM *&filename, const char *VolumeName, const char *name) { POOL_MEM partnumber; int len = strlen(filename); if (len > 0 && !IsPathSeparator((filename)[len-1])) { pm_strcat(filename, "/"); } pm_strcat(filename, VolumeName); Mmsg(partnumber, "/%s", name); pm_strcat(filename, partnumber); } bwlimit upload_limit; bwlimit download_limit; uint32_t max_upload_retries; }; class dummy_driver: public cloud_driver { public: dummy_driver() {}; bool copy_cache_part_to_cloud(transfer *xfer) { Mmsg(xfer->m_message, "Cloud driver not properly loaded"); return false; }; bool move_cloud_part(const char *VolumeName, uint32_t apart , const char *to, cancel_callback *cancel_cb, POOLMEM *&err, int& exists) { Mmsg(err, "Cloud driver not properly loaded"); return false; }; bool clean_cloud_volume(const char *VolumeName, cleanup_cb_type *cb, cleanup_ctx_type *ctx, cancel_callback *cancel_cb, POOLMEM *&err) { Mmsg(err, "Cloud driver not properly loaded"); return false; }; int copy_cloud_part_to_cache(transfer *xfer) { Mmsg(xfer->m_message, "Cloud driver not properly loaded"); return CLOUD_DRIVER_COPY_PART_TO_CACHE_ERROR; }; bool restore_cloud_object(transfer *xfer, const char *cloud_fname) { Mmsg(xfer->m_message, "Cloud driver not properly loaded"); return false; } bool is_waiting_on_server(transfer *xfer) { 
Mmsg(xfer->m_message, "Cloud driver not properly loaded"); return false; }; bool truncate_cloud_volume(const char *VolumeName, ilist *trunc_parts, cancel_callback *cancel_cb, POOLMEM *&err) { Mmsg(err, "Cloud driver not properly loaded"); return false; }; bool init(CLOUD *cloud, POOLMEM *&err) { Mmsg(err, "Cloud driver not properly loaded"); return false; }; bool term(POOLMEM *&err) { Mmsg(err, "Cloud driver not properly loaded"); return false; }; bool start_of_job(POOLMEM *&err) { Mmsg(err, "Cloud driver not properly loaded"); return false; } bool end_of_job(POOLMEM *&err) { Mmsg(err, "Cloud driver not properly loaded"); return false; }; bool get_cloud_volume_parts_list(const char* VolumeName, ilist *parts, cancel_callback *cancel_cb, POOLMEM *&err) { Mmsg(err, "Cloud driver not properly loaded"); return false; }; bool get_cloud_volumes_list(alist *volumes, cancel_callback *cancel_cb, POOLMEM *&err) { Mmsg(err, "Cloud driver not properly loaded"); return false; }; }; #endif /* _CLOUD_DRIVER_H_ */ bacula-15.0.3/src/stored/butil.c0000644000175000017500000002200714771010173016235 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * * Utility routines for "tool" programs such as bscan, bls, * bextract, ... Some routines also used by Bacula. * * Kern Sibbald, MM * * Normally nothing in this file is called by the Storage * daemon because we interact more directly with the user * i.e. printf, ... * */ #include "bacula.h" #include "stored.h" /* Forward referenced functions */ static DCR *setup_to_access_device(JCR *jcr, char *dev_name, const char *VolumeName, bool writing, bool read_dedup_data, uint32_t retry_count=10); static DEVRES *find_device_res(char *device_name, bool writing); static void my_free_jcr(JCR *jcr); /* Imported variables -- eliminate some day */ extern char *configfile; #ifdef DEBUG char *rec_state_bits_to_str(DEV_RECORD *rec) { static char buf[200]; buf[0] = 0; if (rec->state_bits & REC_NO_HEADER) { strcat(buf, _("Nohdr,")); } if (is_partial_record(rec)) { strcat(buf, _("partial,")); } if (rec->state_bits & REC_BLOCK_EMPTY) { strcat(buf, _("empty,")); } if (rec->state_bits & REC_NO_MATCH) { strcat(buf, _("Nomatch,")); } if (rec->state_bits & REC_CONTINUATION) { strcat(buf, _("cont,")); } if (buf[0]) { buf[strlen(buf)-1] = 0; } return buf; } #endif /* * Setup a pointer to my resource me */ void setup_me() { LockRes(); me = (STORES *)GetNextRes(R_STORAGE, NULL); if (!me) { UnlockRes(); Emsg1(M_ERROR_TERM, 0, _("No Storage resource defined in %s. Cannot continue.\n"), configfile); } UnlockRes(); } /* * Setup a "daemon" JCR for the various standalone * tools (e.g. bls, bextract, bscan, ...) 
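 *
 * Illustrative call as a tool might make it (a sketch only; the
 * argument values are examples, not copied from an actual caller):
 *
 *   JCR *jcr = setup_jcr("bls", dev_name, bsr, VolumeName,
 *                        false, false, 10);
 *   if (!jcr) {
 *      exit(1);
 *   }
 *
 * On success the returned JCR already has jcr->dcr (and jcr->read_dcr
 * when reading) set up by setup_to_access_device() below.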
*/ JCR *setup_jcr(const char *name, char *dev_name, BSR *bsr, const char *VolumeName, bool writing, bool read_dedup_data, uint32_t retry_count) { DCR *dcr; JCR *jcr = new_jcr(sizeof(JCR), my_free_jcr); jcr->bsr = bsr; jcr->VolSessionId = 1; jcr->VolSessionTime = (uint32_t)time(NULL); jcr->NumReadVolumes = 0; jcr->NumWriteVolumes = 0; jcr->JobId = 0; jcr->setJobType(JT_CONSOLE); jcr->setJobLevel(L_FULL); jcr->JobStatus = JS_Terminated; jcr->where = bstrdup(""); jcr->job_name = get_pool_memory(PM_FNAME); pm_strcpy(jcr->job_name, "Dummy.Job.Name"); jcr->client_name = get_pool_memory(PM_FNAME); pm_strcpy(jcr->client_name, "Dummy.Client.Name"); bstrncpy(jcr->Job, name, sizeof(jcr->Job)); jcr->fileset_name = get_pool_memory(PM_FNAME); pm_strcpy(jcr->fileset_name, "Dummy.fileset.name"); jcr->fileset_md5 = get_pool_memory(PM_FNAME); pm_strcpy(jcr->fileset_md5, "Dummy.fileset.md5"); init_autochangers(); create_volume_lists(); dcr = setup_to_access_device(jcr, dev_name, VolumeName, writing, read_dedup_data, retry_count); if (!dcr) { return NULL; } if (!bsr && VolumeName) { bstrncpy(dcr->VolumeName, VolumeName, sizeof(dcr->VolumeName)); } bstrncpy(dcr->pool_name, "Default", sizeof(dcr->pool_name)); bstrncpy(dcr->pool_type, "Backup", sizeof(dcr->pool_type)); return jcr; } /* * Setup device, jcr, and prepare to access device. * If the caller wants read access, acquire the device, otherwise, * the caller will do it. */ static DCR *setup_to_access_device(JCR *jcr, char *dev_name, const char *VolumeName, bool writing, bool read_dedup_data, uint32_t retry_count) { DEVICE *dev; char *p; DEVRES *device; DCR *dcr; char VolName[MAX_NAME_LENGTH]; init_reservations_lock(); /* * If no volume name already given and no bsr, and it is a file, * try getting name from Filename */ if (VolumeName) { bstrncpy(VolName, VolumeName, sizeof(VolName)); if (strlen(VolumeName) >= MAX_NAME_LENGTH) { Jmsg0(jcr, M_ERROR, 0, _("Volume name or names is too long. Please use a .bsr file.\n")); } } else { VolName[0] = 0; } if (!jcr->bsr && VolName[0] == 0) { if (strncmp(dev_name, "/dev/", 5) != 0) { /* Try stripping file part */ p = dev_name + strlen(dev_name); while (p >= dev_name && !IsPathSeparator(*p)) p--; if (IsPathSeparator(*p)) { bstrncpy(VolName, p+1, sizeof(VolName)); *p = 0; } } } if ((device=find_device_res(dev_name, writing)) == NULL) { Jmsg2(jcr, M_FATAL, 0, _("Cannot find device \"%s\" in config file %s.\n"), dev_name, configfile); return NULL; } dev = init_dev(jcr, device); if (!dev) { Jmsg1(jcr, M_FATAL, 0, _("Cannot init device %s\n"), dev_name); return NULL; } device->dev = dev; jcr->dcr = dcr = new_dcr(jcr, NULL, dev, writing); if (VolName[0]) { bstrncpy(dcr->VolumeName, VolName, sizeof(dcr->VolumeName)); } bstrncpy(dcr->dev_name, device->device_name, sizeof(dcr->dev_name)); if (!writing) { /* read only access? */ create_restore_volume_list(jcr, true); Dmsg0(100, "Acquire device for read\n"); if (!acquire_device_for_read(dcr, retry_count)) { return NULL; } jcr->read_dcr = dcr; if (read_dedup_data) { Dmsg0(DT_DEDUP|215, "Initialize dedup interface\n"); jcr->read_dcr->dev->setup_dedup_rehydration_interface(jcr->read_dcr); } else { dev->set_delay_adata(); /* Do not read aligned data */ } } else { if (!first_open_device(dcr)) { Jmsg1(jcr, M_FATAL, 0, _("Cannot open %s\n"), dev->print_name()); return NULL; } jcr->dcr = dcr; /* write dcr */ } return dcr; } /* * Called here when freeing JCR so that we can get rid * of "daemon" specific memory allocated. 
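 *
 * Registered as the daemon-specific free routine in setup_jcr() above
 * via new_jcr(sizeof(JCR), my_free_jcr), so it is invoked when the
 * tool's JCR is freed.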
*/ static void my_free_jcr(JCR *jcr) { if (jcr->job_name) { free_pool_memory(jcr->job_name); jcr->job_name = NULL; } if (jcr->client_name) { free_pool_memory(jcr->client_name); jcr->client_name = NULL; } if (jcr->fileset_name) { free_pool_memory(jcr->fileset_name); jcr->fileset_name = NULL; } if (jcr->fileset_md5) { free_pool_memory(jcr->fileset_md5); jcr->fileset_md5 = NULL; } if (jcr->comment) { free_pool_memory(jcr->comment); jcr->comment = NULL; } if (jcr->VolList) { free_restore_volume_list(jcr); } if (jcr->dcr) { free_dcr(jcr->dcr); jcr->dcr = NULL; } return; } /* * Search for device resource that corresponds to * device name on command line (or default). * * Returns: NULL on failure * Device resource pointer on success */ static DEVRES *find_device_res(char *device_name, bool write_access) { bool found = false; DEVRES *device; Dmsg0(900, "Enter find_device_res\n"); LockRes(); foreach_res(device, R_DEVICE) { Dmsg2(900, "Compare %s and %s\n", device->device_name, device_name); if (strcmp(device->device_name, device_name) == 0) { found = true; break; } } if (!found) { /* Search for name of Device resource rather than archive name */ if (device_name[0] == '"') { int len = strlen(device_name); bstrncpy(device_name, device_name+1, len+1); len--; if (len > 0) { device_name[len-1] = 0; /* zap trailing " */ } } foreach_res(device, R_DEVICE) { Dmsg2(900, "Compare %s and %s\n", device->hdr.name, device_name); if (strcmp(device->hdr.name, device_name) == 0) { found = true; break; } } } UnlockRes(); if (!found) { Pmsg2(0, _("Could not find device \"%s\" in config file %s.\n"), device_name, configfile); return NULL; } if (write_access) { Pmsg1(0, _("Using device: \"%s\" for writing.\n"), device_name); } else { Pmsg1(0, _("Using device: \"%s\" for reading.\n"), device_name); } return device; } /* * Device got an error, attempt to analyse it */ void display_tape_error_status(JCR *jcr, DEVICE *dev) { uint32_t status; status = status_dev(dev); Dmsg1(20, "Device status: %x\n", status); if (status & BMT_EOD) Jmsg(jcr, M_ERROR, 0, _("Unexpected End of Data\n")); else if (status & BMT_EOT) Jmsg(jcr, M_ERROR, 0, _("Unexpected End of Tape\n")); else if (status & BMT_EOF) Jmsg(jcr, M_ERROR, 0, _("Unexpected End of File\n")); else if (status & BMT_DR_OPEN) Jmsg(jcr, M_ERROR, 0, _("Tape Door is Open\n")); else if (!(status & BMT_ONLINE)) Jmsg(jcr, M_ERROR, 0, _("Unexpected Tape is Off-line\n")); } bacula-15.0.3/src/stored/bacula-sd.conf.in0000644000175000017500000002317314771010173020066 0ustar bsbuildbsbuild# # Default Bacula Storage Daemon Configuration file # # For Bacula release @VERSION@ (@DATE@) -- @DISTNAME@ @DISTVER@ # # You may need to change the name of your tape drive # on the "Archive Device" directive in the Device # resource. If you change the Name and/or the # "Media Type" in the Device resource, please ensure # that dird.conf has corresponding changes. 
# # # Copyright (C) 2000-2022 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # Storage { # definition of myself Name = @basename@-sd SDPort = @sd_port@ # Director's port WorkingDirectory = "@working_dir@" Pid Directory = "@piddir@" Plugin Directory = "@plugindir@" Maximum Concurrent Jobs = 20 Encryption Command = "@scriptdir@/key-manager.py getkey" } # # List Directors who are permitted to contact Storage daemon # Director { Name = @basename@-dir Password = "@sd_password@" } # # Restricted Director, used by tray-monitor to get the # status of the storage daemon # Director { Name = @basename@-mon Password = "@mon_sd_password@" Monitor = yes } # # Note, for a list of additional Device templates please # see the directory /examples/devices # Or follow the following link: # http://www.bacula.org/git/cgit.cgi/bacula/tree/bacula/examples/devices?h=Branch-7.4 # # # Devices supported by this Storage daemon # To connect, the Director's bacula-dir.conf must have the # same Name and MediaType. # # # Define a Virtual autochanger # Autochanger { Name = FileChgr1 Device = FileChgr1-Dev1, FileChgr1-Dev2 Changer Command = "" Changer Device = /dev/null } Device { Name = FileChgr1-Dev1 Media Type = File1 Archive Device = @archivedir@ LabelMedia = yes; # lets Bacula label unlabeled media Random Access = Yes; AutomaticMount = yes; # when device opened, read it RemovableMedia = no; AlwaysOpen = no; Maximum Concurrent Jobs = 5 } Device { Name = FileChgr1-Dev2 Media Type = File1 Archive Device = @archivedir@ LabelMedia = yes; # lets Bacula label unlabeled media Random Access = Yes; AutomaticMount = yes; # when device opened, read it RemovableMedia = no; AlwaysOpen = no; Maximum Concurrent Jobs = 5 } # # Define a second Virtual autochanger # Autochanger { Name = FileChgr2 Device = FileChgr2-Dev1, FileChgr2-Dev2 Changer Command = "" Changer Device = /dev/null } Device { Name = FileChgr2-Dev1 Media Type = File2 Archive Device = @archivedir@ LabelMedia = yes; # lets Bacula label unlabeled media Random Access = Yes; AutomaticMount = yes; # when device opened, read it RemovableMedia = no; AlwaysOpen = no; Maximum Concurrent Jobs = 5 } Device { Name = FileChgr2-Dev2 Media Type = File2 Archive Device = @archivedir@ LabelMedia = yes; # lets Bacula label unlabeled media Random Access = Yes; AutomaticMount = yes; # when device opened, read it RemovableMedia = no; AlwaysOpen = no; Maximum Concurrent Jobs = 5 } # # An autochanger device with two drives # #Autochanger { # Name = Autochanger # Device = Drive-1 # Device = Drive-2 # Changer Command = "@scriptdir@/mtx-changer %c %o %S %a %d" # Changer Device = /dev/sg0 #} #Device { # Name = Drive-1 # # Drive Index = 0 # Media Type = DLT-8000 # Archive Device = /dev/nst0 # AutomaticMount = yes; # when device opened, read it # AlwaysOpen = yes; # RemovableMedia = yes; # RandomAccess = no; # AutoChanger = yes # # # # New alert command in Bacula 9.0.0 # # Note: you must have the sg3_utils (rpms) or the # # sg3-utils (deb) installed on your system. # # and you must set the correct control device that # # corresponds to the Archive Device # Control Device = /dev/sg?? # must be SCSI ctl for /dev/nst0 # Alert Command = "@scriptdir@/tapealert %l" # # # # # Enable the Alert command only if you have the mtx package loaded # # Note, apparently on some systems, tapeinfo resets the SCSI controller # # thus if you turn this on, make sure it does not reset your SCSI # # controller. I have never had any problems, and smartctl does # # not seem to cause such problems. 
# # # Alert Command = "sh -c 'tapeinfo -f %c |grep TapeAlert|cat'" # If you have smartctl, enable this, it has more info than tapeinfo # Alert Command = "sh -c 'smartctl -H -l error %c'" #} #Device { # Name = Drive-2 # # Drive Index = 1 # Media Type = DLT-8000 # Archive Device = /dev/nst1 # AutomaticMount = yes; # when device opened, read it # AlwaysOpen = yes; # RemovableMedia = yes; # RandomAccess = no; # AutoChanger = yes # # Enable the Alert command only if you have the mtx package loaded # Alert Command = "sh -c 'tapeinfo -f %c |grep TapeAlert|cat'" # If you have smartctl, enable this, it has more info than tapeinfo # Alert Command = "sh -c 'smartctl -H -l error %c'" #} # # A Linux or Solaris LTO-2 tape drive # #Device { # Name = LTO-2 # Media Type = LTO-2 # Archive Device = @TAPEDRIVE@ # AutomaticMount = yes; # when device opened, read it # AlwaysOpen = yes; # RemovableMedia = yes; # RandomAccess = no; # Maximum File Size = 3GB ## Changer Command = "@scriptdir@/mtx-changer %c %o %S %a %d" ## Changer Device = /dev/sg0 ## AutoChanger = yes # # Enable the Alert command only if you have the mtx package loaded ## Alert Command = "sh -c 'tapeinfo -f %c |grep TapeAlert|cat'" ## If you have smartctl, enable this, it has more info than tapeinfo ## Alert Command = "sh -c 'smartctl -H -l error %c'" #} # # A Linux or Solaris LTO-3 tape drive # #Device { # Name = LTO-3 # Media Type = LTO-3 # Archive Device = @TAPEDRIVE@ # AutomaticMount = yes; # when device opened, read it # AlwaysOpen = yes; # RemovableMedia = yes; # RandomAccess = no; # Maximum File Size = 4GB # Changer Command = "@scriptdir@/mtx-changer %c %o %S %a %d" # Changer Device = /dev/sg0 # AutoChanger = yes # # # # New alert command in Bacula 9.0.0 # # Note: you must have the sg3_utils (rpms) or the # # sg3-utils (deb) installed on your system. # # and you must set the correct control device that # # corresponds to the Archive Device # Control Device = /dev/sg?? # must be SCSI ctl for @TAPEDRIVE@ # Alert Command = "@scriptdir@/tapealert %l" # # # Enable the Alert command only if you have the mtx package loaded ## Alert Command = "sh -c 'tapeinfo -f %c |grep TapeAlert|cat'" ## If you have smartctl, enable this, it has more info than tapeinfo ## Alert Command = "sh -c 'smartctl -H -l error %c'" #} # # A Linux or Solaris LTO-4 tape drive # #Device { # Name = LTO-4 # Media Type = LTO-4 # Archive Device = @TAPEDRIVE@ # AutomaticMount = yes; # when device opened, read it # AlwaysOpen = yes; # RemovableMedia = yes; # RandomAccess = no; # Maximum File Size = 5GB # Changer Command = "@scriptdir@/mtx-changer %c %o %S %a %d" # Changer Device = /dev/sg0 # AutoChanger = yes # # # # New alert command in Bacula 9.0.0 # # Note: you must have the sg3_utils (rpms) or the # # sg3-utils (deb) installed on your system. # # and you must set the correct control device that # # corresponds to the Archive Device # Control Device = /dev/sg?? 
# must be SCSI ctl for @TAPEDRIVE@ # Alert Command = "@scriptdir@/tapealert %l" # # # Enable the Alert command only if you have the mtx package loaded ## Alert Command = "sh -c 'tapeinfo -f %c |grep TapeAlert|cat'" ## If you have smartctl, enable this, it has more info than tapeinfo ## Alert Command = "sh -c 'smartctl -H -l error %c'" #} # # An HP-UX tape drive # #Device { # Name = Drive-1 # # Drive Index = 0 # Media Type = DLT-8000 # Archive Device = /dev/rmt/1mnb # AutomaticMount = yes; # when device opened, read it # AlwaysOpen = yes; # RemovableMedia = yes; # RandomAccess = no; # AutoChanger = no # Two EOF = yes # Hardware End of Medium = no # Fast Forward Space File = no # # # # New alert command in Bacula 9.0.0 # # Note: you must have the sg3_utils (rpms) or the # # sg3-utils (deb) installed on your system. # # and you must set the correct control device that # # corresponds to the Archive Device # Control Device = /dev/sg?? # must be SCSI ctl for /dev/rmt/1mnb # Alert Command = "@scriptdir@/tapealert %l" # # # # # Enable the Alert command only if you have the mtx package loaded # Alert Command = "sh -c 'tapeinfo -f %c |grep TapeAlert|cat'" # If you have smartctl, enable this, it has more info than tapeinfo # Alert Command = "sh -c 'smartctl -H -l error %c'" #} # # A FreeBSD tape drive # #Device { # Name = DDS-4 # Description = "DDS-4 for FreeBSD" # Media Type = DDS-4 # Archive Device = /dev/nsa1 # AutomaticMount = yes; # when device opened, read it # AlwaysOpen = yes # Offline On Unmount = no # Hardware End of Medium = no # BSF at EOM = yes # Backward Space Record = no # Fast Forward Space File = no # TWO EOF = yes # # # # New alert command in Bacula 9.0.0 # # Note: you must have the sg3_utils (rpms) or the # # sg3-utils (deb) installed on your system. # # and you must set the correct control device that # # corresponds to the Archive Device # Control Device = /dev/sg?? # must be SCSI ctl for /dev/nsa1 # Alert Command = "@scriptdir@/tapealert %l" # # If you have smartctl, enable this, it has more info than tapeinfo # Alert Command = "sh -c 'smartctl -H -l error %c'" #} # # Send all messages to the Director, # mount messages also are sent to the email address # Messages { Name = Standard director = @basename@-dir = all } bacula-15.0.3/src/stored/record.h0000644000175000017500000002675214771010173016414 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Record, and label definitions for Bacula * media data format. 
* * Kern Sibbald, MM * */ #ifndef __RECORD_H #define __RECORD_H 1 /* Return codes from read_device_volume_label() */ enum { VOL_NOT_READ = 1, /* Volume label not read */ VOL_OK = 2, /* volume name OK */ VOL_NO_LABEL = 3, /* volume not labeled */ VOL_IO_ERROR = 4, /* volume I/O error */ VOL_NAME_ERROR = 5, /* Volume name mismatch */ VOL_CREATE_ERROR = 6, /* Error creating label */ VOL_VERSION_ERROR = 7, /* Bacula version error */ VOL_LABEL_ERROR = 8, /* Bad label type */ VOL_NO_MEDIA = 9, /* Hard error -- no media present */ VOL_TYPE_ERROR = 10, /* Volume type (aligned/non-aligned) error */ VOL_ENC_ERROR = 11 /* something wrong with the encryption key */ }; enum rec_state { st_none, /* 0 No state */ st_header, /* 1 Write header */ st_cont_header, /* 2 Write continuation header */ st_data, /* 3 Write data record */ st_adata_blkhdr, /* 4 Adata block header */ st_adata_rechdr, /* 5 Adata record header */ st_cont_adata_rechdr, /* 6 Adata continuation rechdr */ st_adata, /* 7 Write aligned data */ st_cont_adata, /* 8 Write more aligned data */ st_adata_label, /* 9 Writing adata vol label */ st_header_only /* 10 Read only the header (aligned) */ }; /* See block.h for RECHDR_LENGTH */ /* * This is the Media structure for a record header. * NB: when it is written it is serialized. uint32_t VolSessionId; uint32_t VolSessionTime; * The above 8 bytes are only written in a BB01 block, BB02 * and later blocks contain these values in the block header * rather than the record header. int32_t FileIndex; int32_t Stream; uint32_t data_len; */ /* Record state bit definitions */ #define REC_NO_HEADER (1<<0) /* No header read */ #define REC_PARTIAL_RECORD (1<<1) /* returning partial record */ #define REC_BLOCK_EMPTY (1<<2) /* Not enough data in block */ #define REC_NO_MATCH (1<<3) /* No match on continuation data */ #define REC_CONTINUATION (1<<4) /* Continuation record found */ #define REC_ISTAPE (1<<5) /* Set if device is tape */ #define REC_ADATA_EMPTY (1<<6) /* Not enough adata in block */ #define REC_NO_SPLIT (1<<7) /* Do not split this record */ #define is_partial_record(r) ((r)->state_bits & REC_PARTIAL_RECORD) #define is_block_marked_empty(r) ((r)->state_bits & (REC_BLOCK_EMPTY|REC_ADATA_EMPTY)) /* * DEV_RECORD for reading and writing records. * It consists of a Record Header, and the Record Data * * This is the memory structure for the record header. */ struct BSR; /* satisfy forward reference */ struct VOL_LIST; struct DEV_RECORD { dlink link; /* link for chaining in read_record.c */ /* File and Block are always returned during reading * and writing records. 
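    *  (As the record header layout above shows, the serialised header holds
    *  FileIndex, Stream and data_len as three 32-bit values in BB02 and
    *  later blocks; BB01 blocks additionally carry VolSessionId and
    *  VolSessionTime here.  See block.h for the exact RECHDR_LENGTH.)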
*/ uint64_t StreamLen; /* Expected data stream length */ uint64_t FileOffset; /* Offset of this record inside the file */ uint64_t StartAddr; /* Start address (when the record is partial) */ uint64_t Addr; /* Record address */ uint32_t VolSessionId; /* sequential id within this session */ uint32_t VolSessionTime; /* session start time */ int32_t FileIndex; /* sequential file number */ int32_t Stream; /* Full Stream number with high bits */ int32_t last_FI; /* previous fi for adata */ int32_t last_Stream; /* previous stream for adata */ int32_t maskedStream; /* Masked Stream without high bits */ uint32_t data_len; /* current record length */ uint32_t remainder; /* remaining bytes to read/write */ uint32_t adata_remainder; /* remaining adata bytes to read/write */ uint32_t remlen; /* temp remainder bytes */ uint32_t data_bytes; /* data_bytes */ uint32_t extra_bytes; /* the extra size that must be accounted in VolABytes */ uint32_t state_bits; /* state bits */ uint32_t RecNum; /* Record number in the block */ int BlockVer; /* from the block header: 1,2,3 from BB?? */ uint32_t blkh_options; /* from the block header: about block encryption */ uint32_t BlockNumber; /* Block number for this record (used in read_records()) */ bool invalid; /* The record may be invalid if it was merged with a previous record */ rec_state wstate; /* state of write_record_to_block */ rec_state rstate; /* state of read_record_from_block */ BSR *bsr; /* pointer to bsr that matched */ POOLMEM *data; /* Record data. This MUST be a memory pool item */ const char *VolumeName; /* From JCR::VolList::VolumeName, freed at the end */ int32_t match_stat; /* bsr match status */ uint32_t last_VolSessionId; /* used in sequencing FI for Vbackup */ uint32_t last_VolSessionTime; int32_t last_FileIndex; }; /* * Values for LabelType that are put into the FileIndex field * Note, these values are negative to distinguish them * from user records where the FileIndex is forced positive. */ #define PRE_LABEL -1 /* Vol label on unwritten tape */ #define VOL_LABEL -2 /* Volume label first file */ #define EOM_LABEL -3 /* Writen at end of tape */ #define SOS_LABEL -4 /* Start of Session */ #define EOS_LABEL -5 /* End of Session */ #define EOT_LABEL -6 /* End of physical tape (2 eofs) */ #define SOB_LABEL -7 /* Start of object -- file/directory */ #define EOB_LABEL -8 /* End of object (after all streams) */ /* * Volume Label Record. This is the in-memory definition. The * tape definition is defined in the serialization code itself * ser_volume_label() and unser_volume_label() and is slightly different. */ struct Volume_Label { /* * The first items in this structure are saved * in the DEVICE buffer, but are not actually written * to the tape. */ int32_t LabelType; /* This is written in header only */ uint32_t LabelSize; /* length of serialized label */ /* * The items below this line are stored on * the tape */ char Id[32]; /* Bacula Immortal ... */ uint32_t VerNum; /* Label version number */ int BlockVer; /* 1, 2 or .. 
from BB01, BB02 from the FIRST block */ uint32_t blkh_options; /* the block options bits from the FIRST block */ /* VerNum <= 10 */ float64_t label_date; /* Date tape labeled */ float64_t label_time; /* Time tape labeled */ /* VerNum >= 11 */ btime_t label_btime; /* tdate tape labeled */ btime_t write_btime; /* tdate tape written */ /* Unused with VerNum >= 11 */ float64_t write_date; /* Date this label written */ float64_t write_time; /* Time this label written */ char VolumeName[MAX_NAME_LENGTH]; /* Volume name */ char PrevVolumeName[MAX_NAME_LENGTH]; /* Previous Volume Name */ char PoolName[MAX_NAME_LENGTH]; /* Pool name */ char PoolType[MAX_NAME_LENGTH]; /* Pool type */ char MediaType[MAX_NAME_LENGTH]; /* Type of this media */ char HostName[MAX_NAME_LENGTH]; /* Host name of writing computer */ char LabelProg[50]; /* Label program name */ char ProgVersion[50]; /* Program version */ char ProgDate[50]; /* Program build date/time */ /* Mostly for aligned volumes, BlockSize also used for dedup volumes */ char AlignedVolumeName[MAX_NAME_LENGTH+4]; /* Aligned block volume name */ uint64_t FirstData; /* Offset to first data address */ uint32_t FileAlignment; /* File alignment factor */ uint32_t PaddingSize; /* Block padding */ uint32_t BlockSize; /* Basic block size */ /* For Cloud */ uint64_t MaxPartSize; /* Maximum Part Size */ uint32_t MaxVolPartsNum; /* Maximum Num of parts in a volume */ /* For Volume encryption */ bool is_vol_encrypted; uint32_t EncCypherKeySize; uint32_t MasterKeyIdSize; unsigned char EncCypherKey[MAX_ENC_CIPHER_KEY_LEN]; /* The encryption key encrypted with the MasterKey */ unsigned char MasterKeyId[MAX_MASTERKEY_ID_LEN]; /* the ID of the MasterKey */ /* For Volume Signature (not yet implemented) */ uint32_t SignatureKeyIdSize; unsigned char SignatureKeyId[MAX_MASTERKEY_ID_LEN]; }; #define SER_LENGTH_Volume_Label 2048 /* max serialised length of volume label */ #define SER_LENGTH_Session_Label 1024 /* max serialised length of session label */ typedef struct Volume_Label VOLUME_LABEL; /* * Session Start/End Label * This record is at the beginning and end of each session */ struct Session_Label { char Id[32]; /* Bacula Immortal ... */ uint32_t VerNum; /* Label version number */ uint32_t JobId; /* Job id */ uint32_t VolumeIndex; /* Sequence no of volume for this job */ /* VerNum >= 11 */ btime_t write_btime; /* Tdate this label written */ /* VerNum < 11 */ float64_t write_date; /* Date this label written */ /* Unused VerNum >= 11 */ float64_t write_time; /* Time this label written */ char PoolName[MAX_NAME_LENGTH]; /* Pool name */ char PoolType[MAX_NAME_LENGTH]; /* Pool type */ char JobName[MAX_NAME_LENGTH]; /* base Job name */ char ClientName[MAX_NAME_LENGTH]; char Job[MAX_NAME_LENGTH]; /* Unique name of this Job */ char FileSetName[MAX_NAME_LENGTH]; char FileSetMD5[MAX_NAME_LENGTH]; uint32_t JobType; uint32_t JobLevel; /* The remainder are part of EOS label only */ uint32_t JobFiles; uint64_t JobBytes; uint32_t StartBlock; uint32_t EndBlock; uint32_t StartFile; uint32_t EndFile; uint32_t JobErrors; uint32_t JobStatus; /* Job status */ }; typedef struct Session_Label SESSION_LABEL; #define SERIAL_BUFSIZE 1024 /* volume serialisation buffer size */ #endif bacula-15.0.3/src/stored/bsdjson.c0000644000175000017500000005715414771010173016573 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. 
You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Bacula conf to json * * Kern Sibbald, MMXII * */ #include "bacula.h" #include "stored.h" /* Imported functions */ extern bool parse_sd_config(CONFIG *config, const char *configfile, int exit_code); /* Imported variables */ #if defined(_MSC_VER) extern "C" { // work around visual compiler mangling variables extern URES res_all; } #else extern URES res_all; #endif extern s_kw msg_types[]; extern s_kw dev_types[]; extern s_kw tapelabels[]; extern s_kw cloud_drivers[]; extern s_kw dedup_drivers[]; extern s_kw trunc_opts[]; extern s_kw upload_opts[]; extern s_kw proto_opts[]; extern s_kw uri_opts[]; extern s_kw restore_prio_opts[]; extern s_kw objects_default_tiers[]; extern RES_TABLE resources[]; typedef struct { bool do_list; bool do_one; bool do_only_data; char *resource_type; char *resource_name; regex_t directive_reg; } display_filter; /* Forward referenced functions */ void terminate_stored(int sig); static int check_resources(); static void dump_json(display_filter *filter); #define CONFIG_FILE "bacula-sd.conf" /* Default config file */ /* Global variables exported */ STORES *me = NULL; /* our Global resource */ char *configfile = NULL; /* Global static variables */ static CONFIG *config; static void sendit(void *sock, const char *fmt, ...); static void usage() { fprintf(stderr, _( PROG_COPYRIGHT "\n%sVersion: %s (%s)\n\n" "Usage: bsdjson [options] [config_file]\n" " -r get resource type \n" " -n get resource \n" " -l get only directives matching dirs (use with -r)\n" " -D get only data\n" " -c use as configuration file\n" " -d set debug level to \n" " -dt print timestamp in debug output\n" " -t test - read config and exit\n" " -v verbose user messages\n" " -? 
print this message.\n" "\n"), 2012, BDEMO, VERSION, BDATE); exit(1); } /********************************************************************* * * Main Bacula Unix Storage Daemon * */ #if defined(HAVE_WIN32) #define main BaculaMain #endif int main (int argc, char *argv[]) { int ch; bool test_config = false; display_filter filter; memset(&filter, 0, sizeof(filter)); setlocale(LC_ALL, ""); bindtextdomain("bacula", LOCALEDIR); textdomain("bacula"); my_name_is(argc, argv, "bacula-sd"); init_msg(NULL, NULL); while ((ch = getopt(argc, argv, "Dc:d:tv?r:n:l:")) != -1) { switch (ch) { case 'D': filter.do_only_data = true; break; case 'l': filter.do_list = true; /* Might use something like -l '^(Name|Description)$' */ filter.do_list = true; if (regcomp(&filter.directive_reg, optarg, REG_EXTENDED) != 0) { Jmsg((JCR *)NULL, M_ERROR_TERM, 0, _("Please use valid -l argument: %s\n"), optarg); } break; case 'r': filter.resource_type = optarg; break; case 'n': filter.resource_name = optarg; break; case 'c': /* configuration file */ if (configfile != NULL) { free(configfile); } configfile = bstrdup(optarg); break; case 'd': /* debug level */ if (*optarg == 't') { dbg_timestamp = true; } else { debug_level = atoi(optarg); if (debug_level <= 0) { debug_level = 1; } } break; case 't': test_config = true; break; case 'v': /* verbose */ verbose++; break; case '?': default: usage(); break; } } argc -= optind; argv += optind; if (argc) { if (configfile != NULL) { free(configfile); } configfile = bstrdup(*argv); argc--; argv++; } if (argc) { usage(); } if (filter.do_list && !filter.resource_type) { usage(); } if (filter.resource_type && filter.resource_name) { filter.do_one = true; } if (configfile == NULL || configfile[0] == 0) { configfile = bstrdup(CONFIG_FILE); } if (test_config && verbose > 0) { char buf[1024]; find_config_file(configfile, buf, sizeof(buf)); sendit(NULL, "config_file=%s\n", buf); } config = New(CONFIG()); config->encode_password(false); parse_sd_config(config, configfile, M_ERROR_TERM); if (!check_resources()) { Jmsg((JCR *)NULL, M_ERROR_TERM, 0, _("Please correct configuration file: %s\n"), configfile); } if (test_config) { terminate_stored(0); } my_name_is(0, (char **)NULL, me->hdr.name); /* Set our real name */ dump_json(&filter); if (filter.do_list) { regfree(&filter.directive_reg); } terminate_stored(0); } static void display_devtype(HPKT &hpkt) { int i; for (i=0; dev_types[i].name; i++) { if (*(int32_t *)(hpkt.ritem->value) == dev_types[i].token) { hpkt.sendit(hpkt, "\n \"%s\": \"%s\"", hpkt.ritem->name, dev_types[i].name); return; } } } static void display_enctype(HPKT &hpkt) { int i; for (i=0; enc_types[i].name; i++) { if (*(int32_t *)(hpkt.ritem->value) == enc_types[i].token) { hpkt.sendit(hpkt, "\n \"%s\": \"%s\"", hpkt.ritem->name, enc_types[i].name); return; } } } static void display_label(HPKT &hpkt) { int i; for (i=0; tapelabels[i].name; i++) { if (*(int32_t *)(hpkt.ritem->value) == tapelabels[i].token) { hpkt.sendit(hpkt, "\n \"%s\": \"%s\"", hpkt.ritem->name, tapelabels[i].name); return; } } } static void display_cloud_driver(HPKT &hpkt) { int i; for (i=0; cloud_drivers[i].name; i++) { if (*(int32_t *)(hpkt.ritem->value) == cloud_drivers[i].token) { hpkt.sendit(hpkt, "\n \"%s\": \"%s\"", hpkt.ritem->name, cloud_drivers[i].name); return; } } } #ifdef SD_DEDUP_SUPPORT static void display_dedup_driver(HPKT &hpkt) { int i; for (i=0; dedup_drivers[i].name; i++) { if (*(int32_t *)(hpkt.ritem->value) == dedup_drivers[i].token) { hpkt.sendit(hpkt, "\n \"%s\": \"%s\"", hpkt.ritem->name, 
dedup_drivers[i].name); return; } } } #endif static void display_protocol(HPKT &hpkt) { int i; for (i=0; proto_opts[i].name; i++) { if (*(int32_t *)(hpkt.ritem->value) == proto_opts[i].token) { hpkt.sendit(hpkt, "\n \"%s\": \"%s\"", hpkt.ritem->name, proto_opts[i].name); return; } } } static void display_truncate_cache(HPKT &hpkt) { int i; for (i=0; trunc_opts[i].name; i++) { if (*(int32_t *)(hpkt.ritem->value) == trunc_opts[i].token) { hpkt.sendit(hpkt, "\n \"%s\": \"%s\"", hpkt.ritem->name, trunc_opts[i].name); return; } } } static void display_uri_style(HPKT &hpkt) { int i; for (i=0; uri_opts[i].name; i++) { if (*(int32_t *)(hpkt.ritem->value) == uri_opts[i].token) { hpkt.sendit(hpkt, "\n \"%s\": \"%s\"", hpkt.ritem->name, uri_opts[i].name); return; } } } static void display_upload(HPKT &hpkt) { int i; for (i=0; upload_opts[i].name; i++) { if (*(int32_t *)(hpkt.ritem->value) == upload_opts[i].token) { hpkt.sendit(hpkt, "\n \"%s\": \"%s\"", hpkt.ritem->name, upload_opts[i].name); return; } } } static void display_transfer_priority(HPKT &hpkt) { int i; for (i=0; restore_prio_opts[i].name; i++) { if (*(int32_t *)(hpkt.ritem->value) == restore_prio_opts[i].token) { hpkt.sendit(hpkt, "\n \"%s\": \"%s\"", hpkt.ritem->name, restore_prio_opts[i].name); return; } } } static void display_objects_default_tier(HPKT &hpkt) { int i; for (i=0; objects_default_tiers[i].name; i++) { if (*(int32_t *)(hpkt.ritem->value) == objects_default_tiers[i].token) { hpkt.sendit(hpkt, "\n \"%s\": \"%s\"", hpkt.ritem->name, objects_default_tiers[i].name); return; } } } /* * Dump out all resources in json format. * Note!!!! This routine must be in this file rather * than in src/lib/parser_conf.c otherwise the pointers * will be all messed up. */ static void dump_json(display_filter *filter) { int resinx, item, directives, first_directive; bool first_res; RES_ITEM *items; RES *res; HPKT hpkt; regmatch_t pmatch[32]; STORES *me = (STORES *)GetNextRes(R_STORAGE, NULL); if (init_crypto() != 0) { Emsg0(M_ERROR_TERM, 0, _("Cryptography library initialization failed.\n")); } init_hpkt(hpkt); if (filter->do_only_data) { hpkt.sendit(hpkt, "["); /* List resources and directives */ /* { "aa": { "Name": "aa",.. }, "bb": { "Name": "bb", ... } * or print a single item */ } else if (filter->do_one || filter->do_list) { hpkt.sendit(hpkt, "{"); } else { /* [ { "Device": { "Name": "aa",.. } }, { "Director": { "Name": "bb", ... 
} } ]*/ hpkt.sendit(hpkt, "["); } first_res = true; /* Loop over all resource types */ for (resinx=0; resources[resinx].name; resinx++) { if (!resources[resinx].items) { continue; /* skip dummy entries */ } /* Skip this resource type */ if (filter->resource_type && strcasecmp(filter->resource_type, resources[resinx].name) != 0) { continue; } directives = 0; /* Loop over all resources of this type */ foreach_rblist(res, res_head[resinx]->res_list) { hpkt.res = res; items = resources[resinx].items; if (!items) { continue; } /* Copy the resource into res_all */ memcpy(&res_all, res, sizeof(res_all)); if (strcmp(resources[resinx].name, "Dedupengine") == 0 && strcmp(res_all.hdr.name, "*Dedup*" /*default_legacy_dedupengine*/) == 0) { continue; /* Don't display the dummy Dedupengine "*Dedup*" resource */ } if (filter->resource_name) { bool skip=true; /* The Name should be at the first place, so this is not a real loop */ for (item=0; items[item].name; item++) { if (strcasecmp(items[item].name, "Name") == 0) { if (strcasecmp(*(items[item].value), filter->resource_name) == 0) { skip = false; } break; } } if (skip) { /* The name doesn't match, so skip it */ continue; } } if (first_res) { hpkt.sendit(hpkt, "\n"); } else { hpkt.sendit(hpkt, ",\n"); } if (filter->do_only_data) { hpkt.sendit(hpkt, " {"); } else if (filter->do_one) { /* Nothing to print */ /* When sending the list, the form is: * { aa: { Name: aa, Description: aadesc...}, bb: { Name: bb */ } else if (filter->do_list) { /* Search and display Name, should be the first item */ for (item=0; items[item].name; item++) { if (strcmp(items[item].name, "Name") == 0) { hpkt.sendit(hpkt, "%s: {\n", quote_string(hpkt.edbuf2, *items[item].value)); break; } } } else { /* Begin new resource */ hpkt.sendit(hpkt, "{\n \"%s\": {", resources[resinx].name); } first_res = false; first_directive = 0; directives = 0; for (item=0; items[item].name; item++) { /* Check user argument -l */ if (filter->do_list && regexec(&filter->directive_reg, items[item].name, 32, pmatch, 0) != 0) { continue; } hpkt.ritem = &items[item]; if (bit_is_set(item, res_all.hdr.item_present)) { if (first_directive++ > 0) printf(","); /* 1: found, 0: not found, -1 found but empty */ int ret = display_global_item(hpkt); if (ret == -1) { /* Do not print a comma after this empty directive */ first_directive = 0; } else if (ret == 1) { /* Fall-through wanted */ } else if (items[item].handler == store_maxblocksize) { display_int32_pair(hpkt); } else if (items[item].handler == store_devtype) { display_devtype(hpkt); } else if (items[item].handler == store_enctype) { display_enctype(hpkt); } else if (items[item].handler == store_label) { display_label(hpkt); } else if (items[item].handler == store_cloud_driver) { display_cloud_driver(hpkt); #ifdef SD_DEDUP_SUPPORT } else if (items[item].handler == store_dedup_driver) { display_dedup_driver(hpkt); #endif } else if (items[item].handler == store_protocol) { display_protocol(hpkt); } else if (items[item].handler == store_uri_style) { display_uri_style(hpkt); } else if (items[item].handler == store_truncate) { display_truncate_cache(hpkt); } else if (items[item].handler == store_upload) { display_upload(hpkt); } else if (items[item].handler == store_coll_type) { display_collector_types(hpkt); } else if (items[item].handler == store_transfer_priority) { display_transfer_priority(hpkt); } else if (items[item].handler == store_objects_default_tier) { display_objects_default_tier(hpkt); } else { printf("\n \"%s\": \"null\"", items[item].name); } 
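            /* At this point the directive has been rendered: either by the
             * generic display_global_item(), by one of the type-specific
             * display_*() helpers selected on its store_xxx handler above,
             * or by the literal "null" fallback when no handler matched. */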
directives++; } else { /* end if is present */ /* For some directive, the bitmap is not set (like addresses) */ if (me && strcmp(resources[resinx].name, "Storage") == 0) { if (strcmp(items[item].name, "SdPort") == 0) { if (get_first_port_host_order(me->sdaddrs) != items[item].default_value) { if (first_directive++ > 0) hpkt.sendit(hpkt, ","); hpkt.sendit(hpkt, "\n \"SdPort\": %d", get_first_port_host_order(me->sdaddrs)); } } else if (me && strcmp(items[item].name, "SdAddress") == 0) { char buf[500]; get_first_address(me->sdaddrs, buf, sizeof(buf)); if (strcmp(buf, "0.0.0.0") != 0) { if (first_directive++ > 0) hpkt.sendit(hpkt, ","); hpkt.sendit(hpkt, "\n \"SdAddress\": \"%s\"", buf); } } } } if (items[item].flags & ITEM_LAST) { display_last(hpkt); /* If last bit set always call to cleanup */ } } /* { "aa": { "Name": "aa",.. }, "bb": { "Name": "bb", ... } */ if (filter->do_only_data || filter->do_list) { hpkt.sendit(hpkt, "\n }"); /* Finish the Resource with a single } */ } else { if (filter->do_one) { /* don't print anything */ } else if (first_directive > 0) { hpkt.sendit(hpkt, "\n }\n}"); /* end of resource */ } else { hpkt.sendit(hpkt, "}\n }"); } } } /* End loop over all resources of this type */ } /* End loop all resource types */ if (filter->do_only_data) { hpkt.sendit(hpkt, "\n]\n"); /* In list context, we are dealing with a hash */ } else if (filter->do_one || filter->do_list) { hpkt.sendit(hpkt, "\n}\n"); } else { hpkt.sendit(hpkt, "\n]\n"); } term_hpkt(hpkt); } /* Check Configuration file for necessary info */ static int check_resources() { bool OK = true; bool tls_needed; AUTOCHANGER *changer; DEVRES *device; me = (STORES *)GetNextRes(R_STORAGE, NULL); if (!me) { Jmsg1(NULL, M_ERROR, 0, _("No Storage resource defined in %s. Cannot continue.\n"), configfile); OK = false; } if (GetNextRes(R_STORAGE, (RES *)me) != NULL) { Jmsg1(NULL, M_ERROR, 0, _("Only one Storage resource permitted in %s\n"), configfile); OK = false; } if (GetNextRes(R_DIRECTOR, NULL) == NULL) { Jmsg1(NULL, M_ERROR, 0, _("No Director resource defined in %s. Cannot continue.\n"), configfile); OK = false; } if (GetNextRes(R_DEVICE, NULL) == NULL){ Jmsg1(NULL, M_ERROR, 0, _("No Device resource defined in %s. Cannot continue.\n"), configfile); OK = false; } if (me && !me->messages) { me->messages = (MSGS *)GetNextRes(R_MSGS, NULL); if (!me->messages) { Jmsg1(NULL, M_ERROR, 0, _("No Messages resource defined in %s. Cannot continue.\n"), configfile); OK = false; } } if (me && !me->working_directory) { Jmsg1(NULL, M_ERROR, 0, _("No Working Directory defined in %s. 
Cannot continue.\n"), configfile); OK = false; } DIRRES *director; STORES *store; foreach_res(store, R_STORAGE) { /* tls_require implies tls_enable */ if (store->tls_require) { if (have_tls) { if (store->tls_certfile || store->tls_keyfile) { store->tls_enable = true; } } else { Jmsg(NULL, M_FATAL, 0, _("TLS required but not configured in Bacula.\n")); OK = false; continue; } } tls_needed = store->tls_enable || store->tls_authenticate; if (!store->tls_certfile && tls_needed) { Jmsg(NULL, M_FATAL, 0, _("\"TLS Certificate\" file not defined for Storage \"%s\" in %s.\n"), store->hdr.name, configfile); OK = false; } if (!store->tls_keyfile && tls_needed) { Jmsg(NULL, M_FATAL, 0, _("\"TLS Key\" file not defined for Storage \"%s\" in %s.\n"), store->hdr.name, configfile); OK = false; } if ((!store->tls_ca_certfile && !store->tls_ca_certdir) && tls_needed && store->tls_verify_peer) { Jmsg(NULL, M_FATAL, 0, _("Neither \"TLS CA Certificate File\"" " or \"TLS CA Certificate Dir\" are defined for Storage \"%s\" in %s." " At least one CA certificate store is required" " when using \"TLS Verify Peer\".\n"), store->hdr.name, configfile); OK = false; } } foreach_res(director, R_DIRECTOR) { /* tls_require implies tls_enable */ if (director->tls_require) { if (director->tls_certfile || director->tls_keyfile) { director->tls_enable = true; } } tls_needed = director->tls_enable || director->tls_authenticate; if (!director->tls_certfile && tls_needed) { Jmsg(NULL, M_FATAL, 0, _("\"TLS Certificate\" file not defined for Director \"%s\" in %s.\n"), director->hdr.name, configfile); OK = false; } if (!director->tls_keyfile && tls_needed) { Jmsg(NULL, M_FATAL, 0, _("\"TLS Key\" file not defined for Director \"%s\" in %s.\n"), director->hdr.name, configfile); OK = false; } if ((!director->tls_ca_certfile && !director->tls_ca_certdir) && tls_needed && director->tls_verify_peer) { Jmsg(NULL, M_FATAL, 0, _("Neither \"TLS CA Certificate File\"" " or \"TLS CA Certificate Dir\" are defined for Director \"%s\" in %s." " At least one CA certificate store is required" " when using \"TLS Verify Peer\".\n"), director->hdr.name, configfile); OK = false; } } CLOUD *cloud; /* TODO: Can use a table */ foreach_res(cloud, R_CLOUD) { if (cloud->driver_type == C_S3_DRIVER || cloud->driver_type == C_FILE_DRIVER) { if (cloud->host_name == NULL) { Jmsg(NULL, M_FATAL, 0, _("Failed to initialize Cloud. Hostname not defined for Cloud \"%s\"\n"), cloud->hdr.name); OK = false; } } if (cloud->driver_type == C_WAS_DRIVER || cloud->driver_type == C_S3_DRIVER) { if (cloud->access_key == NULL) { Jmsg(NULL, M_FATAL, 0, _("Failed to initialize Cloud. AccessKey not set for Cloud \"%s\"\n"), cloud->hdr.name); OK = false; } if (cloud->secret_key == NULL) { Jmsg(NULL, M_FATAL, 0, _("Failed to initialize Cloud. SecretKey not set for Cloud \"%s\"\n"), cloud->hdr.name); OK = false; } } } #ifdef SD_DEDUP_SUPPORT DEDUPRES *dedup; foreach_res(dedup, R_DEDUP) { if (strcmp(dedup->hdr.name, "*Dedup*" /*default_legacy_dedupengine*/) == 0) { break; /* this is the dummy "*Dedup* resource, test should be done in Storage */ } if (dedup->driver_type == D_LEGACY_DRIVER) { if (dedup->dedup_dir == NULL) { Jmsg(NULL, M_FATAL, 0, _("Failed to initialize Dedup Legacy. DedupDirectory not defined for Dedup \"%s\"\n"), dedup->hdr.name); OK = false; } } else if (dedup->driver_type == D_DEDUP2_DRIVER) { if (dedup->dedup_dir == NULL) { Jmsg(NULL, M_FATAL, 0, _("Failed to initialize Dedup2 . 
DedupDirectory not defined for Dedup \"%s\"\n"), dedup->hdr.name); OK = false; } } } #endif foreach_res(changer, R_AUTOCHANGER) { foreach_alist(device, changer->device) { device->cap_bits |= CAP_AUTOCHANGER; } } return OK; } /* Clean up and then exit */ void terminate_stored(int sig) { static bool in_here = false; if (in_here) { /* prevent loops */ bmicrosleep(2, 0); /* yield */ exit(1); } in_here = true; debug_level = 0; /* turn off any debug */ if (configfile) { free(configfile); configfile = NULL; } if (config) { delete config; config = NULL; } if (debug_level > 10) { print_memory_pool_stats(); } term_msg(); free(res_head); res_head = NULL; close_memory_pool(); //sm_dump(false); /* dump orphaned buffers */ exit(sig); } static void sendit(void *sock, const char *fmt, ...) { char buf[3000]; va_list arg_ptr; va_start(arg_ptr, fmt); bvsnprintf(buf, sizeof(buf), (char *)fmt, arg_ptr); va_end(arg_ptr); fputs(buf, stdout); fflush(stdout); } bacula-15.0.3/src/stored/s3_driver.h0000644000175000017500000000502114771010173017020 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Routines for writing to the Cloud using S3 protocol. * * Written by Kern Sibbald, May MMXVI * */ #ifndef _S3_DRV_H #define _S3_DRV_H #include "bacula.h" #include "stored.h" #ifdef HAVE_LIBS3 #include #include "cloud_driver.h" /* get base class definitions */ class cloud_glacier; class s3_driver: public cloud_driver { private: S3BucketContext s3ctx; /* Main S3 bucket context */ cloud_glacier *m_glacier_driver; public: s3_driver(); ~s3_driver(); void make_cloud_filename(POOLMEM *&filename, const char *VolumeName, uint32_t part); bool init(CLOUD *cloud, POOLMEM *&err); bool start_of_job(POOLMEM *&err); bool term(POOLMEM *&err); bool end_of_job(POOLMEM *&err); bool truncate_cloud_volume(const char *VolumeName, ilist *trunc_parts, cancel_callback *cancel_cb, POOLMEM *&err); bool clean_cloud_volume(const char *VolumeName, cleanup_cb_type *cb, cleanup_ctx_type *context, cancel_callback *cancel_cb, POOLMEM *&err); bool copy_cache_part_to_cloud(transfer *xfer); bool move_cloud_part(const char *VolumeName, uint32_t apart , const char *to, cancel_callback *cancel_cb, POOLMEM *&err, int& exists); int copy_cloud_part_to_cache(transfer *xfer); bool restore_cloud_object(transfer *xfer, const char *cloud_fname); bool is_waiting_on_server(transfer *xfer); bool get_cloud_volume_parts_list(const char* VolumeName, ilist *parts, cancel_callback *cancel_cb, POOLMEM *&err); bool get_cloud_volumes_list(alist *volumes, cancel_callback *cancel_cb, POOLMEM *&err); S3Status put_object(transfer *xfer, const char *cache_fname, const char *cloud_fname); bool retry_put_object(S3Status status, int retry); int get_cloud_object(transfer *xfer, const char *cloud_fname, const char *cache_fname); private: bool get_one_cloud_volume_part(const char* part_path_name, ilist *parts, POOLMEM *&err); }; #endif /* HAVE_LIBS3 */ #endif /* _S3_DRV_H */ 
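The s3_driver class above is one concrete backend behind the generic cloud_driver interface. The following stand-alone C++ sketch illustrates that abstract-driver pattern in miniature; every name in it (BlobDriver, S3StubDriver, upload_part, the <Volume>/part.<n> key format) is invented for illustration and is not Bacula's actual API.

// Minimal, self-contained illustration of the abstract-driver pattern that
// cloud_driver/s3_driver follow.  Illustrative only -- not Bacula code.
#include <stdio.h>
#include <map>
#include <string>
#include <vector>

// Hypothetical driver interface, loosely mirroring the init()/term() and
// part upload/download entry points declared in s3_driver.h.
class BlobDriver {
public:
   virtual ~BlobDriver() {}
   virtual bool init(const std::string &bucket, std::string &err) = 0;
   virtual bool upload_part(const std::string &volume, unsigned part,
                            const std::vector<char> &data, std::string &err) = 0;
   virtual bool download_part(const std::string &volume, unsigned part,
                              std::vector<char> &data, std::string &err) = 0;
   virtual void term() = 0;
};

// A stub "S3" backend that keeps parts in memory; the caller stays
// backend-agnostic, just as the SD does with real cloud drivers.
class S3StubDriver : public BlobDriver {
   std::map<std::string, std::vector<char> > objects;
   std::string bucket;
   static std::string key(const std::string &volume, unsigned part) {
      char buf[256];
      snprintf(buf, sizeof(buf), "%s/part.%u", volume.c_str(), part);
      return std::string(buf);
   }
public:
   bool init(const std::string &b, std::string &) { bucket = b; return true; }
   bool upload_part(const std::string &volume, unsigned part,
                    const std::vector<char> &data, std::string &) {
      objects[key(volume, part)] = data;          /* "PUT object" */
      return true;
   }
   bool download_part(const std::string &volume, unsigned part,
                      std::vector<char> &data, std::string &err) {
      std::map<std::string, std::vector<char> >::const_iterator it =
         objects.find(key(volume, part));
      if (it == objects.end()) { err = "no such part"; return false; }
      data = it->second;                          /* "GET object" */
      return true;
   }
   void term() {}
};

int main()
{
   S3StubDriver drv;
   std::string err;
   std::vector<char> part(4, 'x'), back;
   if (!drv.init("backup-bucket", err)) return 1;
   drv.upload_part("Vol0001", 1, part, err);
   if (drv.download_part("Vol0001", 1, back, err)) {
      printf("fetched %u bytes for Vol0001/part.1\n", (unsigned)back.size());
   }
   drv.term();
   return 0;
}

In Bacula itself the storage daemon only ever talks to the cloud_driver base class, which is how the S3, WAS and file cloud backends checked in check_resources() can be swapped without touching the device code.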
bacula-15.0.3/src/stored/lintape.h0000644000175000017500000000522414771010173016561 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #ifndef LIN_TAPE_H #define LIN_TAPE_H #include "bacula.h" /* Interface to request information to a tape drive * Technical information found on IBM web site * https://www.ibm.com/support/knowledgecenter/STAKKZ/dd_pr_kc/con_a89p4_lnx_sioc_reqsense.html * TODO: May not work SPARC CPU */ #if defined(HAVE_LINUX_OS) #ifndef SIOC_REQSENSE # define SIOC_REQSENSE _IOR('C', 0x02, lintape_request_sense) #endif struct lintape_request_sense { uint8_t valid :1, /* sense data is valid */ err_code :7; /* error code */ uint8_t segnum; /* segment number */ uint32_t fm :1, /* filemark detected */ eom :1, /* end of medium */ ili :1, /* incorrect length indicator */ resvd1 :1, /* reserved */ key :4; /* sense key */ int32_t info; /* information bytes */ uint8_t addlen; /* additional sense length */ uint32_t cmdinfo; /* command specific information */ uint8_t asc; /* additional sense code */ uint8_t ascq; /* additional sense code qualifier */ uint8_t fru; /* field replaceable unit code */ uint32_t sksv :1, /* sense key specific valid */ cd :1, /* control/data */ resvd2 :2, /* reserved */ bpv :1, /* bit pointer valid */ sim :3; /* system information message */ uint8_t field[2]; /* field pointer */ uint8_t vendor[109]; /* vendor specific (padded to 127) */ }; #endif // HAVE_LINUX_OS #endif // LIN_TAPE_H bacula-15.0.3/src/stored/aligned_dev.c0000644000175000017500000000125514771010173017361 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Written by: Kern Sibbald, March MMXIII */ bacula-15.0.3/src/stored/autochanger.c0000644000175000017500000006511214771010173017422 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * * Routines for handling the autochanger. 
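 *  The main entry points defined here are autoload_device() to load a
 *  Volume by its catalog Slot, get_autochanger_loaded_slot() to query what
 *  a drive currently holds, unload_autochanger() and unload_dev() to empty
 *  a drive, autochanger_cmd() to run console changer commands, and
 *  edit_device_codes() to expand the %-codes of the configured
 *  Changer Command.
 *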
* * Written by Kern Sibbald, August MMII * */ #include "bacula.h" /* pull in global headers */ #include "stored.h" /* pull in Storage Deamon headers */ static const int dbglvl = 60; /* Forward referenced functions */ static void lock_changer(DCR *dcr); static void unlock_changer(DCR *dcr); static bool unload_other_drive(DCR *dcr, int slot, bool writing); bool DCR::is_virtual_autochanger() { return device->changer_command && (device->changer_command[0] == 0 || strcmp(device->changer_command, "/dev/null") == 0); } /* Init all the autochanger resources found */ bool init_autochangers() { bool OK = true; AUTOCHANGER *changer; /* Ensure that the media_type for each device is the same */ foreach_res(changer, R_AUTOCHANGER) { DEVRES *device; foreach_alist(device, changer->device) { /* * If the device does not have a changer name or changer command * defined, used the one from the Autochanger resource */ if (!device->changer_name && changer->changer_name) { device->changer_name = bstrdup(changer->changer_name); } if (!device->changer_command && changer->changer_command) { device->changer_command = bstrdup(changer->changer_command); } /* ***BEEF*** */ if (!device->lock_command && changer->lock_command) { device->lock_command = bstrdup(changer->lock_command); } if (!device->changer_name) { Jmsg(NULL, M_ERROR, 0, _("No Changer Name given for device %s. Cannot continue.\n"), device->hdr.name); OK = false; } if (!device->changer_command) { Jmsg(NULL, M_ERROR, 0, _("No Changer Command given for device %s. Cannot continue.\n"), device->hdr.name); OK = false; } } } return OK; } /* * Called here to do an autoload using the autochanger, if * configured, and if a Slot has been defined for this Volume. * On success this routine loads the indicated tape, but the * label is not read, so it must be verified. * * Note if dir is not NULL, it is the console requesting the * autoload for labeling, so we respond directly to the * dir bsock. * * Returns: 1 on success * 0 on failure (no changer available) * -1 on error on autochanger */ int autoload_device(DCR *dcr, bool writing, BSOCK *dir) { JCR *jcr = dcr->jcr; DEVICE * volatile dev = dcr->dev; char *new_vol_name = dcr->VolumeName; int slot; int drive = dev->drive_index; int rtn_stat = -1; /* error status */ POOLMEM *changer; if (!dev->is_autochanger()) { Dmsg1(dbglvl, "Device %s is not an autochanger\n", dev->print_name()); return 0; } /* An empty ChangerCommand => virtual disk autochanger */ if (dcr->is_virtual_autochanger()) { Dmsg0(dbglvl, "ChangerCommand=0, virtual disk changer\n"); return 1; /* nothing to load */ } slot = dcr->VolCatInfo.InChanger ? dcr->VolCatInfo.Slot : 0; /* * Handle autoloaders here. If we cannot autoload it, we * will return 0 so that the sysop will be asked to load it. */ if (writing && slot <= 0) { if (dir) { return 0; /* For user, bail out right now */ } /* ***FIXME*** this really should not be here */ if (dir_find_next_appendable_volume(dcr)) { slot = dcr->VolCatInfo.InChanger ? 
dcr->VolCatInfo.Slot : 0; } else { slot = 0; dev->clear_wait(); } } Dmsg4(dbglvl, "Want slot=%d drive=%d InChgr=%d Vol=%s\n", dcr->VolCatInfo.Slot, drive, dcr->VolCatInfo.InChanger, dcr->getVolCatName()); changer = get_pool_memory(PM_FNAME); if (slot <= 0) { /* Suppress info when polling */ if (!dev->poll) { Jmsg(jcr, M_INFO, 0, _("No slot defined in catalog (slot=%d) for Volume \"%s\" on %s.\n"), slot, dcr->getVolCatName(), dev->print_name()); Jmsg(jcr, M_INFO, 0, _("Cartridge change or \"update slots\" may be required.\n")); } rtn_stat = 0; } else if (!dcr->device->changer_name) { /* Suppress info when polling */ if (!dev->poll) { Jmsg(jcr, M_INFO, 0, _("No \"Changer Device\" for %s. Manual load of Volume may be required.\n"), dev->print_name()); } rtn_stat = 0; } else if (!dcr->device->changer_command) { /* Suppress info when polling */ if (!dev->poll) { Jmsg(jcr, M_INFO, 0, _("No \"Changer Command\" for %s. Manual load of Volume may be required.\n"), dev->print_name()); } rtn_stat = 0; } else { /* Attempt to load the Volume */ uint32_t timeout = dcr->device->max_changer_wait; int loaded, status; loaded = get_autochanger_loaded_slot(dcr); if (loaded < 0) { /* Autochanger error, try again */ loaded = get_autochanger_loaded_slot(dcr); } Dmsg2(dbglvl, "Found loaded=%d drive=%d\n", loaded, drive); if (loaded <= 0 || loaded != slot) { POOL_MEM results(PM_MESSAGE); /* Unload anything in our drive */ if (!unload_autochanger(dcr, loaded)) { goto bail_out; } /* Make sure desired slot is unloaded */ if (!unload_other_drive(dcr, slot, writing)) { goto bail_out; } /* * Load the desired cassette */ lock_changer(dcr); Dmsg2(dbglvl, "Doing changer load slot %d %s\n", slot, dev->print_name()); Jmsg(jcr, M_INFO, 0, _("3304 Issuing autochanger \"load Volume %s, Slot %d, Drive %d\" command.\n"), new_vol_name, slot, drive); Dmsg3(dbglvl, "3304 Issuing autochanger \"load Volume %s, Slot %d, Drive %d\" command.\n", new_vol_name, slot, drive); dcr->VolCatInfo.Slot = slot; /* slot to be loaded */ edit_device_codes(dcr, &changer, dcr->device->changer_command, "load"); dev->close(dcr); Dmsg1(dbglvl, "Run program=%s\n", changer); status = run_program_full_output(changer, timeout, results.addr()); if (status == 0) { Jmsg(jcr, M_INFO, 0, _("3305 Autochanger \"load Volume %s, Slot %d, Drive %d\", status is OK.\n"), new_vol_name, slot, drive); Dmsg3(dbglvl, "OK: load volume %s, slot %d, drive %d.\n", new_vol_name, slot, drive); bstrncpy(dev->LoadedVolName, new_vol_name, sizeof(dev->LoadedVolName)); dev->set_slot(slot); /* set currently loaded slot */ if (dev->vol) { /* We just swapped this Volume so it cannot be swapping any more */ dev->vol->clear_swapping(); } } else { berrno be; be.set_errno(status); Dmsg5(dbglvl, "Error: load Volume %s, Slot %d, Drive %d, bad stats=%s.\nResults=%s\n", new_vol_name, slot, drive, be.bstrerror(), results.c_str()); Jmsg(jcr, M_FATAL, 0, _("3992 Bad autochanger \"load Volume %s Slot %d, Drive %d\": " "ERR=%s.\nResults=%s\n"), new_vol_name, slot, drive, be.bstrerror(), results.c_str()); rtn_stat = -1; /* hard error */ dev->clear_slot(); /* mark unknown */ } unlock_changer(dcr); } else { status = 0; /* we got what we want */ dev->set_slot(slot); /* set currently loaded slot */ bstrncpy(dev->LoadedVolName, new_vol_name, sizeof(dev->LoadedVolName)); } Dmsg1(dbglvl, "After changer, status=%d\n", status); if (status == 0) { /* did we succeed? 
*/ rtn_stat = 1; /* tape loaded by changer */ } } free_pool_memory(changer); return rtn_stat; bail_out: free_pool_memory(changer); return -1; } /* * Returns: -1 if error from changer command * slot otherwise * Note, this is safe to do without releasing the drive * since it does not attempt load/unload a slot. */ int get_autochanger_loaded_slot(DCR *dcr) { JCR *jcr = dcr->jcr; DEVICE *dev = dcr->dev; int status, loaded; uint32_t timeout = dcr->device->max_changer_wait; int drive = dcr->dev->drive_index; POOL_MEM results(PM_MESSAGE); POOLMEM *changer; if (!dev->is_autochanger()) { return -1; } if (!dcr->device->changer_command) { return -1; } if (dev->get_slot() > 0 && dev->has_cap(CAP_ALWAYSOPEN)) { Dmsg1(dbglvl, "Return cached slot=%d\n", dev->get_slot()); return dev->get_slot(); } /* Virtual disk autochanger */ if (dcr->is_virtual_autochanger()) { return 1; } /* Find out what is loaded, zero means device is unloaded */ changer = get_pool_memory(PM_FNAME); lock_changer(dcr); /* Suppress info when polling */ if (!dev->poll && chk_dbglvl(1)) { Jmsg(jcr, M_INFO, 0, _("3301 Issuing autochanger \"loaded? drive %d\" command.\n"), drive); } edit_device_codes(dcr, &changer, dcr->device->changer_command, "loaded"); Dmsg1(dbglvl, "Run program=%s\n", changer); status = run_program_full_output(changer, timeout, results.addr()); Dmsg3(dbglvl, "run_prog: %s stat=%d result=%s", changer, status, results.c_str()); if (status == 0) { loaded = str_to_int32(results.c_str()); if (loaded > 0) { /* Suppress info when polling */ if (!dev->poll && chk_dbglvl(1)) { Jmsg(jcr, M_INFO, 0, _("3302 Autochanger \"loaded? drive %d\", result is Slot %d.\n"), drive, loaded); } dev->set_slot(loaded); } else { /* Suppress info when polling */ if (!dev->poll && chk_dbglvl(1)) { Jmsg(jcr, M_INFO, 0, _("3302 Autochanger \"loaded? drive %d\", result: nothing loaded.\n"), drive); } if (loaded == 0) { /* no slot loaded */ dev->set_slot(0); } else { /* probably some error */ dev->clear_slot(); /* unknown */ } } } else { berrno be; be.set_errno(status); Jmsg(jcr, M_INFO, 0, _("3991 Bad autochanger \"loaded? drive %d\" command: " "ERR=%s.\nResults=%s\n"), drive, be.bstrerror(), results.c_str()); Dmsg3(dbglvl, "Error: autochanger loaded? drive %d " "ERR=%s.\nResults=%s\n", drive, be.bstrerror(), results.c_str()); loaded = -1; /* force unload */ dev->clear_slot(); /* slot unknown */ } unlock_changer(dcr); free_pool_memory(changer); return loaded; } static void lock_changer(DCR *dcr) { AUTOCHANGER *changer_res = dcr->device->changer_res; if (changer_res) { int errstat; Dmsg1(dbglvl, "Locking changer %s\n", changer_res->hdr.name); if ((errstat=rwl_writelock(&changer_res->changer_lock)) != 0) { berrno be; Jmsg(dcr->jcr, M_ERROR_TERM, 0, _("Lock failure on autochanger. ERR=%s\n"), be.bstrerror(errstat)); } } } static void unlock_changer(DCR *dcr) { AUTOCHANGER *changer_res = dcr->device->changer_res; if (changer_res) { int errstat; Dmsg1(dbglvl, "Unlocking changer %s\n", changer_res->hdr.name); if ((errstat=rwl_writeunlock(&changer_res->changer_lock)) != 0) { berrno be; Jmsg(dcr->jcr, M_ERROR_TERM, 0, _("Unlock failure on autochanger. 
ERR=%s\n"), be.bstrerror(errstat)); } } } /* * Unload the volume, if any, in this drive * On entry: loaded == 0 -- nothing to do * loaded < 0 -- check if anything to do * loaded > 0 -- load slot == loaded */ bool unload_autochanger(DCR *dcr, int loaded) { DEVICE *dev = dcr->dev; JCR *jcr = dcr->jcr; const char *old_vol_name; uint32_t timeout = dcr->device->max_changer_wait; bool ok = true; if (loaded == 0) { return true; } if (!dev->is_autochanger() || !dcr->device->changer_name || !dcr->device->changer_command) { return false; } /* Virtual disk autochanger */ if (dcr->is_virtual_autochanger()) { dev->clear_unload(); return true; } lock_changer(dcr); if (dev->LoadedVolName[0]) { old_vol_name = dev->LoadedVolName; } else { old_vol_name = "*Unknown*"; } if (loaded < 0) { loaded = get_autochanger_loaded_slot(dcr); if (loaded < 0) { /* try again, maybe autochanger error */ loaded = get_autochanger_loaded_slot(dcr); } } if (loaded > 0) { POOL_MEM results(PM_MESSAGE); POOLMEM *changer = get_pool_memory(PM_FNAME); int slot; Jmsg(jcr, M_INFO, 0, _("3307 Issuing autochanger \"unload Volume %s, Slot %d, Drive %d\" command.\n"), old_vol_name, loaded, dev->drive_index); Dmsg3(dbglvl, "3307 Issuing autochanger \"unload Volume %s, Slot %d, Drive %d\" command.\n", old_vol_name, loaded, dev->drive_index); slot = dcr->VolCatInfo.Slot; dcr->VolCatInfo.Slot = loaded; edit_device_codes(dcr, &changer, dcr->device->changer_command, "unload"); dev->close(dcr); Dmsg1(dbglvl, "Run program=%s\n", changer); int stat = run_program_full_output(changer, timeout, results.addr()); dcr->VolCatInfo.Slot = slot; if (stat != 0) { berrno be; be.set_errno(stat); Jmsg(jcr, M_INFO, 0, _("3995 Bad autochanger \"unload Volume %s, Slot %d, Drive %d\": " "ERR=%s\nResults=%s\n"), old_vol_name, loaded, dev->drive_index, be.bstrerror(), results.c_str()); Dmsg5(dbglvl, "Error: unload Volume %s, Slot %d, Drive %d, bad stats=%s.\nResults=%s\n", old_vol_name, loaded, dev->drive_index, be.bstrerror(), results.c_str()); ok = false; dev->clear_slot(); /* unknown */ } else { dev->set_slot(0); /* unload is OK, mark nothing loaded */ dev->clear_unload(); dev->LoadedVolName[0] = 0; /* clear loaded volume name */ } free_pool_memory(changer); } unlock_changer(dcr); if (ok) { free_volume(dev); } return ok; } /* * Unload the slot if mounted in a different drive */ static bool unload_other_drive(DCR *dcr, int slot, bool writing) { DEVICE *dev = NULL; DEVICE *dev_save; bool found = false; AUTOCHANGER *changer = dcr->dev->device->changer_res; DEVRES *device; int retries = 0; /* wait for device retries */ int loaded; if (!changer || !changer->device) { return false; } if (changer->device->size() == 1) { return true; } /* * We look for the slot number corresponding to the tape * we want in other drives, and if possible, unload * it. */ Dmsg1(dbglvl, "Begin wiffle through devices looking for slot=%d\n", slot); /* * foreach_alist(device, changer->device) { * * The above fails to loop through all devices. It is * probably a compiler bug. 
*/ for (int i=0; i < changer->device->size(); i++) { device = (DEVRES *)changer->device->get(i); dev = device->dev; if (!dev) { Dmsg0(dbglvl, "No dev attached to device\n"); continue; } dev_save = dcr->dev; dcr->set_dev(dev); loaded = get_autochanger_loaded_slot(dcr); dcr->set_dev(dev_save); if (loaded > 0) { Dmsg4(dbglvl, "Want slot=%d, drive=%d loaded=%d dev=%s\n", slot, dev->drive_index, loaded, dev->print_name()); if (loaded == slot) { found = true; break; } } else { Dmsg4(dbglvl, "After slot=%d drive=%d loaded=%d dev=%s\n", slot, dev->drive_index, loaded, dev->print_name()); } } Dmsg1(dbglvl, "End wiffle through devices looking for slot=%d\n", slot); if (!found) { Dmsg1(dbglvl, "Slot=%d not found in another device\n", slot); return true; } else { Dmsg3(dbglvl, "Slot=%d drive=%d found in dev=%s\n", slot, dev->drive_index, dev->print_name()); } /* * The Volume we want is on another device. * If we want the Volume to read it, and the other device where the * Volume is currently is not open, we simply unload the Volume then * then the subsequent code will load it in the desired drive. * If want to write or the device is open, we attempt to wait for * the Volume to become available. */ if (writing || dev->is_open()) { if (dev->is_busy()) { Dmsg4(dbglvl, "Vol %s for dev=%s in use dev=%s slot=%d\n", dcr->VolumeName, dcr->dev->print_name(), dev->print_name(), slot); } for (int i=0; i < 3; i++) { if (dev->is_busy()) { Dmsg0(40, "Device is busy. Calling wait_for_device()\n"); wait_for_device(dcr, retries); continue; } break; } if (dev->is_busy()) { Jmsg(dcr->jcr, M_WARNING, 0, _("Volume \"%s\" wanted on %s is in use by device %s\n"), dcr->VolumeName, dcr->dev->print_name(), dev->print_name()); Dmsg4(dbglvl, "Vol %s for dev=%s is busy dev=%s slot=%d\n", dcr->VolumeName, dcr->dev->print_name(), dev->print_name(), dev->get_slot()); Dmsg2(dbglvl, "num_writ=%d reserv=%d\n", dev->num_writers, dev->num_reserved()); volume_unused(dcr); return false; } } return unload_dev(dcr, dev); } /* * Unconditionally unload a specified drive */ bool unload_dev(DCR *dcr, DEVICE *dev) { JCR *jcr = dcr->jcr; bool ok = true; uint32_t timeout = dcr->device->max_changer_wait; AUTOCHANGER *changer = dcr->dev->device->changer_res; const char *old_vol_name = dcr->VolumeName; DEVICE *save_dev; int save_slot; if (!changer) { return false; } save_dev = dcr->dev; /* save dcr device */ dcr->set_dev(dev); /* temporarily point dcr at other device */ get_autochanger_loaded_slot(dcr); /* Fail if we have no slot to unload */ if (dev->get_slot() <= 0) { if (dev->get_slot() < 0) { Dmsg1(dbglvl, "Cannot unload, slot not defined. 
dev=%s\n", dev->print_name()); } dcr->set_dev(save_dev); return false; } save_slot = dcr->VolCatInfo.Slot; dcr->VolCatInfo.Slot = dev->get_slot(); POOLMEM *changer_cmd = get_pool_memory(PM_FNAME); POOL_MEM results(PM_MESSAGE); if (old_vol_name[0] == 0) { if (dev->LoadedVolName[0]) { old_vol_name = dev->LoadedVolName; } else { old_vol_name = "*Unknown*"; } } lock_changer(dcr); Jmsg(jcr, M_INFO, 0, _("3307 Issuing autochanger \"unload Volume %s, Slot %d, Drive %d\" command.\n"), old_vol_name, dev->get_slot(), dev->drive_index); Dmsg3(0/*dbglvl*/, "Issuing autochanger \"unload Volume %s, Slot %d, Drive %d\" command.\n", old_vol_name, dev->get_slot(), dev->drive_index); edit_device_codes(dcr, &changer_cmd, dcr->device->changer_command, "unload"); dev->close(dcr); Dmsg2(dbglvl, "close dev=%s reserve=%d\n", dev->print_name(), dev->num_reserved()); Dmsg1(dbglvl, "Run program=%s\n", changer_cmd); int stat = run_program_full_output(changer_cmd, timeout, results.addr()); dcr->VolCatInfo.Slot = save_slot; if (stat != 0) { berrno be; be.set_errno(stat); Jmsg(jcr, M_INFO, 0, _("3997 Bad autochanger \"unload Volume %s, Slot %d, Drive %d\": ERR=%s.\n"), old_vol_name, dev->get_slot(), dev->drive_index, be.bstrerror()); Dmsg5(dbglvl, "Error: unload Volume %s, Slot %d, Drive %d bad stats=%s.\nResults=%s\n", old_vol_name, dev->get_slot(), dev->drive_index, be.bstrerror(), results.c_str()); ok = false; dev->clear_slot(); /* unknown */ } else { Dmsg3(dbglvl, "Volume %s, Slot %d unloaded %s\n", old_vol_name, dev->get_slot(), dev->print_name()); dev->set_slot(0); /* unload OK, mark nothing loaded */ dev->clear_unload(); dev->LoadedVolName[0] = 0; } unlock_changer(dcr); if (ok) { free_volume(dev); } dcr->set_dev(save_dev); free_pool_memory(changer_cmd); return ok; } /* * List the Volumes that are in the autoloader possibly * with their barcodes. * We assume that it is always the Console that is calling us. 
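 *   The sub-commands handled below are "drives", "list", "listall" and
 *   "slots"; results are sent straight back over the dir socket (the raw
 *   changer script output for list/listall, a single drives=/slots= line
 *   for the others).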
*/ bool autochanger_cmd(DCR *dcr, BSOCK *dir, const char *cmd) { DEVICE *dev = dcr->dev; uint32_t timeout = dcr->device->max_changer_wait; POOLMEM *changer; BPIPE *bpipe; int len = sizeof_pool_memory(dir->msg) - 1; int stat; if (!dev->is_autochanger() || !dcr->device->changer_name || !dcr->device->changer_command) { if (strcasecmp(cmd, "drives") == 0) { dir->fsend("drives=1\n"); } dir->fsend(_("3993 Device %s not an autochanger device.\n"), dev->print_name()); return false; } if (strcasecmp(cmd, "drives") == 0) { AUTOCHANGER *changer_res = dcr->device->changer_res; int drives = 1; if (changer_res && changer_res->device) { drives = changer_res->device->size(); } dir->fsend("drives=%d\n", drives); Dmsg1(dbglvl, "drives=%d\n", drives); return true; } /* If listing, reprobe changer */ if (bstrcasecmp(cmd, "list") || bstrcasecmp(cmd, "listall")) { dcr->dev->set_slot(0); get_autochanger_loaded_slot(dcr); } changer = get_pool_memory(PM_FNAME); lock_changer(dcr); /* Now issue the command */ edit_device_codes(dcr, &changer, dcr->device->changer_command, cmd); dir->fsend(_("3306 Issuing autochanger \"%s\" command.\n"), cmd); bpipe = open_bpipe(changer, timeout, "r"); if (!bpipe) { dir->fsend(_("3996 Open bpipe to changer failed: %s.\n"), changer); goto bail_out; /* TODO: check if we need to return false */ } if (bstrcasecmp(cmd, "list") || bstrcasecmp(cmd, "listall")) { /* Get output from changer */ while (fgets(dir->msg, len, bpipe->rfd)) { dir->msglen = strlen(dir->msg); Dmsg1(dbglvl, "msg); dir->send(); } } else if (strcasecmp(cmd, "slots") == 0 ) { char buf[100], *p; /* For slots command, read a single line */ buf[0] = 0; fgets(buf, sizeof(buf)-1, bpipe->rfd); buf[sizeof(buf)-1] = 0; /* Strip any leading space in front of # of slots */ for (p=buf; B_ISSPACE(*p); p++) { } dir->fsend("slots=%s", p); Dmsg1(dbglvl, "msg); } stat = close_bpipe(bpipe); if (stat != 0) { berrno be; be.set_errno(stat); dir->fsend(_("Autochanger error: ERR=%s\n"), be.bstrerror()); } bail_out: unlock_changer(dcr); free_pool_memory(changer); return true; } /* * Edit codes into ChangerCommand * %% = % * %a = archive device name * %c = changer device name * %d = changer drive index * %f = Client's name * %i = JobId * %j = Job name * %l = archive control channel name * %o = command * %s = Slot base 0 * %S = Slot base 1 * %v = Volume name * %V = Volume name from dcr->VolCatInfo first * * * omsg = edited output message * imsg = input string containing edit codes (%x) * cmd = command string (load, unload, ...) * */ void edit_device_codes(DCR *dcr, POOLMEM **omsg, const char *imsg, const char *cmd) { const char *p; const char *str; char add[20]; **omsg = '\0'; if (imsg == NULL) { return; } Dmsg1(1800, "edit_device_codes: %s\n", imsg); for (p=imsg; *p; p++) { if (*p == '%') { switch (*++p) { case '%': str = "%"; break; case 'a': str = dcr->dev->archive_name(); break; case 'c': str = NPRT(dcr->device->changer_name); break; case 'l': str = NPRT(dcr->device->control_name); break; case 'd': sprintf(add, "%u", dcr->dev->drive_index); str = add; break; case 'o': str = NPRT(cmd); break; case 's': sprintf(add, "%d", dcr->VolCatInfo.Slot - 1); str = add; break; case 'S': sprintf(add, "%d", dcr->VolCatInfo.Slot); str = add; break; case 'i': sprintf(add, "%d", dcr->jcr ? dcr->jcr->JobId : 0); str = add; break; case 'j': /* Job name */ str = dcr->jcr ? 
dcr->jcr->Job : ""; break; case 'v': if (dcr->dev->LoadedVolName[0]) { str = dcr->dev->LoadedVolName; } else if (dcr->VolCatInfo.VolCatName[0]) { str = dcr->VolCatInfo.VolCatName; } else if (dcr->VolumeName[0]) { str = dcr->VolumeName; } else if (dcr->dev->vol && dcr->dev->vol->vol_name) { str = dcr->dev->vol->vol_name; } else { str = dcr->dev->VolHdr.VolumeName; } break; case 'V': if (dcr->VolCatInfo.VolCatName[0]) { str = dcr->VolCatInfo.VolCatName; } else if (dcr->VolumeName[0]) { str = dcr->VolumeName; } else if (dcr->dev->LoadedVolName[0]) { str = dcr->dev->LoadedVolName; } else if (dcr->dev->vol && dcr->dev->vol->vol_name) { str = dcr->dev->vol->vol_name; } else { str = dcr->dev->VolHdr.VolumeName; } break; case 'f': str = NPRT(dcr->jcr->client_name); break; default: add[0] = '%'; add[1] = *p; add[2] = 0; str = add; break; } } else { add[0] = *p; add[1] = 0; str = add; } Dmsg1(1900, "add_str %s\n", str); pm_strcat(omsg, (char *)str); Dmsg1(1800, "omsg=%s\n", *omsg); } Dmsg1(800, "omsg=%s\n", *omsg); } bacula-15.0.3/src/stored/global.c0000644000175000017500000000205514771010173016357 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #include "bacula.h" #include "stored.h" STORES *me = NULL; /* our Global resource */ bool forge_on = false; /* proceed inspite of I/O errors */ pthread_mutex_t device_release_mutex = PTHREAD_MUTEX_INITIALIZER; pthread_cond_t wait_device_release = PTHREAD_COND_INITIALIZER; /* Determines if we run with capabilities required for APPEND and IMMUTABLE file attributes */ bool got_caps_needed = false; bacula-15.0.3/src/stored/ansi_label.c0000644000175000017500000003500214771010173017206 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * * ansi_label.c routines to handle ANSI (and perhaps one day IBM) * tape labels. * * Kern Sibbald, MMV * */ #include "bacula.h" /* pull in global headers */ #include "stored.h" /* pull in Storage Deamon headers */ /* Imported functions */ void ascii_to_ebcdic(char *dst, char *src, int count); void ebcdic_to_ascii(char *dst, char *src, int count); /* Forward referenced functions */ static char *ansi_date(time_t td, char *buf); static bool same_label_names(char *bacula_name, char *ansi_name); /* * We read an ANSI label and compare the Volume name. We require * a VOL1 record of 80 characters followed by a HDR1 record containing * BACULA.DATA in the filename field. 
We then read up to 3 more * header records (they are not required) and an EOF, at which * point, all is good. * * Returns: * VOL_OK Volume name OK * VOL_NO_LABEL No ANSI label on Volume * VOL_IO_ERROR I/O error on read * VOL_NAME_ERROR Wrong name in VOL1 record * VOL_LABEL_ERROR Probably an ANSI label, but something wrong * */ int read_ansi_ibm_label(DCR *dcr) { DEVICE * volatile dev = dcr->dev; JCR *jcr = dcr->jcr; char label[80]; /* tape label */ int stat, i; char *VolName = dcr->VolumeName; bool ok = false; /* * Read VOL1, HDR1, HDR2 labels, but ignore the data * If tape read the following EOF mark, on disk do * not read. */ Dmsg0(100, "Read ansi label.\n"); if (!dev->is_tape()) { return VOL_OK; } dev->label_type = B_BACULA_LABEL; /* assume Bacula label */ /* Read a maximum of 5 records VOL1, HDR1, ... HDR4 */ for (i=0; i < 6; i++) { do { stat = dev->read(label, sizeof(label)); } while (stat == -1 && errno == EINTR); if (stat < 0) { berrno be; dev->clrerror(-1); Dmsg1(100, "Read device got: ERR=%s\n", be.bstrerror()); Mmsg2(jcr->errmsg, _("Read error on device %s in ANSI label. ERR=%s\n"), dev->dev_name, be.bstrerror()); Jmsg(jcr, M_ERROR, 0, "%s", dev->errmsg); dev->VolCatInfo.VolCatErrors++; return VOL_IO_ERROR; } if (stat == 0) { if (dev->at_eof()) { dev->set_eot(); /* second eof, set eot bit */ Dmsg0(100, "EOM on ANSI label\n"); Mmsg0(jcr->errmsg, _("Insane! End of tape while reading ANSI label.\n")); return VOL_LABEL_ERROR; /* at EOM this shouldn't happen */ } else { dev->set_ateof(); /* set eof state */ } } switch (i) { case 0: /* Want VOL1 label */ if (stat == 80) { if (strncmp("VOL1", label, 4) == 0) { ok = true; dev->label_type = B_ANSI_LABEL; Dmsg0(100, "Got ANSI VOL1 label\n"); } else { /* Try EBCDIC */ ebcdic_to_ascii(label, label, sizeof(label)); if (strncmp("VOL1", label, 4) == 0) { ok = true;; dev->label_type = B_IBM_LABEL; Dmsg0(100, "Found IBM label.\n"); Dmsg0(100, "Got IBM VOL1 label\n"); } } } if (!ok) { Dmsg0(100, "No VOL1 label\n"); Mmsg0(jcr->errmsg, _("No VOL1 label while reading ANSI/IBM label.\n")); return VOL_NO_LABEL; /* No ANSI label */ } /* Compare Volume Names allow special wild card */ if (VolName && *VolName && *VolName != '*') { if (!same_label_names(VolName, &label[4])) { char *p = &label[4]; char *q; free_volume(dev); /* Store new Volume name */ q = dev->VolHdr.VolumeName; for (int j=0; *p != ' ' && j < 6; j++) { *q++ = *p++; } *q = 0; Dmsg0(100, "Call reserve_volume\n"); /* ***FIXME*** why is this reserve_volume() needed???? KES */ reserve_volume(dcr, dev->VolHdr.VolumeName); dev = dcr->dev; /* may have changed in reserve_volume */ Dmsg2(100, "Wanted ANSI Vol %s got %6s\n", VolName, dev->VolHdr.VolumeName); Mmsg2(jcr->errmsg, _("Wanted ANSI Volume \"%s\" got \"%s\"\n"), VolName, dev->VolHdr.VolumeName); return VOL_NAME_ERROR; } } break; case 1: if (dev->label_type == B_IBM_LABEL) { ebcdic_to_ascii(label, label, sizeof(label)); } if (stat != 80 || strncmp("HDR1", label, 4) != 0) { Dmsg0(100, "No HDR1 label\n"); Mmsg0(jcr->errmsg, _("No HDR1 label while reading ANSI label.\n")); return VOL_LABEL_ERROR; } if (strncmp("BACULA.DATA", &label[4], 11) != 0) { Dmsg1(100, "HD1 not Bacula label. 
Wanted BACULA.DATA got %11s\n", &label[4]); Mmsg1(jcr->errmsg, _("ANSI/IBM Volume \"%s\" does not belong to Bacula.\n"), dev->VolHdr.VolumeName); return VOL_NAME_ERROR; /* Not a Bacula label */ } Dmsg0(100, "Got HDR1 label\n"); break; case 2: if (dev->label_type == B_IBM_LABEL) { ebcdic_to_ascii(label, label, sizeof(label)); } if (stat != 80 || strncmp("HDR2", label, 4) != 0) { Dmsg0(100, "No HDR2 label\n"); Mmsg0(jcr->errmsg, _("No HDR2 label while reading ANSI/IBM label.\n")); return VOL_LABEL_ERROR; } Dmsg0(100, "Got ANSI HDR2 label\n"); break; default: if (stat == 0) { Dmsg0(100, "ANSI label OK\n"); return VOL_OK; } if (dev->label_type == B_IBM_LABEL) { ebcdic_to_ascii(label, label, sizeof(label)); } if (stat != 80 || strncmp("HDR", label, 3) != 0) { Dmsg0(100, "Unknown or bad ANSI/IBM label record.\n"); Mmsg0(jcr->errmsg, _("Unknown or bad ANSI/IBM label record.\n")); return VOL_LABEL_ERROR; } Dmsg0(100, "Got HDR label\n"); break; } } Dmsg0(100, "Too many records in ANSI/IBM label.\n"); Mmsg0(jcr->errmsg, _("Too many records in while reading ANSI/IBM label.\n")); return VOL_LABEL_ERROR; } /* * ANSI/IBM VOL1 label * 80 characters blank filled * Pos count Function What Bacula puts * 0-3 4 "VOL1" VOL1 * 4-9 6 Volume name Volume name * 10-10 1 Access code * 11-36 26 Unused * * ANSI * 37-50 14 Owner * 51-78 28 reserved * 79 1 ANSI level 3 * * IBM * 37-40 4 reserved * 41-50 10 Owner * 51-79 29 reserved * * * ANSI/IBM HDR1 label * 80 characters blank filled * Pos count Function What Bacula puts * 0-3 4 "HDR1" HDR1 * 4-20 17 File name BACULA.DATA * 21-26 6 Volume name Volume name * 27-30 4 Vol seq num 0001 * 31-34 4 file num 0001 * 35-38 4 Generation 0001 * 39-40 2 Gen version 00 * 41-46 6 Create date bYYDDD yesterday * 47-52 6 Expire date bYYDDD today * 53-53 1 Access * 54-59 6 Block count 000000 * 60-72 13 Software name Bacula * 73-79 7 Reserved * ANSI/IBM HDR2 label * 80 characters blank filled * Pos count Function What Bacula puts * 0-3 4 "HDR2" HDR2 * 4-4 1 Record format D (V if IBM) => variable * 5-9 5 Block length 32000 * 10-14 5 Rec length 32000 * 15-15 1 Density * 16-16 1 Continued * 17-33 17 Job * 34-35 2 Recording * 36-36 1 cr/lf ctl * 37-37 1 reserved * 38-38 1 Blocked flag * 39-49 11 reserved * 50-51 2 offset * 52-79 28 reserved */ static const char *labels[] = {"HDR", "EOF", "EOV"}; /* * Write an ANSI or IBM 80 character tape label * Type determines whether we are writing HDR, EOF, or EOV labels * Assume we are positioned to write the labels * Returns: true of OK * false if error */ bool write_ansi_ibm_labels(DCR *dcr, int type, const char *VolName) { DEVICE *dev = dcr->dev; JCR *jcr = dcr->jcr; char ansi_volname[7]; /* 6 char + \0 */ char label[80]; /* tape label */ char date[20]; /* ansi date buffer */ time_t now; int len, stat, label_type; /* * If the Device requires a specific label type use it, * otherwise, use the type requested by the Director */ if (dcr->device->label_type != B_BACULA_LABEL) { label_type = dcr->device->label_type; /* force label type */ } else { label_type = dcr->VolCatInfo.LabelType; /* accept Dir type */ } switch (label_type) { case B_BACULA_LABEL: return true; case B_ANSI_LABEL: case B_IBM_LABEL: ser_declare; Dmsg1(100, "Write ANSI label type=%d\n", label_type); len = strlen(VolName); if (len > 6) { Jmsg1(jcr, M_FATAL, 0, _("ANSI Volume label name \"%s\" longer than 6 chars.\n"), VolName); return false; } /* ANSI labels have 6 characters, and are padded with spaces * 'vol1\0' => 'vol1 \0' */ strcpy(ansi_volname, VolName); for(int i=len; i < 6; i++) { 
ansi_volname[i]=' '; } ansi_volname[6]='\0'; /* only for debug */ if (type == ANSI_VOL_LABEL) { ser_begin(label, sizeof(label)); ser_bytes("VOL1", 4); ser_bytes(ansi_volname, 6); /* Write VOL1 label */ if (label_type == B_IBM_LABEL) { ascii_to_ebcdic(label, label, sizeof(label)); } else { label[79] = '3'; /* ANSI label flag */ } stat = dev->write(label, sizeof(label)); if (stat != sizeof(label)) { berrno be; Jmsg3(jcr, M_FATAL, 0, _("Could not write ANSI VOL1 label. Wanted size=%d got=%d ERR=%s\n"), sizeof(label), stat, be.bstrerror()); return false; } } /* Now construct HDR1 label */ memset(label, ' ', sizeof(label)); ser_begin(label, sizeof(label)); ser_bytes(labels[type], 3); ser_bytes("1", 1); ser_bytes("BACULA.DATA", 11); /* Filename field */ ser_begin(&label[21], sizeof(label)-21); /* fileset field */ ser_bytes(ansi_volname, 6); /* write Vol Ser No. */ ser_begin(&label[27], sizeof(label)-27); ser_bytes("00010001000100", 14); /* File section, File seq no, Generation no */ now = time(NULL); ser_bytes(ansi_date(now, date), 6); /* current date */ ser_bytes(ansi_date(now - 24 * 3600, date), 6); /* created yesterday */ ser_bytes(" 000000Bacula ", 27); /* Write HDR1 label */ if (label_type == B_IBM_LABEL) { ascii_to_ebcdic(label, label, sizeof(label)); } /* * This could come at the end of a tape, ignore * EOT errors. */ stat = dev->write(label, sizeof(label)); if (stat != sizeof(label)) { berrno be; if (stat == -1) { dev->clrerror(-1); if (dev->dev_errno == 0) { dev->dev_errno = ENOSPC; /* out of space */ } if (dev->dev_errno != ENOSPC) { Jmsg1(jcr, M_FATAL, 0, _("Could not write ANSI HDR1 label. ERR=%s\n"), be.bstrerror()); return false; } } else { Jmsg(jcr, M_FATAL, 0, _("Could not write ANSI HDR1 label.\n")); return false; } } /* Now construct HDR2 label */ memset(label, ' ', sizeof(label)); ser_begin(label, sizeof(label)); ser_bytes(labels[type], 3); ser_bytes("2D3200032000", 12); /* Write HDR2 label */ if (label_type == B_IBM_LABEL) { label[4] = 'V'; ascii_to_ebcdic(label, label, sizeof(label)); } stat = dev->write(label, sizeof(label)); if (stat != sizeof(label)) { berrno be; if (stat == -1) { dev->clrerror(-1); if (dev->dev_errno == 0) { dev->dev_errno = ENOSPC; /* out of space */ } if (dev->dev_errno != ENOSPC) { Jmsg1(jcr, M_FATAL, 0, _("Could not write ANSI HDR1 label. ERR=%s\n"), be.bstrerror()); return false; } dev->weof(NULL, 1); return true; } else { Jmsg(jcr, M_FATAL, 0, _("Could not write ANSI HDR1 label.\n")); return false; } } if (!dev->weof(NULL, 1)) { Jmsg(jcr, M_FATAL, 0, _("Error writing EOF to tape. 
ERR=%s"), dev->errmsg); return false; } return true; default: Jmsg0(jcr, M_ABORT, 0, _("write_ansi_ibm_label called for non-ANSI/IBM type\n")); return false; /* should not get here */ } } /* Check a Bacula Volume name against an ANSI Volume name */ static bool same_label_names(char *bacula_name, char *ansi_name) { char *a = ansi_name; char *b = bacula_name; /* Six characters max */ for (int i=0; i < 6; i++) { if (*a == *b) { a++; b++; continue; } /* ANSI labels are blank filled, Bacula's are zero terminated */ if (*a == ' ' && *b == 0) { return true; } return false; } /* Reached 6 characters */ b++; if (*b == 0) { return true; } return false; } /* * ANSI date * ' 'YYDDD */ static char *ansi_date(time_t td, char *buf) { struct tm *tm; if (td == 0) { td = time(NULL); } tm = gmtime(&td); bsnprintf(buf, 10, " %05d ", 1000 * (tm->tm_year + 1900 - 2000) + tm->tm_yday); return buf; } bacula-15.0.3/src/stored/storage_encryption.rst0000644000175000017500000004270714771010173021433 0ustar bsbuildbsbuild New Storage Daemon encryption and XXH64 checksum ++++++++++++++++++++++++++++++++++++++++++++++++ This new version switch from the ``BB02`` volume format to the new ``BB03``. This new format replaces the old 32bits CRC32 checksum with the new and faster 64bits *XXH64*. The new format also add support for volume encryption by the Storage Daemon. Finally this patch add 3 new file Digest Algorithm *XXH64*, *XXH3_64* and *XXH3_128* to the ones that already exists *MD5*, *SHA1*, *SHA256*, *SHA512*. These 3 new *XX...* algorithms are not Cryptographic algorithms, but are very good regarding the dispersion and randomness qualities. There are also the fastest ones available. The New XXH64 checksum ====================== The new XXH64 checksum, is optional you can disable it using the same directive than for the old CRC32 in ``BB02`` ``Block Checksum = no`` Storage Daemon encryption ========================= Blocks in the volumes can be encrypted using the *BLOCK_CIPHER_AES_128_XTS* or *BLOCK_CIPHER_AES_256_XTS* ciphers algorithms. These symmetrical ciphers are fast and used by most applications that need to do symmetrical encryption of blocks. Every blocks are encrypted using a *key* that is unique for the volume and an *IV* (Initialization vector) that is the block number that is saved in the block header. The *XTS's* ciphers are specifically designed to support an *IV* with a low entropy The first *block* of the volume that hold the *Volume Label* is not encrypted because some fields as the *volume name* are required to manage the volume and the encryption. The user has the option to obfuscate some fields that are not required and could hold critical information like for example the *hostname*. These field are replaced by the string *"OBFUSCATED"* The header of the block are not encrypted. This 24 bytes header don't hold any user information. Here is the content of the header: - the 32bits header option bit field - the 32bits block length - the 32bits block number - the ``BB03`` string - the 32bits volume session id - the 32bits volume session time The *volume session time* is the time of the Storage Daemon at startup and not the time of the backup. The *volume session id* is reset at zero when the daemon starts and is incremented at every backup by the Storage Daemon The 64bits *XXH64* checksum is encrypted with the data. The *block* must be be decrypted and then the checksum can be verified. 
If the checksum matches, you can be confident that you have the right
encryption key and that the block has not been modified. The drawback is
that it is not possible to verify the integrity of a block without the
encryption key.

The data in the *data spool* is not encrypted; do not keep your data spool
on unsafe storage!

The new Storage Daemon Directives
=================================

Here are the new directives in the Storage Daemon configuration.

.. _Storage:Device:BlockEncryption:

**Block Encryption = <none|enable|strong>**
   This directive enables encryption for the given device. The encryption
   can be one of 3 different types:

   **none**
      This is the default; the device does not do any encryption.

   **enable**
      The device encrypts the data but leaves all the information in the
      *volume label* in clear text.

   **strong**
      The device encrypts the data and obfuscates any information in the
      *volume label* except what is needed for the management of the
      volume. The fields that are obfuscated are: *volume name*.

.. _Storage:Storage:EncryptionCommand:

**Encryption Command = <command>**
   The **command** specifies an external program that must provide the key
   related to a volume.

The Encryption Command
======================

The *Encryption Command* is called by the Storage Daemon every time it
initializes a new volume or mounts an existing one on a device with
encryption enabled. We provide a very simple script that can handle keys.
Here is an example of how to configure the command::

    EncryptionCommand = "/opt/bacula/script/sd_encryption_command.py getkey --key-dir /opt/bacula/working/keys"

Be careful: the command is limited to 127 characters. The same variable
substitutions as for the *AutoChanger command* are provided.

The program can be an interface to your existing key management system, or
it can do the key management on its own. *Bacula* provides a sample script
*sd_encryption_command.py* that does the work.

Let's illustrate the protocol with some usage of the script::

    $ OPERATION=LABEL VOLUME_NAME=Volume0001 ./sd_encryption_command.py getkey --cipher AES_128_XTS --key-dir tmp/keys
    cipher: AES_128_XTS
    cipher_key: G6HksAYDnNGr67AAx2Lb/vecTVjZoYAqSLZ7lGMyDVE=
    volume_name: Volume0001
    $ OPERATION=READ VOLUME_NAME=Volume0001 ./sd_encryption_command.py getkey --cipher AES_128_XTS --key-dir tmp/keys
    cipher: AES_128_XTS
    cipher_key: G6HksAYDnNGr67AAx2Lb/vecTVjZoYAqSLZ7lGMyDVE=
    volume_name: Volume0001
    $ cat tmp/keys/Volume0001
    cipher: AES_128_XTS
    cipher_key: G6HksAYDnNGr67AAx2Lb/vecTVjZoYAqSLZ7lGMyDVE=
    volume_name: Volume0001
    $ OPERATION=READ VOLUME_NAME=DontExist ./sd_encryption_command.py getkey --cipher AES_128_XTS --key-dir tmp/keys 2>/dev/null
    error: no key information for volume "DontExist"
    $ echo $?
    0
    $ OPERATION=BAD_CMD VOLUME_NAME=Volume0002 ./sd_encryption_command.py getkey --cipher AES_128_XTS --key-dir tmp/keys 2>/dev/null
    error: environment variable OPERATION invalid "BAD_CMD" for volume "Volume0002"
    $ echo $?
    0

As you can see above, the keys are kept in a single directory (*tmp/keys*)
and the arguments are passed through *environment variables*. *Bacula*
passes the following variables via the *environment*:

**OPERATION**
   This can be *LABEL* when the volume is labeled, in which case the script
   should generate a new key, or *READ* when the volume already has a label
   and the Storage Daemon needs the existing key to read or append data to
   the volume.

**VOLUME_NAME**
   This is the name of the volume.
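To make the contract concrete, here is a minimal, hypothetical key manager
written in Python. It is *not* the shipped *sd_encryption_command.py*; it
only sketches the protocol described in this chapter: it reads *OPERATION*
and *VOLUME_NAME* from the environment, keeps one key file per volume in a
directory given on the command line (a simplification of the sample
script's ``--key-dir`` option), prints its answer on stdout, and always
exits with code 0. The returned fields are described just below; the 32
byte key size is an assumption that matches *AES_128_XTS*, which uses a
double-length key::

    #!/usr/bin/env python3
    # Minimal sketch of the Storage Daemon key protocol (illustration only).
    import base64
    import os
    import sys

    def main():
        keydir = sys.argv[1] if len(sys.argv) > 1 else "/opt/bacula/working/keys"
        op = os.environ.get("OPERATION", "")
        vol = os.environ.get("VOLUME_NAME", "")
        keyfile = os.path.join(keydir, vol)

        if op == "LABEL":
            # New volume: generate a fresh key and remember it.
            # 32 random bytes = double-length key for AES_128_XTS.
            key = base64.b64encode(os.urandom(32)).decode()
            with open(keyfile, "w") as f:
                f.write(key + "\n")
        elif op == "READ":
            try:
                key = open(keyfile).read().strip()
            except FileNotFoundError:
                # Errors are reported with an "error:" line and exit code 0
                print('error: no key information for volume "%s"' % vol)
                return 0
        else:
            print('error: environment variable OPERATION invalid "%s" for volume "%s"' % (op, vol))
            return 0

        print("cipher: AES_128_XTS")
        print("cipher_key: %s" % key)
        print("volume_name: %s" % vol)
        return 0

    if __name__ == "__main__":
        sys.exit(main())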
Some variables are already provided to support a *Master Key* in the
future. This feature is not yet supported, but will come later:

**ENC_CIPHER_KEY**
   This is a base64 encoded version of the key, encrypted by the
   *master key*.

**MASTER_KEYID**
   This is a base64 encoded version of the key Id of the *master key* that
   was used to encrypt the *ENC_CIPHER_KEY* above.

*Bacula* expects some values in return:

**volumename**
   This is a repetition of the name of the volume that was given to the
   script. This field is optional and ignored by Bacula.

**cipher**
   This is the cipher that Bacula must use. Bacula knows the following
   ciphers: *AES_128_XTS* and *AES_256_XTS*. Of course the key length
   varies with the cipher.

**cipher_key**
   This is the symmetric *key* in *base64* format.

**comment**
   This is a single line of comment that is optional and ignored by Bacula.

**error**
   This is a single line error message. It is optional, but when provided,
   Bacula considers that the script returned an error and displays this
   error in the job log.

Bacula expects an *exit code* of 0. If the script exits with a different
code, any output is ignored and Bacula displays a generic message with the
exit code in the job log. To return an error to Bacula, the script must use
the *error* field and return an exit code of 0.

What is encrypted and what is not
=================================

The main goal of encryption is to prevent anybody who does not own the key
from reading the data, and *Bacula* does this well. But encryption alone
does not protect against every modification.

The first block of the volume is the *volume label* and it is not
encrypted, because some information is required for the management of the
*volume* itself. The only data in the *volume label* coming from the user
are the *hostname*, the *volume name* and the *pool name*. The *hostname*
can be obfuscated using the *STRONG* mode of the encryption; the *pool
name* and the *volume name* can be made useless to an attacker by using
generic names like ``PoolAlpha`` or ``Volume12345``. Also be aware that
data in the catalog (the directories, the filenames and the *JobLog*) is
not encrypted.

An attacker could make some undetected modification to the volume. The
easiest way is to remove one block inside the volume. Other verifications
inside *Bacula* could detect such a modification and an attacker must be
meticulous, but it is possible. The *XXH64* checksums inside each volume
are encrypted using the encryption key. This is not as good as a certified
signature, but it provides substantial confidence that a block cannot be
modified easily.

To summarize, you can be confident that:

- An attacker cannot read any of your data: **Very Strong**
- An attacker cannot substitute the volume by another one: **Strong**
- An attacker cannot modify the content of the volume: **Good**

Notes for the developers and support personnel
++++++++++++++++++++++++++++++++++++++++++++++

The new Volume Format
=====================

In the new ``BB03`` volume format, the space used by the CRC32 is now used
by a 32bit bit field that for now only knows about the following *option*
bits::

    BLKHOPT_CHKSUM        = 1 << 0,   // If the XXH64 checksum is enabled
    BLKHOPT_ENCRYPT_VOL   = 1 << 1,   // If the volume is encrypted
    BLKHOPT_ENCRYPT_BLOCK = 1 << 2,   // If this block is encrypted

Notice that the *block number* is reset to zero when the volume is appended
to, and is therefore not unique! This could be a little security concern,
as multiple blocks could be encrypted using the same key and *IV*.
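As an illustration only (this is not code from the Bacula sources), the
sketch below decodes the fixed part of such a ``BB03`` block header in
Python and tests the option bits. The field order follows the header layout
shown later in this chapter; the network byte order and the 32 byte fixed
header size are assumptions based on Bacula's usual serialization of
integers::

    import struct

    # Option bits of the BB03 block header (see above)
    BLKHOPT_CHKSUM        = 1 << 0
    BLKHOPT_ENCRYPT_VOL   = 1 << 1
    BLKHOPT_ENCRYPT_BLOCK = 1 << 2

    def decode_bb03_header(buf):
        """Decode the fixed part of a BB03 block header (big-endian fields)."""
        (header_option, block_len, block_number, ident,
         vol_session_id, vol_session_time, checksum64) = struct.unpack_from(">III4sIIQ", buf, 0)
        if ident != b"BB03":
            raise ValueError("not a BB03 block header: %r" % ident)
        return {
            "header_option":    header_option,
            "block_len":        block_len,
            "block_number":     block_number,
            "vol_session_id":   vol_session_id,
            "vol_session_time": vol_session_time,
            "checksum64":       checksum64,
            "has_checksum":     bool(header_option & BLKHOPT_CHKSUM),
            "volume_encrypted": bool(header_option & BLKHOPT_ENCRYPT_VOL),
            "block_encrypted":  bool(header_option & BLKHOPT_ENCRYPT_BLOCK),
        }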
The New XXH64 checksum
======================

The new XXH64 checksum is saved just after *VolSessionTime* and is set to
zero if the checksum is disabled. The checksum is calculated on the whole
block with the new 64bit checksum field set to zero. To verify the
checksum, you must first check that the *BLKHOPT_CHKSUM* bit is enabled,
then un-serialize the value of the checksum in the block and save it, then
replace it with zeroes, calculate the checksum of the block and compare it
with the value that you have saved.

Volume Signature
================

The purpose is to be able to verify:

- the authenticity of the volume (that we have the volume we are looking for)
- the integrity of the volume (that the data on the volume did not change)

This is independent of the volume encryption.

Some assertions to validate:

- The Volume Signature is configured on the SD side only.
- Nothing is configured on the DIR side.
- The SD should be able to tell the DIR that this feature is enabled on one
  volume, and the DIR **could** (something to decide) store this
  information in the *volume media* record in the catalog.
- The DIR must be able to initiate a volume signature check and display the
  result. We need to define an interface for that.
- Signatures are more commonly done using public/private key pairs.
- The customer could appreciate using different key pairs for different
  groups of volumes (for example depending on the pool name).
- The KeyManager can handle these keys the same way it is handling the
  encryption keys.

Something in the volume must tell the user which key was used to sign the
volume and which one can be used to verify it:

- this information is not mandatory, as the user could keep it somewhere
  else
- we could store the keyid; the KeyManager can then provide the public key
- we could store the public key. A 4096bit public key is about 717 bytes
  wide (this includes the *modulus*). But this is not valid, as we must
  validate the content of the volume with information coming from outside
  the volume. So the keyid is the better solution; then we are sure that
  the public key will come from outside.

At the block level, the signature digest for SHA256 and a 2048bit key is
122 bytes. I don't know how much the size fluctuates, but it could vary if
we want to use another hash function or maybe another key size. So the size
in the block must be variable. The best place is at the end of the header.

How to combine the signature and the XXH64 checksum? First set the
signature and the XXH64 checksum areas to 0x00, then calculate and fill in
the signature area. Second, calculate and fill in the XXH64 checksum area.
For the verification, the XXH64 checksum will always be verified, while the
signature will only be verified if we can provide or verify the
authenticity of the public key.

Weakness of the signature in bacula
===================================

The signature signs individual blocks. If one block is removed from the
volume, we don't know it. We could ensure that the BlockNumber is well
updated and verify that there is no missing BlockNumber. Today the
BlockNumber is reset to zero every time the volume is unmounted; in fact,
when it is mounted again we don't read the value of the last BlockNumber
and we restart at zero. Even with *valid* BlockNumbers we are not safe: the
last blocks of the volume could be removed and we would not know. For that
we should refresh the volume label with this information, which is
impossible with some devices like tapes.

Worse, it is possible to swap some blocks inside a file. An attacker could
recompose a file by switching and dropping some blocks of the same file.
This makes our signature very weak: we only secure the front door. Do we
still need to implement a signature? We don't need to decide today. What
I'm doing today has little impact, not more than 4 extra bytes in the
volume label that could be recycled for the next feature.
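Before looking at how the checksum, the signature and the encryption are
combined, here is a small sketch of the checksum verification described in
*The New XXH64 checksum* section above. It is an illustration only, using
the Python ``xxhash`` module: the 8 byte checksum is assumed to sit at
offset 24 (right after *VolSessionTime*), to be stored in network byte
order, and to be computed with the default seed of 0::

    import struct
    import xxhash   # pip install xxhash

    BLKHOPT_CHKSUM = 1 << 0
    CHKSUM_OFFSET  = 24          # assumed: just after VolSessionTime
    CHKSUM_SIZE    = 8

    def verify_block_checksum(block):
        """Return True if the XXH64 checksum of a block is valid (or absent)."""
        header_option = struct.unpack_from(">I", block, 0)[0]
        if not (header_option & BLKHOPT_CHKSUM):
            return True                      # checksum disabled for this block
        # Save the stored checksum, then recompute it with the field zeroed
        stored = struct.unpack_from(">Q", block, CHKSUM_OFFSET)[0]
        scratch = bytearray(block)
        scratch[CHKSUM_OFFSET:CHKSUM_OFFSET + CHKSUM_SIZE] = b"\x00" * CHKSUM_SIZE
        return xxhash.xxh64(bytes(scratch)).intdigest() == stored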
The signature, checksum64 and encryptions together
==================================================

The question is how to order the 3 operations at backup and restore time::

    uint32_t header_option;     // was the checksum in BB02
    uint32_t block_len
    uint32_t BlockNumber
    char     ID[4]              // BB03
    uint32_t VolSessionId
    uint32_t VolSessionTime
    uint64_t CheckSum64
    if (header_option & BLKHOPT_SIGN_BLOCK) {
       uint32_t signature_size
       char signature[signature_size]
    }
    char PAYLOAD[]

Notice that the checksum64 and the signature are in the middle of the
block; the only way for them to protect the entire block is to reset these
areas to zero before calculating them.

(choice 1) CheckSum64 should behave like a hardware checksum. It should
protect the whole block and be computed last (after encryption). At read
time it should be checked first (before decryption). If we use the wrong
decryption key, we will get a block that is totally messed up, we will not
know it, and the SD may even crash while trying to decode and use the
*random* data.

(choice 2) But it can be done in a different way! Having the CheckSum64
calculated BEFORE the encryption would validate that we have the right key
to decrypt the block. In this case it should be calculated before the
encryption, and at read time it should be checked after decryption. In this
situation the key is required to verify that there is no "hardware" error,
but reading the volume without the right key is useless anyway. This is the
choice I made at first: if we are using the wrong key at restore time, the
CheckSum64 will detect it and Bacula will not try to decode a corrupted
block.

About the signature. The signature is optional twice over:

- we don't need to use a signature
- if we have it in a volume, we don't need to verify it and we can still
  read the volume.

The signature must be independent of the encryption: we must be able to
verify the signature without having the key to decrypt the data. So the
signature must be calculated AFTER the encryption, and at read time it must
be verified before decryption.
In choice1:: do encryption set checksum64 to zero set signature to zero calculate and fill in signature calculate and fill in checksum64 At restore time (choice1):: read and copy checksum64 set checksum64 to zero calculate & verify checksum64 read and copy signature set signature to zero calculate & verify signature <== we can stop here if we just want to check the signature do decryption In choice2:: set signature to zero set checksum64 to zero calculate and fill in checksum64 do encryption set signature to zero one more time calculate and fill in signature At restore time (choice2):: read and copy signature set signature to zero calculate & verify signature <== we can stop here if we just want to check the signature do decryption set signature to zero one more time read and copy checksum64 set checksum64 to zero calculate & verify checksum64 Despite the *"set signature to zero one more time"*, and the fact that the CheckSum64 don't work like a hardware checksum (that anybody can check), I prefer (choice2) Signatures ========== Because we don't know in advance on which volume a block will be written, and because the space for the signature must be reserved before to start writing the records inside the volume, all the blocks written by a device must share the same signature size. Then some signature parameters must be defined at the device level. The Key manager can still provide the key for the signature, but they must match the device parameters! The signature should not prevent the volume to be read on any other device or machine. The information inside the volume, that describe the structure of the volume must be self sufficient to read the volume. If we want to change the configuration of the device, for example to increase the "security" by increasing the hash or the key size, the old volumes should be able to be read on this device. https://medium.com/@bn121rajesh/rsa-sign-and-verify-using-openssl-behind-the-scene-bf3cac0aade2 https://github.com/bn121rajesh/ipython-notebooks/blob/master/BehindTheScene/OpensslSignVerify/openssl_sign_verify.ipynb bacula-15.0.3/src/stored/bls.c0000644000175000017500000003607614771010173015711 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * * Dumb program to do an "ls" of a Bacula 1.0 mortal file. 
* * Kern Sibbald, MM * */ #include "bacula.h" #include "stored.h" #include "findlib/find.h" #include "lib/cmd_parser.h" extern bool parse_sd_config(CONFIG *config, const char *configfile, int exit_code); static void do_blocks(char *infname); static void do_jobs(char *infname); static void do_ls(char *fname); static void do_close(JCR *jcr); static void get_session_record(DEVICE *dev, DEV_RECORD *rec, SESSION_LABEL *sessrec); static bool record_cb(DCR *dcr, DEV_RECORD *rec); static DEVICE *dev; static DCR *dcr; static bool dump_label = false; static bool list_blocks = false; static bool list_jobs = false; static DEV_RECORD *rec; static JCR *jcr; static SESSION_LABEL sessrec; static uint32_t num_files = 0; static uint32_t retry_count = 10; static ATTR *attr; static CONFIG *config; bool filter = false; /* any filter set ? */ bool dedup = false; /* decode dedup reference */ #define CONFIG_FILE "bacula-sd.conf" char *configfile = NULL; bool detect_errors = false; int errors = 0; static FF_PKT *ff; static BSR *bsr = NULL; #ifdef COMMUNITY bool dedup_parse_filter(char *fltr) { return false; } void dedup_filter_record(int verbose, DCR *dcr, DEV_RECORD *rec, char *dedup_msg, int len) {} #endif static void usage() { fprintf(stderr, _( PROG_COPYRIGHT "\n%sVersion: %s (%s)\n\n" "Usage: bls [options] \n" " -b specify a bootstrap file\n" " -c specify a Storage configuration file\n" " -d set debug level to \n" " -dt print timestamp in debug output\n" " -e exclude list\n" " -i include list\n" " -j list jobs\n" " -k list blocks\n" " (no j or k option) list saved files\n" " -n number of times user is aked to provide correct volume\n" " -L dump label\n" " -p proceed inspite of errors\n" " -V specify Volume names (separated by |)\n" " -E Check records to detect errors\n" " -v be verbose\n" " -D Display dedup reference (to use with -v)\n" " -F Filter regarding some key, value (to use with -D)\n" " -? 
print this message\n\n"), 2000, BDEMO, VERSION, BDATE); exit(1); } int main (int argc, char *argv[]) { int i, ch; FILE *fd; char line[1000]; char *VolumeName= NULL; char *bsrName = NULL; bool ignore_label_errors = false; BtoolsAskDirHandler askdir_handler; init_askdir_handler(&askdir_handler); setlocale(LC_ALL, ""); bindtextdomain("bacula", LOCALEDIR); textdomain("bacula"); init_stack_dump(); lmgr_init_thread(); working_directory = "/tmp"; my_name_is(argc, argv, "bls"); init_msg(NULL, NULL); /* initialize message handler */ OSDependentInit(); ff = init_find_files(); while ((ch = getopt(argc, argv, "b:c:d:e:i:n:jkLpvV:?EDF:")) != -1) { switch (ch) { case 'b': bsrName = optarg; break; case 'E': detect_errors = true; break; case 'c': /* specify config file */ if (configfile != NULL) { free(configfile); } configfile = bstrdup(optarg); break; case 'd': /* debug level */ if (*optarg == 't') { dbg_timestamp = true; } else { char *p; /* We probably find a tag list -d 10,sql,bvfs */ if ((p = strchr(optarg, ',')) != NULL) { *p = 0; } debug_level = atoi(optarg); if (debug_level <= 0) { debug_level = 1; } if (p) { debug_parse_tags(p+1, &debug_level_tags); } } break; case 'e': /* exclude list */ if ((fd = bfopen(optarg, "rb")) == NULL) { berrno be; Pmsg2(0, _("Could not open exclude file: %s, ERR=%s\n"), optarg, be.bstrerror()); exit(1); } while (fgets(line, sizeof(line), fd) != NULL) { strip_trailing_junk(line); Dmsg1(100, "add_exclude %s\n", line); add_fname_to_exclude_list(ff, line); } fclose(fd); break; case 'i': /* include list */ if ((fd = bfopen(optarg, "rb")) == NULL) { berrno be; Pmsg2(0, _("Could not open include file: %s, ERR=%s\n"), optarg, be.bstrerror()); exit(1); } while (fgets(line, sizeof(line), fd) != NULL) { strip_trailing_junk(line); Dmsg1(100, "add_include %s\n", line); add_fname_to_include_list(ff, 0, line); } fclose(fd); break; case 'j': list_jobs = true; break; case 'k': list_blocks = true; break; case 'n': retry_count = atoi(optarg); break; case 'L': dump_label = true; break; case 'p': ignore_label_errors = true; forge_on = true; break; case 'v': verbose++; break; case 'V': /* Volume name */ VolumeName = optarg; break; case 'D': dedup = true; break; case 'F': filter = true; if (!dedup_parse_filter(optarg)) { Pmsg1(0, _("Error parsing filter: %s\n"), optarg); exit(1); } break; case '?': default: usage(); } /* end switch */ } /* end while */ argc -= optind; argv += optind; if (!argc) { Pmsg0(0, _("No archive name specified\n")); usage(); } if (configfile == NULL) { configfile = bstrdup(CONFIG_FILE); } config = New(CONFIG()); parse_sd_config(config, configfile, M_ERROR_TERM); setup_me(); load_sd_plugins(me->plugin_directory); if (ff->included_files_list == NULL) { add_fname_to_include_list(ff, 0, "/"); } for (i=0; i < argc; i++) { if (bsrName) { bsr = parse_bsr(NULL, bsrName); } jcr = setup_jcr("bls", argv[i], bsr, VolumeName, SD_READ, false/*read dedup data*/, retry_count); if (!jcr) { exit(1); } jcr->ignore_label_errors = ignore_label_errors; dev = jcr->dcr->dev; if (!dev) { exit(1); } dcr = jcr->dcr; rec = new_record(); attr = new_attr(jcr); /* * Assume that we have already read the volume label. 
* If on second or subsequent volume, adjust buffer pointer */ if (dev->VolHdr.PrevVolumeName[0] != 0) { /* second volume */ Pmsg1(0, _("\n" "Warning, this Volume is a continuation of Volume %s\n"), dev->VolHdr.PrevVolumeName); } if (list_blocks) { do_blocks(argv[i]); } else if (list_jobs) { do_jobs(argv[i]); } else { do_ls(argv[i]); } do_close(jcr); } if (bsr) { free_bsr(bsr); } term_include_exclude_files(ff); term_find_files(ff); if (detect_errors) { return (errors > 0)? 1 : 0; } return 0; } static void do_close(JCR *jcr) { release_device(jcr->dcr); free_attr(attr); free_record(rec); free_jcr(jcr); dev->term(NULL); } /* List just block information */ static void do_blocks(char *infname) { DEV_BLOCK *block = dcr->block; char buf1[100], buf2[100]; for ( ;; ) { if (!dcr->read_block_from_device(NO_BLOCK_NUMBER_CHECK)) { Dmsg1(100, "!read_block(): ERR=%s\n", dev->print_errmsg()); if (dev->at_eot()) { if (!mount_next_read_volume(dcr)) { Jmsg(jcr, M_INFO, 0, _("Got EOM at file %u on device %s, Volume \"%s\"\n"), dev->file, dev->print_name(), dcr->VolumeName); break; } /* Read and discard Volume label */ DEV_RECORD *record; record = new_record(); dcr->read_block_from_device(NO_BLOCK_NUMBER_CHECK); read_record_from_block(dcr, record); get_session_record(dev, record, &sessrec); free_record(record); Jmsg(jcr, M_INFO, 0, _("Mounted Volume \"%s\".\n"), dcr->VolumeName); } else if (dev->at_eof()) { Jmsg(jcr, M_INFO, 0, _("End of file %u on device %s, Volume \"%s\"\n"), dev->file, dev->print_name(), dcr->VolumeName); Dmsg0(20, "read_record got eof. try again\n"); continue; } else if (dev->is_short_block()) { Jmsg(jcr, M_INFO, 0, "%s", dev->print_errmsg()); continue; } else { /* I/O error */ errors++; display_tape_error_status(jcr, dev); break; } } if (!match_bsr_block(bsr, block)) { Dmsg5(100, "reject Blk=%u blen=%u bVer=%d SessId=%u SessTim=%u\n", block->BlockNumber, block->block_len, block->BlockVer, block->VolSessionId, block->VolSessionTime); continue; } Dmsg5(100, "Blk=%u blen=%u bVer=%d SessId=%u SessTim=%u\n", block->BlockNumber, block->block_len, block->BlockVer, block->VolSessionId, block->VolSessionTime); if (verbose == 1) { read_record_from_block(dcr, rec); Pmsg8(-1, "Addr=%llu blk_num=%u blen=%u First rec FI=%s SessId=%u SessTim=%u Strm=%s rlen=%d\n", dev->get_full_addr(), block->BlockNumber, block->block_len, FI_to_ascii(buf1, rec->FileIndex), rec->VolSessionId, rec->VolSessionTime, stream_to_ascii_ex(buf2, rec->Stream, rec->FileIndex), rec->data_len); rec->remainder = 0; } else if (verbose > 1) { /* detailed block dump */ Pmsg5(-1, "Blk=%u blen=%u bVer=%d SessId=%u SessTim=%u\n", block->BlockNumber, block->block_len, block->BlockVer, block->VolSessionId, block->VolSessionTime); dump_block(dcr->dev, block, "", true); } else { printf("Block: %d size=%d\n", block->BlockNumber, block->block_len); } } return; } /* * We are only looking for labels or in particular Job Session records */ static bool jobs_cb(DCR *dcr, DEV_RECORD *rec) { if (rec->FileIndex < 0) { dump_label_record(dcr->dev, rec, verbose, detect_errors); } rec->remainder = 0; return true; } /* Do list job records */ static void do_jobs(char *infname) { if (!read_records(dcr, jobs_cb, mount_next_read_volume)) { errors++; } } /* Do an ls type listing of an archive */ static void do_ls(char *infname) { if (dump_label) { dev->dump_volume_label(); return; } if (!read_records(dcr, record_cb, mount_next_read_volume)) { errors++; } printf("%u files found.\n", num_files); } /* * Called here for each record from read_records() */ static bool 
record_cb(DCR *dcr, DEV_RECORD *rec) { char dedup_msg[200]; if (verbose && rec->FileIndex < 0) { dump_label_record(dcr->dev, rec, verbose, false); return true; } dedup_filter_record(verbose, dcr, rec, dedup_msg, sizeof(dedup_msg)); /* File Attributes stream */ if (rec->maskedStream == STREAM_UNIX_ATTRIBUTES || rec->maskedStream == STREAM_UNIX_ATTRIBUTES_EX) { if (!unpack_attributes_record(jcr, rec->Stream, rec->data, rec->data_len, attr)) { if (!forge_on) { Emsg0(M_ERROR_TERM, 0, _("Cannot continue.\n")); } else { Emsg0(M_ERROR, 0, _("Attrib unpack error!\n")); } num_files++; return true; } attr->data_stream = decode_stat(attr->attr, &attr->statp, sizeof(attr->statp), &attr->LinkFI); build_attr_output_fnames(jcr, attr); if (file_is_included(ff, attr->fname) && !file_is_excluded(ff, attr->fname)) { if (verbose) { Pmsg6(000, _("FileIndex=%d VolSessionId=%d VolSessionTime=%d Stream=%d DataLen=%d %s\n"), rec->FileIndex, rec->VolSessionId, rec->VolSessionTime, rec->Stream, rec->data_len, dedup_msg); } print_ls_output(jcr, attr); num_files++; } } else if (rec->maskedStream == STREAM_PLUGIN_NAME) { char data[100]; int len = MIN(rec->data_len+1, sizeof(data)); bstrncpy(data, rec->data, len); Dmsg1(100, "Plugin data: %s\n", data); } else if (rec->maskedStream == STREAM_RESTORE_OBJECT) { Dmsg0(100, "Restore Object record\n"); } else if (rec->maskedStream == STREAM_PLUGIN_OBJECT) { Dmsg0(100, "Plugin Object record\n"); } else if (rec->maskedStream == STREAM_PLUGIN_META_BLOB) { Dmsg0(100, "Plugin Object binary metadata record\n"); } else if (rec->maskedStream == STREAM_PLUGIN_META_CATALOG) { Dmsg0(100, "Plugin Object catalog metadata record\n"); } else if (rec->maskedStream == STREAM_ADATA_BLOCK_HEADER) { Dmsg0(000, "Adata block header\n"); } else if (rec->maskedStream == STREAM_ADATA_RECORD_HEADER) { Dmsg0(000, "Adata record header\n"); } return true; } static void get_session_record(DEVICE *dev, DEV_RECORD *rec, SESSION_LABEL *sessrec) { const char *rtype; memset(sessrec, 0, sizeof(SESSION_LABEL)); jcr->JobId = 0; switch (rec->FileIndex) { case PRE_LABEL: rtype = _("Fresh Volume Label"); break; case VOL_LABEL: rtype = _("Volume Label"); unser_volume_label(dev, rec); break; case SOS_LABEL: rtype = _("Begin Job Session"); unser_session_label(sessrec, rec); jcr->JobId = sessrec->JobId; break; case EOS_LABEL: rtype = _("End Job Session"); break; case 0: case EOM_LABEL: rtype = _("End of Medium"); break; case EOT_LABEL: rtype = _("End of Physical Medium"); break; case SOB_LABEL: rtype = _("Start of object"); break; case EOB_LABEL: rtype = _("End of object"); break; default: rtype = _("Unknown"); Dmsg1(10, "FI rtype=%d unknown\n", rec->FileIndex); break; } Dmsg5(10, "%s Record: VolSessionId=%d VolSessionTime=%d JobId=%d DataLen=%d\n", rtype, rec->VolSessionId, rec->VolSessionTime, rec->Stream, rec->data_len); if (verbose) { Pmsg5(-1, _("%s Record: VolSessionId=%d VolSessionTime=%d JobId=%d DataLen=%d\n"), rtype, rec->VolSessionId, rec->VolSessionTime, rec->Stream, rec->data_len); } } bacula-15.0.3/src/stored/tape_worm.c0000644000175000017500000000552014771010173017114 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. 
You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * * Routines for getting tape worm status * * Written by Kern Sibbald, September MMXVIII * */ #include "bacula.h" /* pull in global headers */ #include "stored.h" /* pull in Storage Deamon headers */ static const int dbglvl = 50; bool tape_dev::get_tape_worm(DCR *dcr) { JCR *jcr = dcr->jcr; if (!job_canceled(jcr) && dcr->device->worm_command && dcr->device->control_name) { POOLMEM *wormcmd; int status = 1; bool is_worm = false; int worm_val = 0; BPIPE *bpipe; char line[MAXSTRING]; const char *fmt = " %d"; wormcmd = get_pool_memory(PM_FNAME); edit_device_codes(dcr, &wormcmd, dcr->device->worm_command, ""); /* Wait maximum 5 minutes */ bpipe = open_bpipe(wormcmd, 60 * 5, "r"); if (bpipe) { while (fgets(line, (int)sizeof(line), bpipe->rfd)) { is_worm = false; if (bsscanf(line, fmt, &worm_val) == 1) { if (worm_val > 0) { is_worm = true; } } } status = close_bpipe(bpipe); free_pool_memory(wormcmd); return is_worm; } else { status = errno; } if (status != 0) { berrno be; Jmsg(jcr, M_WARNING, 0, _("3997 Bad worm command status: %s: ERR=%s.\n"), wormcmd, be.bstrerror(status)); Dmsg2(dbglvl, _("3997 Bad worm command status: %s: ERR=%s.\n"), wormcmd, be.bstrerror(status)); } Dmsg1(400, "worm script status=%d\n", status); free_pool_memory(wormcmd); } else { if (!dcr->device->worm_command) { Dmsg1(dbglvl, "Cannot get tape worm status: no Worm Command specified for device %s\n", print_name()); Dmsg1(dbglvl, "Cannot get tape worm status: no Worm Command specified for device %s\n", print_name()); } if (!dcr->device->control_name) { Dmsg1(dbglvl, "Cannot get tape worm status: no Control Device specified for device %s\n", print_name()); Dmsg1(dbglvl, "Cannot get tape worm status: no Control Device specified for device %s\n", print_name()); } } return false; } bacula-15.0.3/src/stored/stored_conf.h0000644000175000017500000002705314771010173017436 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ extern s_kw dev_types[]; /* * Cloud Truncate Cache options */ enum { TRUNC_NO = 0, /* default value */ TRUNC_AFTER_UPLOAD = 1, TRUNC_AT_ENDOFJOB = 2, TRUNC_CONF_DEFAULT = 3 /* only use as a parameter, not in the conf */ }; /* * Cloud Upload options */ enum { UPLOAD_EACHPART = 0, /* default value */ UPLOAD_NO = 1, UPLOAD_AT_ENDOFJOB = 2 }; /* * Resource codes -- they must be sequential for indexing * */ enum { R_DIRECTOR = 3001, R_STORAGE = 3002, R_DEVICE = 3003, R_MSGS = 3004, R_AUTOCHANGER = 3005, R_CLOUD = 3006, R_COLLECTOR = 3007, R_DEDUP = 3008, R_FIRST = R_DIRECTOR, R_LAST = R_DEDUP /* keep this updated */ }; enum { R_NAME = 3020, R_ADDRESS, R_PASSWORD, R_TYPE, R_BACKUP }; /* Definition of the contents of each Resource */ /* * Cloud drivers */ enum { CLOUD_RESTORE_PRIO_HIGH = 0, CLOUD_RESTORE_PRIO_MEDIUM, CLOUD_RESTORE_PRIO_LOW }; enum { S3_STANDARD = 0, S3_STANDARD_IA, S3_INTELLIGENT_TIERING, S3_ONE_ZONE_IA, S3_GLACIER_INSTANT_RETRIEVAL, S3_GLACIER_FLEXIBLE_RETRIEVAL, S3_GLACIER_DEEP_ARCHIVE, S3_RRS }; class CLOUD { public: RES hdr; char *host_name; char *bucket_name; char *access_key; char *secret_key; char *blob_endpoint; char *file_endpoint; char *queue_endpoint; char *table_endpoint; char *endpoint_suffix; char *region; int32_t protocol; int32_t uri_style; uint32_t driver_type; /* Cloud driver type */ uint32_t trunc_opt; uint32_t upload_opt; uint32_t max_concurrent_uploads; uint32_t max_concurrent_downloads; uint64_t upload_limit; uint64_t download_limit; char *driver_command; int32_t transfer_priority; utime_t transfer_retention; uint32_t objects_default_tier; }; /* * Dedup drivers */ class DedupEngine; class DEDUPRES { public: RES hdr; char *dedup_dir; /* DEDUP directory */ char *dedup_index_dir; /* Directory for index (db) file */ int64_t max_container_size; /* Maximum container size then split */ int64_t max_index_memory_size; /* Maximum memory usable for the dedup index */ bool dedup_check_hash; /* Check Hash of each chunk after rehydration */ int64_t dedup_scrub_max_bandwidth; /* Maximum disk bandwidth usable for scrub */ uint32_t driver_type; /* dedup driver type */ DedupEngine *dedupengine; int dedupengine_use_count; char *dedup_err_msg; /* is set for any error status */ uint32_t max_container_open; /* the maximum number of simultaneously open container */ }; /* * Director resource */ class DIRRES { public: RES hdr; char *password; /* Director password */ char *address; /* Director IP address or zero */ bool monitor; /* Have only access to status and .status functions */ bool tls_authenticate; /* Authenticate with TLS */ bool tls_enable; /* Enable TLS */ bool tls_psk_enable; /* Enable TLS-PSK */ bool tls_require; /* Require TLS */ bool tls_verify_peer; /* TLS Verify Client Certificate */ char *tls_ca_certfile; /* TLS CA Certificate File */ char *tls_ca_certdir; /* TLS CA Certificate Directory */ char *tls_certfile; /* TLS Server Certificate File */ char *tls_keyfile; /* TLS Server Key File */ char *tls_dhfile; /* TLS Diffie-Hellman Parameters */ alist *tls_allowed_cns; /* TLS Allowed Clients */ TLS_CONTEXT *tls_ctx; /* Shared TLS Context */ TLS_CONTEXT *psk_ctx; /* Shared TLS-PSK Context */ }; /* Storage daemon "global" definitions */ class s_res_store { public: RES hdr; dlist *sdaddrs; dlist *sddaddrs; char *working_directory; /* working directory for checkpoints */ char *pid_directory; char *subsys_directory; char *plugin_directory; /* Plugin directory */ char *scripts_directory; char *ssd_directory; uint32_t max_concurrent_jobs; /* maximum concurrent jobs to run */ 
MSGS *messages; /* Daemon message handler */ utime_t ClientConnectTimeout; /* Max time to wait to connect client */ utime_t heartbeat_interval; /* Interval to send hb to FD */ utime_t client_wait; /* Time to wait for FD to connect */ bool comm_compression; /* Set to allow comm line compression */ bool require_fips; /* Check for FIPS module */ bool tls_authenticate; /* Authenticate with TLS */ bool tls_enable; /* Enable TLS */ bool tls_psk_enable; /* Enable TLS-PSK */ bool tls_require; /* Require TLS */ bool tls_verify_peer; /* TLS Verify Client Certificate */ char *tls_ca_certfile; /* TLS CA Certificate File */ char *tls_ca_certdir; /* TLS CA Certificate Directory */ char *tls_certfile; /* TLS Server Certificate File */ char *tls_keyfile; /* TLS Server Key File */ char *tls_dhfile; /* TLS Diffie-Hellman Parameters */ alist *tls_allowed_cns; /* TLS Allowed Clients */ char *verid; /* Custom Id to print in version command */ TLS_CONTEXT *tls_ctx; /* Shared TLS Context */ TLS_CONTEXT *psk_ctx; /* Shared TLS-PSK Context */ char *dedup_dir; /* DEDUP directory */ char *dedup_index_dir; /* Directory for index (db) file */ int64_t max_container_size; /* Maximum container size then split */ bool dedup_check_hash; /* Check Hash of each chunk after rehydration */ int64_t dedup_scrub_max_bandwidth; /* Maximum disk bandwidth usable for scrub */ char *encryption_command; /* encryption key command -- external program */ }; typedef class s_res_store STORES; class AUTOCHANGER { public: RES hdr; alist *device; /* List of DEVRES device pointers */ char *changer_name; /* Changer device name */ char *changer_command; /* Changer command -- external program */ char *lock_command; /* Share storage lock command -- external program */ brwlock_t changer_lock; /* One changer operation at a time */ }; /* Device specific definitions */ class DEVRES { public: RES hdr; char *media_type; /* User assigned media type */ char *device_name; /* Archive device name */ char *adevice_name; /* Aligned device name */ char *changer_name; /* Changer device name */ char *control_name; /* SCSI control device name */ char *changer_command; /* Changer command -- external program */ char *alert_command; /* Alert command -- external program */ char *lock_command; /* Share storage lock command -- external program */ char *worm_command; /* Worm detection command -- external program */ char *spool_directory; /* Spool file directory */ uint32_t dev_type; /* device type */ uint32_t label_type; /* label type */ bool enabled; /* Set when enabled (default) */ bool autoselect; /* Automatically select from AutoChanger */ bool read_only; /* Drive is read only */ bool set_vol_append_only; /* Set 'Append Only' filesystem flag for volumes */ bool set_vol_immutable; /* Set 'Immutable' filesystem flag for volumes */ bool set_vol_read_only; /* Set permission of volumes when marking them as Full/Used */ uint32_t volume_encryption; /* call the key-manager command to get the cipher and the key to use */ utime_t min_volume_protection_time; /* Minimum Volume Protection Time */ uint32_t drive_index; /* Autochanger drive index */ uint32_t cap_bits; /* Capabilities of this device */ utime_t max_changer_wait; /* Changer timeout */ utime_t max_rewind_wait; /* maximum secs to wait for rewind */ utime_t max_open_wait; /* maximum secs to wait for open */ uint32_t padding_size; /* adata block padding -- bytes */ uint32_t file_alignment; /* adata file alignment -- bytes */ uint32_t min_aligned_size; /* minimum adata size */ uint32_t min_block_size; /* min block size */ 
uint32_t max_block_size; /* max block size */ uint32_t max_volume_jobs; /* max jobs to put on one volume */ uint32_t max_network_buffer_size; /* max network buf size */ uint32_t max_concurrent_jobs; /* maximum concurrent jobs this drive */ utime_t vol_poll_interval; /* interval between polling volume during mount */ int64_t max_volume_files; /* max files to put on one volume */ int64_t max_volume_size; /* max bytes to put on one volume */ int64_t max_file_size; /* max file size in bytes */ int64_t max_file_index; /* max file size between two FileMedia in bytes */ /* ***BEEF*** */ int64_t volume_capacity; /* advisory capacity */ int64_t min_free_space; /* Minimum disk free space */ int64_t max_spool_size; /* Max spool size for all jobs */ int64_t max_job_spool_size; /* Max spool size for any single job */ int64_t max_part_size; /* Max part size */ uint32_t max_vol_parts_num; /* Max number of parts in a cloud volume */ char *mount_point; /* Mount point for require mount devices */ char *mount_command; /* Mount command */ char *unmount_command; /* Unmount command */ char *write_part_command; /* Write part command */ char *free_space_command; /* Free space command */ CLOUD *cloud; /* pointer to cloud resource */ DEDUPRES *dedup; /* pointer to dedup resource */ /* The following are set at runtime */ DEVICE *dev; /* Pointer to physical dev -- set at runtime */ AUTOCHANGER *changer_res; /* pointer to changer res if any */ int init_state; /* 0 -> 'B'uzy -> ( 'R'eady | 0 (retry) ) */ }; union URES { DIRRES res_dir; STORES res_store; DEVRES res_dev; MSGS res_msgs; AUTOCHANGER res_changer; CLOUD res_cloud; DEDUPRES res_dedup; RES hdr; COLLECTOR res_collector; }; /* Get the size of a give resource */ int get_resource_size(int type); extern s_kw enc_types[]; /* Encryption type */ enum { ET_NO, ET_YES, ET_STRONG, }; bacula-15.0.3/src/stored/dircmd.c0000644000175000017500000022161014771010173016361 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * This file handles accepting Director Commands * * Most Director commands are handled here, with the * exception of the Job command command and subsequent * subcommands that are handled * in job.c. * * N.B. in this file, in general we must use P(dev->mutex) rather * than dev->rLock() so that we can examine the blocked * state rather than blocking ourselves because a Job * thread has the device blocked. In some "safe" cases, * we can do things to a blocked device. CAREFUL!!!! 
* * File daemon commands are handled in fdcmd.c * * Written by Kern Sibbald, May MMI * */ #include "bacula.h" #include "stored.h" /* Exported variables */ /* Imported variables */ extern BSOCK *filed_chan; extern struct s_last_job last_job; extern bool init_done; /* Static variables */ static char derrmsg[] = "3900 Invalid command:"; static char OKsetdebug[] = "3000 OK setdebug=%lld trace=%d options=%s tags=%s\n"; static char invalid_cmd[] = "3997 Invalid command for a Director with Monitor directive enabled.\n"; static char OK_bootstrap[] = "3000 OK bootstrap\n"; static char ERROR_bootstrap[] = "3904 Error bootstrap\n"; static char OKclient[] = "3000 OK client command\n"; /* Imported functions */ extern void terminate_child(); extern bool job_cmd(JCR *jcr); extern bool use_cmd(JCR *jcr); extern bool run_cmd(JCR *jcr); extern bool status_cmd(JCR *sjcr); extern bool qstatus_cmd(JCR *jcr); extern bool collect_cmd(JCR *jcr); extern bool query_store(JCR *jcr); //extern bool query_cmd(JCR *jcr); /* Forward referenced functions */ static bool client_cmd(JCR *jcr); static bool storage_cmd(JCR *jcr); static bool label_cmd(JCR *jcr); static bool die_cmd(JCR *jcr); static bool relabel_cmd(JCR *jcr); static bool truncate_cache_cmd(JCR *jcr); static bool upload_cmd(JCR *jcr); static bool readlabel_cmd(JCR *jcr); static bool release_cmd(JCR *jcr); static bool setdebug_cmd(JCR *jcr); static bool cancel_cmd(JCR *cjcr); static bool mount_cmd(JCR *jcr); static bool unmount_cmd(JCR *jcr); static bool enable_cmd(JCR *jcr); static bool disable_cmd(JCR *jcr); //static bool action_on_purge_cmd(JCR *jcr); static bool bootstrap_cmd(JCR *jcr); static bool cloud_list_cmd(JCR *jcr); static bool cloud_prunecache_cmd(JCR *jcr); static bool changer_cmd(JCR *sjcr); static bool volumeprotect_cmd(JCR *jcr); static bool do_label(JCR *jcr, int relabel); static DCR *find_device(JCR *jcr, POOL_MEM &dev_name, char *media_type, int drive); static DCR *find_any_device(JCR *jcr, POOL_MEM &dev_name, char *media_type, int drive); static void read_volume_label(JCR *jcr, DCR *dcr, DEVICE *dev, int Slot); static void label_volume_if_ok(DCR *dcr, char *oldname, char *newname, char *poolname, int Slot, int relabel); static bool try_autoload_device(JCR *jcr, DCR *dcr, int slot, const char *VolName); static void send_dir_busy_message(BSOCK *dir, DEVICE *dev); static bool sd_sendprogress_cmd(JCR *jcr); bool user_shstore_lock(DCR *dcr); void user_shstore_unlock(DCR *dcr); bool dedup_cmd(JCR *jcr); /* BEEF */ /* Responses send to Director for storage command */ static char BADcmd[] = "2902 Bad %s\n"; static char OKstore[] = "2000 OK storage\n"; /* Commands received from director that need scanning */ static char storaddr[] = "storage address=%s port=%d ssl=%d Job=%127s Authentication=%127s"; struct dir_cmds { const char *cmd; bool (*func)(JCR *jcr); bool monitoraccess; /* set if monitors can access this cmd */ }; /* * The following are the recognized commands from the Director. 
*/ static struct dir_cmds cmds[] = { {"JobId=", job_cmd, 0}, /* start Job */ {"autochanger", changer_cmd, 0}, {"bootstrap", bootstrap_cmd, 0}, {"cancel", cancel_cmd, 0}, {"client", client_cmd, 0}, /* client address */ {".die", die_cmd, 0}, {"label", label_cmd, 0}, /* label a tape */ {"mount", mount_cmd, 0}, {"enable", enable_cmd, 0}, {"disable", disable_cmd, 0}, {"readlabel", readlabel_cmd, 0}, {"release", release_cmd, 0}, {"relabel", relabel_cmd, 0}, /* relabel a tape */ {"setdebug=", setdebug_cmd, 0}, /* set debug level */ {"status", status_cmd, 1}, {".status", qstatus_cmd, 1}, {"stop", cancel_cmd, 0}, {"storage", storage_cmd, 0}, /* get SD addr from Dir */ {"truncate", truncate_cache_cmd, 0}, {"upload", upload_cmd, 0}, {"volumeprotect", volumeprotect_cmd, 0}, {"prunecache", cloud_prunecache_cmd, 0}, {"cloudlist", cloud_list_cmd, 0}, /* List volumes/parts in the cloud */ {"unmount", unmount_cmd, 0}, {"use storage=", use_cmd, 0}, #if SD_DEDUP_SUPPORT {"dedup", dedup_cmd, 0}, /* any dedup command */ #endif {"run", run_cmd, 0}, {"statistics", collect_cmd, 0}, {"store_mngr", query_store, 0}, {"sendprogress", sd_sendprogress_cmd, 0}, // {"query", query_cmd, 0}, {NULL, NULL} /* list terminator */ }; /* * Connection request. We accept connections either from the * Director or a Client (File daemon). * * Note, we are running as a separate thread of the Storage daemon. * and it is because a Director has made a connection with * us on the "Message" channel. * * Basic tasks done here: * - Create a JCR record * - If it was from the FD, call handle_filed_connection() * - Authenticate the Director * - We wait for a command * - We execute the command * - We continue or exit depending on the return status */ void *handle_connection_request(void *arg) { BSOCK *bs = (BSOCK *)arg; JCR *jcr; int i; bool found, quit; int bnet_stat = 0; char tbuf[100]; if (bs->recv() <= 0) { Qmsg1(NULL, M_ERROR, 0, _("Connection request from %s failed.\n"), bs->who()); bmicrosleep(5, 0); /* make user wait 5 seconds */ bs->destroy(); return NULL; } /* Check for client connection */ if (is_client_connection(bs)) { handle_client_connection(bs); return NULL; } /* * This is a connection from the Director, so setup a JCR */ Dmsg1(050, "Got a DIR connection at %s\n", bstrftimes(tbuf, sizeof(tbuf), (utime_t)time(NULL))); jcr = new_jcr(sizeof(JCR), stored_free_jcr); /* create Job Control Record */ jcr->set_killable(true); jcr->start_time = time(NULL); jcr->dir_bsock = bs; /* save Director bsock */ jcr->dir_bsock->set_jcr(jcr); jcr->dcrs = New(alist(10, not_owned_by_alist)); create_jobmedia_queue(jcr); /* Initialize FD start condition variable */ int errstat = pthread_cond_init(&jcr->job_start_wait, NULL); if (errstat != 0) { berrno be; Qmsg1(jcr, M_FATAL, 0, _("Unable to init job cond variable: ERR=%s\n"), be.bstrerror(errstat)); goto bail_out; } Dmsg0(1000, "stored in start_job\n"); /* * Validate then authenticate the Director */ if (!validate_dir_hello(jcr)) { goto bail_out; } { SDAuthenticateDIR auth(jcr); if (!auth.authenticate_director()) { Qmsg(jcr, M_FATAL, 0, _("[SF0065] Unable to authenticate Director\n")); goto bail_out; } } Dmsg0(90, "Message channel init completed.\n"); for (quit=false; !quit;) { /* Read command */ if ((bnet_stat = bs->recv()) <= 0) { break; /* connection terminated */ } Dmsg1(199, "msg); /* Ensure that device initialization is complete */ while (!init_done) { bmicrosleep(1, 0); } found = false; for (i=0; cmds[i].cmd; i++) { if (strncmp(cmds[i].cmd, bs->msg, strlen(cmds[i].cmd)) == 0) { if 
((!cmds[i].monitoraccess) && (jcr->director->monitor)) { Dmsg1(100, "Command \"%s\" is invalid.\n", cmds[i].cmd); bs->fsend(invalid_cmd); bs->signal(BNET_EOD); break; } Dmsg1(200, "Do command: %s\n", cmds[i].cmd); if (!cmds[i].func(jcr)) { /* do command */ quit = true; /* error, get out */ Dmsg1(190, "Command %s requests quit\n", cmds[i].cmd); } found = true; /* indicate command found */ break; } } if (!found) { /* command not found */ POOL_MEM err_msg; Mmsg(err_msg, "%s %s\n", derrmsg, bs->msg); bs->fsend(err_msg.c_str()); break; } } bail_out: generate_daemon_event(jcr, "JobEnd"); generate_plugin_event(jcr, bsdEventJobEnd); flush_jobmedia_queue(jcr); dequeue_messages(jcr); /* send any queued messages */ dequeue_daemon_messages(jcr); bs->signal(BNET_TERMINATE); free_plugins(jcr); /* release instantiated plugins */ free_jcr(jcr); return NULL; } /* * Force SD to die, and hopefully dump itself. Turned on only * in development version. */ static bool die_cmd(JCR *jcr) { #ifdef DEVELOPER JCR *djcr = NULL; int a; BSOCK *dir = jcr->dir_bsock; pthread_mutex_t m=PTHREAD_MUTEX_INITIALIZER; if (strstr(dir->msg, "deadlock")) { Pmsg0(000, "I have been requested to deadlock ...\n"); P(m); P(m); } Pmsg1(000, "I have been requested to die ... (%s)\n", dir->msg); a = djcr->JobId; /* ref NULL pointer */ djcr->JobId = a; #endif return 0; } /* * Get address of client from Director * This initiates SD Calls Client. * We attempt to connect to the client (an FD or SD) and * authenticate it. */ static bool client_cmd(JCR *jcr) { int enable_ssl; /* enable ssl */ BSOCK *dir = jcr->dir_bsock; BSOCK *cl = new_bsock(); /* client bsock */ POOL_MEM buf; Dmsg1(100, "ClientCmd: %s", dir->msg); jcr->sd_calls_client = true; if (sscanf(dir->msg, "client address=%127s port=%d ssl=%d", jcr->client_addr, &jcr->client_port, &enable_ssl) != 3) { /* destroy() OK because cl is local */ cl->destroy(); pm_strcpy(jcr->errmsg, dir->msg); Jmsg(jcr, M_FATAL, 0, _("[SF0031] Bad client command: %s"), jcr->errmsg); Dmsg1(050, "Bad client command: %s", jcr->errmsg); goto bail_out; } Dmsg3(110, "Connect to client: %s:%d ssl=%d\n", jcr->client_addr, jcr->client_port, enable_ssl); /* Open command communications with Client */ /* Try to connect for 1 hour at 10 second intervals */ if (!cl->connect(jcr, 10, (int)me->ClientConnectTimeout, me->heartbeat_interval, _("Client daemon"), jcr->client_addr, NULL, jcr->client_port, 1)) { /* destroy() OK because cl is local */ Jmsg(jcr, M_FATAL, 0, "%s", cl->errmsg); cl->destroy(); goto bail_out; } Dmsg0(110, "SD connection OK to Client.\n"); jcr->file_bsock = cl; jcr->file_bsock->set_jcr(jcr); // This is a "dummy" hello, the FD will send the "real" hello with a TLS-PSK info (or not) if (!send_hello_client(jcr, jcr->Job)) { goto bail_out; } /* Send OK to Director */ return dir->fsend(OKclient); bail_out: jcr->setJobStatus(JS_ErrorTerminated); dir->fsend("3902 Bad %s cmd\n", "client"); return 0; } /* * Get address of storage daemon from Director */ static bool storage_cmd(JCR *jcr) { int stored_port; /* storage daemon port */ int enable_ssl; /* enable ssl to sd */ char sd_auth_key[200]; BSOCK *dir = jcr->dir_bsock; BSOCK *sd = new_bsock(); /* storage daemon bsock */ char Job[MAX_NAME_LENGTH]; Dmsg1(050, "StorageCmd: %s", dir->msg); if (sscanf(dir->msg, storaddr, &jcr->stored_addr, &stored_port, &enable_ssl, Job, sd_auth_key) != 5) { pm_strcpy(jcr->errmsg, dir->msg); Jmsg(jcr, M_FATAL, 0, _("[SF0103] Bad storage command: %s"), jcr->errmsg); Pmsg1(010, "Bad storage command: %s", jcr->errmsg); goto bail_out; } 
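/*
 * The handlers above and below parse Director messages with fixed sscanf()
 * format strings and check the converted-field count before trusting any
 * value, as storage_cmd() just did with the storaddr format. The sketch
 * below is self-contained and not Bacula code; the host name, job name and
 * error text are made up. Fields that may contain spaces are transmitted
 * with the spaces substituted and restored with unbash_spaces(), which is
 * why plain %s conversions are safe in the real code.
 */
#include <stdio.h>

int main(void)
{
   const char msg[] =
      "storage address=sd1.example.org port=9103 ssl=0 Job=Demo.2025-01-01 Authentication=dummykey";
   char addr[128], job[128], auth[128];
   int port, ssl;

   int n = sscanf(msg, "storage address=%127s port=%d ssl=%d Job=%127s Authentication=%127s",
                  addr, &port, &ssl, job, auth);
   if (n != 5) {
      fprintf(stderr, "Bad storage command (matched %d of 5 fields)\n", n);
      return 1;
   }
   printf("would connect to %s:%d ssl=%d for Job=%s\n", addr, port, ssl, job);
   return 0;
}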
unbash_spaces(Job); if (jcr->sd_auth_key) { bfree_and_null(jcr->sd_auth_key); jcr->sd_auth_key = bstrdup(sd_auth_key); } if (stored_port != 0) { Dmsg2(050, "sd_calls=%d sd_client=%d\n", jcr->sd_calls_client, jcr->sd_client); jcr->sd_calls_client = false; /* We are doing the connecting */ Dmsg3(050, "Connect to storage and wait: %s:%d ssl=%d\n", jcr->stored_addr, stored_port, enable_ssl); /* Open command communications with Storage daemon */ /* Try to connect for 1 hour at 10 second intervals */ if (!sd->connect(jcr, 10, (int)me->ClientConnectTimeout, me->heartbeat_interval, _("Storage daemon"), jcr->stored_addr, NULL, stored_port, 1)) { Jmsg(jcr, M_FATAL, 0, "%s", sd->errmsg); /* destroy() OK because sd is local */ sd->destroy(); goto bail_out; } Dmsg0(050, "Connection OK to SD.\n"); jcr->store_bsock = sd; } else { /* The storage daemon called us */ jcr->sd_calls_client = true; /* We should already have a storage connection! */ if (jcr->file_bsock && jcr->store_bsock == NULL) { jcr->store_bsock = jcr->file_bsock; } if (jcr->store_bsock == NULL) { Jmsg0(jcr, M_FATAL, 0, _("[SF0105] In storage_cmd port==0, no prior Storage connection.\n")); Pmsg0(010, "In storage_cmd port==0, no prior Storage connection.\n"); goto bail_out; } } if (!send_hello_and_authenticate_sd(jcr, Job)) { goto bail_out; } if (jcr->JobId > 0) { POOL_MEM buf; /* Print connection info only for real jobs */ build_connecting_info_log(_("Storage"), "", jcr->stored_addr, stored_port, sd->tls ? true : false, buf.addr()); Jmsg(jcr, M_INFO, 0, "%s", buf.c_str()); } /* * We are a client so we read from the socket we just * opened as if we were a FD, so set file_bsock and * clear the store_bsock. */ jcr->file_bsock = jcr->store_bsock; jcr->store_bsock = NULL; jcr->authenticated = true; /* Dir authentication is sufficient */ Dmsg1(050, "=== Storage_cmd authenticated Job=%s with SD.\n", Job); /* Send OK to Director */ return dir->fsend(OKstore); bail_out: Dmsg0(100, "Send storage command failed.\n"); dir->fsend(BADcmd, "storage"); return false; } /* * Set debug level as requested by the Director * */ static bool setdebug_cmd(JCR *jcr) { BSOCK *dir = jcr->dir_bsock; int32_t trace_flag, lvl, hangup, blowup; int64_t level, level_tags = 0; char options[60]; char tags[512]; *tags = *options = 0; Dmsg1(10, "setdebug_cmd: %s", dir->msg); if (sscanf(dir->msg, "setdebug=%ld trace=%ld hangup=%ld blowup=%ld options=%55s tags=%511s", &lvl, &trace_flag, &hangup, &blowup, options, tags) != 6) { if (sscanf(dir->msg, "setdebug=%ld trace=%ld", &lvl, &trace_flag) != 2 || lvl < 0) { dir->fsend(_("3991 Bad setdebug command: %s\n"), dir->msg); return 0; } } level = lvl; set_trace(trace_flag); set_hangup(hangup); set_blowup(blowup); set_debug_flags(options); if (!debug_parse_tags(tags, &level_tags)) { *tags = 0; } if (level >= 0) { debug_level = level; } debug_level_tags = level_tags; bee_setdebug_cmd_parse_options(jcr, options); return dir->fsend(OKsetdebug, lvl, trace_flag, options, tags); } /* * Cancel a Job * Be careful, we switch to using the job's JCR! So, using * BSOCKs on that jcr can have two threads in the same code. 
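/*
 * cancel_cmd() below marks the job's status and then wakes any thread that is
 * still blocked on jcr->job_start_wait or on a device condition variable.
 * This is the usual flag-then-signal pattern shown in the self-contained
 * sketch below (demo variables only, not Bacula structures); the flag is
 * changed under the same mutex the waiter holds, so the wakeup cannot be lost.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  demo_start_wait = PTHREAD_COND_INITIALIZER;
static bool demo_canceled = false;

static void *demo_job_thread(void *arg)
{
   (void)arg;
   pthread_mutex_lock(&demo_lock);
   while (!demo_canceled) {                 /* e.g. still waiting for a client to connect */
      pthread_cond_wait(&demo_start_wait, &demo_lock);
   }
   pthread_mutex_unlock(&demo_lock);
   printf("job thread woke up: canceled\n");
   return NULL;
}

int main(void)
{
   pthread_t tid;
   pthread_create(&tid, NULL, demo_job_thread, NULL);
   sleep(1);                                /* let the job thread start waiting */
   pthread_mutex_lock(&demo_lock);
   demo_canceled = true;                    /* set the status first ... */
   pthread_cond_signal(&demo_start_wait);   /* ... then wake the waiter */
   pthread_mutex_unlock(&demo_lock);
   pthread_join(tid, NULL);
   return 0;
}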
*/ static bool cancel_cmd(JCR *cjcr) { BSOCK *dir = cjcr->dir_bsock; int oldStatus; char Job[MAX_NAME_LENGTH]; JCR *jcr = NULL; int status; const char *reason; if (sscanf(dir->msg, "cancel Job=%127s", Job) == 1) { status = JS_Canceled; reason = "canceled"; } else if (sscanf(dir->msg, "stop Job=%127s", Job) == 1) { status = JS_Incomplete; reason = "stopped"; } else { dir->fsend(_("3903 Error scanning cancel command.\n")); goto bail_out; } if (!(jcr=get_jcr_by_full_name(Job))) { dir->fsend(_("3904 Job %s not found.\n"), Job); } else { oldStatus = jcr->JobStatus; jcr->setJobStatus(status); Dmsg2(800, "Cancel JobId=%d %p\n", jcr->JobId, jcr); if (!jcr->authenticated && oldStatus == JS_WaitFD) { pthread_cond_signal(&jcr->job_start_wait); /* wake waiting thread */ } if (jcr->file_bsock) { jcr->file_bsock->set_terminated(); jcr->file_bsock->set_timed_out(); Dmsg2(800, "Term bsock jid=%d %p\n", jcr->JobId, jcr); } else { /* Still waiting for FD to connect, release it */ pthread_cond_signal(&jcr->job_start_wait); /* wake waiting job */ Dmsg2(800, "Signal FD connect jid=%d %p\n", jcr->JobId, jcr); } /* If thread waiting on mount, wake him */ if (jcr->dcr && jcr->dcr->dev && jcr->dcr->dev->waiting_for_mount()) { pthread_cond_broadcast(&jcr->dcr->dev->wait_next_vol); Dmsg1(100, "JobId=%u broadcast wait_device_release\n", (uint32_t)jcr->JobId); pthread_cond_broadcast(&wait_device_release); } if (jcr->read_dcr && jcr->read_dcr->dev && jcr->read_dcr->dev->waiting_for_mount()) { pthread_cond_broadcast(&jcr->read_dcr->dev->wait_next_vol); Dmsg1(100, "JobId=%u broadcast wait_device_release\n", (uint32_t)jcr->JobId); pthread_cond_broadcast(&wait_device_release); } jcr->my_thread_send_signal(TIMEOUT_SIGNAL); /* Inform the Director about the result and send EOD signal before free_jcr() below, * since it might take some time if we hold the last reference */ dir->fsend(_("3000 JobId=%ld Job=\"%s\" marked to be %s.\n"), jcr->JobId, jcr->Job, reason); } bail_out: dir->signal(BNET_EOD); if (jcr) { free_jcr(jcr); } return 1; } /* * Label a Volume * */ static bool label_cmd(JCR *jcr) { return do_label(jcr, 0); } static bool relabel_cmd(JCR *jcr) { return do_label(jcr, 1); } static bool do_label(JCR *jcr, int relabel) { POOLMEM *newname, *oldname, *poolname, *mtype; POOL_MEM dev_name; BSOCK *dir = jcr->dir_bsock; DCR *dcr = NULL;; DEVICE *dev; bool ok = false; int32_t slot, drive; newname = get_memory(dir->msglen+1); oldname = get_memory(dir->msglen+1); poolname = get_memory(dir->msglen+1); mtype = get_memory(dir->msglen+1); if (relabel) { if (sscanf(dir->msg, "relabel %127s OldName=%127s NewName=%127s PoolName=%127s " "MediaType=%127s Slot=%d drive=%d", dev_name.c_str(), oldname, newname, poolname, mtype, &slot, &drive) == 7) { ok = true; } } else { *oldname = 0; if (sscanf(dir->msg, "label %127s VolumeName=%127s PoolName=%127s " "MediaType=%127s Slot=%d drive=%d", dev_name.c_str(), newname, poolname, mtype, &slot, &drive) == 6) { ok = true; } } if (ok) { unbash_spaces(newname); unbash_spaces(oldname); unbash_spaces(poolname); unbash_spaces(mtype); dcr = find_device(jcr, dev_name, mtype, drive); if (dcr) { uint32_t max_jobs; dev = dcr->dev; ok = true; dev->Lock(); /* Use P to avoid indefinite block */ max_jobs = dev->max_concurrent_jobs; dev->max_concurrent_jobs = 1; bstrncpy(dcr->VolumeName, newname, sizeof(dcr->VolumeName)); if (dcr->can_i_write_volume()) { if (reserve_volume(dcr, newname) == NULL) { ok = false; } Dmsg1(400, "Reserved Volume=%s for relabel/truncate.\n", newname); } else { ok = false; } if (!ok) { 
dir->fsend(_("3908 Error reserving volume: %s\n"), jcr->errmsg); dev->max_concurrent_jobs = max_jobs; dev->Unlock(); goto bail_out; } /* some command use recv and don't accept catalog update. * it's not the case here, so we force dir_update_volume_info catalog update */ dcr->force_update_volume_info = true; if (!dev->is_open() && !dev->is_busy()) { Dmsg1(400, "Can %slabel. Device is not open\n", relabel?"re":""); label_volume_if_ok(dcr, oldname, newname, poolname, slot, relabel); if (!dev->close(dcr)) { dir->fsend(_("3999 Device \"%s\" %s"), dev_name.c_str(), dev->errmsg); } /* Under certain "safe" conditions, we can steal the lock */ } else if (dev->can_obtain_block()) { Dmsg0(400, "Can relabel. can_obtain_block\n"); label_volume_if_ok(dcr, oldname, newname, poolname, slot, relabel); } else if (dev->is_busy() || dev->is_blocked()) { send_dir_busy_message(dir, dev); } else { /* device not being used */ Dmsg0(400, "Can relabel. device not used\n"); label_volume_if_ok(dcr, oldname, newname, poolname, slot, relabel); } dev->max_concurrent_jobs = max_jobs; volume_unused(dcr); dev->Unlock(); #ifdef DEVELOPER if (chk_dbglvl(DT_VOLUME)) { Dmsg0(0, "Waiting few seconds to force a bug...\n"); bmicrosleep(30, 0); Dmsg0(0, "Doing free_volume()\n"); } #endif } else { dir->fsend(_("3999 Device \"%s\" not found or could not be opened.\n"), dev_name.c_str()); } } else { /* NB dir->msg gets clobbered in bnet_fsend, so save command */ pm_strcpy(jcr->errmsg, dir->msg); dir->fsend(_("3903 Error scanning label command: %s\n"), jcr->errmsg); } bail_out: if (dcr) { free_dcr(dcr); } free_memory(oldname); free_memory(newname); free_memory(poolname); free_memory(mtype); dir->signal(BNET_EOD); return true; } /* * Handles truncate cache commands */ static bool truncate_cache_cmd(JCR *jcr) { POOLMEM *volname, *poolname, *mtype, *msg; POOL_MEM dev_name; BSOCK *dir = jcr->dir_bsock; DCR *dcr = NULL; DEVICE *dev; bool ok = false; int32_t slot, drive; int nbpart = 0; int64_t size = 0; char ed1[50]; volname = get_memory(dir->msglen+1); poolname = get_memory(dir->msglen+1); mtype = get_memory(dir->msglen+1); msg = get_memory(dir->msglen+1); msg[0] = 0; if (sscanf(dir->msg, "truncate cache Storage=%127s Volume=%127s PoolName=%127s " "MediaType=%127s Slot=%d drive=%d", dev_name.c_str(), volname, poolname, mtype, &slot, &drive) == 6) { ok = true; } if (ok) { unbash_spaces(volname); unbash_spaces(poolname); unbash_spaces(mtype); dcr = find_device(jcr, dev_name, mtype, drive); if (dcr) { uint32_t max_jobs; dev = dcr->dev; ok = true; dev->Lock(); /* Use P to avoid indefinite block */ max_jobs = dev->max_concurrent_jobs; dev->max_concurrent_jobs = 1; bstrncpy(dcr->VolumeName, volname, sizeof(dcr->VolumeName)); if (dcr->can_i_write_volume()) { if (reserve_volume(dcr, volname) == NULL) { ok = false; } Dmsg1(400, "Reserved volume \"%s\"\n", volname); } else { ok = false; } if (!ok) { dir->fsend(_("3908 Error reserving volume \"%s\": %s\n"), volname, jcr->errmsg); dev->max_concurrent_jobs = max_jobs; dev->Unlock(); goto bail_out; } if ((!dev->is_open() && !dev->is_busy()) || dev->can_obtain_block()) { Dmsg0(400, "Call truncate_cache\n"); nbpart = dev->truncate_cache(dcr, volname, &size, msg); if (nbpart >= 0) { dir->fsend(_("3000 OK truncate cache for volume \"%s\" %d part(s) %sB. %s\n"), volname, nbpart, edit_uint64_with_suffix(size, ed1), msg); } else { dir->fsend(_("3900 Truncate cache for volume \"%s\" failed. 
ERR=%s\n"), volname, dev->errmsg); } } else if (dev->is_busy() || dev->is_blocked()) { send_dir_busy_message(dir, dev); } else { /* device not being used */ Dmsg0(400, "Call truncate_cache\n"); nbpart = dev->truncate_cache(dcr, volname, &size, msg); if (nbpart >= 0) { dir->fsend(_("3000 OK truncate cache for volume \"%s\" %d part(s) %sB. %s\n"), volname, nbpart, edit_uint64_with_suffix(size, ed1), msg); } else { dir->fsend(_("3900 Truncate cache for volume \"%s\" failed. ERR=%s\n"), volname, dev->errmsg); } } dev->max_concurrent_jobs = max_jobs; volume_unused(dcr); dev->Unlock(); #ifdef DEVELOPER if (chk_dbglvl(DT_VOLUME)) { Dmsg0(0, "Waiting few seconds to force a bug...\n"); bmicrosleep(30, 0); Dmsg0(0, "Doing free_volume()\n"); } #endif } else { dir->fsend(_("3999 Device \"%s\" not found or could not be opened.\n"), dev_name.c_str()); } } else { /* NB dir->msg gets clobbered in bnet_fsend, so save command */ pm_strcpy(jcr->errmsg, dir->msg); dir->fsend(_("3911 Error scanning truncate command: %s\n"), jcr->errmsg); } bail_out: if (dcr) { free_dcr(dcr); } free_memory(msg); free_memory(volname); free_memory(poolname); free_memory(mtype); dir->signal(BNET_EOD); return true; } static bool cloud_prunecache_cmd(JCR *jcr) { /* TODO: Implement a function to prune the cache of a cloud device */ jcr->dir_bsock->fsend(_("3900 Not yet implemented\n")); return true; } /* List volumes in the cloud */ static bool cloud_list_cmd(JCR *jcr) { BSOCK *dir = jcr->dir_bsock; POOL_MEM dev_name; POOLMEM *errmsg = get_pool_memory(PM_FNAME); errmsg[0] = 0; char volname[MAX_NAME_LENGTH]; char mtype[MAX_NAME_LENGTH]; int slot, drive; DCR *dcr = NULL; if (sscanf(dir->msg, "cloudlist Storage=%127s Volume=%127s MediaType=%127s Slot=%d drive=%d", dev_name.c_str(), volname, mtype, &slot, &drive) != 5) { dir->fsend(_("3912 Error scanning the command\n")); goto bail_out; } /* In fact, we do not need to find and reserve a device for this operation, * we just need to find one, idle or not */ dcr = find_device(jcr, dev_name, mtype, drive); if (!dcr) { dir->fsend(_("3900 Error reserving device %s %s\n"), dev_name.c_str(), mtype); goto bail_out; } if (volname[0] == 0) { /* List all volumes, TODO: Switch to a callback mode */ char *vol; alist volumes(100, not_owned_by_alist); if (!dcr->dev->get_cloud_volumes_list(dcr, &volumes, errmsg)) { dir->fsend(_("3900 Error cannot get cloud Volume list. ERR=%s\n"), errmsg); } free_dcr(dcr); foreach_alist(vol, &volumes) { bash_spaces(vol); dir->fsend("volume=%s\n", vol); free(vol); /* Walk through the list only one time */ } } else { ilist parts(100, not_owned_by_alist); if (!dcr->dev->get_cloud_volume_parts_list(dcr, volname, &parts, errmsg)) { dir->fsend(_("3900 Error cannot get cloud parts list. 
ERR=%s\n"), errmsg); free_dcr(dcr); goto bail_out; } free_dcr(dcr); for (int i=1; i <= parts.last_index() ; i++) { cloud_part *p = (cloud_part *)parts.get(i); if (p) { dir->fsend("part=%d size=%lld mtime=%lld hash=%s\n", i, p->size, p->mtime, p->hash64); free(p); } } } bail_out: free_pool_memory(errmsg); dir->signal(BNET_EOD); return true; } /* * Handles upload cache to Cloud command */ static bool upload_cmd(JCR *jcr) { POOLMEM *volname, *poolname, *mtype, *truncate, *err; POOL_MEM dev_name; BSOCK *dir = jcr->dir_bsock; DCR *dcr = NULL; DEVICE *dev; bool ok = false; int32_t slot, drive; volname = get_memory(dir->msglen+1); poolname = get_memory(dir->msglen+1); mtype = get_memory(dir->msglen+1); truncate = get_memory(dir->msglen+1); err = get_pool_memory(PM_MESSAGE); *volname = *poolname = *mtype = *truncate = *err = 0; if (sscanf(dir->msg, "upload Storage=%127s Volume=%127s PoolName=%127s " "MediaType=%127s Slot=%d drive=%d truncate=%127s", dev_name.c_str(), volname, poolname, mtype, &slot, &drive, truncate) == 7) { ok = true; } if (ok) { unbash_spaces(volname); unbash_spaces(poolname); unbash_spaces(mtype); unbash_spaces(truncate); dcr = find_device(jcr, dev_name, mtype, drive); if (dcr) { uint32_t max_jobs; dev = dcr->dev; ok = true; dev->Lock(); /* Use P to avoid indefinite block */ max_jobs = dev->max_concurrent_jobs; dev->max_concurrent_jobs = 1; bstrncpy(dcr->VolumeName, volname, sizeof(dcr->VolumeName)); if (dcr->can_i_write_volume()) { if (reserve_volume(dcr, volname) == NULL) { ok = false; } Dmsg1(400, "Reserved volume \"%s\"\n", volname); } else { ok = false; } if (!ok) { dir->fsend(_("3908 Error reserving volume \"%s\": %s\n"), volname, jcr->errmsg); dev->max_concurrent_jobs = max_jobs; dev->Unlock(); goto bail_out; } uint32_t truncate_option = 0; /* modify truncate_option if truncate matches a truncate option */ if (!find_truncate_option(truncate, truncate_option)) { Dmsg1(400, "truncate option %s does not exist. 
Fallback on default configuration truncate option\n", truncate); truncate_option = TRUNC_CONF_DEFAULT; } if ((!dev->is_open() && !dev->is_busy()) || dev->can_obtain_block()) { Dmsg0(400, "Can upload, because device is not open.\n"); dev->setVolCatName(volname); dev->part = 0; if (dev->open_device(dcr, OPEN_READ_WRITE)) { ok = dev->upload_cache(dcr, volname, truncate_option, err); dev->part = 0; if (!dev->close(dcr)) { dir->fsend(_("3999 Error Device \"%s\": %s\n"), dev_name.c_str(), dev->errmsg); } dev->end_of_job(dcr, truncate_option); } } else if (dev->is_busy() || dev->is_blocked()) { send_dir_busy_message(dir, dev); } else { /* device not being used */ Dmsg0(400, "Can upload, because device not used\n"); dev->setVolCatName(volname); dev->part = 0; if (dev->open_device(dcr, OPEN_READ_WRITE)) { ok = dev->upload_cache(dcr, volname, truncate_option, err); dev->part = 0; if (!dev->close(dcr)) { dir->fsend(_("3999 Error Device \"%s\": %s\n"), dev_name.c_str(), dev->errmsg); } dev->end_of_job(dcr, truncate_option); } } dev->max_concurrent_jobs = max_jobs; volume_unused(dcr); dev->Unlock(); #ifdef DEVELOPER if (chk_dbglvl(DT_VOLUME)) { Dmsg0(0, "Waiting few seconds to force a bug...\n"); bmicrosleep(30, 0); Dmsg0(0, "Doing free_volume()\n"); } #endif } else { dir->fsend(_("3999 Device \"%s\" not found or could not be opened.\n"), dev_name.c_str()); } } else { /* NB dir->msg gets clobbered in bnet_fsend, so save command */ pm_strcpy(jcr->errmsg, dir->msg); dir->fsend(_("3912 Error scanning upload command: ERR=%s\n"), jcr->errmsg); } bail_out: if (ok) { dir->fsend(_("3000 OK upload.\n")); } else { dir->fsend(_("3999 Error with the upload: ERR=%s\n"), err); } if (dcr) { free_dcr(dcr); } free_pool_memory(err); free_memory(volname); free_memory(poolname); free_memory(mtype); free_memory(truncate); dir->signal(BNET_EOD); return true; } /* * Read the tape label and determine if we can safely * label the tape (not a Bacula volume), then label it. * * Enter with the mutex set */ static void label_volume_if_ok(DCR *dcr, char *oldname, char *newname, char *poolname, int slot, int relabel) { BSOCK *dir = dcr->jcr->dir_bsock; bsteal_lock_t hold; DEVICE *dev = dcr->dev; int label_status; int mode; const char *volname = (relabel == 1) ? 
oldname : newname; uint64_t volCatBytes; if (!obtain_device_block(dev, &hold, 1, /* one try */ BST_WRITING_LABEL)) { send_dir_busy_message(dir, dev); return; } dev->Unlock(); Dmsg1(100, "Stole device %s lock, writing label.\n", dev->print_name()); if (!user_shstore_lock(dcr)) { goto bail_out; } Dmsg0(90, "try_autoload_device - looking for volume_info\n"); if (!try_autoload_device(dcr->jcr, dcr, slot, volname)) { goto bail_out; /* error */ } if (relabel) { dev->truncating = true; /* let open_device() know we will truncate it */ } /* Set old volume name for open if relabeling */ dcr->setVolCatName(volname); /* Ensure that the device is open -- autoload_device() closes it */ if (dev->is_tape()) { mode = OPEN_READ_WRITE; } else { mode = CREATE_READ_WRITE; } if (!dev->open_device(dcr, mode)) { dir->fsend(_("3929 Unable to open device \"%s\": ERR=%s\n"), dev->print_name(), dev->bstrerror()); goto bail_out; } /* See what we have for a Volume */ label_status = dev->read_dev_volume_label(dcr); /* Set new volume name */ dcr->setVolCatName(newname); switch(label_status) { case VOL_NAME_ERROR: case VOL_VERSION_ERROR: case VOL_LABEL_ERROR: case VOL_OK: if (!relabel) { dir->fsend(_( "3920 Cannot label Volume because it is already labeled: \"%s\"\n"), dev->VolHdr.VolumeName); break; } /* Relabel request. If oldname matches, continue */ if (strcmp(oldname, dev->VolHdr.VolumeName) != 0) { dir->fsend(_("3921 Wrong volume mounted.\n")); break; } if (dev->label_type != B_BACULA_LABEL) { dir->fsend(_("3922 Cannot relabel an ANSI/IBM labeled Volume.\n")); break; } /* Fall through wanted! */ case VOL_IO_ERROR: case VOL_NO_LABEL: if (!dev->write_volume_label(dcr, newname, poolname, relabel, true /* write label now */)) { if (dev->errmsg[0]) { dir->fsend(_("3912 Failed to label Volume %s: ERR=%s\n"), newname, dev->errmsg); } else { dir->fsend(_("3912 Failed to label Volume %s: ERR=%s\n"), newname, dcr->jcr->errmsg); } break; } volCatBytes = dev->VolCatInfo.VolCatBytes; /* * After writing label, create a new part */ if (dev->is_cloud()) { dev->set_append(); if (!dev->open_next_part(dcr)) { dir->fsend(_("3913 Failed to open next part: ERR=%s\n"), dcr->jcr->errmsg); break; } } bstrncpy(dcr->VolumeName, newname, sizeof(dcr->VolumeName)); /* The following 3000 OK label. string is scanned in ua_label.c */ int type; if (dev->dev_type == B_FILE_DEV || dev->dev_type == B_ALIGNED_DEV || dev->dev_type == B_CLOUD_DEV || dev->dev_type == B_DEDUP_DEV || dev->dev_type == B_TAPE_DEV) { type = dev->dev_type; } else { type = 0; } dir->fsend("3000 OK label. VolBytes=%lld VolABytes=%lld VolType=%d UseProtect=%d VolEncrypted=%d Volume=\"%s\" Device=%s\n", volCatBytes, dev->VolCatInfo.VolCatAdataBytes, type, dev->use_protect(), dev->use_volume_encryption(), newname, dev->print_name()); break; case VOL_TYPE_ERROR: dir->fsend(_("3917 Failed to label Volume: ERR=%s\n"), dcr->jcr->errmsg); break; case VOL_NO_MEDIA: dir->fsend(_("3918 Failed to label Volume (no media): ERR=%s\n"), dcr->jcr->errmsg); break; default: dir->fsend(_("3919 Cannot label Volume. 
" "Unknown status %d from read_volume_label()\n"), label_status); break; } bail_out: if (dev->is_open() && !dev->has_cap(CAP_ALWAYSOPEN)) { if (!dev->close(dcr)) { dir->fsend(_("3999 Error Device \"%s\": %s\n"), dev->print_name(), dev->errmsg); } } dev->end_of_job(dcr, TRUNC_CONF_DEFAULT); if (!dev->is_open()) { dev->clear_volhdr(); } volume_unused(dcr); /* no longer using volume */ dev->Lock(); give_back_device_block(dev, &hold); user_shstore_unlock(dcr); return; } /* * Read the tape label * * Enter with the mutex set */ static bool read_label(DCR *dcr) { int ok; JCR *jcr = dcr->jcr; BSOCK *dir = jcr->dir_bsock; bsteal_lock_t hold; DEVICE *dev = dcr->dev; if (!obtain_device_block(dev, &hold, 1 /* one try */, BST_DOING_ACQUIRE)) { send_dir_busy_message(dir, dev); return false; } dev->Unlock(); dcr->VolumeName[0] = 0; dev->clear_labeled(); /* force read of label */ switch (dev->read_dev_volume_label(dcr)) { case VOL_OK: dir->fsend(_("3001 Mounted Volume: %s\n"), dev->VolHdr.VolumeName); ok = true; break; default: dir->fsend(_("3902 Cannot mount Volume on Storage Device \"%s\" because:\n%s"), dev->print_name(), jcr->errmsg); ok = false; break; } volume_unused(dcr); dev->Lock(); give_back_device_block(dev, &hold); return ok; } /* * Searches for device by name, and if found, creates a dcr and * returns it. */ static DCR *find_device(JCR *jcr, POOL_MEM &devname, char *media_type, int drive) { DEVRES *device; AUTOCHANGER *changer; bool found = false; DCR *dcr = NULL; unbash_spaces(devname); foreach_res(device, R_DEVICE) { /* Find resource, and make sure we were able to open it */ if (strcmp(device->hdr.name, devname.c_str()) == 0 && (!media_type || strcmp(device->media_type, media_type) ==0)) { if (!device->dev) { device->dev = init_dev(jcr, device, false, statcollector); } if (!device->dev) { Jmsg(jcr, M_WARNING, 0, _("\n" "[SW0106] Device \"%s\" requested by DIR could not be opened or does not exist.\n"), devname.c_str()); continue; } Dmsg1(20, "Found device %s\n", device->hdr.name); found = true; break; } } if (!found) { foreach_res(changer, R_AUTOCHANGER) { /* Find resource, and make sure we were able to open it */ if (strcmp(devname.c_str(), changer->hdr.name) == 0) { /* Try each device in this AutoChanger */ foreach_alist(device, changer->device) { Dmsg1(100, "Try changer device %s\n", device->hdr.name); if (!device->dev) { device->dev = init_dev(jcr, device, false, statcollector); } if (!device->dev) { Dmsg1(100, "Device %s could not be opened. 
Skipped\n", devname.c_str()); Jmsg(jcr, M_WARNING, 0, _("\n" "[SW0106] Device \"%s\" in changer \"%s\" requested by DIR could not be opened or does not exist.\n"), device->hdr.name, devname.c_str()); continue; } if (!device->dev->autoselect) { Dmsg1(100, "Device %s not autoselect skipped.\n", devname.c_str()); continue; /* device is not available */ } else if (!device->dev->enabled) { Dmsg1(100, "Device %s disabled skipped.\n", devname.c_str()); continue; /* device disabled */ } if ((drive < 0 || drive == (int)device->dev->drive_index) && (!media_type || strcmp(device->media_type, media_type) ==0)) { Dmsg1(20, "Found changer device %s\n", device->hdr.name); found = true; break; } Dmsg3(100, "Device %s drive wrong: want=%d got=%d skipping\n", devname.c_str(), drive, (int)device->dev->drive_index); } break; /* we found it but could not open a device */ } } } if (found) { Dmsg1(100, "Found device %s\n", device->hdr.name); dcr = new_dcr(jcr, NULL, device->dev); dcr->device = device; } return dcr; } /* * Find even disabled devices so that we can enable them * ***FIXME*** This could probably be merged with find_device with another * argument, but this is easier for the moment. */ static DCR *find_any_device(JCR *jcr, POOL_MEM &devname, char *media_type, int drive) { DEVRES *device; AUTOCHANGER *changer; bool found = false; DCR *dcr = NULL; unbash_spaces(devname); foreach_res(device, R_DEVICE) { /* Find resource, and make sure we were able to open it */ if (strcmp(device->hdr.name, devname.c_str()) == 0 && (!media_type || strcmp(device->media_type, media_type) ==0)) { if (!device->dev) { device->dev = init_dev(jcr, device, false, statcollector); } if (!device->dev) { Jmsg(jcr, M_WARNING, 0, _("\n" "[SW0108] Device \"%s\" requested by DIR could not be opened or does not exist.\n"), devname.c_str()); continue; } Dmsg1(20, "Found device %s\n", device->hdr.name); found = true; break; } } if (!found) { foreach_res(changer, R_AUTOCHANGER) { /* Find resource, and make sure we were able to open it */ if (strcmp(devname.c_str(), changer->hdr.name) == 0) { /* Try each device in this AutoChanger */ foreach_alist(device, changer->device) { Dmsg1(100, "Try changer device %s\n", device->hdr.name); if (!device->dev) { device->dev = init_dev(jcr, device, false, statcollector); } if (!device->dev) { Dmsg1(100, "Device %s could not be opened. 
Skipped\n", devname.c_str()); Jmsg(jcr, M_WARNING, 0, _("\n" "[SW0108] Device \"%s\" in changer \"%s\" requested by DIR could not be opened or does not exist.\n"), device->hdr.name, devname.c_str()); continue; } if ((drive < 0 || drive == (int)device->dev->drive_index) && (!media_type || strcmp(device->media_type, media_type) ==0)) { Dmsg1(20, "Found changer device %s\n", device->hdr.name); found = true; break; } Dmsg3(100, "Device %s drive wrong: want=%d got=%d skipping\n", devname.c_str(), drive, (int)device->dev->drive_index); } break; /* we found it but could not open a device */ } } } if (found) { Dmsg1(100, "Found device %s\n", device->hdr.name); dcr = new_dcr(jcr, NULL, device->dev); dcr->device = device; } return dcr; } /* * Protect command from Director */ static bool volumeprotect_cmd(JCR *jcr) { BSOCK *dir = jcr->dir_bsock; char mediatype[MAX_NAME_LENGTH]; char volume[MAX_NAME_LENGTH]; POOL_MEM device, tmp, error; uint32_t retention; int32_t drive; bool ok; ok = sscanf(dir->msg, "volumeprotect mediatype=%127s device=%127s volume=%127s drive=%d retention=%u", mediatype, device.c_str(), volume, &drive, &retention) == 5; if (ok) { unbash_spaces(mediatype); unbash_spaces(device.c_str()); unbash_spaces(volume); if (is_writing_volume(volume)) { dir->fsend(_("3900 Unable to set immutable flag on %s, volume still in use\n"), volume); return true; } DCR *dcr = find_any_device(jcr, device, mediatype, drive); if (dcr) { DEVICE *dev = dcr->dev; /* Everything can be done without a lock because we don't touch anything * inside the device structure */ char buf[128], buf2[128]; if (dev->device->set_vol_immutable || dev->device->set_vol_read_only) { uint32_t when = MAX(dev->device->min_volume_protection_time, retention); btime_t now = time(NULL); if (dev->set_atime(-1, volume, now + when, error.handle()) < 0) { MmsgD3(DT_VOLUME|50, tmp.addr(), _(" Failed to set the volume %s on device %s in atime retention, ERR=%s.\n"), volume, dev->print_name(), error.c_str()); } pm_strcpy(tmp, ""); bstrftime(buf2, sizeof(buf2), now+when); strip_trailing_junk(edit_utime(when, buf, sizeof(buf))); } if (dev->device->set_vol_immutable) { /* Set volume as immutable */ if (!dev->set_immutable(volume, error.handle())) { /* We may proceed with that but warn the user */ dir->fsend(_("3900 Unable to set immutable flag %s\n"), error.c_str()); } else { dir->fsend(_("3000 Mark volume \"%s\" as immutable. Retention set to %s (%s).\n"), volume, buf2, buf); events_send_msg(jcr, "SJ0003", EVENTS_TYPE_VOLUME, me->hdr.name, (intptr_t)jcr, "Mark volume \"%s\" as immutable, retention %s (%s)", volume, buf2, buf); } } else if (dev->device->set_vol_read_only) { /* Set volume as immutable/read only */ if (dev->set_readonly(-1, volume, error.handle()) < 0) { /* We may proceed with that but warn the user */ dir->fsend(_("3900 Failed to set the volume %s on device %s in read-only, ERR=%s.%s\n"), volume, dev->print_name(), error.c_str(), tmp.c_str()); } else { dir->fsend(_("3000 Marking volume \"%s\" as read-only. 
Retention set to %s (%s).\n"), volume,buf2, buf); events_send_msg(jcr, "SJ0003", EVENTS_TYPE_VOLUME, me->hdr.name, (intptr_t)jcr, "Mark volume \"%s\" as read-only, retention %s (%s)", volume, buf2, buf); } } else { dir->fsend(_("3900 Device %s not configured for ReadOnly or Immutable\n"), dev->device->hdr.name); } free_dcr(dcr); } else { dir->fsend(_("3901 Unable to find a device to perform the volumeprotect command\n")); } } else { dir->fsend(_("3907 Error scanning \"volumeprotect\" command\n")); } return true; } /* * Mount command from Director */ static bool mount_cmd(JCR *jcr) { POOL_MEM devname; BSOCK *dir = jcr->dir_bsock; DEVICE *dev; DCR *dcr; int32_t drive; /* device index */ int32_t slot; bool ok; Dmsg1(100, "%s\n", dir->msg); ok = sscanf(dir->msg, "mount %127s drive=%d slot=%d", devname.c_str(), &drive, &slot) == 3; Dmsg3(100, "ok=%d device_index=%d slot=%d\n", ok, drive, slot); if (ok) { dcr = find_device(jcr, devname, NULL, drive); if (dcr) { dev = dcr->dev; dev->Lock(); /* Use P to avoid indefinite block */ Dmsg2(100, "mount cmd blocked=%d must_unload=%d\n", dev->blocked(), dev->must_unload()); switch (dev->blocked()) { /* device blocked? */ case BST_WAITING_FOR_SYSOP: /* Someone is waiting, wake him */ Dmsg0(100, "Waiting for mount. Attempting to wake thread\n"); dev->set_blocked(BST_MOUNT); dir->fsend("3001 OK mount requested. %sDevice=%s\n", slot>0?_("Specified slot ignored. "):"", dev->print_name()); Dmsg1(100, "JobId=%u broadcast wait_next_vol\n", (uint32_t)dcr->jcr->JobId); pthread_cond_broadcast(&dev->wait_next_vol); Dmsg1(100, "JobId=%u broadcast wait_device_release\n", (uint32_t)dcr->jcr->JobId); pthread_cond_broadcast(&wait_device_release); break; /* In both of these two cases, we (the user) unmounted the Volume */ case BST_UNMOUNTED_WAITING_FOR_SYSOP: case BST_UNMOUNTED: Dmsg2(100, "Unmounted changer=%d slot=%d\n", dev->is_autochanger(), slot); if (!user_shstore_lock(dcr)) { break; } if (dev->is_autochanger() && slot > 0) { try_autoload_device(jcr, dcr, slot, ""); } /* We freed the device, so reopen it and wake any waiting threads */ if (!dev->open_device(dcr, OPEN_READ_ONLY)) { dir->fsend(_("3901 Unable to open device \"%s\": ERR=%s\n"), dev->print_name(), dev->bstrerror()); if (dev->blocked() == BST_UNMOUNTED) { /* We blocked the device, so unblock it */ Dmsg0(100, "Unmounted. Unblocking device\n"); unblock_device(dev); } user_shstore_unlock(dcr); break; } dev->read_dev_volume_label(dcr); if (dev->blocked() == BST_UNMOUNTED) { /* We blocked the device, so unblock it */ Dmsg0(100, "Unmounted. Unblocking device\n"); read_label(dcr); /* this should not be necessary */ unblock_device(dev); } else { Dmsg0(100, "Unmounted waiting for mount. 
Attempting to wake thread\n"); dev->set_blocked(BST_MOUNT); } if (dev->is_labeled()) { dir->fsend(_("3001 Device \"%s\" is mounted with Volume \"%s\"\n"), dev->print_name(), dev->VolHdr.VolumeName); } else { dir->fsend(_("3905 Device \"%s\" open but no Bacula volume is mounted.\n" "If this is not a blank tape, try unmounting and remounting the Volume.\n"), dev->print_name()); } user_shstore_unlock(dcr); pthread_cond_broadcast(&dev->wait_next_vol); Dmsg1(100, "JobId=%u broadcast wait_device_release\n", (uint32_t)dcr->jcr->JobId); pthread_cond_broadcast(&wait_device_release); break; case BST_DOING_ACQUIRE: dir->fsend(_("3001 Device \"%s\" is doing acquire.\n"), dev->print_name()); break; case BST_WRITING_LABEL: dir->fsend(_("3903 Device \"%s\" is being labeled.\n"), dev->print_name()); break; case BST_NOT_BLOCKED: Dmsg2(100, "Not blocked changer=%d slot=%d\n", dev->is_autochanger(), slot); if (dev->is_autochanger() && slot > 0) { try_autoload_device(jcr, dcr, slot, ""); } if (dev->is_open()) { if (dev->is_labeled()) { dir->fsend(_("3001 Device \"%s\" is mounted with Volume \"%s\"\n"), dev->print_name(), dev->VolHdr.VolumeName); } else { dir->fsend(_("3905 Device \"%s\" open but no Bacula volume is mounted.\n" "If this is not a blank tape, try unmounting and remounting the Volume.\n"), dev->print_name()); } } else if (dev->is_tape()) { if (!user_shstore_lock(dcr)) { break; } if (!dev->open_device(dcr, OPEN_READ_ONLY)) { dir->fsend(_("3901 Unable to open device \"%s\": ERR=%s\n"), dev->print_name(), dev->bstrerror()); user_shstore_unlock(dcr); break; } read_label(dcr); if (dev->is_labeled()) { dir->fsend(_("3001 Device \"%s\" is already mounted with Volume \"%s\"\n"), dev->print_name(), dev->VolHdr.VolumeName); } else { dir->fsend(_("3905 Device \"%s\" open but no Bacula volume is mounted.\n" "If this is not a blank tape, try unmounting and remounting the Volume.\n"), dev->print_name()); } if (dev->is_open() && !dev->has_cap(CAP_ALWAYSOPEN)) { if (!dev->close(dcr)) { dir->fsend(_("3999 Error Device \"%s\": %s\n"), dev->print_name(), dev->errmsg); } } user_shstore_unlock(dcr); } else if (dev->is_unmountable()) { if (dev->mount(1)) { dir->fsend(_("3002 Device \"%s\" is mounted.\n"), dev->print_name()); } else { dir->fsend(_("3907 %s"), dev->bstrerror()); } } else { /* must be file */ dir->fsend(_("3906 File device \"%s\" is always mounted.\n"), dev->print_name()); pthread_cond_broadcast(&dev->wait_next_vol); Dmsg1(100, "JobId=%u broadcast wait_device_release\n", (uint32_t)dcr->jcr->JobId); pthread_cond_broadcast(&wait_device_release); } break; case BST_RELEASING: dir->fsend(_("3930 Device \"%s\" is being released.\n"), dev->print_name()); break; default: dir->fsend(_("3905 Unknown wait state %d\n"), dev->blocked()); break; } dev->Unlock(); free_dcr(dcr); } else { dir->fsend(_("3999 Device \"%s\" not found or could not be opened.\n"), devname.c_str()); } } else { pm_strcpy(jcr->errmsg, dir->msg); dir->fsend(_("3909 Error scanning mount command: %s\n"), jcr->errmsg); } dir->signal(BNET_EOD); return true; } /* enable command from Director */ static bool enable_cmd(JCR *jcr) { POOL_MEM devname; BSOCK *dir = jcr->dir_bsock; DEVICE *dev; DCR *dcr; int32_t drive; bool ok; int deleted; ok = sscanf(dir->msg, "enable %127s drive=%d", devname.c_str(), &drive) == 2; Dmsg3(100, "ok=%d device=%s device_index=%d\n", ok, devname.c_str(), drive); if (ok) { dcr = find_any_device(jcr, devname, NULL, drive); if (dcr) { dev = dcr->dev; dev->Lock(); /* Use P to avoid indefinite block */ if (dev->enabled) { 
dir->fsend(_("3003 Device \"%s\" already enabled.\n"), dev->print_name()); } else { dev->enabled = true; dev->devstatcollector->set_value_bool(dev->devstatmetrics.bacula_storage_device_status, true); dir->fsend(_("3002 Device \"%s\" enabled.\n"), dev->print_name()); } deleted = dev->delete_alerts(); if (deleted > 0) { dir->fsend(_("3004 Device \"%s\" deleted %d alert%s.\n"), dev->print_name(), deleted, deleted>1?"s":""); } dev->Unlock(); free_dcr(dcr); } } else { /* NB dir->msg gets clobbered in bnet_fsend, so save command */ pm_strcpy(jcr->errmsg, dir->msg); dir->fsend(_("3907 Error scanning \"enable\" command: %s\n"), jcr->errmsg); } dir->signal(BNET_EOD); return true; } /* enable command from Director */ static bool disable_cmd(JCR *jcr) { POOL_MEM devname; BSOCK *dir = jcr->dir_bsock; DEVICE *dev; DCR *dcr; int32_t drive; bool ok; ok = sscanf(dir->msg, "disable %127s drive=%d", devname.c_str(), &drive) == 2; Dmsg3(100, "ok=%d device=%s device_index=%d\n", ok, devname.c_str(), drive); if (ok) { dcr = find_device(jcr, devname, NULL, drive); if (dcr) { dev = dcr->dev; dev->Lock(); dev->enabled = false; dev->devstatcollector->set_value_bool(dev->devstatmetrics.bacula_storage_device_status, false); dir->fsend(_("3002 Device \"%s\" disabled.\n"), dev->print_name()); dev->Unlock(); free_dcr(dcr); } } else { /* NB dir->msg gets clobbered in bnet_fsend, so save command */ pm_strcpy(jcr->errmsg, dir->msg); dir->fsend(_("3907 Error scanning \"disable\" command: %s\n"), jcr->errmsg); } dir->signal(BNET_EOD); return true; } /* * unmount command from Director */ static bool unmount_cmd(JCR *jcr) { POOL_MEM devname; BSOCK *dir = jcr->dir_bsock; DEVICE *dev; DCR *dcr; int32_t drive; if (sscanf(dir->msg, "unmount %127s drive=%d", devname.c_str(), &drive) == 2) { dcr = find_device(jcr, devname, NULL, drive); if (dcr) { dev = dcr->dev; dev->Lock(); /* Use P to avoid indefinite block */ if (!dev->is_open()) { if (!dev->is_busy()) { unload_autochanger(dcr, -1); } if (dev->is_unmountable()) { if (dev->unmount(0)) { dir->fsend(_("3002 Device \"%s\" unmounted.\n"), dev->print_name()); } else { dir->fsend(_("3907 %s"), dev->bstrerror()); } } else { Dmsg0(90, "Device already unmounted\n"); dir->fsend(_("3901 Device \"%s\" is already unmounted.\n"), dev->print_name()); } } else if (dev->blocked() == BST_WAITING_FOR_SYSOP) { Dmsg2(90, "%d waiter dev_block=%d. doing unmount\n", dev->num_waiting, dev->blocked()); if (!unload_autochanger(dcr, -1)) { /* * ***FIXME**** what is this ???? -- probably we had * the wrong volume so we must free it and try again. KES */ if (!dev->close(dcr)) { dir->fsend(_("3999 Error Device \"%s\": %s\n"), dev->print_name(), dev->errmsg); } free_volume(dev); } if (dev->is_unmountable() && !dev->unmount(0)) { dir->fsend(_("3907 %s"), dev->bstrerror()); } else { dev->set_blocked(BST_UNMOUNTED_WAITING_FOR_SYSOP); dir->fsend(_("3001 Device \"%s\" unmounted.\n"), dev->print_name()); } } else if (dev->blocked() == BST_DOING_ACQUIRE) { dir->fsend(_("3902 Device \"%s\" is busy in acquire.\n"), dev->print_name()); } else if (dev->blocked() == BST_WRITING_LABEL) { dir->fsend(_("3903 Device \"%s\" is being labeled.\n"), dev->print_name()); } else if (dev->is_busy()) { send_dir_busy_message(dir, dev); } else { /* device not being used */ Dmsg0(90, "Device not in use, unmounting\n"); /* On FreeBSD, I am having ASSERT() failures in block_device() * and I can only imagine that the thread id that we are * leaving in no_wait_id is being re-used. So here, * we simply do it by hand. Gross, but a solution. 
*/ /* block_device(dev, BST_UNMOUNTED); replace with 2 lines below */ dev->set_blocked(BST_UNMOUNTED); clear_thread_id(dev->no_wait_id); if (!unload_autochanger(dcr, -1)) { if (!dev->close(dcr)) { dir->fsend(_("3999 Error Device \"%s\": %s\n"), dev->print_name(), dev->errmsg); } free_volume(dev); } if (dev->is_unmountable() && !dev->unmount(0)) { dir->fsend(_("3907 %s"), dev->bstrerror()); } else { dir->fsend(_("3002 Device \"%s\" unmounted.\n"), dev->print_name()); } } dev->Unlock(); free_dcr(dcr); } else { dir->fsend(_("3999 Device \"%s\" not found or could not be opened.\n"), devname.c_str()); } } else { /* NB dir->msg gets clobbered in bnet_fsend, so save command */ pm_strcpy(jcr->errmsg, dir->msg); dir->fsend(_("3907 Error scanning unmount command: %s\n"), jcr->errmsg); } dir->signal(BNET_EOD); return true; } #if 0 /* * The truncate command will recycle a volume. The director can call this * after purging a volume so that disk space will not be wasted. Only useful * for File Storage, of course. * * * It is currently disabled */ static bool action_on_purge_cmd(JCR *jcr) { BSOCK *dir = jcr->dir_bsock; char devname[MAX_NAME_LENGTH]; char volumename[MAX_NAME_LENGTH]; int32_t action; /* TODO: Need to find a free device and ask for slot to the director */ if (sscanf(dir->msg, "action_on_purge %127s vol=%127s action=%d", devname, volumename, &action)!= 5) { dir->fsend(_("3916 Error scanning action_on_purge command\n")); goto done; } unbash_spaces(volumename); unbash_spaces(devname); /* Check if action is correct */ if (action & AOP_TRUNCTATE) { } /* ... */ done: dir->signal(BNET_EOD); return true; } #endif /* * Release command from Director. This rewinds the device and if * configured does a offline and ensures that Bacula will * re-read the label of the tape before continuing. This gives * the operator the chance to change the tape anytime before the * next job starts. */ static bool release_cmd(JCR *jcr) { POOL_MEM devname; BSOCK *dir = jcr->dir_bsock; DEVICE *dev; DCR *dcr; int32_t drive; if (sscanf(dir->msg, "release %127s drive=%d", devname.c_str(), &drive) == 2) { dcr = find_device(jcr, devname, NULL, drive); if (dcr) { dev = dcr->dev; dev->Lock(); /* Use P to avoid indefinite block */ if (!dev->is_open()) { if (!dev->is_busy()) { unload_autochanger(dcr, -1); } Dmsg0(90, "Device already released\n"); dir->fsend(_("3921 Device \"%s\" already released.\n"), dev->print_name()); } else if (dev->blocked() == BST_WAITING_FOR_SYSOP) { Dmsg2(90, "%d waiter dev_block=%d.\n", dev->num_waiting, dev->blocked()); unload_autochanger(dcr, -1); dir->fsend(_("3922 Device \"%s\" waiting for sysop.\n"), dev->print_name()); } else if (dev->blocked() == BST_UNMOUNTED_WAITING_FOR_SYSOP) { Dmsg2(90, "%d waiter dev_block=%d. 
doing unmount\n", dev->num_waiting, dev->blocked()); dir->fsend(_("3922 Device \"%s\" waiting for mount.\n"), dev->print_name()); } else if (dev->blocked() == BST_DOING_ACQUIRE) { dir->fsend(_("3923 Device \"%s\" is busy in acquire.\n"), dev->print_name()); } else if (dev->blocked() == BST_WRITING_LABEL) { dir->fsend(_("3914 Device \"%s\" is being labeled.\n"), dev->print_name()); } else if (dev->is_busy()) { send_dir_busy_message(dir, dev); } else { /* device not being used */ Dmsg0(90, "Device not in use, releasing\n"); dcr->release_volume(); dir->fsend(_("3022 Device \"%s\" released.\n"), dev->print_name()); } dev->Unlock(); free_dcr(dcr); } else { dir->fsend(_("3999 Device \"%s\" not found or could not be opened.\n"), devname.c_str()); } } else { /* NB dir->msg gets clobbered in bnet_fsend, so save command */ pm_strcpy(jcr->errmsg, dir->msg); dir->fsend(_("3927 Error scanning release command: %s\n"), jcr->errmsg); } dir->signal(BNET_EOD); return true; } static pthread_mutex_t bsr_mutex = PTHREAD_MUTEX_INITIALIZER; static uint32_t bsr_uniq = 0; static bool get_bootstrap_file(JCR *jcr, BSOCK *sock) { POOLMEM *fname = get_pool_memory(PM_FNAME); FILE *bs; bool ok = false; if (jcr->RestoreBootstrap) { unlink(jcr->RestoreBootstrap); bfree_and_null(jcr->RestoreBootstrap); } P(bsr_mutex); bsr_uniq++; Mmsg(fname, "%s/%s.%s.%d.bootstrap", me->working_directory, me->hdr.name, jcr->Job, bsr_uniq); V(bsr_mutex); Dmsg1(400, "bootstrap=%s\n", fname); jcr->RestoreBootstrap = fname; bs = bfopen(fname, "a+b"); /* create file */ if (!bs) { berrno be; Jmsg(jcr, M_FATAL, 0, _("[SF0110] Could not create bootstrap file %s: ERR=%s\n"), jcr->RestoreBootstrap, be.bstrerror()); goto bail_out; } Dmsg0(150, "=== Bootstrap file ===\n"); while (sock->recv() >= 0) { Dmsg1(150, "%s", sock->msg); fputs(sock->msg, bs); } if (fclose(bs) != 0) { berrno be; Jmsg(jcr, M_FATAL, 0, _("Error closing boostrap file. %s\n"), be.bstrerror()); goto bail_out; } Dmsg0(150, "=== end bootstrap file ===\n"); jcr->bsr = parse_bsr(jcr, jcr->RestoreBootstrap); if (!jcr->bsr) { Jmsg(jcr, M_FATAL, 0, _("Error parsing bootstrap file.\n")); goto bail_out; } if (chk_dbglvl(150)) { dump_bsr(NULL, jcr->bsr, true); } /* If we got a bootstrap, we are reading, so create read volume list */ create_restore_volume_list(jcr, true /* store the volumes in the global vol_read list */); ok = true; bail_out: unlink(jcr->RestoreBootstrap); jcr->RestoreBootstrap = NULL; free_pool_memory(fname); if (!ok) { sock->fsend(ERROR_bootstrap); return false; } return sock->fsend(OK_bootstrap); } static bool bootstrap_cmd(JCR *jcr) { return get_bootstrap_file(jcr, jcr->dir_bsock); } /* * Autochanger command from Director */ static bool changer_cmd(JCR *jcr) { POOL_MEM devname; BSOCK *dir = jcr->dir_bsock; DEVICE *dev; DCR *dcr; const char *cmd = NULL; bool ok = false; /* * A safe_cmd may call autochanger script but does not load/unload * slots so it can be done at the same time that the drive is open. 
*/ bool safe_cmd = false; if (sscanf(dir->msg, "autochanger listall %127s", devname.c_str()) == 1) { cmd = "listall"; safe_cmd = ok = true; } else if (sscanf(dir->msg, "autochanger list %127s", devname.c_str()) == 1) { cmd = "list"; safe_cmd = ok = true; } else if (sscanf(dir->msg, "autochanger slots %127s", devname.c_str()) == 1) { cmd = "slots"; safe_cmd = ok = true; } else if (sscanf(dir->msg, "autochanger drives %127s", devname.c_str()) == 1) { cmd = "drives"; safe_cmd = ok = true; } if (ok) { dcr = find_device(jcr, devname, NULL, -1); if (dcr) { dev = dcr->dev; dev->Lock(); /* Use P to avoid indefinite block */ if (!dev->device->changer_res) { dir->fsend(_("3998 Device \"%s\" is not an autochanger.\n"), dev->print_name()); /* Under certain "safe" conditions, we can steal the lock */ } else if (safe_cmd || !dev->is_open() || dev->can_obtain_block()) { autochanger_cmd(dcr, dir, cmd); } else if (dev->is_busy() || dev->is_blocked()) { send_dir_busy_message(dir, dev); } else { /* device not being used */ autochanger_cmd(dcr, dir, cmd); } dev->Unlock(); free_dcr(dcr); } else { dir->fsend(_("3999 Device \"%s\" not found or could not be opened.\n"), devname.c_str()); } } else { /* error on scanf */ pm_strcpy(jcr->errmsg, dir->msg); dir->fsend(_("3909 Error scanning autochanger drives/list/slots command: %s\n"), jcr->errmsg); } dir->signal(BNET_EOD); return true; } /* * Read and return the Volume label */ static bool readlabel_cmd(JCR *jcr) { POOL_MEM devname; BSOCK *dir = jcr->dir_bsock; DEVICE *dev; DCR *dcr; int32_t Slot, drive; if (sscanf(dir->msg, "readlabel %127s Slot=%d drive=%d", devname.c_str(), &Slot, &drive) == 3) { dcr = find_device(jcr, devname, NULL, drive); if (dcr) { dev = dcr->dev; dev->Lock(); /* Use P to avoid indefinite block */ if (!dev->is_open()) { read_volume_label(jcr, dcr, dev, Slot); if (!dev->close(dcr)) { dir->fsend(_("3999 Error Device \"%s\": %s\n"), dev->print_name(), dev->errmsg); } /* Under certain "safe" conditions, we can steal the lock */ } else if (dev->can_obtain_block()) { read_volume_label(jcr, dcr, dev, Slot); } else if (dev->is_busy() || dev->is_blocked()) { send_dir_busy_message(dir, dev); } else { /* device not being used */ read_volume_label(jcr, dcr, dev, Slot); } dev->Unlock(); free_dcr(dcr); } else { dir->fsend(_("3999 Device \"%s\" not found or could not be opened.\n"), devname.c_str()); } } else { pm_strcpy(jcr->errmsg, dir->msg); dir->fsend(_("3909 Error scanning readlabel command: %s\n"), jcr->errmsg); } dir->signal(BNET_EOD); return true; } /* * Read the tape label * * Enter with the mutex set */ static void read_volume_label(JCR *jcr, DCR *dcr, DEVICE *dev, int Slot) { BSOCK *dir = jcr->dir_bsock; bsteal_lock_t hold; dcr->set_dev(dev); if (!obtain_device_block(dev, &hold, 1 /* one try */, BST_WRITING_LABEL)) { send_dir_busy_message(dir, dev); return; } dev->Unlock(); if (!try_autoload_device(jcr, dcr, Slot, "")) { goto bail_out; /* error */ } dev->clear_labeled(); /* force read of label */ switch (dev->read_dev_volume_label(dcr)) { case VOL_OK: /* DO NOT add quotes around the Volume name. 
It is scanned in the DIR */ dir->fsend(_("3001 Volume=%s Slot=%d\n"), dev->VolHdr.VolumeName, Slot); Dmsg1(100, "Volume: %s\n", dev->VolHdr.VolumeName); break; default: dir->fsend(_("3902 Cannot mount Volume on Storage Device \"%s\" because:\n%s"), dev->print_name(), jcr->errmsg); break; } bail_out: dev->Lock(); give_back_device_block(dev, &hold); return; } static bool try_autoload_device(JCR *jcr, DCR *dcr, int slot, const char *VolName) { BSOCK *dir = jcr->dir_bsock; bstrncpy(dcr->VolumeName, VolName, sizeof(dcr->VolumeName)); dcr->VolCatInfo.Slot = slot; dcr->VolCatInfo.InChanger = slot > 0; if (autoload_device(dcr, 0, dir) < 0) { /* autoload if possible */ return false; } return true; } static void send_dir_busy_message(BSOCK *dir, DEVICE *dev) { if (dev->is_blocked()) { switch (dev->blocked()) { case BST_UNMOUNTED: dir->fsend(_("3931 Device \"%s\" is BLOCKED. user unmounted.\n"), dev->print_name()); break; case BST_UNMOUNTED_WAITING_FOR_SYSOP: dir->fsend(_("3932 Device \"%s\" is BLOCKED. user unmounted during wait for media/mount.\n"), dev->print_name()); break; case BST_WAITING_FOR_SYSOP: dir->fsend(_("3933 Device \"%s\" is BLOCKED waiting for media.\n"), dev->print_name()); break; case BST_DOING_ACQUIRE: dir->fsend(_("3934 Device \"%s\" is being initialized.\n"), dev->print_name()); break; case BST_WRITING_LABEL: dir->fsend(_("3935 Device \"%s\" is blocked labeling a Volume.\n"), dev->print_name()); break; default: dir->fsend(_("3935 Device \"%s\" is blocked for unknown reason.\n"), dev->print_name()); break; } } else if (dev->can_read()) { dir->fsend(_("3936 Device \"%s\" is busy reading.\n"), dev->print_name());; } else { dir->fsend(_("3937 Device \"%s\" is busy with writers=%d reserved=%d.\n"), dev->print_name(), dev->num_writers, dev->num_reserved()); } } /* Send progress information to the director if needed (for copy/migration in general) */ static bool sd_sendprogress_cmd(JCR *jcr) { jcr->stat_interval = 60; return true; } #if COMMUNITY bool user_shstore_lock(DCR *dcr) { return true; } void user_shstore_unlock(DCR *dcr) {} bool dedup_cmd(JCR *jcr) { return true; } #endif bacula-15.0.3/src/stored/read.c0000644000175000017500000003245414771010173016040 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
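/*
 * The #if COMMUNITY block just above supplies no-op implementations of
 * user_shstore_lock(), user_shstore_unlock() and dedup_cmd() so that the rest
 * of the daemon can call them unconditionally. A compact sketch of that
 * build-variant stub pattern follows; DEMO_ENTERPRISE and the demo_* names
 * are hypothetical, not Bacula build switches.
 */
#include <stdbool.h>
#include <stdio.h>

#ifndef DEMO_ENTERPRISE
/* community build: harmless stubs, callers need no #ifdefs of their own */
static bool demo_shared_store_lock(void)   { return true; }
static void demo_shared_store_unlock(void) { }
#else
static bool demo_shared_store_lock(void)   { /* take the real lock here */ return true; }
static void demo_shared_store_unlock(void) { /* release it here */ }
#endif

int main(void)
{
   if (demo_shared_store_lock()) {
      printf("inside the (possibly stubbed) critical section\n");
      demo_shared_store_unlock();
   }
   return 0;
}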
*/ /* * Read code for Storage daemon * * Kern Sibbald, November MM * */ #include "bacula.h" #include "stored.h" #include "dedup_interface.h" /* Forward referenced subroutines */ static bool read_record_cb(DCR *dcr, DEV_RECORD *rec); static bool mac_record_cb(DCR *dcr, DEV_RECORD *rec); /* Responses sent to the File daemon */ static char OK_data[] = "3000 OK data\n"; static char FD_error[] = "3000 error\n"; static char rec_header[] = "rechdr %ld %ld %ld %ld %ld"; /* * Read Data and send to File Daemon * Returns: false on failure * true on success */ bool do_read_data(JCR *jcr) { BSOCK *fd = jcr->file_bsock; bool ok = true; DCR *dcr = jcr->read_dcr; char ec[50]; Dmsg0(100, "Start read data.\n"); if (!fd->set_buffer_size(dcr->device->max_network_buffer_size, BNET_SETBUF_WRITE)) { return false; } if (jcr->NumReadVolumes == 0) { Jmsg(jcr, M_FATAL, 0, _("No Volume names found for restore.\n")); fd->fsend(FD_error); return false; } Dmsg2(200, "Found %d volumes names to restore. First=%s\n", jcr->NumReadVolumes, jcr->VolList->VolumeName); /* Ready device for reading */ if (!acquire_device_for_read(dcr)) { fd->fsend(FD_error); return false; } dcr->dev->start_of_job(dcr); dcr->dev->setup_dedup_rehydration_interface(dcr); /* Tell File daemon we will send data */ if (!jcr->is_ok_data_sent) { /* OK_DATA can have been already sent for copy/migrate by run_job() to avoid dead lock*/ Dmsg0(DT_DEDUP|215, "send OK_data\n"); if (jcr->dedup && !jcr->dedup->do_flowcontrol_rehydration(1)) { jcr->dedup->warn_rehydration_eod(); return false; } fd->fsend(OK_data); jcr->is_ok_data_sent = true; } jcr->sendJobStatus(JS_Running); jcr->run_time = time(NULL); jcr->JobFiles = 0; if (jcr->is_JobType(JT_MIGRATE) || jcr->is_JobType(JT_COPY)) { ok = read_records(dcr, mac_record_cb, mount_next_read_volume); } else { ok = read_records(dcr, read_record_cb, mount_next_read_volume); } /* * Don't use time_t for job_elapsed as time_t can be 32 or 64 bits, * and the subsequent Jmsg() editing will break */ int32_t job_elapsed = time(NULL) - jcr->run_time; if (job_elapsed <= 0) { job_elapsed = 1; } Jmsg(dcr->jcr, M_INFO, 0, _("Elapsed time=%02d:%02d:%02d, Transfer rate=%s Bytes/second\n"), job_elapsed / 3600, job_elapsed % 3600 / 60, job_elapsed % 60, edit_uint64_with_suffix(jcr->JobBytes / job_elapsed, ec)); if (jcr->dedup) { /* must be sure that FD queue is not full to receive the BNET_EOD below */ jcr->dedup->do_flowcontrol_rehydration(1); /* tell the dedup rehydration thread that this is done and it can quit * as soon it get the last ACK from the FD/SD * This is only useful for SD-SD right now, because * SD-FD use the BNET_CMD_STP_THREAD command * This must be done before the BNET_EOD, because this is the * matching BNET_CMD_REC_ACK that will allow to exit the loop */ Dmsg0(DT_DEDUP|215, "warn about end of rehydration thread\n"); jcr->dedup->warn_rehydration_eod(); } /* Send end of data to FD */ fd->signal(BNET_EOD); dcr->dev->free_dedup_rehydration_interface(dcr); if (!release_device(jcr->read_dcr)) { ok = false; } Dmsg0(30, "Done reading.\n"); return ok; } static bool read_record_cb(DCR *dcr, DEV_RECORD *rec) { JCR *jcr = dcr->jcr; BSOCK *fd = jcr->file_bsock; bool ok = true; POOLMEM *save_msg; char ec1[50], ec2[50]; POOLMEM *wbuf = rec->data; /* send buffer */ uint32_t wsize = rec->data_len; /* send size */ if (rec->FileIndex < 0) { return true; } /* Do rehydration */ if (rec->Stream & STREAM_BIT_DEDUPLICATION_DATA) { if (jcr->dedup==NULL) { // aka dcr->dev->dev_type!=B_DEDUP_DEV Jmsg0(jcr, M_FATAL, 0, _("Cannot do rehydration, 
device is not dedup aware\n")); return false; } Dmsg2(DT_DEDUP|640, "stream 0x%x is_rehydration_srvside=%d\n", rec->Stream, jcr->dedup->is_rehydration_srvside()); if (jcr->dedup->is_rehydration_srvside()) { wbuf = jcr->dedup->get_msgbuf(); bool despite_of_error = forge_on; int size; int err = jcr->dedup->record_rehydration(dcr, rec, wbuf, jcr->errmsg, despite_of_error, &size); if (err) { /* cannot read data from DDE */ if (!despite_of_error) { Jmsg1(jcr, M_FATAL, 0, "%s", jcr->errmsg); return false; } Jmsg1(jcr, M_ERROR, 0, "%s", jcr->errmsg); } wsize = size; } else { // if the FD will do dedup, then do flow control if (!jcr->dedup->is_thread_started()) { Dmsg0(DT_DEDUP|215, "Starting rehydration thread\n"); jcr->dedup->start_rehydration(); } jcr->dedup->add_circular_buf(dcr, rec); } } Dmsg5(400, "Send to FD: SessId=%u SessTim=%u FI=%s Strm=%s, len=%d\n", rec->VolSessionId, rec->VolSessionTime, FI_to_ascii(ec1, rec->FileIndex), stream_to_ascii(ec2, rec->Stream, rec->FileIndex), wsize); if (jcr->dedup && !jcr->dedup->do_flowcontrol_rehydration(1)) { return false; } Dmsg2(DT_DEDUP|640, ">filed: send header stream=0x%lx len=%ld\n", rec->Stream, wsize); /* Send record header to File daemon */ if (!fd->fsend(rec_header, rec->VolSessionId, rec->VolSessionTime, rec->FileIndex, rec->Stream, wsize)) { Pmsg1(000, _(">filed: Error Hdr=%s\n"), fd->msg); Jmsg1(jcr, M_FATAL, 0, _("Error sending header to Client. ERR=%s\n"), fd->bstrerror()); return false; } /* * For normal migration jobs, FileIndex values are sequential because * we are dealing with one job. However, for Vbackup (consolidation), * we will be getting records from multiple jobs and writing them back * out, so we need to ensure that the output FileIndex is sequential. * We do so by detecting a FileIndex change and incrementing the * JobFiles, which we then use as the output FileIndex. */ if (rec->FileIndex >= 0) { /* If something changed, increment FileIndex */ if (rec->VolSessionId != rec->last_VolSessionId || rec->VolSessionTime != rec->last_VolSessionTime || rec->FileIndex != rec->last_FileIndex) { jcr->JobFiles++; rec->last_VolSessionId = rec->VolSessionId; rec->last_VolSessionTime = rec->VolSessionTime; rec->last_FileIndex = rec->FileIndex; } } /* Debug code: check if we must hangup or blowup */ if (handle_hangup_blowup(jcr, jcr->JobFiles, jcr->JobBytes)) { return false; } save_msg = fd->msg; /* save fd message pointer */ fd->msg = wbuf; fd->msglen = wsize; /* Send data record to File daemon */ jcr->JobBytes += wsize; /* increment bytes this job */ Dmsg1(DT_DEDUP|640, ">filed: send %d bytes data.\n", fd->msglen); if (jcr->dedup) { ok = jcr->dedup->do_flowcontrol_rehydration(1); } if (!fd->send()) { Pmsg1(000, _("Error sending to FD. ERR=%s\n"), fd->bstrerror()); Jmsg1(jcr, M_FATAL, 0, _("Error sending data to Client. 
ERR=%s\n"), fd->bstrerror()); ok = false; } fd->msg = save_msg; return ok; } /* * New routine after to SD->SD implementation * Called here for each record from read_records() * Returns: true if OK * false if error */ static bool mac_record_cb(DCR *dcr, DEV_RECORD *rec) { JCR *jcr = dcr->jcr; BSOCK *fd = jcr->file_bsock; char buf1[100], buf2[100]; bool new_header = false; POOLMEM *save_msg; char ec1[50], ec2[50]; bool ok = true; POOLMEM *wbuf = rec->data;; /* send buffer */ uint32_t wsize = rec->data_len; /* send size */ #ifdef xxx Pmsg5(000, "on entry JobId=%d FI=%s SessId=%d Strm=%s len=%d\n", jcr->JobId, FI_to_ascii(buf1, rec->FileIndex), rec->VolSessionId, stream_to_ascii(buf2, rec->Stream, rec->FileIndex), rec->data_len); #endif /* If label and not for us, discard it */ if (rec->FileIndex < 0) { Dmsg1(100, "FileIndex=%d\n", rec->FileIndex); return true; } if (rec->Stream & STREAM_BIT_DEDUPLICATION_DATA) { if (jcr->dedup==NULL) { // aka dcr->dev->dev_type!=B_DEDUP_DEV Jmsg0(jcr, M_FATAL, 0, _("Cannot do rehydration, device is not dedup aware\n")); return false; } Dmsg2(DT_DEDUP|640, "stream 0x%x is_rehydration_srvside=%d\n", rec->Stream, jcr->dedup->is_rehydration_srvside()); if (jcr->dedup->is_rehydration_srvside()) { wbuf = jcr->dedup->get_msgbuf(); bool despite_of_error = false; /* the destination SD will check the data, don't try to cheat */ int size; int err = jcr->dedup->record_rehydration(dcr, rec, wbuf, jcr->errmsg, despite_of_error, &size); if (err < 0) { /* cannot read data from DSE */ Jmsg1(jcr, M_FATAL, 0, "%s", jcr->errmsg); return false; } wsize = size; } else { // if the other SD does dedup, then do flow control if (!jcr->dedup->is_thread_started()) { Dmsg0(DT_DEDUP|215, "Starting rehydration thread\n"); jcr->dedup->start_rehydration(); } jcr->dedup->add_circular_buf(dcr, rec); } } /* * For normal migration jobs, FileIndex values are sequential because * we are dealing with one job. However, for Vbackup (consolidation), * we will be getting records from multiple jobs and writing them back * out, so we need to ensure that the output FileIndex is sequential. * We do so by detecting a FileIndex change and incrementing the * JobFiles, which we then use as the output FileIndex. */ if (rec->FileIndex >= 0) { /* If something changed, increment FileIndex */ if (rec->VolSessionId != rec->last_VolSessionId || rec->VolSessionTime != rec->last_VolSessionTime || rec->FileIndex != rec->last_FileIndex || rec->Stream != rec->last_Stream) { /* Something changed */ if (rec->last_VolSessionId != 0) { /* Not first record */ Dmsg1(200, "Send EOD jobfiles=%d\n", jcr->JobFiles); if (jcr->dedup && !jcr->dedup->do_flowcontrol_rehydration(1)) { return false; } if (!fd->signal(BNET_EOD)) { /* End of previous stream */ Jmsg(jcr, M_FATAL, 0, _("Error sending to File daemon. 
ERR=%s\n"), fd->bstrerror()); return false; } } new_header = true; if (rec->FileIndex != rec->last_FileIndex) { jcr->JobFiles++; } rec->last_VolSessionId = rec->VolSessionId; rec->last_VolSessionTime = rec->VolSessionTime; rec->last_FileIndex = rec->FileIndex; rec->last_Stream = rec->Stream; } rec->FileIndex = jcr->JobFiles; /* set sequential output FileIndex */ } if (new_header) { new_header = false; Dmsg5(400, "Send header to FD: SessId=%u SessTim=%u FI=%s Strm=%s, len=%ld\n", rec->VolSessionId, rec->VolSessionTime, FI_to_ascii(ec1, rec->FileIndex), stream_to_ascii(ec2, rec->Stream, rec->FileIndex), wsize); /* Send data header to File daemon */ if (jcr->dedup && !jcr->dedup->do_flowcontrol_rehydration(1)) { return false; } if (!fd->fsend("%ld %ld %ld", rec->FileIndex, rec->Stream, wsize)) { Pmsg1(000, _(">filed: Error Hdr=%s\n"), fd->msg); Jmsg1(jcr, M_FATAL, 0, _("Error sending to File daemon. ERR=%s\n"), fd->bstrerror()); return false; } } Dmsg1(400, "FI=%d\n", rec->FileIndex); /* Send data record to File daemon */ save_msg = fd->msg; /* save fd message pointer */ fd->msg = wbuf; /* pass data directly to the FD */ fd->msglen = wsize; jcr->JobBytes += wsize; /* increment bytes this job */ Dmsg1(400, ">filed: send %d bytes data.\n", fd->msglen); if (jcr->dedup) { ok = jcr->dedup->do_flowcontrol_rehydration(1); } if (!fd->send()) { Pmsg1(000, _("Error sending to FD. ERR=%s\n"), fd->bstrerror()); Jmsg1(jcr, M_FATAL, 0, _("Error sending to File daemon. ERR=%s\n"), fd->bstrerror()); ok = false; } fd->msg = save_msg; /* restore fd message pointer */ Dmsg5(500, "wrote_record JobId=%d FI=%s SessId=%d Strm=%s len=%d\n", jcr->JobId, FI_to_ascii(buf1, rec->FileIndex), rec->VolSessionId, stream_to_ascii(buf2, rec->Stream, rec->FileIndex), rec->data_len); jcr->sendProgressStatus(); return ok; } bacula-15.0.3/src/stored/cloud_glacier.h0000644000175000017500000000361414771010173017722 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * Routines for writing Cloud drivers * * Written by Kern Sibbald, May MMXVI * */ #include "bacula.h" #include "stored.h" #include "cloud_parts.h" #include "cloud_transfer_mgr.h" #ifndef _CLOUD_GLACIER_H_ #define _CLOUD_GLACIER_H_ typedef struct cloud_glacier_cgc { const char *host_name; const char *bucket_name; } cloud_glacier_ctx; /* Abstract class cannot be instantiated */ class cloud_glacier: public SMARTALLOC { public: cloud_glacier() {}; virtual ~cloud_glacier() {}; virtual bool init(CLOUD *cloud, POOLMEM *&err) = 0; virtual bool restore_cloud_object(transfer *xfer, const char *cloud_fname) = 0; virtual bool is_waiting_on_server(transfer *xfer, const char *cloud_fname) = 0; }; class dummy_glacier: public cloud_glacier { public: dummy_glacier() {}; bool init (CLOUD *cloud, POOLMEM *&err) { Mmsg(err, "Cloud glacier not properly loaded"); return false; } bool restore_cloud_object(transfer *xfer, const char *cloud_fname) { Mmsg(xfer->m_message, "Cloud glacier not properly loaded"); return false; } bool is_waiting_on_server(transfer *xfer, const char *cloud_fname) { Mmsg(xfer->m_message, "Cloud glacier not properly loaded"); return false; }; }; #endif /* _CLOUD_GLACIER_H_ */ bacula-15.0.3/src/stored/tape_alert.c0000644000175000017500000001636214771010173017245 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * * Routines for getting and displaying tape alerts * * Written by Kern Sibbald, October MMXVI * */ #include "bacula.h" /* pull in global headers */ #include "stored.h" /* pull in Storage Deamon headers */ #include "tape_alert_msgs.h" static const int dbglvl = 120; #define MAX_MSG 54 /* Maximum alert message number */ void alert_callback(void *ctx, const char *short_msg, const char *long_msg, char *Volume, int severity, int flags, int alertno, utime_t alert_time) { DCR *dcr = (DCR *)ctx; JCR *jcr = dcr->jcr; DEVICE *dev = dcr->dev; int type = M_INFO; switch (severity) { case 'C': type = M_FATAL; break; case 'W': type = M_WARNING; break; case 'I': type = M_INFO; break; } if (flags & TA_DISABLE_DRIVE) { dev->enabled = false; Jmsg(jcr, M_WARNING, 0, _("Disabled Device %s due to tape alert=%d.\n"), dev->print_name(), alertno); Tmsg2(dbglvl, _("Disabled Device %s due to tape alert=%d.\n"), dev->print_name(), alertno); } if (flags & TA_DISABLE_VOLUME) { dev->setVolCatStatus("Disabled"); dev->VolCatInfo.VolEnabled = false; dir_update_volume_info(dcr, false, true); Jmsg(jcr, M_WARNING, 0, _("Disabled Volume \"%s\" due to tape alert=%d.\n"), Volume, alertno); Tmsg2(dbglvl, _("Disabled Volume \"%s\" due to tape alert=%d.\n"), Volume, alertno); } Jmsg(jcr, type, (utime_t)alert_time, _("Alert: Volume=\"%s\" alert=%d: ERR=%s\n"), Volume, alertno, long_msg); } bool tape_dev::get_tape_alerts(DCR *dcr) { JCR *jcr = dcr->jcr; if (job_canceled(jcr)) { return false; } if (dcr->device->alert_command && dcr->device->control_name) { POOLMEM *alertcmd; int status = 1; int nalerts = 0; BPIPE *bpipe; ALERT *alert, *rmalert; char line[MAXSTRING]; const char *fmt = "TapeAlert[%d]"; struct stat statp; if (stat(dcr->device->control_name, &statp) < 0) { berrno be; Jmsg2(jcr, M_ERROR, 0, _("Unable to stat ControlDevice %s: ERR=%s\n"), dcr->device->control_name, be.bstrerror()); return false; } if (!alert_list) { alert_list = New(alist(10)); } alertcmd = get_pool_memory(PM_FNAME); edit_device_codes(dcr, &alertcmd, dcr->device->alert_command, ""); /* Wait maximum 5 minutes */ bpipe = open_bpipe(alertcmd, 60 * 5, "r"); if (bpipe) { int alertno; alert = (ALERT *)malloc(sizeof(ALERT)); memset(alert->alerts, 0, sizeof(alert->alerts)); alert->Volume = bstrdup(getVolCatName()); alert->alert_time = (utime_t)time(NULL); while (fgets(line, (int)sizeof(line), bpipe->rfd)) { alertno = 0; if (bsscanf(line, fmt, &alertno) == 1) { if (alertno > 0) { if (nalerts+1 > (int)sizeof(alert->alerts)) { break; } else { alert->alerts[nalerts++] = alertno; } } } } status = close_bpipe(bpipe); if (nalerts > 0) { /* Maintain First in, last out list */ if (alert_list->size() > 8) { rmalert = (ALERT *)alert_list->last(); free(rmalert->Volume); alert_list->pop(); free(rmalert); } alert_list->prepend(alert); } else { free(alert->Volume); free(alert); } free_pool_memory(alertcmd); return true; } else { status = errno; } if (status != 0) { berrno be; Jmsg(jcr, M_ALERT, 0, _("3997 Bad alert command: %s: ERR=%s.\n"), alertcmd, be.bstrerror(status)); Tmsg2(10, _("3997 Bad alert command: %s: ERR=%s.\n"), alertcmd, be.bstrerror(status)); } Dmsg1(400, "alert status=%d\n", status); free_pool_memory(alertcmd); } else { if (!dcr->device->alert_command) { Dmsg1(dbglvl, "Cannot do tape alerts: no Alert Command specified for device %s\n", print_name()); Tmsg1(dbglvl, "Cannot do tape alerts: no Alert Command specified for device %s\n", print_name()); } if (!dcr->device->control_name) { Dmsg1(dbglvl, "Cannot do tape alerts: no Control Device specified for device 
%s\n", print_name()); Tmsg1(dbglvl, "Cannot do tape alerts: no Control Device specified for device %s\n", print_name()); } } return false; } /* * Print desired tape alert messages */ void tape_dev::show_tape_alerts(DCR *dcr, alert_list_type list_type, alert_list_which which, alert_cb alert_callback) { int i; ALERT *alert; int code; if (!alert_list) { return; } Dmsg1(dbglvl, "There are %d alerts.\n", alert_list->size()); switch (list_type) { case list_codes: foreach_alist(alert, alert_list) { for (i=0; i<(int)sizeof(alert->alerts) && alert->alerts[i]; i++) { code = alert->alerts[i]; Dmsg4(dbglvl, "Volume=%s alert=%d severity=%c flags=0x%x\n", alert->Volume, code, ta_errors[code].severity, (int)ta_errors[code].flags); alert_callback(dcr, ta_errors[code].short_msg, long_msg[code], alert->Volume, ta_errors[code].severity, ta_errors[code].flags, code, (utime_t)alert->alert_time); } if (which == list_last) { break; } } break; default: foreach_alist(alert, alert_list) { for (i=0; i<(int)sizeof(alert->alerts) && alert->alerts[i]; i++) { code = alert->alerts[i]; Dmsg4(dbglvl, "Volume=%s severity=%c flags=0x%x alert=%s\n", alert->Volume, ta_errors[code].severity, (int)ta_errors[code].flags, ta_errors[code].short_msg); alert_callback(dcr, ta_errors[code].short_msg, long_msg[code], alert->Volume, ta_errors[code].severity, ta_errors[code].flags, code, (utime_t)alert->alert_time); } if (which == list_last) { break; } } break; } return; } /* * Delete alert list returning number deleted */ int tape_dev::delete_alerts() { ALERT *alert; int deleted = 0; if (alert_list) { foreach_alist(alert, alert_list) { free(alert->Volume); deleted++; } alert_list->destroy(); free(alert_list); alert_list = NULL; } return deleted; } bacula-15.0.3/src/stored/prepare.h0000644000175000017500000000506614771010173016567 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Code to do a Percona prepare on a MySQL Percona backup * during the backup but prior to putting it on the volume. 
* * Kern Sibbald, February MMIIXX */ #ifndef __PREPARE_H__ #define __PREPARE_H__ 1 #include "findlib/find.h" struct attr_record { int32_t stream; /* masked stream */ int32_t Stream; /* full stream */ uint32_t data_len; int32_t FileIndex; /* *** last item in struct */ char data[0]; /* malloced at end of this record */ }; class prepare_ctx { public: int32_t stream; /* current stream */ ATTR *attr; BFILE bfd; char *wbuf; int temp_fd; FILE *temp_fp; uint32_t wsize; /* write size */ uint64_t fileAddr; /* file write address */ uint32_t num_files; bool prepare; /* set when running prepare job */ bool created; /* set for a single initialization */ POOLMEM *tmp; POOLMEM *tmp2; POOLMEM *result; /* run_program results */ alist *attr_list; /* list of attr records */ const char *working; struct stat statp; /* stat() of prepared .xbstream */ const char* xtrabackupconf; POOLMEM *backup_binary; POOLMEM *stream_binary; BPIPE* bpipe; /* Methods */ prepare_ctx() : stream(0), attr(NULL), bfd(), /* will be zero initialized */ wbuf(NULL), temp_fd(0), temp_fp(NULL), wsize(0), fileAddr(0), num_files(0), prepare(false), created(false), tmp(NULL), tmp2(NULL), result(NULL), attr_list(NULL), working(NULL), statp(), /* will be zero initialized */ xtrabackupconf(NULL), backup_binary(NULL), stream_binary(NULL), bpipe(NULL) { }; ~prepare_ctx() { }; }; bool prepare(JCR *jcr, prepare_ctx &pctx, DEV_RECORD &rec); bool prepare_sd_end(JCR *jcr, prepare_ctx &pctx, DEV_RECORD &rec); #endif bacula-15.0.3/src/stored/cloud_dev.h0000644000175000017500000001065614771010173017076 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
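 *
 * (Illustrative note on the prepare.h interface declared above -- a sketch
 * only, not taken from an actual caller; next_record() and the use of
 * pctx.prepare are assumptions made for the example:)
 *
 *    prepare_ctx pctx;                     // members zero-initialized by the ctor
 *    pctx.prepare = true;                  // assumed: mark this as a prepare job
 *    while (next_record(rec)) {            // placeholder record source
 *       if (!prepare(jcr, pctx, *rec)) {
 *          break;
 *       }
 *    }
 *    prepare_sd_end(jcr, pctx, *rec);      // assumed end-of-stream cleanup
 *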
*/ /* * Generic routines for writing Cloud Volumes * * Written by Kern Sibbald, May MMXVI * */ #ifndef _CLOUD_DEV_H_ #define _CLOUD_DEV_H_ #define part_bits 20 #define part_mask 0x7FFFFLL #define off_bits (64-part_bits) #define off_mask 0xFFFFFFFFFFFLL #include "bacula.h" #include "stored.h" #include "cloud_driver.h" #include "cloud_transfer_mgr.h" #include "cloud_parts.h" /* enum loadable drivers */ enum { C_S3_DRIVER = 1, C_FILE_DRIVER = 2, C_WAS_DRIVER = 3, C_GOOGLE_DRIVER= 4, C_ORACLE_DRIVER= 5, C_GEN_DRIVER = 6, C_SWIFT_DRIVER = 7, C_AWS_DRIVER = 8 }; class cloud_dev: public file_dev { public: int64_t obj_len; int status; uint64_t *cache_sizes; uint32_t num_cache_parts; uint32_t max_cache_part; uint32_t max_cache_size; uint32_t trunc_opt; uint32_t upload_opt; uint32_t current_driver_type; cloud_driver *driver; static transfer_manager download_mgr; static transfer_manager upload_mgr; cloud_proxy *cloud_prox; private: char full_type[64]; bool download_parts_to_read(DCR *dcr, alist* parts); bool upload_part_to_cloud(DCR *dcr, const char *VolumeName, uint32_t part, bool do_truncate); transfer *download_part_to_cache(DCR *dcr, const char *VolumeName, uint32_t part); void make_cache_filename(POOLMEM *&filename, const char *VolumeName, uint32_t part); void make_cache_volume_name(POOLMEM *&full_volname, const char *VolumeName); bool get_cache_sizes(DCR *dcr, const char *VolumeName); bool wait_end_of_transfer(DCR *dcr, transfer *elem); bool get_cache_volume_parts_list(DCR *dcr, const char* VolumeName, ilist *parts); bool wait_one_transfer(DCR *dcr, char *VolName, uint32_t part); bool probe_cloud_proxy(DCR *dcr, const char* VolName, bool force=false); public: cloud_dev(JCR *jcr, DEVRES *device); ~cloud_dev(); bool close_part(DCR *dcr); uint32_t get_part(boffset_t ls_offset); /* DEVICE virtual interfaces that we redefine */ boffset_t lseek(DCR *dcr, off_t offset, int whence); bool rewind(DCR *dcr); bool reposition(DCR *dcr, uint64_t raddr); bool open_device(DCR *dcr, int omode); bool open_next_part(DCR *dcr); bool truncate(DCR *dcr); int truncate_cache(DCR *dcr, const char *VolName, int64_t *size, POOLMEM *&err); bool upload_cache(DCR *dcr, const char *VolName, uint32_t truncate, POOLMEM *&err); bool close(DCR *dcr); bool update_pos(DCR *dcr); bool is_eod_valid(DCR *dcr); bool eod(DCR *dcr); int read_dev_volume_label(DCR *dcr); const char *print_type(); const char *print_driver_type(); const char *print_full_type(); DEVICE *get_dev(DCR *dcr); uint32_t get_hi_addr(); uint32_t get_low_addr(); uint64_t get_full_addr(); uint64_t get_full_addr(boffset_t addr); char *print_addr(char *buf, int32_t buf_len); char *print_addr(char *buf, int32_t maxlen, boffset_t addr); bool do_size_checks(DCR *dcr, DEV_BLOCK *block); bool write_volume_label(DCR *dcr, const char *VolName, const char *PoolName, bool relabel, bool no_prelabel); bool rewrite_volume_label(DCR *dcr, bool recycle); bool start_of_job(DCR *dcr); bool end_of_job(DCR *dcr, uint32_t truncate); bool get_cloud_volumes_list(DCR* dcr, alist *volumes, POOLMEM *&err); bool get_cloud_volume_parts_list(DCR *dcr, const char *VolumeName, ilist *parts, POOLMEM *&err); uint32_t get_cloud_upload_transfer_status(POOL_MEM &msg, bool verbose); void get_api_cloud_upload_transfer_status(OutputWriter &ow, bool verbose); uint32_t get_cloud_download_transfer_status(POOL_MEM &msg, bool verbose); void get_api_cloud_download_transfer_status(OutputWriter &ow, bool verbose); virtual int device_specific_init(JCR *jcr, DEVRES *device); }; /* Exported subroutines */ bool 
makedir(JCR *jcr, char *path, mode_t mode); #endif /* _CLOUD_DEV_H_ */ bacula-15.0.3/src/stored/record_write.c0000644000175000017500000003346414771010173017617 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * * record_write.c -- Volume (tape/disk) record write functions * * Kern Sibbald, April MMI * added BB02 format October MMII * added aligned format November MMXII * */ #include "bacula.h" #include "stored.h" /* Imported functions */ static const int dbgep = 250|DT_RECORD; /* debug execution path */ static const int dbgel = 250|DT_RECORD; /* debug Enter/Leave code */ struct rechdr { int32_t FileIndex; uint32_t data_len; uint32_t reclen; int32_t Stream; int32_t oStream; }; /* * Write an ameta (normal) header record to the block. */ static bool write_header_to_block(DCR *dcr, DEV_BLOCK *block, DEV_RECORD *rec) { ser_declare; Dmsg0(dbgep, "=== wpath 11 write_header_to_block\n"); ASSERT2(!block->adata, "Attempt to write header to adata block!"); rec->remlen = block->buf_len - block->binbuf; /* Require enough room to write a full header */ if (rec->remlen < WRITE_RECHDR_LENGTH) { Dmsg0(dbgep, "=== wpath 12 write_header_to_block\n"); Dmsg5(190, "remlenadata, rec->remlen, WRITE_RECHDR_LENGTH, block->buf_len, block->binbuf); rec->remainder = rec->data_len + WRITE_RECHDR_LENGTH; return false; } ser_begin(block->bufp, WRITE_RECHDR_LENGTH); if (BLOCK_VER == 1) { Dmsg0(dbgep, "=== wpath 13 write_header_to_block\n"); ser_uint32(rec->VolSessionId); ser_uint32(rec->VolSessionTime); } else { // BLOCK_VER is 2 or 3 Dmsg0(dbgep, "=== wpath 14 write_header_to_block\n"); block->VolSessionId = rec->VolSessionId; block->VolSessionTime = rec->VolSessionTime; } ser_int32(rec->FileIndex); ser_int32(rec->Stream); ser_uint32(rec->data_len); block->bufp += WRITE_RECHDR_LENGTH; block->binbuf += WRITE_RECHDR_LENGTH; /* See if we create a FileMedia record for this record */ create_filemedia(dcr, block, rec); block->RecNum++; rec->remlen -= WRITE_RECHDR_LENGTH; rec->remainder = rec->data_len; if (rec->FileIndex > 0) { Dmsg0(dbgep, "=== wpath 15 write_header_to_block\n"); /* If data record, update what we have in this block */ if (block->FirstIndex == 0) { Dmsg0(dbgep, "=== wpath 16 write_header_to_block\n"); block->FirstIndex = rec->FileIndex; } block->LastIndex = rec->FileIndex; } block->extra_bytes += rec->extra_bytes; //dump_block(dcr->dev, block, "Add header"); return true; } /* * If the prior ameta block was not big enough to hold the * whole record, write a continuation header record. */ static void write_continue_header_to_block(DCR *dcr, DEV_BLOCK *block, DEV_RECORD *rec) { ser_declare; Dmsg0(dbgep, "=== wpath 17 write_cont_hdr_to_block\n"); ASSERT2(!block->adata, "Attempt to write adata header!"); rec->remlen = block->buf_len - block->binbuf; /* No space left to write the continue header */ if (rec->remlen == 0) { return; } /* * We have unwritten bytes from a previous * time. 
Presumably we have a new buffer (possibly * containing a volume label), so the new header * should be able to fit in the block -- otherwise we have * an error. Note, we have to continue splitting the * data record if it is longer than the block. * * First, write the header. * * Every time we write a header and it is a continuation * of a previous partially written record, we store the * Stream as -Stream in the record header. */ ser_begin(block->bufp, WRITE_RECHDR_LENGTH); if (BLOCK_VER == 1) { Dmsg0(dbgep, "=== wpath 18 write_cont_hdr_to_block\n"); ser_uint32(rec->VolSessionId); ser_uint32(rec->VolSessionTime); } else { Dmsg0(dbgep, "=== wpath 19 write_cont_hdr_to_block\n"); block->VolSessionId = rec->VolSessionId; block->VolSessionTime = rec->VolSessionTime; } ser_int32(rec->FileIndex); if (rec->remainder > rec->data_len) { Dmsg0(dbgep, "=== wpath 20 write_cont_hdr_to_block\n"); ser_int32(rec->Stream); /* normal full header */ ser_uint32(rec->data_len); rec->remainder = rec->data_len; /* must still do data record */ } else { Dmsg0(dbgep, "=== wpath 21 write_cont_hdr_to_block\n"); ser_int32(-rec->Stream); /* mark this as a continuation record */ ser_uint32(rec->remainder); /* bytes to do */ } /* Require enough room to write a full header */ ASSERT(rec->remlen >= WRITE_RECHDR_LENGTH); block->bufp += WRITE_RECHDR_LENGTH; block->binbuf += WRITE_RECHDR_LENGTH; rec->remlen -= WRITE_RECHDR_LENGTH; if (rec->FileIndex > 0) { Dmsg0(dbgep, "=== wpath 22 write_cont_hdr_to_block\n"); /* If data record, update what we have in this block */ if (block->FirstIndex == 0) { Dmsg0(dbgep, "=== wpath 23 write_cont_hdr_to_block\n"); block->FirstIndex = rec->FileIndex; } block->LastIndex = rec->FileIndex; } if (block->adata) { Dmsg3(150, "=== write_cont_hdr ptr=%p begin=%p off=%d\n", block->bufp, block->buf, block->bufp-block->buf); } block->RecNum++; block->extra_bytes += rec->extra_bytes; /* ***BEEF*** */ //dump_block(dcr->dev, block, "Add cont header"); } /* * Now write non-aligned data to an ameta block */ static bool write_data_to_block(DCR *dcr, DEV_BLOCK *block, DEV_RECORD *rec) { Dmsg0(dbgep, "=== wpath 24 write_data_to_block\n"); ASSERT2(!block->adata, "Attempt to write adata to metadata file!"); rec->remlen = block->buf_len - block->binbuf; /* Write as much of data as possible */ if (rec->remlen >= rec->remainder) { Dmsg0(dbgep, "=== wpath 25 write_data_to_block\n"); memcpy(block->bufp, rec->data+rec->data_len-rec->remainder, rec->remainder); block->bufp += rec->remainder; block->binbuf += rec->remainder; rec->remainder = 0; } else { if (rec->state_bits & REC_NO_SPLIT) { return false; /* do not split record */ } Dmsg0(dbgep, "=== wpath 26 write_data_to_block\n"); memcpy(block->bufp, rec->data+rec->data_len-rec->remainder, rec->remlen); block->bufp += rec->remlen; block->binbuf += rec->remlen; rec->remainder -= rec->remlen; return false; /* did partial transfer */ } if (block->adata) { /* Adata label data */ Dmsg3(190, "write_data adata=%d blkAddr=%lld off=%d\n", block->adata, block->BlockAddr, block->bufp-block->buf); } //dump_block(dcr->dev, block, "Add data/adata"); return true; } /* * Write a record to the block -- handles writing out full * blocks by writing them to the device. * * Returns: false means the block could not be written to tape/disk. * true on success (all bytes written to the block). 
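 *
 * Illustrative caller sketch (not from the original sources; it assumes a
 * DCR obtained through the normal acquire path and a DEV_RECORD filled in
 * by the caller -- the allocation helper and stream constant below are
 * placeholders):
 *
 *    DEV_RECORD *rec = new_record();       // assumed allocation helper
 *    rec->FileIndex = file_index;
 *    rec->Stream    = STREAM_FILE_DATA;    // assumed stream constant
 *    rec->data      = buf;
 *    rec->data_len  = len;
 *    if (!dcr->write_record(rec)) {
 *       // block could not be written; write_record() already printed
 *       // the device error from dev->bstrerror()
 *    }
 *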
*/ bool DCR::write_record(DEV_RECORD *rec) { Enter(dbgel); Dmsg0(dbgep, "=== wpath 33 write_record\n"); while (!write_record_to_block(this, rec)) { Dmsg2(850, "!write_record_to_block data_len=%d rem=%d\n", rec->data_len, rec->remainder); if (jcr->is_canceled()) { Leave(dbgel); return false; } if (!write_block_to_device()) { Dmsg0(dbgep, "=== wpath 34 write_record\n"); Pmsg2(000, "Got write_block_to_dev error on device %s. %s\n", dev->print_name(), dev->bstrerror()); Leave(dbgel); return false; } Dmsg2(850, "!write_record_to_block data_len=%d rem=%d\n", rec->data_len, rec->remainder); } Leave(dbgel); return true; } /* * Write a record to the block * * Returns: false on failure (none or partially written) * true on success (all bytes written) * * and remainder returned in packet. * * We require enough room for the header, and we deal with * two special cases. 1. Only part of the record may have * been transferred the last time (when remainder is * non-zero), and 2. The remaining bytes to write may not * all fit into the block. * * Ensure we leave with same ameta/adata set as enter. */ bool write_record_to_block(DCR *dcr, DEV_RECORD *rec) { char buf1[100], buf2[100]; bool save_adata = dcr->block->adata; bool rtn; Enter(dbgel); Dmsg0(dbgep, "=== wpath 35 enter write_record_to_block\n"); Dmsg7(250, "write_record_to_block() state=%d FI=%s SessId=%d" " Strm=%s len=%d rem=%d remainder=%d\n", rec->wstate, FI_to_ascii(buf1, rec->FileIndex), rec->VolSessionId, stream_to_ascii(buf2, rec->Stream, rec->FileIndex), rec->data_len, rec->remlen, rec->remainder); Dmsg4(250, "write_rec Strm=%s len=%d rem=%d remainder=%d\n", stream_to_ascii(buf2, rec->Stream, rec->FileIndex), rec->data_len, rec->remlen, rec->remainder); if (!dcr->dev->do_pre_write_checks(dcr, rec)) { goto fail_out; } for ( ;; ) { Dmsg1(dbgep, "=== wpath 37 top of for loop wstate=%d\n", rec->wstate); ASSERT(dcr->block->binbuf == (uint32_t) (dcr->block->bufp - dcr->block->buf)); ASSERT(dcr->block->buf_len >= dcr->block->binbuf); switch (rec->wstate) { case st_none: Dmsg0(dbgep, "=== wpath 38 st_none\n"); /* Figure out what to do */ rec->wstate = st_header; /* If labeling adata, special path */ if (dcr->adata_label) { Dmsg1(dbgep, "=== wpath adata_label set adata=%d\n", dcr->dev->adata); rec->wstate = st_adata_label; continue; } if (rec->FileIndex < 0) { /* Label record -- ameta label */ Dmsg3(dbgep, "=== wpath label adata=%d Strm=%d FI=%d\n", dcr->dev->adata, rec->Stream, rec->FileIndex); rec->wstate = st_header; continue; } dcr->dev->select_data_stream(dcr, rec); continue; /* go to next state */ case st_header: /* * Write header * * If rec->remainder is non-zero, we have been called a * second (or subsequent) time to finish writing a record * that did not previously fit into the block. */ Dmsg0(dbgep, "=== wpath 42 st_header\n"); dcr->set_ameta(); if (!write_header_to_block(dcr, dcr->ameta_block, rec)) { Dmsg0(dbgep, "=== wpath 43 st_header\n"); rec->wstate = st_cont_header; goto fail_out; } Dmsg0(dbgep, "=== wpath 44 st_header\n"); rec->wstate = st_data; continue; case st_cont_header: Dmsg0(dbgep, "=== wpath 45 st_cont_header\n"); dcr->set_ameta(); write_continue_header_to_block(dcr, dcr->ameta_block, rec); rec->wstate = st_data; if (rec->remlen == 0) { Dmsg0(dbgep, "=== wpath 46 st_cont_header\n"); goto fail_out; } continue; /* * We come here only once for each record */ case st_data: /* * Write data * * Part of it may have already been transferred, and we * may not have enough room to transfer the whole this time. 
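 *
 * Worked example (illustrative numbers only): with roughly 64K of free
 * space left in the block and a 200000-byte record, write_data_to_block()
 * copies what fits, leaves rec->remainder non-zero and returns false; the
 * caller flushes the block, and the next pass goes through
 * write_continue_header_to_block(), which stores -Stream to mark the
 * continuation, until remainder finally reaches zero.
 *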
*/ Dmsg0(dbgep, "=== wpath 47 st_data\n"); dcr->set_ameta(); if (rec->remainder > 0) { Dmsg0(dbgep, "=== wpath 48 st_data\n"); if (!write_data_to_block(dcr, dcr->ameta_block, rec)) { Dmsg0(dbgep, "=== wpath 49 st_data\n"); if (rec->state_bits & REC_NO_SPLIT) { rec->wstate = st_header; } else { rec->wstate = st_cont_header; } goto fail_out; } } rec->state_bits &= ~REC_NO_SPLIT; /* clear possible no split bit */ rec->remainder = 0; /* did whole transfer */ rec->wstate = st_none; goto get_out; case st_adata_label: if (!dcr->dev->write_adata_label(dcr, rec)) { goto fail_out; } goto get_out; /* * We come here only once for each record */ case st_adata: dcr->dev->write_adata(dcr, rec); continue; case st_cont_adata: dcr->dev->write_cont_adata(dcr, rec); continue; /* * Note, the following two cases are handled differently * in write_adata_record_header() so take care if you want to * eliminate one of them. */ case st_cont_adata_rechdr: Dmsg2(200, "=== cont rechdr remainder=%d reclen=%d\n", rec->remainder, dcr->adata_block->reclen); Dmsg0(200, "st_cont_adata_rechdr\n"); /* Fall through wanted */ case st_adata_rechdr: switch(dcr->dev->write_adata_rechdr(dcr, rec)) { case -1: goto fail_out; case 0: continue; case 1: goto get_out; } break; default: Dmsg0(dbgep, "=== wpath 67!!!! default\n"); Dmsg0(50, "Something went wrong. Default state.\n"); rec->wstate = st_none; goto get_out; } } get_out: rtn = true; goto out; fail_out: rtn = false; out: if (save_adata) { dcr->set_adata(); } else { dcr->set_ameta(); } Leave(dbgel); return rtn; } bacula-15.0.3/src/stored/tape_dev.h0000644000175000017500000000424014771010173016711 0ustar bsbuildbsbuild/* [vssfs.c] IQ Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * Inspired by vtape.h */ #ifndef __TAPE_DEV_ #define __TAPE_DEV_ struct ALERT { char *Volume; utime_t alert_time; char alerts[10]; }; class tape_dev : public DEVICE { public: tape_dev() { }; ~tape_dev() { }; /* DEVICE virtual functions that we redefine with our tape code */ bool fsf(int num); bool offline(DCR *dcr); bool rewind(DCR *dcr); bool bsf(int num); void lock_door(); void unlock_door(); bool reposition(DCR *dcr, uint64_t raddr); bool mount(int timeout); bool unmount(int timeout); bool mount_tape(int mount, int dotimeout); bool weof(DCR *dcr, int num); bool eod(DCR *dcr); bool is_eod_valid(DCR *dcr); void set_ateof(); bool open_device(DCR *dcr, int omode); void term(DCR *dcr); const char *print_type(); DEVICE *get_dev(DCR *dcr); uint32_t get_hi_addr(); uint32_t get_low_addr(); uint64_t get_full_addr(); bool end_of_volume(DCR *dcr); char *print_addr(char *buf, int32_t buf_len); char *print_addr(char *buf, int32_t maxlen, boffset_t addr); bool get_tape_worm(DCR *dcr); bool get_tape_alerts(DCR *dcr); void show_tape_alerts(DCR *dcr, alert_list_type type, alert_list_which which, alert_cb alert_callback); int delete_alerts(); bool check_lintape_eod(); int use_worm() { return device->worm_command && device->control_name; }; alist *alert_list; bool sync_data(DCR *dcr) { return true; }; // not implemented by tape driver }; #endif /* __TAPE_DEV_ */ bacula-15.0.3/src/stored/cloud_transfer_mgr.h0000644000175000017500000002624614771010173021013 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Bacula Cloud Transfer Manager: * transfer manager wraps around the work queue. * Reports transfer status and error * Reports statistics about current past and future work * Written by Norbert Bizet, May MMXVI * */ #ifndef BCLOUD_TRANSFER_MANAGER_H #define BCLOUD_TRANSFER_MANAGER_H #include "bacula.h" #include "lib/workq.h" /* forward declarations */ class transfer_manager; class cloud_driver; class DCR; class transfer; class cloud_proxy; /* possible states of a transfer object */ typedef enum { /* initial state */ /* object has been created but not queued yet*/ TRANS_STATE_CREATED = 0, /* in the workq states */ /* object is queued*/ TRANS_STATE_QUEUED, /* object is processed*/ TRANS_STATE_PROCESSED, /* completed states */ /* object processing has completed ok*/ TRANS_STATE_DONE, /* object processing has completed but failed*/ TRANS_STATE_ERROR, /* number of states */ NUM_TRANS_STATE } transfer_state; typedef transfer_state (transfer_engine)(transfer *); /* each cloud transfer (download, upload, etc.) 
is wrapped into a transfer object */ class transfer : public SMARTALLOC { public: dlink link; /* Used in global manager dlist */ /* m_stat prefixed statistics variables : */ /* protect access to statistics resources*/ pthread_mutex_t m_stat_mutex; /* size of the transfer: should be filled asap */ uint64_t m_stat_size; /* size processed so far : filled by the processor (driver) */ uint64_t m_stat_processed_size; /* time when process started */ btime_t m_stat_start; /* duration of the transfer : automatically filled when transfer is completed*/ btime_t m_stat_duration; /* estimate time to arrival : predictive guest approximation of transfer time*/ btime_t m_stat_eta; /* computed bytes/sec transfer rate */ uint64_t m_stat_average_rate; /* status variables :*/ /* protect status changes*/ pthread_mutex_t m_mutex; /* cond variable to broadcast transfer completion*/ pthread_cond_t m_done; /* status message */ POOLMEM *m_message; /* current transfer state*/ transfer_state m_state; /* other variables :*/ /* the manager that holds this element */ transfer_manager *m_mgr; /* the function processed by this transfer: contrary to the workq, it can be different for each transfer */ transfer_engine *m_funct; /* current waiting time for observer. * if != 0, the transfer is supposed to be in QUEUED state * and is reported as Waiting in statistics. */ int m_wait_timeout_inc_insec; /* next wait timeout */ btime_t m_wait_timeout; /* used for file_driver glacier simulation */ bool m_debug_retry; /* variables */ char *m_cache_fname; char *m_volume_name; char *m_device_name; uint32_t m_part; cloud_driver *m_driver; uint32_t m_job_id; DCR *m_dcr; cloud_proxy *m_proxy; /* size of the transfer result : filled by the processor (driver) */ uint64_t m_res_size; /* last modification time of the transfer result : filled by the processor (driver) */ utime_t m_res_mtime; /* SHA512 checksum of the part */ unsigned char m_hash64[64]; /* the associated workq element */ workq_ele_t *m_workq_elem; /* reference counter */ int m_use_count; /* Number of retry for this tranfer */ int32_t m_retry; /* cancel flag */ bool m_cancel; /* truncate cache once transfer is completed (upload)*/ bool m_do_cache_truncate; /* for Archive/Glacier restoration */ POOLMEM *m_restore_bucket; /* methods :*/ /* constructor * size : the size in bytes of the transfer * funct : function to process * cache_fname : cache file name is duplicated in the transfer constructor * volume_name : volume name is duplicated in the transfer constructor * part : part index * driver : pointer to the cloud_driver * dcr : pointer to DCR */ transfer(uint64_t size, transfer_engine *funct, const char *cache_fname, const char *volume_name, const char *device_name, uint32_t part, cloud_driver *driver, uint32_t JobId, DCR *dcr, cloud_proxy *proxy ); /* destructor*/ ~transfer(); /* opaque function that will process m_funct with m_arg as parameter. Called back from the workq. * depending on m_funct return value, changes m_state to TRANS_STATE_DONE or TRANS_STATE_ERROR */ void proceed(); /* waits for the asynchronous computation to finish (including cancel()ed computations). 
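 *
 * An illustrative sequence (sketch only; the arguments are placeholders and
 * the manager accessors used here are declared further below):
 *
 *    transfer *xfer = mgr.get_xfer(size, engine, fname, volname, devname,
 *                                  part, driver, JobId, dcr, proxy);
 *    if (xfer) {
 *       if (xfer->queue()) {
 *          xfer->wait();                   // returns once DONE or ERROR
 *       }
 *       mgr.release(xfer);                 // drop our reference
 *    }
 *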
* ret: 0:Ok, errorcode otherwise */ int wait(); /* no timeout */ int timedwait(const timeval& tv); /* with timeout */ /* queue this transfer for processing in the manager workq * ret :true if queuing is successful */ bool queue(); /* cancel processing * ret: true cancel done, false cancel failed */ bool cancel(); /* callback fct that checks if transfer has been cancelled */ bool is_canceled() const; /* append a status message into msg*/ uint32_t append_status(POOL_MEM& msgs); void append_api_status(OutputWriter &ow); void set_do_cache_truncate(bool do_cache_truncate); void set_restore_bucket(const char *restore_bucket); /* reset processed size */ void reset_processed_size(); /* set processed size absolute value */ void set_processed_size(uint64_t size); /* add increment to the current processed size */ void increment_processed_size(uint64_t increment); /* Increment the retry count for this transfer */ void inc_retry(); protected: friend class transfer_manager; /* the manager references itselfs thru this function*/ void set_manager(transfer_manager *mgr); /* change the state * ret : true if transition is legal, false otherwise */ bool transition(transfer_state state); /* ref counting must lock the element prior to use */ int inc_use_count(); /* !!dec use count can delete the transfer */ int dec_use_count(); }; /* The transfer_manager wraps around the work queue and holds the transfer(s) */ class transfer_manager : public SMARTALLOC { public: /* m_stat prefixed statistics variables global for this manager: */ /* protect access to statistics resources*/ pthread_mutex_t m_stat_mutex; /* number of workers*/ uint32_t m_stat_nb_workers; /* current number of transfers in TRANS_STATE_QUEUED state in this manager*/ uint64_t m_stat_nb_transfer_queued; /* current number of requeued transfers in TRANS_STATE_QUEUED state in this manager*/ uint64_t m_stat_nb_transfer_waiting; /* current number of transfers in TRANS_STATE_PROCESSED state in this manager*/ uint64_t m_stat_nb_transfer_processed; /* current number of transfers in TRANS_STATE_DONE state in this manager*/ uint64_t m_stat_nb_transfer_done; /* current number of transfers in TRANS_STATE_ERROR state in this manager*/ uint64_t m_stat_nb_transfer_error; /* size in bytes of transfers in TRANS_STATE_QUEUED state in this manager*/ uint64_t m_stat_size_queued; /* size in bytes of requeued transfers in TRANS_STATE_QUEUED state in this manager*/ uint64_t m_stat_size_waiting; /* size in bytes of transfers in TRANS_STATE_PROCESSED state in this manager*/ uint64_t m_stat_size_processed; /* size in bytes of transfers in TRANS_STATE_DONE state in this manager*/ uint64_t m_stat_size_done; /* size in bytes of transfers in TRANS_STATE_ERROR state in this manager*/ uint64_t m_stat_size_error; /* duration of transfers in TRANS_STATE_DONE state in this manager*/ btime_t m_stat_duration_done; /* computed bytes/sec transfer rate */ uint64_t m_stat_average_rate; /* computed Estimate Time to Arrival */ btime_t m_stat_eta; /* status variables global for this manager: */ /* protect status access*/ pthread_mutex_t m_mutex; /* status message for this manager TBD*/ POOLMEM *m_message; /* m_state for the manager TBD*/ int32_t m_state; /* others: */ /* tranfer list*/ dlist m_transfer_list; /* workq used by this manager*/ workq_t m_wq; /* methods */ /* constructor */ /* nb_workers: maximum number of workers allowed for this manager */ transfer_manager(uint32_t nb_worker); /* destructor */ ~transfer_manager(); /* transfer functions */ /* create a new or inc-reference a similar 
transfer. (factory) * ret: transfer* is ref_counted and must be kept, used * and eventually released by caller with release() */ transfer *get_xfer(uint64_t size, transfer_engine *funct, POOLMEM *cache_fname, const char *volume_name, const char *device_name, uint32_t part, cloud_driver *driver, uint32_t JobId, DCR *dcr, cloud_proxy *proxy); /* does the xfer belong to this manager? */ bool owns(transfer *xfer); /* un-ref transfer and delete if ref count falls to zero * caller must NOT use xfer anymore after calling release() */ void release(transfer *xfer); /* accessors to xfer->queue */ bool queue(transfer *xfer); /* accessors to xfer->wait */ int wait(transfer *xfer); /* accessors to xfer->timedwait */ int timedwait(transfer *xfer, const timeval& tv); /* accessors to xfer->cancel */ bool cancel(transfer *xfer); /* search the transfer list for similar transfer */ bool find(const char *VolName, uint32_t index); /* call to update manager statistics, before displaying it b.e.*/ void update_statistics(); /* append a status message into msg*/ uint32_t append_status(POOL_MEM& msg, bool verbose); void append_api_status(OutputWriter &ow, bool verbose); protected: friend class transfer; /* append a transfer object to this manager */ int add_work(transfer* t); /* remove associated workq_ele_t from this manager workq*/ int remove_work(workq_ele_t *elem); }; #endif /* BCLOUD_TRANSFER_MANAGER_H */ bacula-15.0.3/src/stored/null_dev.h0000644000175000017500000000156614771010173016742 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * NULL driver -- normally only used for performance testing */ #ifndef __NULL_DEV_ #define __NULL_DEV_ class null_dev : public file_dev { public: null_dev() { }; ~null_dev() { }; const char *print_type(); }; #endif /* __NULL_DEV_ */ bacula-15.0.3/src/stored/wait.c0000644000175000017500000002533314771010173016067 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * Subroutines to handle waiting for operator intervention * or waiting for a Device to be released * * Code for wait_for_sysop() pulled from askdir.c * * Kern Sibbald, March 2005 */ #include "bacula.h" /* pull in global headers */ #include "stored.h" /* pull in Storage Deamon headers */ const int dbglvl = 400; /* * Wait for SysOp to mount a tape on a specific device * * Returns: W_ERROR, W_TIMEOUT, W_POLL, W_MOUNT, or W_WAKE */ int wait_for_sysop(DCR *dcr) { struct timeval tv; struct timezone tz; struct timespec timeout; time_t last_heartbeat = 0; time_t first_start = time(NULL); int stat = 0; int add_wait; bool unmounted; DEVICE *dev = dcr->dev; JCR *jcr = dcr->jcr; dev->Lock(); Dmsg1(dbglvl, "Enter blocked=%s\n", dev->print_blocked()); /* * Since we want to mount a tape, make sure current one is * not marked as using this drive. */ volume_unused(dcr); unmounted = dev->is_device_unmounted(); dev->poll = false; /* * Wait requested time (dev->rem_wait_sec). However, we also wake up every * HB_TIME seconds and send a heartbeat to the FD and the Director * to keep stateful firewalls from closing them down while waiting * for the operator. */ add_wait = dev->rem_wait_sec; if (me->heartbeat_interval && add_wait > me->heartbeat_interval) { add_wait = me->heartbeat_interval; } /* If the user did not unmount the tape and we are polling, ensure * that we poll at the correct interval. */ if (!unmounted && dev->vol_poll_interval && add_wait > dev->vol_poll_interval) { add_wait = dev->vol_poll_interval; } if (!unmounted) { Dmsg1(dbglvl, "blocked=%s\n", dev->print_blocked()); dev->dev_prev_blocked = dev->blocked(); dev->set_blocked(BST_WAITING_FOR_SYSOP); /* indicate waiting for mount */ } for ( ; !job_canceled(jcr) && !jcr->is_incomplete(); ) { time_t now, start, total_waited; gettimeofday(&tv, &tz); timeout.tv_nsec = tv.tv_usec * 1000; timeout.tv_sec = tv.tv_sec + add_wait; Dmsg4(dbglvl, "I'm going to sleep on device %s. HB=%d rem_wait=%d add_wait=%d\n", dev->print_name(), (int)me->heartbeat_interval, dev->rem_wait_sec, add_wait); start = time(NULL); /* Wait required time */ stat = dev->next_vol_timedwait(&timeout); Dmsg2(dbglvl, "Wokeup from sleep on device stat=%d blocked=%s\n", stat, dev->print_blocked()); now = time(NULL); total_waited = now - first_start; dev->rem_wait_sec -= (now - start); /* Note, this always triggers the first time. We want that. */ if (me->heartbeat_interval) { if (now - last_heartbeat >= me->heartbeat_interval) { /* Send Heartbeats Note when sd_client is set, the SD is acting as an FD, bug the SD has code to receive heartbeats, so we skip sending them. */ if (jcr->file_bsock && !(jcr->is_JobType(JT_BACKUP) && jcr->sd_client)) { jcr->file_bsock->signal(BNET_HEARTBEAT); Dmsg0(dbglvl, "Send heartbeat to FD.\n"); } if (jcr->dir_bsock) { jcr->dir_bsock->signal(BNET_HEARTBEAT); } last_heartbeat = now; } } if (stat == EINVAL) { berrno be; Jmsg1(jcr, M_FATAL, 0, _("pthread timedwait error. 
ERR=%s\n"), be.bstrerror(stat)); stat = W_ERROR; /* error */ break; } /* * Continue waiting if operator is labeling volumes */ if (dev->blocked() == BST_WRITING_LABEL) { continue; } if (dev->rem_wait_sec <= 0) { /* on exceeding wait time return */ Dmsg0(dbglvl, "Exceed wait time.\n"); stat = W_TIMEOUT; break; } /* * Check if user unmounted the device while we were waiting */ unmounted = dev->is_device_unmounted(); if (!unmounted && dev->vol_poll_interval && (total_waited >= dev->vol_poll_interval)) { Dmsg1(dbglvl, "Set poll=true return in wait blocked=%s\n", dev->print_blocked()); dev->poll = true; /* returning a poll event */ stat = W_POLL; break; } /* * Check if user mounted the device while we were waiting */ if (dev->blocked() == BST_MOUNT) { /* mount request ? */ Dmsg0(dbglvl, "Mounted return.\n"); stat = W_MOUNT; break; } /* * If we did not timeout, then some event happened, so * return to check if state changed. */ if (stat != ETIMEDOUT) { berrno be; Dmsg2(dbglvl, "Wake return. stat=%d. ERR=%s\n", stat, be.bstrerror(stat)); stat = W_WAKE; /* someone woke us */ break; } /* * At this point, we know we woke up because of a timeout, * that was due to a heartbeat, because any other reason would * have caused us to return, so update the wait counters and continue. */ add_wait = dev->rem_wait_sec; if (me->heartbeat_interval && add_wait > me->heartbeat_interval) { add_wait = me->heartbeat_interval; } /* If the user did not unmount the tape and we are polling, ensure * that we poll at the correct interval. */ if (!unmounted && dev->vol_poll_interval && add_wait > dev->vol_poll_interval - total_waited) { add_wait = dev->vol_poll_interval - total_waited; } if (add_wait < 0) { add_wait = 0; } } if (!unmounted) { dev->set_blocked(dev->dev_prev_blocked); /* restore entry state */ Dmsg1(dbglvl, "set %s\n", dev->print_blocked()); } Dmsg2(dbglvl, "Exit blocked=%s poll=%d\n", dev->print_blocked(), dev->poll); dev->Unlock(); return stat; } /* * Wait for any device to be released, then we return, so * higher level code can rescan possible devices. Since there * could be a job waiting for a drive to free up, we wait a maximum * of 1 minute then retry just in case a broadcast was lost, and * we return to rescan the devices. * * Returns: true if a device has changed state * false if the total wait time has expired. */ bool wait_for_any_device(JCR *jcr, int &retries) { struct timeval tv; struct timezone tz; struct timespec timeout; int stat = 0; bool ok = true; const int max_wait_time = 1 * 60; /* wait 1 minute */ char ed1[50]; Dmsg0(dbglvl, "Enter wait_for_any_device\n"); P(device_release_mutex); if (++retries % 5 == 0) { /* Print message every 5 minutes */ Jmsg(jcr, M_MOUNT, 0, _("JobId=%s, Job %s waiting to reserve a device.\n"), edit_uint64(jcr->JobId, ed1), jcr->Job); } gettimeofday(&tv, &tz); timeout.tv_nsec = tv.tv_usec * 1000; timeout.tv_sec = tv.tv_sec + max_wait_time; Dmsg0(dbglvl, "Going to wait for a device.\n"); /* Wait required time */ stat = pthread_cond_timedwait(&wait_device_release, &device_release_mutex, &timeout); Dmsg1(dbglvl, "Wokeup from sleep on device stat=%d\n", stat); V(device_release_mutex); Dmsg1(dbglvl, "Return from wait_device ok=%d\n", ok); return ok; } /* * Wait for a specific device to be released * We wait a maximum of 1 minute then * retry just in case a broadcast was lost. * * Returns: true if the device has changed state * false if the total wait time has expired. 
*/ bool wait_for_device(DCR *dcr, int &retries) { struct timeval tv; struct timezone tz; struct timespec timeout; JCR *jcr = dcr->jcr; DEVICE *dev = dcr->dev; int stat = 0; bool ok = true; const int max_wait_time = 1 * 60; /* wait 1 minute */ char ed1[50]; Dmsg3(40, "Enter wait_for_device. busy=%d dcrvol=%s devvol=%s\n", dev->is_busy(), dcr->VolumeName, dev->getVolCatName()); P(device_release_mutex); if (++retries % 5 == 0) { /* Print message every 5 minutes */ Jmsg(jcr, M_MOUNT, 0, _("JobId=%s, Job %s waiting device %s.\n"), edit_uint64(jcr->JobId, ed1), jcr->Job, dcr->dev->print_name()); } gettimeofday(&tv, &tz); timeout.tv_nsec = tv.tv_usec * 1000; timeout.tv_sec = tv.tv_sec + max_wait_time; Dmsg0(dbglvl, "Going to wait for a device.\n"); /* Wait required time */ stat = pthread_cond_timedwait(&wait_device_release, &device_release_mutex, &timeout); Dmsg1(dbglvl, "Wokeup from sleep on device stat=%d\n", stat); V(device_release_mutex); Dmsg1(dbglvl, "Return from wait_device ok=%d\n", ok); return ok; } /* * This routine initializes the device wait timers */ void init_device_wait_timers(DCR *dcr) { DEVICE *dev = dcr->dev; JCR *jcr = dcr->jcr; /* ******FIXME******* put these on config variables */ dev->min_wait = 60 * 60; dev->max_wait = 24 * 60 * 60; dev->max_num_wait = 9; /* 5 waits =~ 1 day, then 1 day at a time */ dev->wait_sec = dev->min_wait; dev->rem_wait_sec = dev->wait_sec; dev->num_wait = 0; dev->poll = false; jcr->min_wait = 60 * 60; jcr->max_wait = 24 * 60 * 60; jcr->max_num_wait = 9; /* 5 waits =~ 1 day, then 1 day at a time */ jcr->wait_sec = jcr->min_wait; jcr->rem_wait_sec = jcr->wait_sec; jcr->num_wait = 0; } void init_jcr_device_wait_timers(JCR *jcr) { /* ******FIXME******* put these on config variables */ jcr->min_wait = 60 * 60; jcr->max_wait = 24 * 60 * 60; jcr->max_num_wait = 9; /* 5 waits =~ 1 day, then 1 day at a time */ jcr->wait_sec = jcr->min_wait; jcr->rem_wait_sec = jcr->wait_sec; jcr->num_wait = 0; } /* * The dev timers are used for waiting on a particular device * * Returns: true if time doubled * false if max time expired */ bool double_dev_wait_time(DEVICE *dev) { dev->wait_sec *= 2; /* double wait time */ if (dev->wait_sec > dev->max_wait) { /* but not longer than maxtime */ dev->wait_sec = dev->max_wait; } dev->num_wait++; dev->rem_wait_sec = dev->wait_sec; if (dev->num_wait >= dev->max_num_wait) { return false; } return true; } bacula-15.0.3/src/stored/tape_alert_msgs.h0000644000175000017500000003615114771010173020301 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
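 *
 * (Illustrative note on the wait.c timers above: with min_wait = 3600 and
 * max_wait = 86400 as set by init_device_wait_timers(), successive calls to
 * double_dev_wait_time() yield wait_sec values of 2h, 4h, 8h, 16h and then
 * 24h capped, and the function returns false once num_wait reaches
 * max_num_wait = 9.)
 *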
*/ /* * * Routines for getting and displaying tape alerts * * Written by Kern Sibbald, October MMXVI * */ struct ta_error_handling { const char severity; const char flags; const char *short_msg; }; #define TA_NONE (0) #define TA_DISABLE_DRIVE (1<<0) #define TA_DISABLE_VOLUME (1<<1) #define TA_CLEAN_DRIVE (1<<2) #define TA_PERIODIC_CLEAN (1<<3) #define TA_RETENTION (1<<4) /* * ta_error_handling determines error handling * Severity determines if we have info, warning or fail (critical) error. * Flags allows setting flags * */ static struct ta_error_handling ta_errors[] = { {' ', TA_NONE, ""}, /* 1 */ {'W', TA_NONE, "Read Warning"}, {'W', TA_NONE, "Write Warning"}, {'C', TA_NONE, "Hard Error"}, {'C', TA_NONE, "Media"}, {'C', TA_NONE, "Read Failure"}, {'C', TA_NONE, "Write Failure"}, {'W', TA_DISABLE_VOLUME, "Media Life"}, {'W', TA_DISABLE_VOLUME, "Not Data Grade"}, {'C', TA_NONE, "Write Protect"}, /* 10 */ {'I', TA_NONE, "No Removal"}, {'I', TA_NONE, "Cleaning Media"}, {'I', TA_NONE, "Unsupported Format"}, {'C', TA_DISABLE_VOLUME, "Recoverable Snapped Tape"}, {'C', TA_DISABLE_DRIVE| \ TA_DISABLE_VOLUME, "Unrecoverable Snapped Tape"}, {'W', TA_NONE, "Cartridge Memory Chip Failure"}, {'C', TA_NONE, "Forced Eject"}, {'W', TA_NONE, "Read Only Format"}, {'W', TA_NONE, "Tape Directory Corrupted on load"}, {'I', TA_NONE, "Nearing Media Life"}, /* 20 */ {'C', TA_CLEAN_DRIVE| \ TA_DISABLE_DRIVE| \ TA_DISABLE_VOLUME, "Clean Now"}, {'W', TA_PERIODIC_CLEAN, "Clean Periodic"}, {'C', TA_DISABLE_VOLUME, "Expired Cleaning Media"}, {'C', TA_RETENTION, "Invalid Cleaning Media"}, {'W', TA_NONE, "Retention Requested"}, {'W', TA_NONE, "Dual-Port Interface Error"}, {'W', TA_NONE, "Cooling Fan Failure"}, {'W', TA_NONE, "Power Supply Failure"}, {'W', TA_NONE, "Power Consumption"}, {'W', TA_DISABLE_DRIVE, "Drive Maintenance"}, /* 30 */ {'C', TA_DISABLE_DRIVE, "Hardware A"}, {'C', TA_DISABLE_DRIVE, "Hardware B"}, {'W', TA_NONE, "Interface"}, {'C', TA_NONE, "Eject Media"}, {'W', TA_NONE, "Download Fail"}, {'W', TA_NONE, "Drive Humidity"}, {'W', TA_NONE, "Drive Temperature"}, {'W', TA_NONE, "Drive Voltage"}, {'C', TA_DISABLE_DRIVE, "Predictive Failure"}, {'W', TA_DISABLE_DRIVE, "Diagnostics Required"}, /* 40 */ {'C', TA_NONE, "Loader Hardware A"}, {'C', TA_NONE, "Loader Stray Tape"}, {'W', TA_NONE, "Loader Hardware B"}, {'C', TA_NONE, "Loader Door"}, {'C', TA_NONE, "Loader Hardware C"}, {'C', TA_NONE, "Loader Magazine"}, /* 46 */ {'W', TA_NONE, "Loader Predictive Failure"}, {' ', TA_NONE, ""}, {' ', TA_NONE, ""}, {' ', TA_NONE, ""}, /* 50 */ {'W', TA_NONE, "Lost Statistics"}, {'W', TA_NONE, "Tape directory invalid at unload"}, {'C', TA_DISABLE_VOLUME, "Tape system area write failure"}, {'C', TA_DISABLE_VOLUME, "Tape system area read failure"}, /* 54 */ {'C', TA_DISABLE_VOLUME, "No start of data"} }; /* * Long message, sometimes even too verbose. */ static const char *long_msg[] = { "", /* 1 */ "The tape drive is having problems reading data. No data has been lost, but there has been a reduction in the performance of the tape. The drive is having severe trouble reading", "The tape drive is having problems writing data. No data has been lost, but there has been a reduction in the capacity of the tape. The drive is having severe trouble writing", "The operation has stopped because an error has occurred while reading or writing data which the drive cannot correct. The drive had a hard read or write error", "Your data is at risk: Media cannot be written/read, or media performance is severely degraded.\n 1. 
Copy any data you require from the tape.\n 2. Do not use this tape again.\n 3. Restart the operation with a different tape.", "The tape is damaged or the drive is faulty. Call the tape drive supplier helpline. The drive can no longer read data from the tape", "The tape is from a faulty batch or the tape drive is faulty: The drive can no longer write data to the tape.\n 1. Use a good tape to test the drive.\n 2. If the problem persists, call the tape drive supplier helpline.", "The tape cartridge has reached the end of its calculated useful life: The media has exceeded its specified life.\n 1. Copy any data you need to another tape.\n 2. Discard the old tape.", "The tape cartridge is not data-grade. Any data you back up to the tape is at risk. The drive has not been able to read the MRS stripes. Replace the cartridge with a data-grade tape.", "You are trying to write to a write-protected cartridge. Write command is attempted to a write protected tape. Remove the write-protection or use another tape.", /* 10 */ "You cannot eject the cartridge because the tape drive is in use. Manual or s/w unload attempted when prevent media removal is enabled. Wait until the operation is complete before ejecting the cartridge.", "The tape in the drive is a cleaning cartridge. Cleaning tape loaded in drive.", "You have tried to load a cartridge of a type which is not supported by this drive. Attempted loaded of unsupported tape format, e.g. DDS2 in DDS1 drive.", "The operation has failed because the tape in the drive has snapped: Tape snapped/cut in the drive where media can be ejected.\n 1. Discard the old tape.\n 2. Restart the operation with a different tape.", "The operation has failed because the tape in the drive has snapped: Tape snapped/cut in the drive where media cannot be ejected.\n 1. Do not attempt to extract the tape cartridge.\n 2. Call the tape drive supplier helpline.", "The memory in the tape cartridge has failed, which reduces performance. Memory chip failed in cartridge. Do not use the cartridge for further backup operations.", "The operation has failed because the tape cartridge was manually ejected while the tape drive was actively writing or reading. Manual or forced eject while drive actively writing or reading", "You have loaded a cartridge of a type that is read-only in this drive. Media loaded that is read-only format. The cartridge will appear as write-protected.", "The directory on the tape cartridge has been corrupted. Tape drive powered down with tape loaded, or permanent error prevented the tape directory being updated. File search performance will be degraded. The tape directory can be rebuilt by reading all the data on the cartridge.", "The tape cartridge is nearing the end of its calculated life. Media may have exceeded its specified number of passes. It is recommended that you:\n 1. Use another tape cartridge for your next backup.\n 2. Store this tape cartridge in a safe place in case you need to restore data from it.", /* 20 */ "The tape drive needs cleaning: The drive thinks it has a head clog, or needs cleaning.\n 1. If the operation has stopped, eject the tape and clean the drive.\n 2. If the operation has not stopped, wait for it to finish and then clean the drive.\n Check the tape drive users manual for device specific cleaning instructions.", "The tape drive is due for routine cleaning: The drive is ready for a periodic clean.\n 1. Wait for the current operation to finish.\n 2. 
Then use a cleaning cartridge.\n Check the tape drive users manual for device specific cleaning instructions.", "The last cleaning cartridge used in the tape drive has worn out. The cleaning tape has expired.\n 1. Discard the worn out cleaning cartridge.\n 2. Wait for the current operation to finish.\n 3. Then use a new cleaning cartridge.", "The last cleaning cartridge used in the tape drive was an invalid type: Invalid cleaning tape type used.\n 1. Do not use this cleaning cartridge in this drive.\n 2. Wait for the current operation to finish.\n 3. Then use a valid cleaning cartridge.", "The tape drive has requested a retention operation. The drive is having severe trouble reading or writing, which will be resolved by a retention cycle.", "A redundant interface port on the tape drive has failed. Failure of one interface port in a dual-port configuration, e.g. Fibrechannel.", "A tape drive cooling fan has failed. Fan failure inside tape drive mechanism or tape drive enclosure.", "A redundant power supply has failed inside the tape drive enclosure. Check the enclosure users manual for instructions on replacing the failed power supply. Redundant PSU failure inside the tape drive enclosure or rack subsystem.", "The tape drive power consumption is outside the specified range. Power consumption of the tape drive is outside specified range.", "Preventive maintenance of the tape drive is required. The drive requires preventative maintenance (not cleaning). Check the tape drive users manual for device specific preventive maintenance tasks or call the tape drive supplier helpline.", /* 30 */ "The tape drive has a hardware fault: The drive has a hardware fault that requires reset to recover.\n 1. Eject the tape or magazine.\n 2. Reset the drive.\n 3. Restart the operation.", "The tape drive has a hardware fault: The drive has a hardware fault which is not read/write related or requires a power cycle to recover.\n 1. Turn the tape drive off and then on again.\n 2. Restart the operation.\n 3. If the problem persists, call the tape drive supplier helpline.\n Check the tape drive users manual for device specific instructions on turning the device power on and off.", "The tape drive has a problem with the host interface: The drive has identified an interfacing fault.\n 1. Check the cables and cable connections.\n 2. Restart the operation.", "The operation has failed. Error recovery action:\n 1. Eject the tape or magazine.\n 2. Insert the tape or magazine again.\n 3. Restart the operation.", "The firmware download has failed because you have tried to use the incorrect firmware for this tape drive. Firmware download failed. Obtain the correct firmware and try again.", "Environmental conditions inside the tape drive are outside the specified humidity range. Drive humidity limits exceeded.", "Environmental conditions inside the tape drive are outside the specified temperature range. Drive temperature limits exceeded.", "The voltage supply to the tape drive is outside the specified range. Drive voltage limits exceeded.", "A hardware failure of the tape drive is predicted. Call the tape drive supplier helpline. Predictive failure of drive hardware.", "The tape drive may have a fault. Check for availability of diagnostic information and run extended diagnostics if applicable. The drive may have had a failure which may be identified by stored diagnostic information or by running extended diagnostics (eg Send Diagnostic). 
Check the tape drive users manual for instructions on running extended diagnostic tests and retrieving diagnostic data.", /* 40 */ "The changer mechanism is having difficulty communicating with the tape drive: Loader mech. is having trouble communicating with the tape drive.\n 1. Turn the autoloader off then on.\n 2. Restart the operation.\n 3. If problem persists, call the tape drive supplier helpline.", "A tape has been left in the autoloader by a previous hardware fault: Stray tape left in loader after previous error recovery.\n 1. Insert an empty magazine to clear the fault.\n 2. If the fault does not clear, turn the autoloader off and then on again.\n 3. If the problem persists, call the tape drive supplier helpline.", "There is a problem with the autoloader mechanism. Loader mech. has a hardware fault.", "The operation has failed because the autoloader door is open: Tape changer door open:\n 1. Clear any obstructions from the autoloader door.\n 2. Eject the magazine and then insert it again.\n 3. If the fault does not clear, turn the autoloader off and then on again.\n 4. If the problem persists, call the tape drive supplier helpline.", "The autoloader has a hardware fault: The loader mechanism has a hardware fault that is not mechanically related.\n 1. Turn the autoloader off and then on again.\n 2. Restart the operation.\n 3. If the problem persists, call the tape drive supplier helpline.\n Check the autoloader users manual for device specific instructions on turning the device power on and off.", "The autoloader cannot operate without the magazine. Loader magazine not present.\n 1. Insert the magazine into the autoloader.\n 2. Restart the operation.", /* 46 */ "A hardware failure of the changer mechanism is predicted. Predictive failure of loader mechanism hardware. Call the tape drive supplier helpline.", "", "", "", /* 50 */ "Media statistics have been lost at some time in the past, Drive or library powered down with tape loaded.", "The tape directory on the tape cartridge just unloaded has been corrupted. Error prevented the tape directory being updated on unload. File search performance will be degraded. The tape directory can be rebuilt by reading all the data.", "The tape just unloaded could not write its system area successfully: Write errors while writing the system log on unload.\n 1. Copy data to another tape cartridge.\n 2. Discard the old cartridge.", "The tape system area could not be read successfully at load time: Read errors while reading the system area on load.\n 1. Copy data to another tape cartridge.\n 2. Discard the old cartridge.", /* 54 */ "The start of data could not be found on the tape: Tape damaged, bulk erased, or incorrect format.\n 1. Check you are using the correct format tape.\n 2. Discard the tape or return the tape to your supplier." }; bacula-15.0.3/src/stored/tape_dev.c0000644000175000017500000010264114771010173016710 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * * tape_dev.c -- low level operations on tape devices * * written by, Kern Sibbald, MM * separated from dev.c in February 2014 * * The separation between tape and file is not yet clean. * */ /* * Handling I/O errors and end of tape conditions are a bit tricky. * This is how it is currently done when writing. * On either an I/O error or end of tape, * we will stop writing on the physical device (no I/O recovery is * attempted at least in this daemon). The state flag will be sent * to include ST_EOT, which is ephemeral, and ST_WEOT, which is * persistent. Lots of routines clear ST_EOT, but ST_WEOT is * cleared only when the problem goes away. Now when ST_WEOT * is set all calls to write_block_to_device() call the fix_up * routine. In addition, all threads are blocked * from writing on the tape by calling lock_dev(), and thread other * than the first thread to hit the EOT will block on a condition * variable. The first thread to hit the EOT will continue to * be able to read and write the tape (he sort of tunnels through * the locking mechanism -- see lock_dev() for details). * * Now presumably somewhere higher in the chain of command * (device.c), someone will notice the EOT condition and * get a new tape up, get the tape label read, and mark * the label for rewriting. Then this higher level routine * will write the unwritten buffer to the new volume. * Finally, he will release * any blocked threads by doing a broadcast on the condition * variable. At that point, we should be totally back in * business with no lost data. */ #include "bacula.h" #include "stored.h" #include "lintape.h" #ifndef O_NONBLOCK #define O_NONBLOCK 0 #endif /* Imported functions */ extern void set_os_device_parameters(DCR *dcr); extern bool dev_get_os_pos(DEVICE *dev, struct mtget *mt_stat); extern uint32_t status_dev(DEVICE *dev); const char *mode_to_str(int mode); /* */ bool tape_dev::open_device(DCR *dcr, int omode) { file_size = 0; int timeout = max_open_wait; #if !defined(HAVE_WIN32) struct mtop mt_com; utime_t start_time = time(NULL); #endif if (DEVICE::open_device(dcr, omode)) { return true; /* already open */ } omode = openmode; /* pickup possible new options */ mount(1); /* do mount if required */ Dmsg0(100, "Open dev: device is tape\n"); get_autochanger_loaded_slot(dcr); openmode = omode; set_mode(omode); if (timeout < 1) { timeout = 1; } errno = 0; if (is_fifo() && timeout) { /* Set open timer */ tid = start_thread_timer(dcr->jcr, pthread_self(), timeout); } Dmsg2(100, "Try open %s mode=%s\n", print_name(), mode_to_str(omode)); #if defined(HAVE_WIN32) /* Windows Code */ if ((m_fd = d_open(dev_name, mode)) < 0) { dev_errno = errno; } #else /* UNIX Code */ /* If busy retry each second for max_open_wait seconds */ for ( ;; ) { /* Try non-blocking open */ m_fd = d_open(dev_name, mode+O_NONBLOCK); if (m_fd < 0) { berrno be; dev_errno = errno; Dmsg5(100, "Open error on %s omode=%d mode=%x errno=%d: ERR=%s\n", print_name(), omode, mode, errno, be.bstrerror()); } else { /* Tape open, now rewind it */ Dmsg0(100, "Rewind after open\n"); mt_com.mt_op = MTREW; mt_com.mt_count = 1; /* rewind only if dev is a tape */ if (is_tape() && (d_ioctl(m_fd, MTIOCTOP, (char *)&mt_com) < 0)) { berrno be; dev_errno = errno; /* set error status from rewind */ d_close(m_fd); clear_opened(); Dmsg2(100, "Rewind error on %s close: ERR=%s\n", print_name(), be.bstrerror(dev_errno)); /* If we get busy, device is probably rewinding, try again */ if (dev_errno != EBUSY) { break; /* error -- no medium */ } } else { /* Got fd and rewind 
worked, so we must have medium in drive */ d_close(m_fd); m_fd = d_open(dev_name, mode); /* open normally */ if (m_fd < 0) { berrno be; dev_errno = errno; Dmsg5(100, "Open error on %s omode=%d mode=%x errno=%d: ERR=%s\n", print_name(), omode, mode, errno, be.bstrerror()); break; } dev_errno = 0; lock_door(); set_os_device_parameters(dcr); /* do system dependent stuff */ break; /* Successfully opened and rewound */ } } bmicrosleep(5, 0); /* Exceed wait time ? */ if (time(NULL) - start_time >= max_open_wait) { break; /* yes, get out */ } } #endif if (!is_open()) { berrno be; Mmsg2(errmsg, _("Unable to open device %s: ERR=%s\n"), print_name(), be.bstrerror(dev_errno)); if (dcr->jcr) { pm_strcpy(dcr->jcr->errmsg, errmsg); } Dmsg1(100, "%s", errmsg); } /* Stop any open() timer we started */ if (tid) { stop_thread_timer(tid); tid = 0; } Dmsg1(100, "open dev: tape %d opened\n", m_fd); state |= preserve; /* reset any important state info */ return m_fd >= 0; } /* * Rewind the device. * Returns: true on success * false on failure */ bool tape_dev::rewind(DCR *dcr) { struct mtop mt_com; unsigned int i; bool first = true; Dmsg3(400, "rewind res=%d fd=%d %s\n", num_reserved(), m_fd, print_name()); state &= ~(ST_EOT|ST_EOF|ST_WEOT); /* remove EOF/EOT flags */ block_num = file = 0; file_size = 0; file_addr = 0; if (m_fd < 0) { return false; } if (is_tape()) { mt_com.mt_op = MTREW; mt_com.mt_count = 1; /* If we get an I/O error on rewind, it is probably because * the drive is actually busy. We loop for (about 5 minutes) * retrying every 5 seconds. */ for (i=max_rewind_wait; ; i -= 5) { if (d_ioctl(m_fd, MTIOCTOP, (char *)&mt_com) < 0) { berrno be; clrerror(MTREW); if (i == max_rewind_wait) { Dmsg1(200, "Rewind error, %s. retrying ...\n", be.bstrerror()); } /* * This is a gross hack, because if the user has the * device mounted (i.e. open), then uses mtx to load * a tape, the current open file descriptor is invalid. * So, we close the drive and re-open it. */ if (first && dcr) { int open_mode = openmode; d_close(m_fd); clear_opened(); open_device(dcr, open_mode); if (m_fd < 0) { return false; } first = false; continue; } #ifdef HAVE_SUN_OS if (dev_errno == EIO) { Mmsg1(errmsg, _("No tape loaded or drive offline on %s.\n"), print_name()); return false; } #else if (dev_errno == EIO && i > 0) { Dmsg0(200, "Sleeping 5 seconds.\n"); bmicrosleep(5, 0); continue; } #endif Mmsg2(errmsg, _("Rewind error on %s. ERR=%s.\n"), print_name(), be.bstrerror()); return false; } break; } } return true; } /* * Check if the current position on the volume corresponds to * what is in the catalog. * */ bool tape_dev::is_eod_valid(DCR *dcr) { JCR *jcr = dcr->jcr; /* * Check if we are positioned on the tape at the same place * that the database says we should be. */ if (VolCatInfo.VolCatFiles == get_file()) { Jmsg(jcr, M_INFO, 0, _("Ready to append to end of Volume \"%s\" at file=%d.\n"), dcr->VolumeName, get_file()); } else if (get_file() > VolCatInfo.VolCatFiles) { Jmsg(jcr, M_WARNING, 0, _("For Volume \"%s\":\n" "The number of files mismatch! Volume=%u Catalog=%u\n" "Correcting Catalog\n"), dcr->VolumeName, get_file(), VolCatInfo.VolCatFiles); VolCatInfo.VolCatFiles = get_file(); VolCatInfo.VolCatBlocks = get_block_num(); if (!dir_update_volume_info(dcr, false, true)) { Jmsg(jcr, M_WARNING, 0, _("Error updating Catalog\n")); dcr->mark_volume_in_error(); return false; } } else { Jmsg(jcr, M_ERROR, 0, _("Bacula cannot write on tape Volume \"%s\" because:\n" "The number of files mismatch! 
Volume=%u Catalog=%u\n"), dcr->VolumeName, get_file(), VolCatInfo.VolCatFiles); dcr->mark_volume_in_error(); return false; } return true; } /* * Position device to end of medium (end of data) * Returns: true on succes * false on error */ bool tape_dev::eod(DCR *dcr) { struct mtop mt_com; bool ok = true; int32_t os_file; Enter(100); ok = DEVICE::eod(dcr); if (!ok) { Leave(100); return false; } #if defined (__digital__) && defined (__unix__) Leave(100); return fsf(VolCatInfo.VolCatFiles); #endif #ifdef MTEOM if (has_cap(CAP_FASTFSF) && !has_cap(CAP_EOM)) { Dmsg0(100,"Using FAST FSF for EOM\n"); /* If unknown position, rewind */ if (get_os_tape_file() < 0) { if (!rewind(dcr)) { Dmsg0(100, "Rewind error\n"); Leave(100); return false; } } mt_com.mt_op = MTFSF; /* * ***FIXME*** fix code to handle case that INT16_MAX is * not large enough. */ mt_com.mt_count = INT16_MAX; /* use big positive number */ if (mt_com.mt_count < 0) { mt_com.mt_count = INT16_MAX; /* brain damaged system */ } } if (has_cap(CAP_MTIOCGET) && (has_cap(CAP_FASTFSF) || has_cap(CAP_EOM))) { if (has_cap(CAP_EOM)) { Dmsg0(100,"Using EOM for EOM\n"); mt_com.mt_op = MTEOM; mt_com.mt_count = 1; } if (d_ioctl(m_fd, MTIOCTOP, (char *)&mt_com) < 0) { berrno be; clrerror(mt_com.mt_op); Dmsg1(50, "ioctl error: %s\n", be.bstrerror()); update_pos(dcr); Mmsg2(errmsg, _("ioctl MTEOM error on %s. ERR=%s.\n"), print_name(), be.bstrerror()); Dmsg1(100, "%s", errmsg); Leave(100); return false; } os_file = get_os_tape_file(); if (os_file < 0) { berrno be; clrerror(-1); Mmsg2(errmsg, _("ioctl MTIOCGET error on %s. ERR=%s.\n"), print_name(), be.bstrerror()); Dmsg1(100, "%s", errmsg); Leave(100); return false; } Dmsg1(100, "EOD file=%d\n", os_file); set_ateof(); file = os_file; } else { #else { #endif /* * Rewind then use FSF until EOT reached */ if (!rewind(dcr)) { Dmsg0(100, "Rewind error.\n"); Leave(100); return false; } /* * Move file by file to the end of the tape */ int file_num; for (file_num=file; !at_eot(); file_num++) { Dmsg0(200, "eod: doing fsf 1\n"); if (!fsf(1)) { Dmsg0(100, "fsf error.\n"); Leave(100); return false; } /* * Avoid infinite loop by ensuring we advance. */ if (!at_eot() && file_num == (int)file) { Dmsg1(100, "fsf did not advance from file %d\n", file_num); set_ateof(); os_file = get_os_tape_file(); if (os_file >= 0) { Dmsg2(100, "Adjust file from %d to %d\n", file_num, os_file); file = os_file; } break; } } } /* * Some drivers leave us after second EOF when doing * MTEOM, so we must backup so that appending overwrites * the second EOF. */ if (has_cap(CAP_BSFATEOM)) { /* Backup over EOF */ ok = bsf(1); /* If BSF worked and fileno is known (not -1), set file */ os_file = get_os_tape_file(); if (os_file >= 0) { Dmsg2(100, "BSFATEOF adjust file from %d to %d\n", file , os_file); file = os_file; } else { file++; /* wing it -- not correct on all OSes */ } } else { update_pos(dcr); /* update position */ } Dmsg1(200, "EOD dev->file=%d\n", file); Leave(100); return ok; } /* * Load medium in device * Returns: true on success * false on failure */ bool load_dev(DEVICE *dev) { #ifdef MTLOAD struct mtop mt_com; #endif if (dev->fd() < 0) { dev->dev_errno = EBADF; Mmsg0(dev->errmsg, _("Bad call to load_dev. Device not open\n")); Emsg0(M_FATAL, 0, dev->errmsg); return false; } if (!(dev->is_tape())) { return true; } #ifndef MTLOAD Dmsg0(200, "stored: MTLOAD command not available\n"); berrno be; dev->dev_errno = ENOTTY; /* function not available */ Mmsg2(dev->errmsg, _("ioctl MTLOAD error on %s. 
ERR=%s.\n"), dev->print_name(), be.bstrerror()); return false; #else dev->block_num = dev->file = 0; dev->file_size = 0; dev->file_addr = 0; mt_com.mt_op = MTLOAD; mt_com.mt_count = 1; if (dev->d_ioctl(dev->fd(), MTIOCTOP, (char *)&mt_com) < 0) { berrno be; dev->dev_errno = errno; Mmsg2(dev->errmsg, _("ioctl MTLOAD error on %s. ERR=%s.\n"), dev->print_name(), be.bstrerror()); return false; } return true; #endif } /* * Rewind device and put it offline * Returns: true on success * false on failure */ bool tape_dev::offline(DCR *dcr) { struct mtop mt_com; if (!is_tape()) { return true; /* device not open */ } state &= ~(ST_APPEND|ST_READ|ST_EOT|ST_EOF|ST_WEOT); /* remove EOF/EOT flags */ block_num = file = 0; file_size = 0; file_addr = 0; unlock_door(); mt_com.mt_op = MTOFFL; mt_com.mt_count = 1; if (d_ioctl(m_fd, MTIOCTOP, (char *)&mt_com) < 0) { berrno be; dev_errno = errno; Mmsg2(errmsg, _("ioctl MTOFFL error on %s. ERR=%s.\n"), print_name(), be.bstrerror()); return false; } Dmsg1(100, "Offlined device %s\n", print_name()); return true; } bool DEVICE::offline_or_rewind(DCR *dcr) { if (m_fd < 0) { return false; } if (has_cap(CAP_OFFLINEUNMOUNT)) { return offline(dcr); } else { /* * Note, this rewind probably should not be here (it wasn't * in prior versions of Bacula), but on FreeBSD, this is * needed in the case the tape was "frozen" due to an error * such as backspacing after writing and EOF. If it is not * done, all future references to the drive get and I/O error. */ clrerror(MTREW); return rewind(dcr); } } /* * Foward space a file * Returns: true on success * false on failure */ bool tape_dev::fsf(int num) { int32_t os_file = 0; struct mtop mt_com; int stat = 0; if (!is_open()) { dev_errno = EBADF; Mmsg0(errmsg, _("Bad call to fsf. Device not open\n")); Emsg0(M_FATAL, 0, errmsg); return false; } if (!is_tape()) { return true; } if (at_eot()) { dev_errno = 0; Mmsg1(errmsg, _("Device %s at End of Tape.\n"), print_name()); return false; } if (at_eof()) { Dmsg0(200, "ST_EOF set on entry to FSF\n"); } Dmsg0(100, "fsf\n"); block_num = 0; /* * If Fast forward space file is set, then we * use MTFSF to forward space and MTIOCGET * to get the file position. We assume that * the SCSI driver will ensure that we do not * forward space past the end of the medium. */ if (has_cap(CAP_FSF) && has_cap(CAP_MTIOCGET) && has_cap(CAP_FASTFSF)) { int my_errno = 0; mt_com.mt_op = MTFSF; mt_com.mt_count = num; stat = d_ioctl(m_fd, MTIOCTOP, (char *)&mt_com); if (stat < 0) { my_errno = errno; /* save errno */ } else if ((os_file=get_os_tape_file()) < 0) { my_errno = errno; /* save errno */ } if (my_errno != 0) { berrno be; set_eot(); Dmsg0(200, "Set ST_EOT\n"); clrerror(MTFSF); Mmsg2(errmsg, _("ioctl MTFSF error on %s. ERR=%s.\n"), print_name(), be.bstrerror(my_errno)); Dmsg1(200, "%s", errmsg); return false; } Dmsg1(200, "fsf file=%d\n", os_file); set_ateof(); file = os_file; return true; /* * Here if CAP_FSF is set, and virtually all drives * these days support it, we read a record, then forward * space one file. Using this procedure, which is slow, * is the only way we can be sure that we don't read * two consecutive EOF marks, which means End of Data. 
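* In the loop below, each pass reads one record (up to max_block_size bytes, or DEFAULT_BLOCK_SIZE when no maximum is set): a zero-length read while EOF is already set is the second consecutive file mark and is treated as EOT, a first zero-length read only sets EOF, and a successful read clears EOF/EOT before MTFSF spaces over the following file mark.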
*/ } else if (has_cap(CAP_FSF)) { POOLMEM *rbuf; int rbuf_len; Dmsg0(200, "FSF has cap_fsf\n"); if (max_block_size == 0) { rbuf_len = DEFAULT_BLOCK_SIZE; } else { rbuf_len = max_block_size; } rbuf = get_memory(rbuf_len); mt_com.mt_op = MTFSF; mt_com.mt_count = 1; while (num-- && !at_eot()) { Dmsg0(100, "Doing read before fsf\n"); if ((stat = this->read((char *)rbuf, rbuf_len)) < 0) { if (errno == ENOMEM) { /* tape record exceeds buf len */ stat = rbuf_len; /* This is OK */ /* * On IBM drives, they return ENOSPC at EOM * instead of EOF status */ } else if (at_eof() && errno == ENOSPC) { stat = 0; /* With IBM Lintape, we need to check the Error sense of the device */ } else if (at_eof() && errno == EIO && check_lintape_eod()) { stat = 0; } else { berrno be; set_eot(); clrerror(-1); Dmsg2(100, "Set ST_EOT read errno=%d. ERR=%s\n", dev_errno, be.bstrerror()); Mmsg2(errmsg, _("read error on %s. ERR=%s.\n"), print_name(), be.bstrerror()); Dmsg1(100, "%s", errmsg); break; } } if (stat == 0) { /* EOF */ Dmsg1(100, "End of File mark from read. File=%d\n", file+1); /* Two reads of zero means end of tape */ if (at_eof()) { set_eot(); Dmsg0(100, "Set ST_EOT\n"); break; } else { set_ateof(); continue; } } else { /* Got data */ clear_eot(); clear_eof(); } Dmsg0(100, "Doing MTFSF\n"); stat = d_ioctl(m_fd, MTIOCTOP, (char *)&mt_com); if (stat < 0) { /* error => EOT */ berrno be; set_eot(); Dmsg0(100, "Set ST_EOT\n"); clrerror(MTFSF); Mmsg2(errmsg, _("ioctl MTFSF error on %s. ERR=%s.\n"), print_name(), be.bstrerror()); Dmsg0(100, "Got < 0 for MTFSF\n"); Dmsg1(100, "%s", errmsg); } else { set_ateof(); } } free_memory(rbuf); /* * No FSF, so use FSR to simulate it */ } else { Dmsg0(200, "Doing FSR for FSF\n"); while (num-- && !at_eot()) { fsr(INT32_MAX); /* returns -1 on EOF or EOT */ } if (at_eot()) { dev_errno = 0; Mmsg1(errmsg, _("Device %s at End of Tape.\n"), print_name()); stat = -1; } else { stat = 0; } } Dmsg1(200, "Return %d from FSF\n", stat); if (at_eof()) { Dmsg0(200, "ST_EOF set on exit FSF\n"); } if (at_eot()) { Dmsg0(200, "ST_EOT set on exit FSF\n"); } Dmsg1(200, "Return from FSF file=%d\n", file); return stat == 0; } /* * Backward space a file * Returns: false on failure * true on success */ bool tape_dev::bsf(int num) { struct mtop mt_com; int stat; if (!is_open()) { dev_errno = EBADF; Mmsg0(errmsg, _("Bad call to bsf. Device not open\n")); Emsg0(M_FATAL, 0, errmsg); return false; } if (!is_tape()) { Mmsg1(errmsg, _("Device %s cannot BSF because it is not a tape.\n"), print_name()); return false; } Dmsg0(100, "bsf\n"); clear_eot(); clear_eof(); file -= num; file_addr = 0; file_size = 0; mt_com.mt_op = MTBSF; mt_com.mt_count = num; stat = d_ioctl(m_fd, MTIOCTOP, (char *)&mt_com); if (stat < 0) { berrno be; clrerror(MTBSF); Mmsg2(errmsg, _("ioctl MTBSF error on %s. ERR=%s.\n"), print_name(), be.bstrerror()); } return stat == 0; } /* * Foward space num records * Returns: false on failure * true on success */ bool DEVICE::fsr(int num) { struct mtop mt_com; int stat; if (!is_open()) { dev_errno = EBADF; Mmsg0(errmsg, _("Bad call to fsr. 
Device not open\n")); Emsg0(M_FATAL, 0, errmsg); return false; } if (!is_tape()) { return false; } if (!has_cap(CAP_FSR)) { Mmsg1(errmsg, _("ioctl MTFSR not permitted on %s.\n"), print_name()); return false; } Dmsg1(100, "fsr %d\n", num); mt_com.mt_op = MTFSR; mt_com.mt_count = num; stat = d_ioctl(m_fd, MTIOCTOP, (char *)&mt_com); if (stat == 0) { clear_eof(); block_num += num; } else { berrno be; struct mtget mt_stat; clrerror(MTFSR); Dmsg1(100, "FSF fail: ERR=%s\n", be.bstrerror()); if (dev_get_os_pos(this, &mt_stat)) { Dmsg4(100, "Adjust from %d:%d to %d:%d\n", file, block_num, mt_stat.mt_fileno, mt_stat.mt_blkno); file = mt_stat.mt_fileno; block_num = mt_stat.mt_blkno; } else { if (at_eof()) { set_eot(); } else { set_ateof(); } } Mmsg3(errmsg, _("ioctl MTFSR %d error on %s. ERR=%s.\n"), num, print_name(), be.bstrerror()); } return stat == 0; } /* * Backward space a record * Returns: false on failure * true on success */ bool DEVICE::bsr(int num) { struct mtop mt_com; int stat; if (!is_open()) { dev_errno = EBADF; Mmsg0(errmsg, _("Bad call to bsr_dev. Device not open\n")); Emsg0(M_FATAL, 0, errmsg); return false; } if (!is_tape()) { return false; } if (!has_cap(CAP_BSR)) { Mmsg1(errmsg, _("ioctl MTBSR not permitted on %s.\n"), print_name()); return false; } Dmsg0(100, "bsr_dev\n"); block_num -= num; clear_eof(); clear_eot(); mt_com.mt_op = MTBSR; mt_com.mt_count = num; stat = d_ioctl(m_fd, MTIOCTOP, (char *)&mt_com); if (stat < 0) { berrno be; clrerror(MTBSR); Mmsg2(errmsg, _("ioctl MTBSR error on %s. ERR=%s.\n"), print_name(), be.bstrerror()); } return stat == 0; } void tape_dev::lock_door() { #ifdef MTLOCK /* ***BEEF ***/ if (device->lock_command && device->control_name) { return; /* Locked using SCSI persistent locks */ } struct mtop mt_com; if (!is_tape()) return; mt_com.mt_op = MTLOCK; mt_com.mt_count = 1; d_ioctl(m_fd, MTIOCTOP, (char *)&mt_com); #endif } void tape_dev::unlock_door() { #ifdef MTUNLOCK /* ***BEEF ***/ if (device->lock_command && device->control_name) { return; /* Locked using SCSI persistent locks */ } struct mtop mt_com; if (!is_tape()) return; mt_com.mt_op = MTUNLOCK; mt_com.mt_count = 1; d_ioctl(m_fd, MTIOCTOP, (char *)&mt_com); #endif } /* * Reposition the device to file, block * Returns: false on failure * true on success */ bool tape_dev::reposition(DCR *dcr, uint64_t raddr) { uint32_t rfile, rblock; rfile = (uint32_t)(raddr>>32); rblock = (uint32_t)raddr; if (!is_open()) { dev_errno = EBADF; Mmsg0(errmsg, _("Bad call to reposition. Device not open\n")); Emsg0(M_FATAL, 0, errmsg); return false; } /* After this point, we are tape only */ Dmsg4(100, "reposition from %u:%u to %u:%u\n", file, block_num, rfile, rblock); if (rfile < file) { Dmsg0(100, "Rewind\n"); if (!rewind(dcr)) { return false; } } if (rfile > file) { Dmsg1(100, "fsf %d\n", rfile-file); if (!fsf(rfile-file)) { Dmsg1(100, "fsf failed! 
ERR=%s\n", bstrerror()); return false; } Dmsg2(100, "wanted_file=%d at_file=%d\n", rfile, file); } if (rblock < block_num) { Dmsg2(100, "wanted_blk=%d at_blk=%d\n", rblock, block_num); Dmsg0(100, "bsf 1\n"); bsf(1); Dmsg0(100, "fsf 1\n"); fsf(1); Dmsg2(100, "wanted_blk=%d at_blk=%d\n", rblock, block_num); } if (has_cap(CAP_POSITIONBLOCKS) && rblock > block_num) { /* Ignore errors as Bacula can read to the correct block */ Dmsg1(100, "fsr %d\n", rblock-block_num); return fsr(rblock-block_num); } else { while (rblock > block_num) { if (!dcr->read_block_from_dev(NO_BLOCK_NUMBER_CHECK)) { berrno be; dev_errno = errno; Dmsg2(30, "Failed to find requested block on %s: ERR=%s", print_name(), be.bstrerror()); return false; } Dmsg2(300, "moving forward wanted_blk=%d at_blk=%d\n", rblock, block_num); } } return true; } /* * Write an end of file on the device * Returns: true on success * false on failure */ bool tape_dev::weof(DCR *dcr, int num) { struct mtop mt_com; int stat; Dmsg1(129, "=== weof_dev=%s\n", print_name()); if (!is_open()) { dev_errno = EBADF; Mmsg0(errmsg, _("Bad call to weof_dev. Device not open\n")); Emsg0(M_FATAL, 0, errmsg); return false; } file_size = 0; if (!is_tape()) { return true; } if (!can_append()) { Mmsg0(errmsg, _("Attempt to WEOF on non-appendable Volume\n")); Emsg0(M_FATAL, 0, errmsg); return false; } clear_eof(); clear_eot(); mt_com.mt_op = MTWEOF; mt_com.mt_count = num; stat = d_ioctl(m_fd, MTIOCTOP, (char *)&mt_com); if (stat == 0) { block_num = 0; file += num; file_addr = 0; } else { berrno be; clrerror(MTWEOF); if (stat == -1) { Mmsg2(errmsg, _("ioctl MTWEOF error on %s. ERR=%s.\n"), print_name(), be.bstrerror()); } } /* DCR is null if called from within write_ansi_ibm_labels() */ if (dcr && stat == 0) { if (!write_ansi_ibm_labels(dcr, ANSI_EOF_LABEL, VolHdr.VolumeName)) { stat = -1; } } return stat == 0; } /* * If timeout, wait until the mount command returns 0. * If !timeout, try to mount the device only once. */ bool tape_dev::mount(int timeout) { Dmsg0(190, "Enter tape mount\n"); if (!is_mounted() && device->mount_command) { return mount_tape(1, timeout); } return true; } /* * Unmount the device * If timeout, wait until the unmount command returns 0. * If !timeout, try to unmount the device only once. */ bool tape_dev::unmount(int timeout) { Dmsg0(100, "Enter tape unmount\n"); if (!is_mounted() && requires_mount() && device->unmount_command) { return mount_tape(0, timeout); } return true; } /* * (Un)mount the device (for tape devices) */ bool tape_dev::mount_tape(int mount, int dotimeout) { POOL_MEM ocmd(PM_FNAME); POOLMEM *results; char *icmd; int status, tries; berrno be; Dsm_check(200); if (mount) { icmd = device->mount_command; } else { icmd = device->unmount_command; } edit_mount_codes(ocmd, icmd); Dmsg2(100, "mount_tape: cmd=%s mounted=%d\n", ocmd.c_str(), !!is_mounted()); if (dotimeout) { /* Try at most 10 times to (un)mount the device. This should perhaps be configurable. */ tries = 10; } else { tries = 1; } results = get_memory(4000); /* If busy retry each second */ Dmsg1(100, "mount_tape run_prog=%s\n", ocmd.c_str()); while ((status = run_program_full_output(ocmd.c_str(), max_open_wait/2, results)) != 0) { if (tries-- > 0) { continue; } Dmsg5(100, "Device %s cannot be %smounted. stat=%d result=%s ERR=%s\n", print_name(), (mount ? "" : "un"), status, results, be.bstrerror(status)); Mmsg(errmsg, _("Device %s cannot be %smounted. ERR=%s\n"), print_name(), (mount ? 
"" : "un"), be.bstrerror(status)); set_mounted(false); free_pool_memory(results); Dmsg0(200, "============ mount=0\n"); Dsm_check(200); return false; } set_mounted(mount); /* set/clear mounted flag */ free_pool_memory(results); Dmsg1(200, "============ mount=%d\n", mount); return true; } void tape_dev::set_ateof() { if (at_eof()) { return; } DEVICE::set_ateof(); file++; } const char *tape_dev::print_type() { return "Tape"; } DEVICE *tape_dev::get_dev(DCR */*dcr*/) { return this; } uint32_t tape_dev::get_hi_addr() { return file; } uint32_t tape_dev::get_low_addr() { return block_num; } uint64_t tape_dev::get_full_addr() { return (((uint64_t)file) << 32) | (uint64_t)block_num; } bool tape_dev::end_of_volume(DCR *dcr) { return write_ansi_ibm_labels(dcr, ANSI_EOV_LABEL, VolHdr.VolumeName); } /* Print the address */ char *tape_dev::print_addr(char *buf, int32_t buf_len) { buf[0] = 0; bsnprintf(buf, buf_len, "%lu:%lu", get_hi_addr(), get_low_addr()); return buf; } char *tape_dev::print_addr(char *buf, int32_t buf_len, boffset_t addr) { buf[0] = 0; bsnprintf(buf, buf_len, "%lu:%lu", (uint32_t)(addr>>32), (uint32_t)addr); return buf; } /* * Clean up when terminating the device */ void tape_dev::term(DCR *dcr) { delete_alerts(); DEVICE::term(dcr); } /* Official documentation * https://www.ibm.com/support/pages/node/652827 * * TS3310 TApe Library SCSI Reference, look for "REQUEST SENSE" * https://www.ibm.com/support/pages/system/files/support/ssg/ssgdocs.nsf/0/b321b010cdcf56e58525773b0070c925/$FILE/GA32-0476-01.pdf */ bool tape_dev::check_lintape_eod() { #ifdef HAVE_LINUX_OS if (has_cap(CAP_LINTAPE)) { int rc; char buf[128]; lintape_request_sense sense; memset(&sense, 0, sizeof(sense)); rc = d_ioctl(m_fd, SIOC_REQSENSE, (char*)&sense); if (rc != 0) { Dmsg0(150, "Unable to perform SIOC_REQSENSE\n"); return false; } if (chk_dbglvl(150)) { d_msg(__FILE__, __LINE__, 150, "Information Field Valid Bit-------%d\n" "Error Code------------------------0x%02x\n" "Segment Number--------------------0x%02x\n" "filemark Detected Bit-------------%d\n" "End Of Medium Bit-----------------%d\n" "Illegal Length Indicator Bit------%d\n" "Sense Key-------------------------0x%02x\n" " Information Bytes---------------0x%02x 0x%02x 0x%02x 0x%02x\n" "Additional Sense Length-----------0x%02x\n" "Command Specific Information------0x%02x 0x%02x 0x%02x 0x%02x\n" "Additional Sense Code-------------0x%02x\n" "Additional Sense Code Qualifier---0x%02x\n" "Field Replaceable Unit Code-------0x%02x\n" "Sense Key Specific Valid Bit------%d\n" " Command Data Block Bit----------%d\n" " Bit Pointer Valid Bit-----------%d\n" " System Information Message----0x%02x\n" " Field Pointer-------------------0x%02x%02x\n" "Vendor----------------------------%s\n", (int)sense.valid, (int)sense.err_code, (int)sense.segnum, (int)sense.fm, (int)sense.eom, (int)sense.ili, (int)sense.key, sense.valid?(int)(sense.info >> 24) :0, sense.valid?(int)(sense.info >> 16) :0, sense.valid?(int)(sense.info >> 8) :0, sense.valid?(int)(sense.info & 0xFF):0, (int)sense.addlen, (int)(sense.cmdinfo >> 24), (int)(sense.cmdinfo >> 16), (int)(sense.cmdinfo >> 8), (int)(sense.cmdinfo & 0xFF), (int)sense.asc, (int)sense.ascq, (int)sense.fru, (int)sense.sksv, sense.sksv?(int)sense.cd :0, sense.sksv?(int)sense.bpv :0, (sense.sksv && sense.bpv)?(int)sense.sim :0, sense.sksv?(int)sense.field[0] :0, sense.sksv?(int)sense.field[1] :0, smartdump((char*)sense.vendor, sizeof(sense.vendor), buf, sizeof(buf), NULL)); } if (sense.err_code) { /* The following values are reported to be 
correct by Mariusz Czulada * on the bacula-devel list in 2007 */ return sense.key == 0x08 && sense.asc == 0x00 && sense.ascq == 0x05; } } #endif return false; } bacula-15.0.3/src/stored/append.c0000644000175000017500000004161114771010173016367 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Append code for Storage daemon * Kern Sibbald, May MM */ #include "bacula.h" #include "stored.h" #include "prepare.h" bool is_dedup_server_side(DEVICE *dev, int32_t stream, uint64_t stream_len); /* Responses sent to the File daemon */ static char OK_data[] = "3000 OK data\n"; static char OK_append[] = "3000 OK append data\n"; /* * Check if we can mark this job incomplete * */ void possible_incomplete_job(JCR *jcr, uint32_t last_file_index) { BSOCK *dir = jcr->dir_bsock; /* * Note, here we decide if it is worthwhile to restart * the Job at this point. For the moment, if at least * 10 Files have been seen. * We must be sure that the saved files are safe. * Using this function when there is as comm line problem is probably safe, * it is inappropriate to use it for a any failure that could * involve corrupted data. * We cannot mark a job Incomplete if we have already flushed * a bad JobMedia record (i.e. one beyond the last FileIndex * that is known to be good). * Only backups of type JT_BACKUP are handled for now. * Be careful MAC backups keep the type of the source job aka JT_BACKUP * but have the jcr->sd_client set. */ if (jcr->spool_attributes && last_file_index > 10 && dir->get_lastFlushIndex() < last_file_index && jcr->getJobType() == JT_BACKUP && !jcr->sd_client) { jcr->setJobStatus(JS_Incomplete); } } /* * Append Data sent from Client (FD/SD) * */ bool do_append_data(JCR *jcr) { int32_t n; int32_t file_index, stream, last_file_index; uint64_t stream_len; BSOCK *fd = jcr->file_bsock; bool ok = true; DEV_RECORD rec; prepare_ctx pctx; char buf1[100], buf2[100]; DCR *dcr = jcr->dcr; DEVICE *dev; char ec[50]; POOL_MEM errmsg(PM_EMSG); if (!dcr) { pm_strcpy(jcr->errmsg, _("DCR is NULL!!!\n")); Jmsg0(jcr, M_FATAL, 0, jcr->errmsg); return false; } dev = dcr->dev; if (!dev) { pm_strcpy(jcr->errmsg, _("DEVICE is NULL!!!\n")); Jmsg0(jcr, M_FATAL, 0, jcr->errmsg); return false; } Dmsg1(100, "Start append data. res=%d\n", dev->num_reserved()); memset(&rec, 0, sizeof(rec)); if (!fd->set_buffer_size(dcr->device->max_network_buffer_size, BNET_SETBUF_WRITE)) { jcr->setJobStatus(JS_ErrorTerminated); pm_strcpy(jcr->errmsg, _("Unable to set network buffer size.\n")); Jmsg0(jcr, M_FATAL, 0, jcr->errmsg); return false; } if (!acquire_device_for_append(dcr)) { jcr->setJobStatus(JS_ErrorTerminated); return false; } dev->start_of_job(dcr); jcr->sendJobStatus(JS_Running); Dmsg1(50, "Begin append device=%s\n", dev->print_name()); begin_data_spool(dcr); begin_attribute_spool(jcr); /* * Write Begin Session Record */ if (!write_session_label(dcr, SOS_LABEL)) { Jmsg1(jcr, M_FATAL, 0, _("Write session label failed. 
ERR=%s\n"), dev->bstrerror()); jcr->setJobStatus(JS_ErrorTerminated); ok = false; } /* Tell File daemon to send data */ if (!fd->fsend(OK_data)) { berrno be; Jmsg1(jcr, M_FATAL, 0, _("Network send error to FD. ERR=%s\n"), be.bstrerror(fd->b_errno)); ok = false; } /* * Get Data from File daemon, write to device. To clarify what is * going on here. We expect: * - A stream header * - Multiple records of data * - EOD record * * The Stream header is just used to synchronize things, and * none of the stream header is written to tape. * The Multiple records of data contain first the Attributes, * then after another stream header, the file data, then * after another stream header, the MD5 data if any. * * So we get the (stream header, data, EOD) three times for each * file. 1. for the Attributes, 2. for the file data if any, * and 3. for the MD5 if any. */ dcr->VolFirstIndex = dcr->VolLastIndex = 0; jcr->run_time = time(NULL); /* start counting time for rates */ GetMsg *qfd = dcr->dev->get_msg_queue(jcr, fd, DEDUP_MAX_MSG_SIZE); qfd->start_read_sock(); for (last_file_index = 0; ok && !jcr->is_job_canceled(); ) { /* assume no server side deduplication at first */ bool dedup_srv_side = false; /* used to store references and hash in the volume */ char dedup_ref_buf[DEDUP_MAX_REF_SIZE+OFFSET_FADDR_SIZE+100]; /* Read Stream header from the File daemon. * The stream header consists of the following: * file_index (sequential Bacula file index, base 1) * stream (Bacula number to distinguish parts of data) * stream_len (Expected length of this stream. This * will be the size backed up if the file does not * grow during the backup.) */ n = qfd->bget_msg(NULL); if (n <= 0) { if (n == BNET_SIGNAL && qfd->msglen == BNET_EOD) { Dmsg0(200, "Got EOD on reading header.\n"); break; /* end of data */ } Jmsg3(jcr, M_FATAL, 0, _("Error reading data header from FD. n=%d msglen=%d ERR=%s\n"), n, qfd->msglen, fd->bstrerror()); // ASX TODO the fd->bstrerror() can be related to the wrong error, I should Queue the error too possible_incomplete_job(jcr, last_file_index); ok = false; break; } if (sscanf(qfd->msg, "%ld %ld %lld", &file_index, &stream, &stream_len) != 3) { // TODO ASX already done in bufmsg, should reuse the values char buf[256]; Jmsg1(jcr, M_FATAL, 0, _("Malformed data header from FD: %s\n"), asciidump(qfd->msg, qfd->msglen, buf, sizeof(buf))); ok = false; possible_incomplete_job(jcr, last_file_index); break; } Dmsg3(890, "<filed: Header FileIndex=%ld stream=%ld stream_len=%lld\n", file_index, stream, stream_len); /* An incomplete (rerunning) job may resume at any FileIndex, so skip the sequential check for its first header */ if (jcr->rerunning && file_index > 0 && last_file_index == 0) { goto fi_checked; } Dmsg2(400, "file_index=%d last_file_index=%d\n", file_index, last_file_index); if (file_index > 0 && (file_index == last_file_index || file_index == last_file_index + 1)) { goto fi_checked; } Jmsg2(jcr, M_FATAL, 0, _("FI=%d from FD not positive or last_FI=%d\n"), file_index, last_file_index); possible_incomplete_job(jcr, last_file_index); ok = false; break; fi_checked: if (file_index != last_file_index) { jcr->JobFiles = file_index; last_file_index = file_index; } dedup_srv_side = is_dedup_server_side(dev, stream, stream_len); /* Read data stream from the File daemon. * The data stream is just raw bytes */ while ((n=qfd->bget_msg(NULL)) > 0 && !jcr->is_job_canceled()) { rec.VolSessionId = jcr->VolSessionId; rec.VolSessionTime = jcr->VolSessionTime; rec.FileIndex = file_index; rec.Stream = stream | (dedup_srv_side ?
STREAM_BIT_DEDUPLICATION_DATA : 0); rec.StreamLen = stream_len; rec.maskedStream = stream & STREAMMASK_TYPE; /* strip high bits */ rec.data_len = qfd->msglen; rec.data = qfd->msg; /* use message buffer */ rec.extra_bytes = 0; /* Debug code: check if we must hangup or blowup */ if (handle_hangup_blowup(jcr, jcr->JobFiles, jcr->JobBytes)) { return false; } Dmsg4(850, "before write_rec FI=%d SessId=%d Strm=%s len=%d\n", rec.FileIndex, rec.VolSessionId, stream_to_ascii(buf1, rec.Stream,rec.FileIndex), rec.data_len); jcr->sendProgressStatus(); /* * Check for any last minute Storage daemon preparation * of the files being backed up proir to doing so. E.g. * we might do a Percona prepare or a virus check. */ if (prepare(jcr, pctx, rec)) { /* All done in prepare */ } else { /* Normal non "prepare" backup */ char *rbuf = qfd->msg; char *wdedup_ref_buf = dedup_ref_buf; int rbuflen = qfd->msglen; if (is_offset_stream(stream)) { if (stream & STREAM_BIT_OFFSETS) { /* Prepare to update the index */ unser_declare; unser_begin(rbuf, 0); unser_uint64(rec.FileOffset); } rbuf += OFFSET_FADDR_SIZE; rbuflen -= OFFSET_FADDR_SIZE; if (dedup_srv_side) { wdedup_ref_buf += OFFSET_FADDR_SIZE; memcpy(dedup_ref_buf, qfd->msg, OFFSET_FADDR_SIZE); } } if (dedup_srv_side) { /* if dedup is in use then store and replace the chunk by its ref */ ok = qfd->dedup_store_chunk(&rec, rbuf, rbuflen, dedup_ref_buf, wdedup_ref_buf, errmsg.addr()); if (!ok) { // TODO ASX, I have used successfully a Jmsg and no break from the beginning // a Dmsg and a break looks more appropriate, hope this works Jmsg1(jcr, M_FATAL, 0, "%s", errmsg.c_str()); break; } } else { if (stream & STREAM_BIT_DEDUPLICATION_DATA) { /* do accounting for VolABytes */ rec.extra_bytes = qfd->bmsg->dedup_size; } else { rec.extra_bytes = 0; } } Dmsg4(850, "before write_rec FI=%d SessId=%d Strm=%s len=%d\n", rec.FileIndex, rec.VolSessionId, stream_to_ascii(buf1, rec.Stream,rec.FileIndex), rec.data_len); /* Do the detection here because references are also created by the FD when dedup=bothside */ rec.state_bits |= is_dedup_ref(&rec, true) ? REC_NO_SPLIT : 0; ok = dcr->write_record(&rec); if (!ok) { Dmsg2(90, "Got write_block_to_dev error on device %s. %s\n", dcr->dev->print_name(), dcr->dev->bstrerror()); break; } jcr->JobBytes += rec.data_len; /* increment bytes this job */ jcr->JobBytes += qfd->bmsg->jobbytes; // if the block as been downloaded, count it Dmsg4(850, "write_record FI=%s SessId=%d Strm=%s len=%d\n", FI_to_ascii(buf1, rec.FileIndex), rec.VolSessionId, stream_to_ascii(buf2, rec.Stream, rec.FileIndex), rec.data_len); send_attrs_to_dir(jcr, &rec); } Dmsg0(650, "Enter bnet_get\n"); } Dmsg2(650, "End read loop with FD. JobFiles=%d Stat=%d\n", jcr->JobFiles, n); if (fd->is_error()) { if (!jcr->is_job_canceled()) { Dmsg1(350, "Network read error from FD. ERR=%s\n", fd->bstrerror()); Jmsg1(jcr, M_FATAL, 0, _("Network error reading from FD. ERR=%s\n"), fd->bstrerror()); possible_incomplete_job(jcr, last_file_index); } ok = false; break; } } /* stop local and remote dedup */ Dmsg2(DT_DEDUP|215, "Wait for deduplication quarantine: emergency_exit=%d device=%s\n", ok?0:1, dev->print_name()); qfd->wait_read_sock((ok == false) || jcr->is_job_canceled()); if (qfd->commit(errmsg.addr(), jcr->JobId)) { ok = false; Jmsg1(jcr, M_ERROR, 0, _("DDE commit failed. 
ERR=%s\n"), errmsg.c_str()); } /* Create Job status for end of session label */ jcr->setJobStatus(ok?JS_Terminated:JS_ErrorTerminated); if (ok) { /* Terminate connection with Client */ fd->fsend(OK_append); do_client_commands(jcr); /* finish dialog with Client */ } else { fd->fsend("3999 Failed append\n"); } prepare_sd_end(jcr, pctx, rec); Dmsg1(200, "Write EOS label JobStatus=%c\n", jcr->JobStatus); /* * Check if we can still write. This may not be the case * if we are at the end of the tape or we got a fatal I/O error. */ dcr->set_ameta(); if (ok || dev->can_write()) { if (!dev->flush_before_eos(dcr)) { /* Print only if ok and not cancelled to avoid spurious messages */ if (ok && !jcr->is_job_canceled()) { Jmsg2(jcr, M_FATAL, 0, _("Fatal append error on device %s: ERR=%s\n"), dev->print_name(), dev->bstrerror()); Dmsg0(100, _("Set ok=FALSE after write_block_to_device.\n")); possible_incomplete_job(jcr, last_file_index); } jcr->setJobStatus(JS_ErrorTerminated); ok = false; } if (!write_session_label(dcr, EOS_LABEL)) { /* Print only if ok and not cancelled to avoid spurious messages */ if (ok && !jcr->is_job_canceled()) { Jmsg1(jcr, M_FATAL, 0, _("Error writing end session label. ERR=%s\n"), dev->bstrerror()); possible_incomplete_job(jcr, last_file_index); } jcr->setJobStatus(JS_ErrorTerminated); ok = false; } /* Flush out final partial ameta block of this session */ Dmsg1(200, "=== Flush adata=%d last block.\n", dcr->block->adata); ASSERT(!dcr->block->adata); if (!dcr->write_final_block_to_device()) { /* Print only if ok and not cancelled to avoid spurious messages */ if (ok && !jcr->is_job_canceled()) { Jmsg2(jcr, M_FATAL, 0, _("Fatal append error on device %s: ERR=%s\n"), dev->print_name(), dev->bstrerror()); Dmsg0(100, _("Set ok=FALSE after write_final_block_to_device.\n")); possible_incomplete_job(jcr, last_file_index); } jcr->setJobStatus(JS_ErrorTerminated); ok = false; } } /* Must keep the dedup connection alive (and the "last" hashes buffer) * until the last block has been written into the volume for the vacuum */ free_GetMsg(qfd); flush_jobmedia_queue(jcr); if (!ok && !jcr->is_JobStatus(JS_Incomplete)) { discard_data_spool(dcr); } else { /* Note: if commit is OK, the device will remain blocked */ commit_data_spool(dcr); } /* * Don't use time_t for job_elapsed as time_t can be 32 or 64 bits, * and the subsequent Jmsg() editing will break */ int32_t job_elapsed = time(NULL) - jcr->run_time; if (job_elapsed <= 0) { job_elapsed = 1; } Jmsg(dcr->jcr, M_INFO, 0, _("Elapsed time=%02d:%02d:%02d, Transfer rate=%s Bytes/second\n"), job_elapsed / 3600, job_elapsed % 3600 / 60, job_elapsed % 60, edit_uint64_with_suffix(jcr->JobBytes / job_elapsed, ec)); /* * Release the device -- and send final Vol info to DIR * and unlock it. 
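* After release_device() the attribute spool is discarded on failure or cancellation (unless the job is marked Incomplete) and committed otherwise, and a final job status update is sent to the Director.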
*/ release_device(dcr); if ((!ok || jcr->is_job_canceled()) && !jcr->is_JobStatus(JS_Incomplete)) { discard_attribute_spool(jcr); } else { commit_attribute_spool(jcr); } jcr->sendJobStatus(); /* update director */ Dmsg1(100, "return from do_append_data() ok=%d\n", ok); return ok; } /* Send attributes and digest to Director for Catalog */ bool send_attrs_to_dir(JCR *jcr, DEV_RECORD *rec) { if (rec->maskedStream == STREAM_FILEEVENT || rec->maskedStream == STREAM_UNIX_ATTRIBUTES || rec->maskedStream == STREAM_UNIX_ATTRIBUTES_EX || rec->maskedStream == STREAM_RESTORE_OBJECT || rec->maskedStream == STREAM_PLUGIN_OBJECT || rec->maskedStream == STREAM_PLUGIN_META_CATALOG || rec->maskedStream == STREAM_UNIX_ATTRIBUTE_UPDATE || crypto_digest_stream_type(rec->maskedStream) != CRYPTO_DIGEST_NONE) { if (!jcr->no_attributes) { BSOCK *dir = jcr->dir_bsock; if (are_attributes_spooled(jcr)) { dir->set_spooling(); } Dmsg2(850, "Send attributes to dir. FI=%d Stream=%s\n", rec->FileIndex, stream_to_ascii(rec->Stream)); if (!dir_update_file_attributes(jcr->dcr, rec)) { Jmsg(jcr, M_FATAL, 0, _("Error updating file attributes. ERR=%s\n"), dir->bstrerror()); dir->clear_spooling(); return false; } dir->clear_spooling(); } } return true; } bacula-15.0.3/src/stored/reserve.h0000644000175000017500000000412414771010173016576 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Definitions for reservation system. * * Kern Sibbald, February MMVI * */ /* * Use Device command from Director * The DIR tells us what Device Name to use, the Media Type, * the Pool Name, and the Pool Type. * * Ensure that the device exists and is opened, then store * the media and pool info in the JCR. This class is used * only temporarily in this file. */ class DIRSTORE { public: alist *device; bool append; char name[MAX_NAME_LENGTH]; char media_type[MAX_NAME_LENGTH]; char pool_name[MAX_NAME_LENGTH]; char pool_type[MAX_NAME_LENGTH]; }; /* Reserve context */ class RCTX { public: JCR *jcr; char *device_name; DIRSTORE *store; DEVRES *device; DEVICE *low_use_drive; /* Low use drive candidate */ bool try_low_use_drive; /* see if low use drive available */ bool any_drive; /* Accept any drive if set */ bool PreferMountedVols; /* Prefer volumes already mounted */ bool exact_match; /* Want exact volume */ bool have_volume; /* Have DIR suggested vol name */ bool suitable_device; /* at least one device is suitable */ bool autochanger_only; /* look at autochangers only */ bool notify_dir; /* Notify DIR about device */ bool append; /* set if append device */ char VolumeName[MAX_NAME_LENGTH]; /* Vol name suggested by DIR */ }; bacula-15.0.3/src/stored/file_driver.h0000644000175000017500000000537414771010173017425 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. 
You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Routines for writing to the Cloud using S3 protocol. * * Written by Kern Sibbald, May MMXVI * */ #ifndef _FILE_DRV_H #define _FILE_DRV_H #include "bacula.h" #include "stored.h" #include "cloud_driver.h" /* get base class definitions */ class file_driver: public cloud_driver { public: cloud_dev *dev; /* device that is calling us */ DEVRES *device; CLOUD *cloud; /* Pointer to CLOUD resource */ alist *objects; uint32_t buf_len; /* Stuff directly from Cloud resource */ char *hostName; char *bucketName; char *accessKeyId; char *secretAccessKey; int32_t protocol; int32_t uriStyle; btime_t wait_timeout; uint32_t objects_default_tier; private: void make_cloud_filename(POOLMEM *&filename, const char *VolumeName, const char *file, uint32_t part); void make_cloud_filename(POOLMEM *&filename, const char *VolumeName, const char *file); bool init(CLOUD *cloud, POOLMEM *&err); bool start_of_job(POOLMEM *&msg); bool end_of_job(POOLMEM *&msg); bool term(POOLMEM *&msg); bool truncate_cloud_volume(const char *VolumeName, ilist *trunc_parts, cancel_callback *cancel_cb, POOLMEM *&err); bool copy_cache_part_to_cloud(transfer *xfer); bool move_cloud_part(const char *VolumeName, uint32_t apart , const char *to, cancel_callback *cancel_cb, POOLMEM *&err, int& exists); bool clean_cloud_volume(const char *VolumeName, cleanup_cb_type *cb, cleanup_ctx_type *ctx, cancel_callback *cancel_cb, POOLMEM *&err); int copy_cloud_part_to_cache(transfer *xfer); bool restore_cloud_object(transfer *xfer, const char *cloud_fname); bool is_waiting_on_server(transfer *xfer); bool get_cloud_volume_parts_list(const char* VolumeName, ilist *parts, cancel_callback *cancel_cb, POOLMEM *&err); bool get_cloud_volumes_list(alist *volumes, cancel_callback *cancel_cb, POOLMEM *&err); bool put_object(transfer *xfer, const char *cache_fname, const char *cloud_fname, bwlimit *limit); bool get_cloud_object(transfer *xfer, const char *cloud_fname, const char *cache_fname); public: file_driver() { }; ~file_driver() { }; }; #endif /* _FILE_DRV_H */ bacula-15.0.3/src/stored/job.c0000644000175000017500000003175314771010173015700 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * Job control and execution for Storage Daemon * * Written by Kern Sibbald, MM * */ #include "bacula.h" #include "stored.h" static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; /* Imported variables */ extern STORES *me; /* our Global resource */ extern uint32_t VolSessionTime; /* Imported functions */ extern uint32_t newVolSessionId(); extern bool do_vbackup(JCR *jcr); /* Requests from the Director daemon */ static char jobcmd[] = "JobId=%d job=%127s job_name=%127s client_name=%127s " "type=%d level=%d FileSet=%127s NoAttr=%d SpoolAttr=%d FileSetMD5=%127s " "SpoolData=%d WritePartAfterJob=%d PreferMountedVols=%d SpoolSize=%s " "rerunning=%d VolSessionId=%d VolSessionTime=%d sd_client=%d " "Authorization=%s\n"; /* Responses sent to Director daemon */ static char OKjob[] = "3000 OK Job SDid=%u SDtime=%u Authorization=%s\n"; static char BAD_job[] = "3915 Bad Job command. stat=%d CMD: %s\n"; /* * Director requests us to start a job * Basic tasks done here: * - We pickup the JobId to be run from the Director. * - We pickup the device, media, and pool from the Director * - Wait for a connection from the File Daemon (FD) * - Accept commands from the FD (i.e. run the job) * - Return when the connection is terminated or * there is an error. */ bool job_cmd(JCR *jcr) { int32_t JobId; char sd_auth_key[200]; char spool_size[30]; char seed[100]; BSOCK *dir = jcr->dir_bsock; POOL_MEM job_name, client_name, job, fileset_name, fileset_md5; int32_t JobType, level, spool_attributes, no_attributes, spool_data; int32_t write_part_after_job, PreferMountedVols; int32_t rerunning; int32_t is_client; int stat; JCR *ojcr; /* * Get JobId and permissions from Director */ Dmsg1(100, "<dird: %s", dir->msg); bstrncpy(spool_size, "0", sizeof(spool_size)); stat = sscanf(dir->msg, jobcmd, &JobId, job.c_str(), job_name.c_str(), client_name.c_str(), &JobType, &level, fileset_name.c_str(), &no_attributes, &spool_attributes, fileset_md5.c_str(), &spool_data, &write_part_after_job, &PreferMountedVols, spool_size, &rerunning, &jcr->VolSessionId, &jcr->VolSessionTime, &is_client, &sd_auth_key); if (stat != 19) { pm_strcpy(jcr->errmsg, dir->msg); dir->fsend(BAD_job, stat, jcr->errmsg); Dmsg1(100, ">dird: %s", dir->msg); jcr->setJobStatus(JS_ErrorTerminated); return false; } jcr->rerunning = rerunning; jcr->sd_client = is_client; if (is_client) { jcr->sd_auth_key = bstrdup(sd_auth_key); } Dmsg3(100, "rerunning=%d VolSesId=%d VolSesTime=%d\n", jcr->rerunning, jcr->VolSessionId, jcr->VolSessionTime); /* * Since this job could be rescheduled, we * check to see if we have it already. If so * free the old jcr and use the new one. */ ojcr = get_jcr_by_full_name(job.c_str()); if (ojcr && !ojcr->authenticated) { Dmsg2(100, "Found ojcr=0x%x Job %s\n", (unsigned)(intptr_t)ojcr, job.c_str()); free_jcr(ojcr); } jcr->JobId = JobId; Dmsg2(800, "Start JobId=%d %p\n", JobId, jcr); set_jcr_in_tsd(jcr); /* * If job rescheduled because previous was incomplete, * the Resched flag is set and VolSessionId and VolSessionTime * are given to us (same as restarted job).
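* For a job that is not rerunning, a fresh VolSessionId is allocated and the daemon-wide VolSessionTime is used, as done just below.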
*/ if (!jcr->rerunning) { jcr->VolSessionId = newVolSessionId(); jcr->VolSessionTime = VolSessionTime; } bstrncpy(jcr->Job, job, sizeof(jcr->Job)); unbash_spaces(job_name); jcr->job_name = get_pool_memory(PM_NAME); pm_strcpy(jcr->job_name, job_name); unbash_spaces(client_name); jcr->client_name = get_pool_memory(PM_NAME); pm_strcpy(jcr->client_name, client_name); unbash_spaces(fileset_name); jcr->fileset_name = get_pool_memory(PM_NAME); pm_strcpy(jcr->fileset_name, fileset_name); jcr->setJobType(JobType); jcr->setJobLevel(level); jcr->no_attributes = no_attributes; jcr->spool_attributes = spool_attributes; jcr->spool_data = spool_data; jcr->spool_size = str_to_int64(spool_size); jcr->write_part_after_job = write_part_after_job; jcr->fileset_md5 = get_pool_memory(PM_NAME); pm_strcpy(jcr->fileset_md5, fileset_md5); jcr->PreferMountedVols = PreferMountedVols; jcr->authenticated = false; /* * Pass back an authorization key for the File daemon */ if (jcr->sd_client) { bstrncpy(sd_auth_key, "xxx", 4); /* include \0 */ } else { bsnprintf(seed, sizeof(seed), "%p%d", jcr, JobId); make_session_key(sd_auth_key, seed, 1); } dir->fsend(OKjob, jcr->VolSessionId, jcr->VolSessionTime, sd_auth_key); Dmsg2(150, ">dird jid=%u: %s", (uint32_t)jcr->JobId, dir->msg); /* If not client, set key, otherwise it is already set */ if (!jcr->sd_client) { jcr->sd_auth_key = bstrdup(sd_auth_key); memset(sd_auth_key, 0, sizeof(sd_auth_key)); } new_plugins(jcr); /* instantiate the plugins */ generate_daemon_event(jcr, "JobStart"); generate_plugin_event(jcr, bsdEventJobStart, (void *)"JobStart"); /* Keep track of the important events */ events_send_msg(jcr, "SJ0001", EVENTS_TYPE_JOB, jcr->director->hdr.name, (intptr_t)jcr, "Job Start jobid=%d job=%s", jcr->JobId, jcr->Job); return true; } bool run_cmd(JCR *jcr) { struct timeval tv; struct timezone tz; struct timespec timeout; int errstat = 0; Dsm_check(200); Dmsg1(200, "Run_cmd: %s\n", jcr->dir_bsock->msg); /* If we do not need the FD, we are doing a virtual backup. */ if (jcr->no_client_used()) { do_vbackup(jcr); return false; } jcr->sendJobStatus(JS_WaitFD); /* wait for FD to connect */ Dmsg2(050, "sd_calls_client=%d sd_client=%d\n", jcr->sd_calls_client, jcr->sd_client); if (jcr->sd_calls_client) { if (!read_client_hello(jcr)) { return false; } /* * Authenticate the File daemon */ Dmsg0(050, "=== Authenticate FD\n"); if (jcr->authenticated || !authenticate_filed(jcr, jcr->file_bsock, jcr->FDVersion)) { Dmsg1(050, "Authentication failed Job %s\n", jcr->Job); Qmsg(jcr, M_FATAL, 0, _("Unable to authenticate File daemon\n")); } else { jcr->authenticated = true; } if (jcr->JobId > 0) { POOL_MEM buf; /* Print connection info only for real jobs. * We don't have client name here, log connection info w/o it anyway */ build_connecting_info_log(_("Client"), "", jcr->client_addr, jcr->client_port, jcr->file_bsock->tls ? true : false, buf.addr()); Jmsg(jcr, M_INFO, 0, "%s", buf.c_str()); } } else if (!jcr->sd_client) { /* We wait to receive connection from Client */ gettimeofday(&tv, &tz); timeout.tv_nsec = tv.tv_usec * 1000; timeout.tv_sec = tv.tv_sec + me->client_wait; Dmsg3(050, "%s waiting %d sec for FD to contact SD key=%s\n", jcr->Job, (int)(timeout.tv_sec-time(NULL)), jcr->sd_auth_key); Dmsg3(800, "=== Block Job=%s jid=%d %p\n", jcr->Job, jcr->JobId, jcr); /* * Wait for the File daemon to contact us to start the Job, * when he does, we will be released, unless the 30 minutes * expires. 
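 * (the actual wait used below is me->client_wait seconds, set a few lines above, rather than a fixed 30 minutes)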
*/ P(mutex); while ( !jcr->authenticated && !job_canceled(jcr) ) { errstat = pthread_cond_timedwait(&jcr->job_start_wait, &mutex, &timeout); if (errstat == ETIMEDOUT || errstat == EINVAL || errstat == EPERM) { break; } Dmsg1(800, "=== Auth cond errstat=%d\n", errstat); } Dmsg4(050, "=== Auth=%d jid=%d canceled=%d errstat=%d\n", jcr->JobId, jcr->authenticated, job_canceled(jcr), errstat); V(mutex); Dmsg2(800, "Auth fail or cancel for jid=%d %p\n", jcr->JobId, jcr); } memset(jcr->sd_auth_key, 0, strlen(jcr->sd_auth_key)); if (jcr->authenticated && !job_canceled(jcr) && !jcr->is_incomplete()) { Dmsg2(800, "Running jid=%d %p\n", jcr->JobId, jcr); run_job(jcr); /* Run the job */ } Dmsg2(800, "Done jid=%d %p\n", jcr->JobId, jcr); return false; } #ifdef needed /* * Query Device command from Director * Sends Storage Daemon's information on the device to the * caller (presumably the Director). * This command always returns "true" so that the line is * not closed on an error. * */ bool query_cmd(JCR *jcr) { POOL_MEM dev_name, VolumeName, MediaType, ChangerName; BSOCK *dir = jcr->dir_bsock; DEVRES *device; AUTOCHANGER *changer; bool ok; Dmsg1(100, "Query_cmd: %s", dir->msg); ok = sscanf(dir->msg, query_device, dev_name.c_str()) == 1; Dmsg1(100, "msg); if (ok) { unbash_spaces(dev_name); foreach_res(device, R_DEVICE) { /* Find resource, and make sure we were able to open it */ if (strcmp(dev_name.c_str(), device->hdr.name) == 0) { if (!device->dev) { device->dev = init_dev(jcr, device, false, statcollector); } if (!device->dev) { break; } ok = dir_update_device(jcr, device->dev); if (ok) { ok = dir->fsend(OK_query); } else { dir->fsend(NO_query); } return ok; } } foreach_res(changer, R_AUTOCHANGER) { /* Find resource, and make sure we were able to open it */ if (strcmp(dev_name.c_str(), changer->hdr.name) == 0) { if (!changer->device || changer->device->size() == 0) { continue; /* no devices */ } ok = dir_update_changer(jcr, changer); if (ok) { ok = dir->fsend(OK_query); } else { dir->fsend(NO_query); } return ok; } } /* If we get here, the device/autochanger was not found */ unbash_spaces(dir->msg); pm_strcpy(jcr->errmsg, dir->msg); dir->fsend(NO_device, dev_name.c_str()); Dmsg1(100, ">dird: %s\n", dir->msg); } else { unbash_spaces(dir->msg); pm_strcpy(jcr->errmsg, dir->msg); dir->fsend(BAD_query, jcr->errmsg); Dmsg1(100, ">dird: %s\n", dir->msg); } return true; } #endif /* * Destroy the Job Control Record and associated * resources (sockets). 
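 * Flushes any pending jobmedia records and releases the bsocks, pool memory, DCRs and read/write storage lists attached to the JCR.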
*/ void stored_free_jcr(JCR *jcr) { Dmsg2(800, "End Job JobId=%u %p\n", jcr->JobId, jcr); if (jcr->jobmedia_queue) { flush_jobmedia_queue(jcr); delete jcr->jobmedia_queue; jcr->jobmedia_queue = NULL; /* ***BEEF*** */ delete jcr->filemedia_queue; jcr->filemedia_queue = NULL; } free_bsock(jcr->file_bsock); free_bsock(jcr->dir_bsock); if (jcr->job_name) { free_pool_memory(jcr->job_name); } if (jcr->client_name) { free_memory(jcr->client_name); jcr->client_name = NULL; } if (jcr->fileset_name) { free_memory(jcr->fileset_name); } if (jcr->fileset_md5) { free_memory(jcr->fileset_md5); } if (jcr->bsr) { free_bsr(jcr->bsr); jcr->bsr = NULL; } /* Free any restore volume list created */ free_restore_volume_list(jcr); if (jcr->RestoreBootstrap) { unlink(jcr->RestoreBootstrap); bfree_and_null(jcr->RestoreBootstrap); } if (jcr->next_dev || jcr->prev_dev) { Qmsg0(NULL, M_FATAL, 0, _("In free_jcr(), but still attached to device!!!!\n")); } pthread_cond_destroy(&jcr->job_start_wait); if (jcr->dcrs) { delete jcr->dcrs; } jcr->dcrs = NULL; /* Avoid a double free */ if (jcr->dcr == jcr->read_dcr) { jcr->read_dcr = NULL; } if (jcr->dcr) { free_dcr(jcr->dcr); jcr->dcr = NULL; } if (jcr->read_dcr) { free_dcr(jcr->read_dcr); jcr->read_dcr = NULL; } if (jcr->read_store) { DIRSTORE *store; foreach_alist(store, jcr->read_store) { delete store->device; delete store; } delete jcr->read_store; jcr->read_store = NULL; } if (jcr->write_store) { DIRSTORE *store; foreach_alist(store, jcr->write_store) { delete store->device; delete store; } delete jcr->write_store; jcr->write_store = NULL; } Dsm_check(200); if (jcr->JobId != 0) write_state_file(me->working_directory, "bacula-sd", get_first_port_host_order(me->sdaddrs)); return; } bacula-15.0.3/src/stored/org_libsd_quota.c0000644000175000017500000000147514771010173020301 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #include "bacula.h" #include "stored.h" bool dir_get_pool_info(DCR *dcr, VOLUME_CAT_INFO *volcatinfo) { return true; } bool is_pool_size_reached(DCR *dcr, bool quiet) { return false; } bacula-15.0.3/src/stored/null_dev.c0000644000175000017500000000145414771010173016731 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * * Null driver code * * written by, Kern Sibbald, MMXVI * */ #include "bacula.h" #include "stored.h" const char *null_dev::print_type() { return "Null"; } bacula-15.0.3/src/stored/vtape_dev.c0000644000175000017500000005274614771010173017110 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Please note!!! The VTAPE device is for testing only. * It simulates a tape drive, which is useful for testing * without a real drive, but is inefficient for writing * disk volumes. In addition, we do not test for every * possible error condition, so please do not use this * in production. */ /* Device { Name = Drive-1 # Maximum File Size = 800M Maximum Volume Size = 3G Device Type = TAPE Archive Device = /tmp/fake Media Type = DLT-8000 AutomaticMount = yes; # when device opened, read it AlwaysOpen = yes; RemovableMedia = yes; RandomAccess = no; } Block description : block { int32 size; void *data; } EOF description : EOF { int32 size=0; } */ #include "bacula.h" /* define 64bit file usage */ #include "stored.h" #ifdef USE_VTAPE #include static int dbglevel = 100; #define FILE_OFFSET 30 void vtape_debug(int level) { dbglevel = level; } /* DEVICE virtual that we redefine. */ int vtape::d_ioctl(int fd, ioctl_req_t request, char *op) { int result = 0; if (request == MTIOCTOP) { result = tape_op((mtop *)op); } else if (request == MTIOCGET) { result = tape_get((mtget *)op); } else if (request == MTIOCPOS) { result = tape_pos((mtpos *)op); } else { errno = ENOTTY; result = -1; } return result; } int vtape::tape_op(struct mtop *mt_com) { int result=0; int count = mt_com->mt_count; if (!online) { errno = ENOMEDIUM; return -1; } switch (mt_com->mt_op) { case MTRESET: case MTNOP: case MTSETDRVBUFFER: break; default: case MTRAS1: case MTRAS2: case MTRAS3: case MTSETDENSITY: errno = ENOTTY; result = -1; break; case MTFSF: /* Forward space over mt_count filemarks. */ do { result = fsf(); } while (--count > 0 && result == 0); break; case MTBSF: /* Backward space over mt_count filemarks. */ do { result = bsf(); } while (--count > 0 && result == 0); break; case MTFSR: /* Forward space over mt_count records (tape blocks). */ /* file number = 1 block number = 0 file number = 1 block number = 1 mt: /dev/lto2: Erreur d'entree/sortie file number = 2 block number = 0 */ /* tester si on se trouve a la fin du fichier */ result = fsr(mt_com->mt_count); break; case MTBSR: /* Backward space over mt_count records (tape blocks). */ result = bsr(mt_com->mt_count); break; case MTWEOF: /* Write mt_count filemarks. */ do { result = weof(); } while (result == 0 && --count > 0); break; case MTREW: /* Rewind. */ Dmsg0(dbglevel, "rewind vtape\n"); check_eof(); atEOF = atEOD = false; atBOT = true; current_file = 0; current_block = 0; lseek(fd, 0, SEEK_SET); result = !read_fm(VT_READ_EOF); break; case MTOFFL: /* put tape offline */ result = offline(NULL) ? 0 : -1; break; case MTRETEN: /* Re-tension tape. 
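 * (accepted but a no-op for the simulated drive)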
*/ result = 0; break; case MTBSFM: /* not used by bacula */ errno = EIO; result = -1; break; case MTFSFM: /* not used by bacula */ errno = EIO; result = -1; break; case MTEOM:/* Go to the end of the recorded media (for appending files). */ while (next_FM) { lseek(fd, next_FM, SEEK_SET); if (read_fm(VT_READ_EOF)) { current_file++; } } boffset_t l; while (::read(fd, &l, sizeof(l)) > 0) { if (l) { lseek(fd, l, SEEK_CUR); } else { ASSERT(0); } Dmsg0(dbglevel, "skip 1 block\n"); } current_block = -1; atEOF = false; atEOD = true; /* file number = 3 block number = -1 */ /* Can be at EOM */ break; case MTERASE: /* not used by bacula */ atEOD = true; atEOF = false; atEOT = false; current_file = 0; current_block = -1; lseek(fd, 0, SEEK_SET); read_fm(VT_READ_EOF); truncate_file(); break; case MTSETBLK: break; case MTSEEK: break; case MTTELL: break; case MTFSS: break; case MTBSS: break; case MTWSM: break; case MTLOCK: break; case MTUNLOCK: break; case MTLOAD: break; case MTUNLOAD: break; case MTCOMPRESSION: break; case MTSETPART: break; case MTMKPART: break; } return result == 0 ? 0 : -1; } int vtape::tape_get(struct mtget *mt_get) { int density = 1; int block_size = 1024; mt_get->mt_type = MT_ISSCSI2; mt_get->mt_blkno = current_block; mt_get->mt_fileno = current_file; mt_get->mt_resid = -1; // pos_info.PartitionBlockValid ? pos_info.Partition : (ULONG)-1; /* TODO */ mt_get->mt_dsreg = ((density << MT_ST_DENSITY_SHIFT) & MT_ST_DENSITY_MASK) | ((block_size << MT_ST_BLKSIZE_SHIFT) & MT_ST_BLKSIZE_MASK); mt_get->mt_gstat = 0x00010000; /* Immediate report mode.*/ if (atEOF) { mt_get->mt_gstat |= 0x80000000; // GMT_EOF } if (atBOT) { mt_get->mt_gstat |= 0x40000000; // GMT_BOT } if (atEOT) { mt_get->mt_gstat |= 0x20000000; // GMT_EOT } if (atEOD) { mt_get->mt_gstat |= 0x08000000; // GMT_EOD } if (0) { //WriteProtected) { mt_get->mt_gstat |= 0x04000000; // GMT_WR_PROT } if (online) { mt_get->mt_gstat |= 0x01000000; // GMT_ONLINE } else { mt_get->mt_gstat |= 0x00040000; // GMT_DR_OPEN } mt_get->mt_erreg = 0; return 0; } int vtape::tape_pos(struct mtpos *mt_pos) { if (current_block >= 0) { mt_pos->mt_blkno = current_block; return 0; } return -1; } /* * This function try to emulate the append only behavior * of a tape. When you wrote something, data after the * current position are discarded. */ int vtape::truncate_file() { Dmsg2(dbglevel, "truncate %i:%i\n", current_file, current_block); ftruncate(fd, lseek(fd, 0, SEEK_CUR)); last_file = current_file; atEOD=true; update_pos(); return 0; } vtape::~vtape() { } vtape::vtape() { lockfd = fd = -1; atEOF = false; atBOT = false; atEOT = false; atEOD = false; online = false; needEOF = false; file_block = 0; last_file = 0; current_file = 0; current_block = -1; lockfile = NULL; max_block = VTAPE_MAX_BLOCK; } int vtape::get_fd() { return this->fd; } /* * DEVICE virtual that we redefine. * * Write a variable block of count size. 
* block = vtape_header + data * vtape_header = sizeof(data) * if vtape_header == 0, this is a EOF */ ssize_t vtape::d_write(int, const void *buffer, size_t count) { ASSERT(online); ASSERT(current_file >= 0); ASSERT(count > 0); ASSERT(buffer); ssize_t nb; Dmsg3(dbglevel, "write len=%i %i:%i\n", count, current_file,current_block); if (atEOT) { Dmsg0(dbglevel, "write nothing, EOT !\n"); errno = ENOSPC; return -1; } if (m_is_worm) { /* The start of the vtape volume has a WEOF */ int64_t size = ::lseek(fd, 0, SEEK_END); if (size < 100) { size = 0; } int64_t pos = DEVICE::get_full_addr(current_file, current_block); if ( pos < size ) { Dmsg2(dbglevel, "WORM detected. Cannot write at %lld with current size at %lld\n", pos, size -20); errno = EIO; return -1; } } else { Dmsg0(dbglevel, "Not worm!\n"); } if (!atEOD) { /* if not at the end of the data */ truncate_file(); } if (current_block != -1) { current_block++; } atBOT = false; atEOF = false; atEOD = true; /* End of data */ needEOF = true; /* next operation need EOF mark */ uint32_t size = count; ::write(fd, &size, sizeof(uint32_t)); nb = ::write(fd, buffer, count); if (nb != (ssize_t)count) { atEOT = true; Dmsg2(dbglevel, "Not enough space writing only %i of %i requested\n", nb, count); } update_pos(); return nb; } /* * +---+---------+---+------------------+---+-------------------+ * |00N| DATA |0LN| DATA |0LC| DATA | * +---+---------+---+------------------+---+-------------------+ * * 0 : zero * L : Last FileMark offset * N : Next FileMark offset * C : Current FileMark Offset */ int vtape::weof() { ASSERT(online); ASSERT(current_file >= 0); #if 0 if (atEOT) { errno = ENOSPC; current_block = -1; return -1; } #endif if (!atEOD) { truncate_file(); /* nothing after this point */ } last_FM = cur_FM; cur_FM = lseek(fd, 0, SEEK_CUR); // current position /* update previous next_FM */ lseek(fd, last_FM + sizeof(uint32_t)+sizeof(boffset_t), SEEK_SET); ::write(fd, &cur_FM, sizeof(boffset_t)); lseek(fd, cur_FM, SEEK_SET); next_FM = 0; uint32_t c=0; ::write(fd, &c, sizeof(uint32_t)); // EOF ::write(fd, &last_FM, sizeof(last_FM)); // F-1 ::write(fd, &next_FM, sizeof(next_FM)); // F (will be updated next time) current_file++; current_block = 0; needEOF = false; atEOD = false; atBOT = false; atEOF = true; last_file = MAX(current_file, last_file); Dmsg4(dbglevel, "Writing EOF %i:%i last=%lli cur=%lli next=0\n", current_file, current_block, last_FM, cur_FM); return 0; } /* * Go to next FM */ int vtape::fsf() { ASSERT(online); ASSERT(current_file >= 0); ASSERT(fd >= 0); /* * 1 0 -> fsf -> 2 0 -> fsf -> 2 -1 */ int ret=0; if (atEOT || atEOD) { errno = EIO; current_block = -1; return -1; } atBOT = false; Dmsg2(dbglevel+1, "fsf %i <= %i\n", current_file, last_file); if (next_FM > cur_FM) { /* not the last file */ lseek(fd, next_FM, SEEK_SET); read_fm(VT_READ_EOF); current_file++; atEOF = true; ret = 0; } else if (atEOF) { /* last file mark */ current_block=-1; errno = EIO; atEOF = false; atEOD = true; } else { /* last file, but no at the end */ fsr(100000); Dmsg0(dbglevel, "Try to FSF after EOT\n"); errno = EIO; current_file = last_file ; current_block = -1; atEOD=true; ret = -1; } return ret; } /* /------------\ /---------------\ * +---+------+---+---------------+-+ * |OLN| |0LN| | | * +---+------+---+---------------+-+ */ bool vtape::read_fm(VT_READ_FM_MODE read_all) { int ret; uint32_t c = 0; if (read_all == VT_READ_EOF) { ::read(fd, &c, sizeof(c)); if (c != 0) { lseek(fd, cur_FM, SEEK_SET); return false; } } cur_FM = lseek(fd, 0, SEEK_CUR) - sizeof(c); 
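 /* cur_FM now points at the zero marker; the previous (last_FM) and next (next_FM) filemark offsets are stored right after it, as written by weof() */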
::read(fd, &last_FM, sizeof(last_FM)); ret = ::read(fd, &next_FM, sizeof(next_FM)); current_block=0; Dmsg3(dbglevel, "Read FM cur=%lli last=%lli next=%lli\n", cur_FM, last_FM, next_FM); return (ret == sizeof(next_FM)); } /* * TODO: Check fsr with EOF */ int vtape::fsr(int count) { ASSERT(online); ASSERT(current_file >= 0); ASSERT(fd >= 0); int i,nb, ret=0; // boffset_t where=0; uint32_t s; Dmsg4(dbglevel, "fsr %i:%i EOF=%i c=%i\n", current_file,current_block,atEOF,count); check_eof(); if (atEOT) { errno = EIO; current_block = -1; return -1; } if (atEOD) { errno = EIO; return -1; } atBOT = atEOF = false; /* check all block record */ for(i=0; (i < count) && !atEOF ; i++) { nb = ::read(fd, &s, sizeof(uint32_t)); /* get size of next block */ if (nb == sizeof(uint32_t) && s) { current_block++; lseek(fd, s, SEEK_CUR); /* seek after this block */ } else { Dmsg4(dbglevel, "read EOF %i:%i nb=%i s=%i\n", current_file, current_block, nb,s); errno = EIO; ret = -1; if (next_FM) { current_file++; read_fm(VT_SKIP_EOF); } atEOF = true; /* stop the loop */ } } return ret; } /* * BSR + EOF => begin of EOF + EIO * BSR + BSR + EOF => last block * current_block = -1 */ int vtape::bsr(int count) { ASSERT(online); ASSERT(current_file >= 0); ASSERT(count == 1); ASSERT(fd >= 0); check_eof(); if (!count) { return 0; } int ret=0; int last_f=0; int last_b=0; boffset_t last=-1, last2=-1; boffset_t orig = lseek(fd, 0, SEEK_CUR); int orig_f = current_file; int orig_b = current_block; Dmsg4(dbglevel, "bsr(%i) cur_blk=%i orig=%lli cur_FM=%lli\n", count, current_block, orig, cur_FM); /* begin of tape, do nothing */ if (atBOT) { errno = EIO; return -1; } /* at EOF 0:-1 BOT=0 EOD=0 EOF=0 ERR: Input/output error */ if (atEOF) { lseek(fd, cur_FM, SEEK_SET); atEOF = false; if (current_file > 0) { current_file--; } current_block=-1; errno = EIO; return -1; } /* * First, go to cur/last_FM and read all blocks to find the good one */ if (cur_FM == orig) { /* already just before EOF */ lseek(fd, last_FM, SEEK_SET); } else { lseek(fd, cur_FM, SEEK_SET); } ret = read_fm(VT_READ_EOF); do { if (!atEOF) { last2 = last; /* keep track of the 2 last blocs position */ last = lseek(fd, 0, SEEK_CUR); last_f = current_file; last_b = current_block; Dmsg6(dbglevel, "EOF=%i last2=%lli last=%lli < orig=%lli %i:%i\n", atEOF, last2, last, orig, current_file, current_block); } ret = fsr(1); } while ((lseek(fd, 0, SEEK_CUR) < orig) && (ret == 0)); if (last2 > 0 && atEOF) { /* we take the previous position */ lseek(fd, last2, SEEK_SET); current_file = last_f; current_block = last_b - 1; Dmsg3(dbglevel, "1 set offset2=%lli %i:%i\n", last, current_file, current_block); } else if (last > 0) { lseek(fd, last, SEEK_SET); current_file = last_f; current_block = last_b; Dmsg3(dbglevel, "2 set offset=%lli %i:%i\n", last, current_file, current_block); } else { lseek(fd, orig, SEEK_SET); current_file = orig_f; current_block = orig_b; return -1; } Dmsg2(dbglevel, "bsr %i:%i\n", current_file, current_block); errno=0; atEOT = atEOF = atEOD = false; atBOT = (lseek(fd, 0, SEEK_CUR) - (sizeof(uint32_t)+2*sizeof(boffset_t))) == 0; if (orig_b == -1) { current_block = orig_b; } return 0; } /* BSF => just before last EOF * EOF + BSF => just before EOF * file 0 + BSF => BOT + errno */ int vtape::bsf() { ASSERT(online); ASSERT(current_file >= 0); Dmsg2(dbglevel, "bsf %i:%i count=%i\n", current_file, current_block); int ret = 0; check_eof(); atBOT = atEOF = atEOT = atEOD = false; if (current_file == 0) {/* BOT + errno */ lseek(fd, 0, SEEK_SET); read_fm(VT_READ_EOF); 
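 /* already on file 0: stay at BOT and report EIO, as documented above (file 0 + BSF => BOT + errno) */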
current_file = 0; current_block = 0; atBOT = true; errno = EIO; ret = -1; } else { Dmsg1(dbglevel, "bsf last=%lli\n", last_FM); lseek(fd, cur_FM, SEEK_SET); current_file--; current_block=-1; } return ret; } /* * DEVICE virtual that we redefine. * * Put vtape in offline mode */ bool vtape::offline(DCR *dcr) { close(dcr); atEOF = false; /* End of file */ atEOT = false; /* End of tape */ atEOD = false; /* End of data */ atBOT = false; /* Begin of tape */ online = false; file_block = 0; current_file = -1; current_block = -1; last_file = -1; return true; } /* * DEVICE virtual that we redefine. * * A filemark is automatically written to tape if the last tape operation * before close was a write. */ int vtape::d_close(int) { struct flock lock; check_eof(); if (lockfd >= 0) { lock.l_type = F_UNLCK; lock.l_start = 0; lock.l_whence = SEEK_SET; lock.l_len = 0; lock.l_pid = getpid(); ASSERT(fcntl(fd, F_SETLK, &lock) != -1); ::close(lockfd); free(lockfile); } ::close(fd); lockfd = fd = -1; return 0; } /* * DEVICE virtual that we redefine. * * When a filemark is encountered while reading, the following happens. If * there are data remaining in the buffer when the filemark is found, the * buffered data is returned. The next read returns zero bytes. The following * read returns data from the next file. The end of recorded data is signaled * by returning zero bytes for two consecutive read calls. The third read * returns an error. */ ssize_t vtape::d_read(int, void *buffer, size_t count) { ASSERT(online); ASSERT(current_file >= 0); ssize_t nb; uint32_t s; Dmsg2(dbglevel*2, "read %i:%i\n", current_file, current_block); if (atEOT || atEOD) { errno = EIO; return -1; } if (atEOF) { if (!next_FM) { atEOD = true; atEOF = false; current_block=-1; return 0; } atEOF=false; } check_eof(); atEOD = atBOT = false; /* reading size of data */ nb = ::read(fd, &s, sizeof(uint32_t)); if (nb <= 0) { atEOF = true; /* TODO: check this */ return 0; } if (s > count) { /* not enough buffer to read block */ Dmsg2(dbglevel, "Need more buffer to read next block %i > %i\n",s,count); lseek(fd, s, SEEK_CUR); errno = ENOMEM; return -1; } if (!s) { /* EOF */ atEOF = true; if (read_fm(VT_SKIP_EOF)) { current_file++; } return 0; } /* reading data itself */ nb = ::read(fd, buffer, s); if (nb != (ssize_t)s) { /* read error */ errno=EIO; atEOT=true; current_block = -1; Dmsg0(dbglevel, "EOT during reading\n"); return -1; } /* read ok */ if (current_block >= 0) { current_block++; } return nb; } /* Redefine DEVICE virtual function */ int vtape::d_open(const char *pathname, int uflags) { Dmsg2(dbglevel, "vtape::d_open(%s, %i)\n", pathname, uflags); online = true; /* assume that drive contains a tape */ struct flock lock; struct stat statp; ASSERT(!m_shstore || (m_shstore_lock && m_shstore_register)); if (stat(pathname, &statp) != 0) { fd = -1; Dmsg1(dbglevel, "Can't stat on %s\n", pathname); if (uflags & O_NONBLOCK) { online = false; fd = ::open("/dev/null", O_RDWR | O_LARGEFILE | O_CLOEXEC, 0600); } } else { fd = ::open(pathname, O_RDWR | O_LARGEFILE | O_CLOEXEC, 0600); } if (fd < 0) { berrno be; Dmsg2(0, "Unable to open vtape device %s ERR=%s\n", pathname, be.bstrerror()); errno = ENOMEDIUM; return -1; } lockfile = (char *)malloc(strlen(pathname) + 3); strcpy(lockfile, pathname); strcat(lockfile, ".l"); lockfd = ::open(lockfile, O_CREAT | O_RDWR | O_LARGEFILE | O_CLOEXEC, 0600); if (lockfd < 0) { berrno be; Dmsg2(0, "Unable to open vtape device lock %s ERR=%s\n", lockfile, be.bstrerror()); } else { lock.l_type = F_WRLCK; lock.l_start = 0; 
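 /* write-lock the whole ".l" lock file so two processes cannot use the same vtape at once */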
lock.l_whence = SEEK_SET; lock.l_len = 0; lock.l_pid = getpid(); ASSERT(fcntl(lockfd, F_SETLK, &lock) != -1); } file_block = 0; current_block = 0; current_file = 0; cur_FM = next_FM = last_FM = 0; needEOF = false; atBOT = true; atEOT = atEOD = false; /* If the vtape is empty, start by writing a EOF */ if (online && !read_fm(VT_READ_EOF)) { lseek(fd, 0, SEEK_SET); /* rewind */ cur_FM = next_FM = last_FM = 0; /* reset */ weof(); /* write the first EOF */ last_file = current_file=0; } return fd; } /* use this to track file usage */ void vtape::update_pos() { ASSERT(online); struct stat statp; if (fstat(fd, &statp) == 0) { file_block = statp.st_blocks; } Dmsg1(dbglevel*2, "update_pos=%i\n", file_block); if (file_block > max_block) { atEOT = true; } else { atEOT = false; } } void vtape::dump() { Dmsg0(dbglevel+1, "===================\n"); Dmsg2(dbglevel, "file:block = %i:%i\n", current_file, current_block); Dmsg1(dbglevel+1, "last_file=%i\n", last_file); Dmsg1(dbglevel+1, "file_block=%i\n", file_block); Dmsg4(dbglevel+1, "EOF=%i EOT=%i EOD=%i BOT=%i\n", atEOF, atEOT, atEOD, atBOT); } const char *vtape::print_type() { return "Vtape"; } #else /*!USE_VTAPE */ /* faked constructor and destructor to avoid undefined reference to vtable */ vtape::vtape() { } vtape::~vtape() { } /* dummy implementation */ const char *vtape::print_type() { return "Vtape"; } #endif /* !USE_VTAPE */ bacula-15.0.3/src/stored/askdir.c0000644000175000017500000011151014771010173016371 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * Subroutines to handle Catalog reqests sent to the Director * Reqests/commands from the Director are handled in dircmd.c * * Kern Sibbald, December 2000 */ #include "bacula.h" /* pull in global headers */ #include "stored.h" /* pull in Storage Deamon headers */ static const int dbglvl = 200; /* Requests sent to the Director */ static char Find_media[] = "CatReq JobId=%ld FindMedia=%d pool_name=%s media_type=%s vol_type=%d create=%d use_protect=%d vol_encrypted=%d\n"; static char Get_Vol_Info[] = "CatReq JobId=%ld GetVolInfo VolName=%s write=%d\n"; static char Update_media[] = "CatReq JobId=%ld UpdateMedia VolName=%s" " VolJobs=%u VolFiles=%u VolBlocks=%u VolBytes=%s VolABytes=%s" " VolHoleBytes=%s VolHoles=%u VolMounts=%u" " VolErrors=%u VolWrites=%u MaxVolBytes=%s EndTime=%s VolStatus=%s" " Slot=%d relabel=%d InChanger=%d VolReadTime=%s VolWriteTime=%s" " VolFirstWritten=%s VolType=%u VolParts=%d VolCloudParts=%d" " LastPartBytes=%lld Enabled=%d Recycle=%d Protected=%d UseProtect=%d VolEncrypted=%d\n"; static char Create_jobmedia[] = "CatReq JobId=%ld CreateJobMedia\n"; static char FileAttributes[] = "UpdCat JobId=%ld FileAttributes "; /* Responses received from the Director */ static char OK_media[] = "1000 OK VolName=%127s VolJobs=%u VolFiles=%lu" " VolBlocks=%lu VolBytes=%lld VolABytes=%lld" " VolHoleBytes=%lld VolHoles=%lu VolMounts=%lu" " VolErrors=%lu VolWrites=%lu" " MaxVolBytes=%lld VolCapacityBytes=%lld VolStatus=%20s" " Slot=%ld MaxVolJobs=%lu MaxVolFiles=%lu InChanger=%ld" " VolReadTime=%lld VolWriteTime=%lld EndFile=%lu EndBlock=%lu" " VolType=%lu LabelType=%ld MediaId=%lld ScratchPoolId=%lld" " VolParts=%d VolCloudParts=%d LastPartBytes=%lld Enabled=%d MaxPoolBytes=%lld PoolBytes=%lld Recycle=%d" " Protected=%d UseProtect=%d VolEncrypted=%d VolRetention=%u\n"; static char OK_create[] = "1000 OK CreateJobMedia\n"; static bthread_mutex_t vol_info_mutex = BTHREAD_MUTEX_PRIORITY(PRIO_SD_VOL_INFO); #ifdef needed Note: if you turn this on, be sure to add the Recycle Flag static char Device_update[] = "DevUpd JobId=%ld device=%s " "append=%d read=%d num_writers=%d " "open=%d labeled=%d offline=%d " "reserved=%d max_writers=%d " "autoselect=%d autochanger=%d " "enabled=%d " "changer_name=%s media_type=%s volume_name=%s\n"; /** Send update information about a device to Director */ bool dir_update_device(JCR *jcr, DEVICE *dev) { BSOCK *dir = jcr->dir_bsock; POOL_MEM dev_name, VolumeName, MediaType, ChangerName; DEVRES *device = dev->device; bool ok; pm_strcpy(dev_name, device->hdr.name); bash_spaces(dev_name); if (dev->is_labeled()) { pm_strcpy(VolumeName, dev->VolHdr.VolumeName); } else { pm_strcpy(VolumeName, "*"); } bash_spaces(VolumeName); pm_strcpy(MediaType, device->media_type); bash_spaces(MediaType); if (device->changer_res) { pm_strcpy(ChangerName, device->changer_res->hdr.name); bash_spaces(ChangerName); } else { pm_strcpy(ChangerName, "*"); } ok = dir->fsend(Device_update, jcr->JobId, dev_name.c_str(), dev->can_append()!=0, dev->can_read()!=0, dev->num_writers, dev->is_open()!=0, dev->is_labeled()!=0, dev->is_offline()!=0, dev->reserved_device, dev->is_tape()?100000:1, dev->autoselect, 0, dev->enabled, ChangerName.c_str(), MediaType.c_str(), VolumeName.c_str()); Dmsg1(dbglvl, ">dird: %s\n", dir->msg); return ok; } bool dir_update_changer(JCR *jcr, AUTOCHANGER *changer) { BSOCK *dir = jcr->dir_bsock; POOL_MEM dev_name, MediaType; DEVRES *device; bool ok; pm_strcpy(dev_name, changer->hdr.name); bash_spaces(dev_name); device = (DEVRES *)changer->device->first(); 
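 /* report the media type of the changer's first device */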
pm_strcpy(MediaType, device->media_type); bash_spaces(MediaType); /* This is mostly to indicate that we are here */ ok = dir->fsend(Device_update, jcr->JobId, dev_name.c_str(), /* Changer name */ 0, 0, 0, /* append, read, num_writers */ 0, 0, 0, /* is_open, is_labeled, offline */ 0, 0, /* reserved, max_writers */ 0, /* Autoselect */ 0, /* Enabled */ changer->device->size(), /* Number of devices */ "0", /* PoolId */ "*", /* ChangerName */ MediaType.c_str(), /* MediaType */ "*"); /* VolName */ Dmsg1(dbglvl, ">dird: %s\n", dir->msg); return ok; } #endif static AskDirHandler *askdir_handler = NULL; /* must be true when inside a "btools" */ AskDirHandler *get_askdir_handler() { return askdir_handler; } /* * btools must call this function, to modify behavior of some functions here */ AskDirHandler *init_askdir_handler(AskDirHandler *new_askdir_handler) { AskDirHandler *old = askdir_handler; askdir_handler = new_askdir_handler; return old; } /* * Alternate function used by btools */ bool AskDirHandler::dir_ask_sysop_to_mount_volume(DCR *dcr, bool /*writing*/) { DEVICE *dev = dcr->dev; fprintf(stderr, _("Mount Volume \"%s\" on device %s and press return when ready: "), dcr->VolumeName, dev->print_name()); dev->close(dcr); getchar(); return true; } bool AskDirHandler::dir_get_volume_info(DCR *dcr, const char *VolumeName, enum get_vol_info_rw writing) { Dmsg0(100, "Fake dir_get_volume_info\n"); dcr->setVolCatName(VolumeName); Dmsg2(500, "Vol=%s VolType=%d\n", dcr->getVolCatName(), dcr->VolCatInfo.VolCatType); return 1; } /** * Common routine for: * dir_get_volume_info() * and * dir_find_next_appendable_volume() * * NOTE!!! All calls to this routine must be protected by * locking vol_info_mutex before calling it so that * we don't have one thread modifying the parameters * and another reading them. * * Returns: true on success and vol info in dcr->VolCatInfo * false on failure */ static bool do_get_volume_info(DCR *dcr) { JCR *jcr = dcr->jcr; BSOCK *dir = jcr->dir_bsock; VOLUME_CAT_INFO vol; int n; int32_t Enabled, Recycle, Protected, UseProtect, VolEncrypted; int32_t InChanger; dcr->setVolCatInfo(false); if (dir->recv() <= 0) { Dmsg0(dbglvl, "getvolname error bnet_recv\n"); Mmsg(jcr->errmsg, _("Network error on bnet_recv in req_vol_info.\n")); return false; } memset(&vol, 0, sizeof(vol)); n = sscanf(dir->msg, OK_media, vol.VolCatName, &vol.VolCatJobs, &vol.VolCatFiles, &vol.VolCatBlocks, &vol.VolCatAmetaBytes, &vol.VolCatAdataBytes, &vol.VolCatHoleBytes, &vol.VolCatHoles, &vol.VolCatMounts, &vol.VolCatErrors, &vol.VolCatWrites, &vol.VolCatMaxBytes, &vol.VolCatCapacityBytes, vol.VolCatStatus, &vol.Slot, &vol.VolCatMaxJobs, &vol.VolCatMaxFiles, &InChanger, &vol.VolReadTime, &vol.VolWriteTime, &vol.EndFile, &vol.EndBlock, &vol.VolCatType, &vol.LabelType, &vol.VolMediaId, &vol.VolScratchPoolId, &vol.VolCatParts, &vol.VolCatCloudParts, &vol.VolLastPartBytes, &Enabled, &vol.MaxPoolBytes, &vol.PoolBytes, &Recycle, &Protected, &UseProtect, &VolEncrypted, &vol.VolRetention); Dmsg2(dbglvl, "msg); if (n != 37) { Dmsg1(dbglvl, "get_volume_info failed: ERR=%s", dir->msg); /* * Note, we can get an error here either because there is * a comm problem, or if the volume is not a suitable * volume to use, so do not issue a Jmsg() here, do it * in the calling routine. 
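 * The error text is kept in jcr->errmsg so the caller can decide how (or whether) to report it.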
*/ Mmsg(jcr->errmsg, _("Error getting Volume info: %s"), dir->msg); return false; } vol.InChanger = InChanger; /* bool in structure */ vol.VolEnabled = Enabled; /* bool in structure */ vol.VolRecycle = Recycle; /* bool in structure */ vol.Protected = Protected; /* bool in structure */ vol.UseProtect = UseProtect; /* bool in structure */ vol.VolEncrypted = VolEncrypted; /* bool in structure */ vol.is_valid = true; vol.VolCatBytes = vol.VolCatAmetaBytes + vol.VolCatAdataBytes; unbash_spaces(vol.VolCatName); bstrncpy(dcr->VolumeName, vol.VolCatName, sizeof(dcr->VolumeName)); dcr->VolCatInfo = vol; /* structure assignment */ Dmsg3(dbglvl, "do_reqest_vol_info return true slot=%d Volume=%s MediaId=%lld\n", dcr->VolCatInfo.Slot, dcr->VolCatInfo.VolCatName, dcr->VolCatInfo.VolMediaId); Dmsg5(dbglvl, "Dir returned VolCatAmetaBytes=%lld VolCatAdataBytes=%lld Status=%s Vol=%s MediaId=%lld\n", dcr->VolCatInfo.VolCatAmetaBytes, dcr->VolCatInfo.VolCatAdataBytes, dcr->VolCatInfo.VolCatStatus, dcr->VolCatInfo.VolCatName, dcr->VolCatInfo.VolMediaId); return true; } /** * Get Volume info for a specific volume from the Director's Database * * Returns: true on success (Director guarantees that Pool and MediaType * are correct and VolStatus==Append or * VolStatus==Recycle) * false on failure * * Volume information returned in dcr->VolCatInfo */ bool dir_get_volume_info(DCR *dcr, const char *VolumeName, enum get_vol_info_rw writing) { if (askdir_handler) { return askdir_handler->dir_get_volume_info(dcr, VolumeName, writing); } JCR *jcr = dcr->jcr; BSOCK *dir = jcr->dir_bsock; P(vol_info_mutex); dcr->setVolCatName(VolumeName); bash_spaces(dcr->getVolCatName()); dir->fsend(Get_Vol_Info, jcr->JobId, dcr->getVolCatName(), writing==GET_VOL_INFO_FOR_WRITE?1:0); Dmsg1(dbglvl, ">dird %s", dir->msg); unbash_spaces(dcr->getVolCatName()); bool ok = do_get_volume_info(dcr); V(vol_info_mutex); return ok; } /** * Get info on the next appendable volume in the Director's database * * Returns: true on success dcr->VolumeName is volume * reserve_volume() called on Volume name * false on failure dcr->VolumeName[0] == 0 * also sets dcr->found_in_use if at least one * in use volume was found. * * Volume information returned in dcr * */ bool dir_find_next_appendable_volume(DCR *dcr) { /* SD tools setup a handler because they have no connection to Dir */ if (askdir_handler) { return askdir_handler->dir_find_next_appendable_volume(dcr); } JCR *jcr = dcr->jcr; BSOCK *dir = jcr->dir_bsock; bool rtn; char lastVolume[MAX_NAME_LENGTH]; /* We may retrieve N volumes that are all busy if we have N busy devices * on the system */ int nb_retry=((rblist *)res_head[R_DEVICE-r_first]->res_list)->size() + 30; Dmsg3(dbglvl, "dir_find_next_appendable_volume: reserved=%d Vol=%s retry=%d\n", dcr->is_reserved(), dcr->VolumeName, nb_retry); Mmsg(jcr->errmsg, "Unknown error\n"); bool can_create = !dcr->dev->is_fs_nearly_full(dcr->dev->min_free_space); /* * Try the thirty oldest or most available volumes. Note, * the most available could already be mounted on another * drive, so we continue looking for a not in use Volume. */ lock_volumes(); P(vol_info_mutex); dcr->clear_found_in_use(); lastVolume[0] = 0; for (int vol_index=1; vol_index < nb_retry; vol_index++) { /* Have we still some space to create a new volume? 
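 * can_create was computed above from the device free space and is passed to the Director with each request.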
*/ bash_spaces(dcr->media_type); bash_spaces(dcr->pool_name); dir->fsend(Find_media, jcr->JobId, vol_index, dcr->pool_name, dcr->media_type, dcr->dev->dev_type, can_create, dcr->dev->use_protect(), dcr->dev->use_volume_encryption()?1:0); unbash_spaces(dcr->media_type); unbash_spaces(dcr->pool_name); Dmsg1(dbglvl, ">dird %s", dir->msg); if (do_get_volume_info(dcr)) { /* Give up if we get the same volume name twice */ if (lastVolume[0] && strcmp(lastVolume, dcr->VolumeName) == 0) { Mmsg(jcr->errmsg, "Director returned same volume name=%s twice.\n", lastVolume); Dmsg1(dbglvl, "Got same vol = %s\n", lastVolume); break; } /* Here, the Director *asks* us to mark the volume in ReadOnly */ if (dcr->dev->use_protect() && (strcmp(dcr->VolCatInfo.VolCatStatus, "Used") == 0 || strcmp(dcr->VolCatInfo.VolCatStatus, "Full") == 0)) { Dmsg1(dbglvl, "Need to mark %s in read-only/immutable\n", dcr->VolumeName); break; } /* If VolCatAdataBytes, we have ALIGNED_DEV */ if (dcr->VolCatInfo.VolCatType == 0 && dcr->VolCatInfo.VolCatAdataBytes != 0) { dcr->VolCatInfo.VolCatType = B_ALIGNED_DEV; } /* * If we have VolType and we are disk or aligned, the VolType must match */ /* ***FIXME*** find better way to handle voltype */ if (dcr->VolCatInfo.VolCatType != 0 && (dcr->dev->dev_type == B_FILE_DEV || dcr->dev->dev_type == B_ALIGNED_DEV || dcr->dev->dev_type == B_CLOUD_DEV) && dcr->dev->dev_type != (int)dcr->VolCatInfo.VolCatType) { Dmsg2(000, "Skip vol. Wanted VolType=%d Got=%d\n", dcr->dev->dev_type, dcr->VolCatInfo.VolCatType); continue; } /* Dedup devices is a bit special, we have two kind of device type so we accept both */ if (dcr->VolCatInfo.VolCatType != 0 && dcr->dev->dev_type == B_DEDUP_DEV && (B_DEDUP_DEV != (int)dcr->VolCatInfo.VolCatType && B_DEDUP_OLD_DEV != (int)dcr->VolCatInfo.VolCatType)) { Dmsg2(000, "Skip vol. Wanted VolType=%d Got=%d\n", dcr->dev->dev_type, dcr->VolCatInfo.VolCatType); continue; } bstrncpy(lastVolume, dcr->VolumeName, sizeof(lastVolume)); if (dcr->can_i_write_volume()) { Dmsg1(dbglvl, "Call reserve_volume for write. Vol=%s\n", dcr->VolumeName); if (reserve_volume(dcr, dcr->VolumeName) == NULL) { Dmsg1(dbglvl, "%s", jcr->errmsg); if (dcr->dev->must_wait()) { rtn = false; dcr->VolumeName[0] = 0; goto get_out; } continue; } Dmsg1(dbglvl, "dir_find_next_appendable_volume return true. vol=%s\n", dcr->VolumeName); rtn = true; goto get_out; } else { Mmsg(jcr->errmsg, "Volume %s is in use.\n", dcr->VolumeName); Dmsg1(dbglvl, "Volume %s is in use.\n", dcr->VolumeName); /* If volume is not usable, it is in use by someone else */ dcr->set_found_in_use(); continue; } } Dmsg2(dbglvl, "No vol. index %d return false. dev=%s\n", vol_index, dcr->dev->print_name()); break; } rtn = false; dcr->VolumeName[0] = 0; get_out: V(vol_info_mutex); unlock_volumes(); if (!rtn && dcr->VolCatInfo.VolScratchPoolId != 0) { Jmsg(jcr, M_WARNING, 0, "%s", jcr->errmsg); Dmsg2(000, "!!!!!!!!! Volume=%s rejected ScratchPoolId=%lld\n", dcr->VolumeName, dcr->VolCatInfo.VolScratchPoolId); Dmsg1(000, "%s", jcr->errmsg); //} else { // Dmsg3(000, "Rtn=%d Volume=%s ScratchPoolId=%lld\n", rtn, dcr->VolumeName, // dcr->VolCatInfo.VolScratchPoolId); } return rtn; } /* * After writing a Volume, send the updated statistics * back to the director. The information comes from the * dev record. 
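 * The Director's reply is read back with do_get_volume_info() and, unless use_dcr_only is set, copied into the device's VolCatInfo.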
*/ bool dir_update_volume_info(DCR *dcr, bool label, bool update_LastWritten, bool use_dcr_only) { if (askdir_handler) { return askdir_handler->dir_update_volume_info(dcr, label, update_LastWritten, use_dcr_only); } JCR *jcr = dcr->jcr; BSOCK *dir = jcr->dir_bsock; DEVICE *dev = dcr->ameta_dev; VOLUME_CAT_INFO vol; char ed1[50], ed2[50], ed3[50], ed4[50], ed5[50], ed6[50], ed7[50], ed8[50]; int InChanger, Enabled, Recycle; bool ok = false; POOL_MEM VolumeName; /* If system job, do not update catalog, except if we explicitly force it. */ if (jcr->getJobType() == JT_SYSTEM && !dcr->force_update_volume_info) { return true; } /* Lock during Volume update */ P(vol_info_mutex); dev->Lock_VolCatInfo(); if (use_dcr_only) { vol = dcr->VolCatInfo; /* structure assignment */ } else { /* Just labeled or relabeled the tape */ if (label) { dev->setVolCatStatus("Append"); } vol = dev->VolCatInfo; /* structure assignment */ /* Reset the number of bytes written since the last update */ dev->VolCatInfo.BytesWritten = 0; } /* This happens when nothing to update after fixup_device ... */ if (vol.VolCatName[0] == 0) { Dmsg0(50, "Volume Name is NULL\n"); goto bail_out; } Dmsg4(100, "Update cat VolBytes=%lld VolABytes=%lld Status=%s Vol=%s\n", vol.VolCatAmetaBytes, vol.VolCatAdataBytes, vol.VolCatStatus, vol.VolCatName); // if (update_LastWritten) { vol.VolLastWritten = time(NULL); // } /* worm cannot be recycled, ensure catalog correct */ if (dev->is_worm() && vol.VolRecycle) { Jmsg(jcr, M_INFO, 0, _("WORM cassette detected: setting Recycle=No on Volume=\"%s\"\n"), vol.VolCatName); vol.VolRecycle = false; vol.Protected = true; } pm_strcpy(VolumeName, vol.VolCatName); bash_spaces(VolumeName); InChanger = vol.InChanger; Enabled = vol.VolEnabled; Recycle = vol.VolRecycle; /* Insanity test */ if (vol.VolCatHoleBytes > (((uint64_t)2)<<60)) { Pmsg1(010, "VolCatHoleBytes too big: %lld. Reset to zero.\n", vol.VolCatHoleBytes); vol.VolCatHoleBytes = 0; } /* Set device type where this Volume used */ if (vol.VolCatType == 0) { vol.VolCatType = dev->dev_type; } /* Do not lock device here because it may be locked from label */ if (!jcr->is_canceled()) { dir->fsend(Update_media, jcr->JobId, VolumeName.c_str(), vol.VolCatJobs, vol.VolCatFiles, vol.VolCatBlocks, edit_uint64(vol.VolCatAmetaBytes, ed1), edit_uint64(vol.VolCatAdataBytes, ed2), edit_uint64(vol.VolCatHoleBytes, ed3), vol.VolCatHoles, vol.VolCatMounts, vol.VolCatErrors, vol.VolCatWrites, edit_uint64(vol.VolCatMaxBytes, ed4), edit_uint64(vol.VolLastWritten, ed5), vol.VolCatStatus, vol.Slot, label, InChanger, /* bool in structure */ edit_int64(vol.VolReadTime, ed6), edit_int64(vol.VolWriteTime, ed7), edit_uint64(vol.VolFirstWritten, ed8), vol.VolCatType, vol.VolCatParts, vol.VolCatCloudParts, vol.VolLastPartBytes, Enabled, Recycle, vol.Protected, dev->use_protect(), vol.VolEncrypted ); Dmsg1(100, ">dird %s", dir->msg); /* * We sent info directly from dev to the Director. * What the Director sends back is first read into * the dcr with do_get_volume_info() */ if (!do_get_volume_info(dcr)) { Jmsg(jcr, M_FATAL, 0, "%s", jcr->errmsg); Dmsg2(dbglvl, _("Didn't get vol info vol=%s: ERR=%s"), vol.VolCatName, jcr->errmsg); goto bail_out; } Dmsg1(100, "get_volume_info() %s", dir->msg); /* Update dev Volume info in case something changed (e.g. 
expired) */ if (!use_dcr_only) { dev->VolCatInfo.Slot = dcr->VolCatInfo.Slot; bstrncpy(dev->VolCatInfo.VolCatStatus, dcr->VolCatInfo.VolCatStatus, sizeof(vol.VolCatStatus)); dev->VolCatInfo.VolCatAdataBytes = dcr->VolCatInfo.VolCatAdataBytes; dev->VolCatInfo.VolCatAmetaBytes = dcr->VolCatInfo.VolCatAmetaBytes; dev->VolCatInfo.VolCatHoleBytes = dcr->VolCatInfo.VolCatHoleBytes; dev->VolCatInfo.VolCatHoles = dcr->VolCatInfo.VolCatHoles; dev->VolCatInfo.VolCatPadding = dcr->VolCatInfo.VolCatPadding; dev->VolCatInfo.VolCatAmetaPadding = dcr->VolCatInfo.VolCatAmetaPadding; dev->VolCatInfo.VolCatAdataPadding = dcr->VolCatInfo.VolCatAdataPadding; dev->VolCatInfo.VolCatFiles = dcr->VolCatInfo.VolCatFiles; dev->VolCatInfo.VolCatBytes = dcr->VolCatInfo.VolCatBytes; dev->VolCatInfo.VolCatMounts = dcr->VolCatInfo.VolCatMounts; dev->VolCatInfo.VolCatJobs = dcr->VolCatInfo.VolCatJobs; dev->VolCatInfo.VolCatFiles = dcr->VolCatInfo.VolCatFiles; dev->VolCatInfo.VolCatRecycles = dcr->VolCatInfo.VolCatRecycles; dev->VolCatInfo.VolCatWrites = dcr->VolCatInfo.VolCatWrites; dev->VolCatInfo.VolCatReads = dcr->VolCatInfo.VolCatReads; dev->VolCatInfo.VolEnabled = dcr->VolCatInfo.VolEnabled; dev->VolCatInfo.VolCatMaxBytes = dcr->VolCatInfo.VolCatMaxBytes; dev->VolCatInfo.VolRecycle = dcr->VolCatInfo.VolRecycle; dev->VolCatInfo.BytesWritten = 0; } ok = true; } bail_out: dev->Unlock_VolCatInfo(); V(vol_info_mutex); return ok; } struct JOBMEDIA_ITEM { dlink link; int64_t VolMediaId; uint64_t StartAddr; uint64_t EndAddr; uint32_t VolFirstIndex; uint32_t VolLastIndex; uint32_t StartFile; uint32_t EndFile; uint32_t StartBlock; uint32_t EndBlock; }; void create_jobmedia_queue(JCR *jcr) { JOBMEDIA_ITEM *item = NULL; jcr->jobmedia_queue = New(dlist(item, &item->link)); FILEMEDIA_ITEM *fm = NULL; jcr->filemedia_queue = New(dlist(fm, &fm->link)); } bool flush_jobmedia_queue(JCR *jcr) { if (askdir_handler) { return askdir_handler->flush_jobmedia_queue(jcr); } JOBMEDIA_ITEM *item; BSOCK *dir = jcr->dir_bsock; bool ok; if (!flush_filemedia_queue(jcr)) { return false; /* already in FATAL */ } if (!jcr->jobmedia_queue || jcr->jobmedia_queue->size() == 0) { return true; /* should never happen */ } Dmsg1(400, "=== Flush jobmedia queue = %d\n", jcr->jobmedia_queue->size()); dir->fsend(Create_jobmedia, jcr->JobId); foreach_dlist(item, jcr->jobmedia_queue) { if (jcr->is_JobStatus(JS_Incomplete)) { if (item->VolFirstIndex >= dir->get_lastFileIndex()) { continue; } if (item->VolLastIndex >= dir->get_lastFileIndex()) { item->VolLastIndex = dir->get_lastFileIndex() - 1; } } ok = dir->fsend("%u %u %u %u %u %u %lld\n", item->VolFirstIndex, item->VolLastIndex, item->StartFile, item->EndFile, item->StartBlock, item->EndBlock, item->VolMediaId); /* Keep track of last FileIndex flushed */ dir->set_lastFlushIndex(item->VolLastIndex); Dmsg2(400, "sd->dir: ok=%d Jobmedia=%s", ok, dir->msg); } dir->signal(BNET_EOD); jcr->jobmedia_queue->destroy(); if (dir->recv() <= 0) { Dmsg0(dbglvl, "create_jobmedia error bnet_recv\n"); Jmsg(jcr, M_FATAL, 0, _("Error creating JobMedia records: ERR=%s\n"), dir->bstrerror()); return false; } Dmsg1(210, "msg); if (strcmp(dir->msg, OK_create) != 0) { Dmsg1(dbglvl, "Bad response from Dir: %s\n", dir->msg); Jmsg(jcr, M_FATAL, 0, _("Error creating JobMedia records: %s\n"), dir->msg); return false; } return true; } /* * After writing a Volume, create the JobMedia record. 
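 * Records are queued on jcr->jobmedia_queue and flushed in batches of 1000 by flush_jobmedia_queue().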
*/ bool dir_create_jobmedia_record(DCR *dcr, bool zero) { if (askdir_handler) { return askdir_handler->dir_create_jobmedia_record(dcr, zero); } JCR *jcr = dcr->jcr; BSOCK *dir = jcr->dir_bsock; JOBMEDIA_ITEM *item; bool ok = true;; if (!zero && !dcr->WroteVol) { return true; } if (!zero && dcr->VolLastIndex == 0) { Pmsg7(dbglvl, "Discard: JobMedia Vol=%s wrote=%d MediaId=%lld FI=%lu LI=%lu StartAddr=%lld EndAddr=%lld\n", dcr->VolumeName, dcr->WroteVol, dcr->VolMediaId, dcr->VolFirstIndex, dcr->VolLastIndex, dcr->StartAddr, dcr->EndAddr); return true; /* nothing written to the Volume */ } /* Throw out records where the start address is bigger than the end */ if (!zero && dcr->StartAddr > dcr->EndAddr) { Pmsg7(dbglvl, "Discard: JobMedia Vol=%s wrote=%d MediaId=%lld FI=%lu LI=%lu StartAddr=%lld EndAddr=%lld\n", dcr->VolumeName, dcr->WroteVol, dcr->VolMediaId, dcr->VolFirstIndex, dcr->VolLastIndex, dcr->StartAddr, dcr->EndAddr); return true; } /* If system job, do not update catalog */ if (jcr->getJobType() == JT_SYSTEM) { return true; } /* Throw out records where FI is zero -- i.e. nothing done */ if (!zero && dcr->VolFirstIndex == 0 && (dcr->StartAddr != 0 || dcr->EndAddr != 0)) { Pmsg7(dbglvl, "Discard: JobMedia Vol=%s wrote=%d MediaId=%lld FI=%lu LI=%lu StartAddr=%lld EndAddr=%lld\n", dcr->VolumeName, dcr->WroteVol, dcr->VolMediaId, dcr->VolFirstIndex, dcr->VolLastIndex, dcr->StartAddr, dcr->EndAddr); return true; } /* * If this Job is incomplete, we need to backup the FileIndex * to the last correctly saved file so that the JobMedia * LastIndex is correct. * */ if (jcr->is_JobStatus(JS_Incomplete)) { dcr->VolLastIndex = dir->get_lastFileIndex(); Dmsg1(100, "======= Set FI=%ld\n", dcr->VolLastIndex); } Dmsg7(100, "Queue JobMedia Vol=%s wrote=%d MediaId=%lld FI=%lu LI=%lu StartAddr=%lld EndAddr=%lld\n", dcr->VolumeName, dcr->WroteVol, dcr->VolMediaId, dcr->VolFirstIndex, dcr->VolLastIndex, dcr->StartAddr, dcr->EndAddr); item = (JOBMEDIA_ITEM *)malloc(sizeof(JOBMEDIA_ITEM)); if (zero) { item->VolFirstIndex = item->VolLastIndex = 0; item->StartFile = item->EndFile = 0; item->StartBlock = item->EndBlock = 0; item->StartAddr = item->EndAddr = 0; item->VolMediaId = dcr->VolMediaId; } else { item->VolFirstIndex = dcr->VolFirstIndex; item->VolLastIndex = dcr->VolLastIndex; item->StartFile = (uint32_t)(dcr->StartAddr >> 32); item->EndFile = (uint32_t)(dcr->EndAddr >> 32); item->StartBlock = (uint32_t)dcr->StartAddr; item->EndBlock = (uint32_t)dcr->EndAddr; item->StartAddr = dcr->StartAddr; item->EndAddr = dcr->EndAddr; item->VolMediaId = dcr->VolMediaId; } jcr->jobmedia_queue->append(item); /* Flush at queue size of 1000 jobmedia records */ if (zero || jcr->jobmedia_queue->size() >= 1000) { ok = flush_jobmedia_queue(jcr); } dcr->VolFirstIndex = dcr->VolLastIndex = 0; dcr->StartAddr = dcr->EndAddr = 0; dcr->VolMediaId = 0; dcr->WroteVol = false; return ok; } /** * Update File Attribute data * We do the following: * 1. expand the bsock buffer to be large enough * 2. Write a "header" into the buffer with serialized data * VolSessionId * VolSeesionTime * FileIndex * Stream * data length that follows * start of raw byte data from the Device record. * Note, this is primarily for Attribute data, but can * also handle any device record. The Director must know * the raw byte data format that is defined for each Stream. 
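 * The header fields are serialized with ser_uint32()/ser_int32() immediately before the raw record bytes.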
* Now Restore Objects pass through here STREAM_RESTORE_OBJECT */ bool dir_update_file_attributes(DCR *dcr, DEV_RECORD *rec) { if (askdir_handler) { return askdir_handler->dir_update_file_attributes(dcr, rec); } JCR *jcr = dcr->jcr; BSOCK *dir = jcr->dir_bsock; ser_declare; #ifdef NO_ATTRIBUTES_TEST return true; #endif dir->msg = check_pool_memory_size(dir->msg, sizeof(FileAttributes) + MAX_NAME_LENGTH + sizeof(DEV_RECORD) + rec->data_len + 1); dir->msglen = bsnprintf(dir->msg, sizeof(FileAttributes) + MAX_NAME_LENGTH + 1, FileAttributes, jcr->JobId); ser_begin(dir->msg + dir->msglen, 0); ser_uint32(rec->VolSessionId); ser_uint32(rec->VolSessionTime); ser_int32(rec->FileIndex); ser_int32(rec->Stream); ser_uint32(rec->data_len); ser_bytes(rec->data, rec->data_len); dir->msglen = ser_length(dir->msg); Dmsg1(1800, ">dird %s\n", dir->msg); /* Attributes */ if (rec->maskedStream == STREAM_UNIX_ATTRIBUTES || rec->maskedStream == STREAM_UNIX_ATTRIBUTES_EX) { Dmsg2(1500, "==== set_data_end FI=%ld %s\n", rec->FileIndex, rec->data); dir->set_data_end(rec->FileIndex); /* set offset of valid data */ } return dir->send(); } /** * Request the sysop to create an appendable volume * * Entered with device blocked. * Leaves with device blocked. * * Returns: true on success (operator issues a mount command) * false on failure * Note, must create dev->errmsg on error return. * * On success, dcr->VolumeName and dcr->VolCatInfo contain * information on suggested volume, but this may not be the * same as what is actually mounted. * * When we return with success, the correct tape may or may not * actually be mounted. The calling routine must read it and * verify the label. */ bool dir_ask_sysop_to_create_appendable_volume(DCR *dcr) { if (askdir_handler) { return askdir_handler->dir_ask_sysop_to_create_appendable_volume(dcr); } int stat = W_TIMEOUT; DEVICE *dev = dcr->dev; JCR *jcr = dcr->jcr; bool got_vol = false; if (job_canceled(jcr) || jcr->is_incomplete()) { dev->poll = false; return false; } Dmsg0(400, "enter dir_ask_sysop_to_create_appendable_volume\n"); ASSERT(dev->blocked()); for ( ;; ) { if (job_canceled(jcr) || jcr->is_incomplete()) { Mmsg(dev->errmsg, _("Job %s canceled while waiting for mount on Storage Device \"%s\".\n"), jcr->Job, dev->print_name()); Jmsg(jcr, M_INFO, 0, "%s", dev->errmsg); dev->poll = false; return false; } got_vol = dir_find_next_appendable_volume(dcr); /* get suggested volume */ if (got_vol) { goto get_out; } else { dev->clear_wait(); if (stat == W_TIMEOUT || stat == W_MOUNT) { Mmsg(dev->errmsg, _( "Job %s is waiting. Cannot find any appendable volumes.\n" "Please use the \"label\" command to create a new Volume for:\n" " Storage: %s\n" " Pool: %s\n" " Media type: %s\n"), jcr->Job, dev->print_name(), dcr->pool_name, dcr->media_type); Jmsg(jcr, M_MOUNT, 0, "%s", dev->errmsg); Dmsg1(dbglvl, "%s", dev->errmsg); } } jcr->sendJobStatus(JS_WaitMedia); stat = wait_for_sysop(dcr); Dmsg1(dbglvl, "Back from wait_for_sysop stat=%d\n", stat); if (dev->poll) { Dmsg1(dbglvl, "Poll timeout in create append vol on device %s\n", dev->print_name()); continue; } if (stat == W_WAKE) { /* Job could be marked to stopped, need to break */ Mmsg0(dev->errmsg, _("Job was stopped by the user.\n")); Jmsg(jcr, M_INFO, 0, "%s", dev->errmsg); Dmsg1(dbglvl, "Job marked to be stopped. 
Gave up waiting on device %s\n", dev->print_name()); dev->poll = false; return false; } if (stat == W_TIMEOUT) { if (!double_dev_wait_time(dev)) { Mmsg(dev->errmsg, _("Max time exceeded waiting to mount Storage Device %s for Job %s\n"), dev->print_name(), jcr->Job); Jmsg(jcr, M_FATAL, 0, "%s", dev->errmsg); Dmsg1(dbglvl, "Gave up waiting on device %s\n", dev->print_name()); dev->poll = false; return false; /* exceeded maximum waits */ } continue; } if (stat == W_ERROR) { berrno be; Mmsg0(dev->errmsg, _("pthread error in mount_next_volume.\n")); Jmsg(jcr, M_FATAL, 0, "%s", dev->errmsg); dev->poll = false; return false; } Dmsg1(dbglvl, "Someone woke me for device %s\n", dev->print_name()); } get_out: dev->poll = false; jcr->sendJobStatus(JS_Running); Dmsg0(dbglvl, "leave dir_ask_sysop_to_create_appendable_volume\n"); return true; } /** * Request to mount specific Volume * * Entered with device blocked and dcr->VolumeName is desired * volume. * Leaves with device blocked. * * Returns: true on success (operator issues a mount command) * false on failure * Note, must create dev->errmsg on error return. * */ bool dir_ask_sysop_to_mount_volume(DCR *dcr, bool write_access) { if (askdir_handler) { return askdir_handler->dir_ask_sysop_to_mount_volume(dcr, write_access); } int stat = W_TIMEOUT; DEVICE *dev = dcr->dev; JCR *jcr = dcr->jcr; Dmsg0(400, "enter dir_ask_sysop_to_mount_volume\n"); if (!dcr->VolumeName[0]) { Mmsg0(dev->errmsg, _("Cannot request another volume: no volume name given.\n")); dev->poll = false; return false; } if (dcr->no_mount_request) { Mmsg(dev->errmsg, _("The current operation doesn't support mount request\n")); dev->poll = false; return false; } for ( ;; ) { if (job_canceled(jcr) || jcr->is_incomplete()) { Mmsg(dev->errmsg, _("Job %s canceled while waiting for mount on Storage Device %s.\n"), jcr->Job, dev->print_name()); dev->poll = false; return false; } /* * If we are not polling, and the wait timeout or the * user explicitly did a mount, send him the message. * Otherwise skip it. */ if (!dev->poll && (stat == W_TIMEOUT || stat == W_MOUNT)) { const char *msg; if (write_access) { msg = _("%sPlease mount append Volume \"%s\" or label a new one for:\n" " Job: %s\n" " Storage: %s\n" " Pool: %s\n" " Media type: %s\n"); } else { msg = _("%sPlease mount read Volume \"%s\" for:\n" " Job: %s\n" " Storage: %s\n" " Pool: %s\n" " Media type: %s\n"); } Jmsg(jcr, M_MOUNT, 0, msg, dev->is_nospace()?_("\n\nWARNING: device is full! Please add more disk space then ...\n\n"):"", dcr->VolumeName, jcr->Job, dev->print_name(), dcr->pool_name, dcr->media_type); Dmsg3(400, "Mount \"%s\" on device \"%s\" for Job %s\n", dcr->VolumeName, dev->print_name(), jcr->Job); } jcr->sendJobStatus(JS_WaitMount); stat = wait_for_sysop(dcr); /* wait on device */ Dmsg1(100, "Back from wait_for_sysop stat=%d\n", stat); if (dev->poll) { Dmsg1(100, "Poll timeout in mount vol on device %s\n", dev->print_name()); Dmsg1(100, "Blocked=%s\n", dev->print_blocked()); goto get_out; } if (stat == W_WAKE) { /* Job could be marked to stopped, need to break */ Mmsg0(dev->errmsg, _("Job was stopped by the user.\n")); Jmsg(jcr, M_INFO, 0, "%s", dev->errmsg); Dmsg1(dbglvl, "Job marked to be stopped. 
Gave up waiting on device %s\n", dev->print_name()); dev->poll = false; return false; } if (stat == W_TIMEOUT) { if (!double_dev_wait_time(dev)) { Mmsg(dev->errmsg, _("Max time exceeded waiting to mount Storage Device %s for Job %s\n"), dev->print_name(), jcr->Job); Jmsg(jcr, M_FATAL, 0, "%s", dev->errmsg); Dmsg1(400, "Gave up waiting on device %s\n", dev->print_name()); dev->poll = false; return false; /* exceeded maximum waits */ } continue; } if (stat == W_ERROR) { berrno be; Mmsg(dev->errmsg, _("pthread error in mount_volume\n")); Jmsg(jcr, M_FATAL, 0, "%s", dev->errmsg); dev->poll = false; return false; } Dmsg1(100, "Someone woke me for device %s\n", dev->print_name()); break; } get_out: if (job_canceled(jcr) || jcr->is_incomplete()) { Mmsg(dev->errmsg, _("Job %s canceled while waiting for mount on Storage Device %s.\n"), jcr->Job, dev->print_name()); dev->poll = false; return false; } jcr->sendJobStatus(JS_Running); Dmsg0(100, "leave dir_ask_sysop_to_mount_volume\n"); return true; } bacula-15.0.3/src/stored/stored.c0000644000175000017500000007660614771010173016434 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Third generation Storage daemon. * * Written by Kern Sibbald, MM * * It accepts a number of simple commands from the File daemon * and acts on them. When a request to append data is made, * it opens a data channel and accepts data from the * File daemon. 
* */ #include "bacula.h" #include "stored.h" /* TODO: fix problem with bls, bextract * that use findlib and already declare * filed plugins */ #include "sd_plugins.h" #if defined(HAVE_LINUX_OS) && defined(HAVE_LIBCAP) #include #endif /* Imported functions and variables */ extern bool parse_sd_config(CONFIG *config, const char *configfile, int exit_code); /* Forward referenced functions */ void terminate_stored(int sig); static int check_resources(); static void cleanup_old_files(); extern "C" void *device_initialization(void *arg); #define CONFIG_FILE "bacula-sd.conf" /* Default config file */ /* Global variables exported */ char OK_msg[] = "3000 OK\n"; char TERM_msg[] = "3999 Terminate\n"; static bool test_config = false; bstatcollect *statcollector = NULL; sdstatmetrics_t sdstatmetrics; static uint32_t VolSessionId = 0; uint32_t VolSessionTime; char *configfile = NULL; bool init_done = false; static pthread_t server_tid; static bool server_tid_valid = false; /* Global static variables */ static bool foreground = false; static bool make_pid_file = true; /* create pid file */ static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; static workq_t dird_workq; /* queue for processing connections */ static CONFIG *config; static void usage() { fprintf(stderr, _( PROG_COPYRIGHT "\n%sVersion: %s (%s)\n\n" "Usage: bacula-sd [options] [-c config_file] [config_file]\n" " -c use as configuration file\n" " -d [,] set debug level to , debug tags to \n" " -dt print timestamp in debug output\n" " -T set trace on\n" " -f run in foreground (for debugging)\n" " -g set groupid to group\n" " -m print kaboom output (for debugging)\n" " -p proceed despite I/O errors\n" " -P do not create pid file\n" " -s no signals (for debugging)\n" " -t test - read config and exit\n" " -u userid to \n" " -v verbose user messages\n" " -? print this message.\n" "\n"), 2000, BDEMO, VERSION, BDATE); exit(1); } /* * !!! WARNING !!! Use this function only when bacula is stopped. * ie, after a fatal signal and before exiting the program * Print information about a JCR */ static void sd_debug_print(JCR *jcr, FILE *fp) { if (jcr->dcr) { DCR *dcr = jcr->dcr; fprintf(fp, "\tdcr=%p volumename=%s dev=%p newvol=%d reserved=%d locked=%d\n", dcr, dcr->VolumeName, dcr->dev, dcr->NewVol, dcr->is_reserved(), dcr->is_dev_locked()); } else { fprintf(fp, "dcr=*None*\n"); } } #if defined(HAVE_LINUX_OS) && defined(HAVE_LIBCAP) static bool get_needed_caps() { /* Storage Daemon must be running with following capabilities to be able to set/clear * APPEND and IMMUTABLE flags on volumes. */ const char *caps_needed = "cap_linux_immutable"; cap_t caps = NULL; char *cap_text = NULL; bool ret = false; berrno be; caps = cap_get_proc(); if (!caps) { Dmsg1(90, "Calling cap_get_proc() failed, ERR=%s\n", be.bstrerror()); goto bail_out; } cap_text = cap_to_text(caps, NULL); if (!cap_text) { Dmsg1(90, "Calling cap_get_proc() failed, ERR=%s\n", be.bstrerror()); goto bail_out; } /* Check if we are running with capabilities needed */ ret = strstr(cap_text, caps_needed) == NULL ? 
false : true; bail_out: if (cap_text) { cap_free(cap_text); } if (caps) { cap_free(caps); } if (ret) { Dmsg0(90, "Have needed caps, APPEND and IMMUTABLE flags can be used for volumes.\n"); } else { Dmsg0(90, "Do not have needed caps, APPEND and IMMUTABLE flags cannot be used for volumes.\n"); } return ret; } #else static bool get_needed_caps() { /* Determine why we fail here */ bool islinux, libcap; #if defined(HAVE_LINUX_OS) islinux = true; #else islinux = false; #endif // HAVE_LINUX_OS #if defined(HAVE_LIBCAP) libcap = true; #else libcap = false; #endif // HAVE_LIBCAP Dmsg2(90, "Returning from mocked get_needed_caps(), linux: %d libcap: %d\n", islinux, libcap); return false; } #endif // HAVE_LINUX_OS && HAVE_LIBCAP /********************************************************************* * * Main Bacula Unix Storage Daemon * */ #if defined(HAVE_WIN32) #define main BaculaMain #endif int main (int argc, char *argv[]) { int ch; bool no_signals = false; pthread_t thid; char *uid = NULL; char *gid = NULL; device_default_open_mode = omd_write; mark_heap(); init_working_directory(); setlocale(LC_ALL, ""); bindtextdomain("bacula", LOCALEDIR); textdomain("bacula"); init_stack_dump(); my_name_is(argc, argv, "bacula-sd"); init_msg(NULL, NULL); daemon_start_time = time(NULL); setup_daemon_message_queue(); /* Sanity checks */ if (TAPE_BSIZE % B_DEV_BSIZE != 0 || TAPE_BSIZE / B_DEV_BSIZE == 0) { Emsg2(M_ABORT, 0, _("Tape block size (%d) not multiple of system size (%d)\n"), TAPE_BSIZE, B_DEV_BSIZE); } if (TAPE_BSIZE != (1 << (ffs(TAPE_BSIZE)-1))) { Emsg1(M_ABORT, 0, _("Tape block size (%d) is not a power of 2\n"), TAPE_BSIZE); } while ((ch = getopt(argc, argv, "c:d:fg:mpPstu:v?Ti")) != -1) { switch (ch) { case 'c': /* configuration file */ if (configfile != NULL) { free(configfile); } configfile = bstrdup(optarg); break; case 'd': /* debug level */ if (*optarg == 't') { dbg_timestamp = true; } else { char *p; /* We probably find a tag list -d 10,sql,bvfs */ if ((p = strchr(optarg, ',')) != NULL) { *p = 0; } debug_level = atoi(optarg); if (debug_level <= 0) { debug_level = 1; } if (p) { debug_parse_tags(p+1, &debug_level_tags); } } break; case 'T': set_trace(true); break; case 'f': /* run in foreground */ foreground = true; break; case 'g': /* set group id */ gid = optarg; break; /* Temp code to enable new match_bsr() code, not documented */ case 'i': use_new_match_all = 1; break; case 'm': /* print kaboom output */ prt_kaboom = true; break; case 'p': /* proceed in spite of I/O errors */ forge_on = true; break; case 'P': /* no pid file */ make_pid_file = false; break; case 's': /* no signals */ no_signals = true; break; case 't': test_config = true; break; case 'u': /* set uid */ uid = optarg; break; case 'v': /* verbose */ verbose++; break; case '?': default: usage(); break; } } argc -= optind; argv += optind; if (argc) { if (configfile != NULL) { free(configfile); } configfile = bstrdup(*argv); argc--; argv++; } if (argc) { usage(); } if (!foreground && !test_config) { daemon_start(); /* become daemon */ init_stack_dump(); /* pick up new pid */ } if (!no_signals) { init_signals(terminate_stored); } if (configfile == NULL) { configfile = bstrdup(CONFIG_FILE); } config = New(CONFIG()); parse_sd_config(config, configfile, M_ERROR_TERM); if (init_crypto() != 0) { Jmsg((JCR *)NULL, M_ERROR_TERM, 0, _("Cryptography library initialization failed.\n")); } if (!check_resources()) { Jmsg((JCR *)NULL, M_ERROR_TERM, 0, _("Please correct configuration file: %s\n"), configfile); } if 
(crypto_check_fips(me->require_fips) < 0) { Jmsg((JCR *)NULL, M_ERROR_TERM, 0, _("Unable to set FIPS mode\n")); } init_reservations_lock(); if (test_config) { terminate_stored(0); } my_name_is(0, (char **)NULL, me->hdr.name); /* Set our real name */ /* relocate trace file if needed, must be run after set_working_directory() * and my_name_is() */ update_trace_file_location(false); if (make_pid_file) { create_pid_file(me->pid_directory, "bacula-sd", get_first_port_host_order(me->sdaddrs)); } read_state_file(me->working_directory, "bacula-sd", get_first_port_host_order(me->sdaddrs)); set_jcr_in_tsd(INVALID_JCR); /* Make sure on Solaris we can run concurrent, watch dog + servers + misc */ set_thread_concurrency(me->max_concurrent_jobs * 2 + 4); lmgr_init_thread(); /* initialize the lockmanager stack */ got_caps_needed = get_needed_caps(); load_sd_plugins(me->plugin_directory); drop(uid, gid, false); /* initialize a statistics collector */ initialize_statcollector(); cleanup_old_files(); /* Ensure that Volume Session Time and Id are both * set and are both non-zero. */ VolSessionTime = (uint32_t)daemon_start_time; if (VolSessionTime == 0) { /* paranoid */ Jmsg0(NULL, M_ABORT, 0, _("Volume Session Time is ZERO!\n")); } /* * Start the device allocation thread */ create_volume_lists(); /* do before device_init */ if (pthread_create(&thid, NULL, device_initialization, NULL) != 0) { berrno be; Emsg1(M_ABORT, 0, _("Unable to create thread. ERR=%s\n"), be.bstrerror()); } start_watchdog(); /* start watchdog thread */ init_jcr_subsystem(); /* start JCR watchdogs etc. */ dbg_jcr_add_hook(sd_debug_print); /* used to director variables */ start_collector_threads(); /* start collector thread for every Collector resource */ /* Keep track of important events */ events_send_msg(NULL, "SD0001", EVENTS_TYPE_DAEMON, "*Daemon*", (intptr_t)get_first_port_host_order(me->sdaddrs), "Storage startup %s (%s)", VERSION, LSMDATE); /* Single server used for Director and File daemon */ server_tid = pthread_self(); server_tid_valid = true; bnet_thread_server(me->sdaddrs, me->max_concurrent_jobs * 2 + 1, &dird_workq, handle_connection_request); exit(1); /* to keep compiler quiet */ } /* Return a new Session Id */ uint32_t newVolSessionId() { uint32_t Id; P(mutex); VolSessionId++; Id = VolSessionId; V(mutex); return Id; } /* Check Configuration file for necessary info */ static int check_resources() { bool OK = true; bool tls_needed; AUTOCHANGER *changer; DEVRES *device; me = (STORES *)GetNextRes(R_STORAGE, NULL); if (!me) { Jmsg1(NULL, M_ERROR, 0, _("No Storage resource defined in %s. Cannot continue.\n"), configfile); OK = false; } if (GetNextRes(R_STORAGE, (RES *)me) != NULL) { Jmsg1(NULL, M_ERROR, 0, _("Only one Storage resource permitted in %s\n"), configfile); OK = false; } if (GetNextRes(R_DIRECTOR, NULL) == NULL) { Jmsg1(NULL, M_ERROR, 0, _("No Director resource defined in %s. Cannot continue.\n"), configfile); OK = false; } if (GetNextRes(R_DEVICE, NULL) == NULL){ Jmsg1(NULL, M_ERROR, 0, _("No Device resource defined in %s. Cannot continue.\n"), configfile); OK = false; } foreach_res(device, R_DEVICE) { if (device->max_volume_size && (device->max_volume_size < 1000000)) { Jmsg(NULL, M_FATAL, 0, _("Invalid (too small) MaximumVolumeSize: %llu for device: %s. 
" "MaximumVolumeSize should be greater or equal to 1MB\n"), device->max_volume_size, device->hdr.name); OK = false; } } if (me && !me->messages) { me->messages = (MSGS *)GetNextRes(R_MSGS, NULL); if (!me->messages) { Jmsg1(NULL, M_ERROR, 0, _("No Messages resource defined in %s. Cannot continue.\n"), configfile); OK = false; } } if (me && !me->working_directory) { Jmsg1(NULL, M_ERROR, 0, _("No Working Directory defined in %s. Cannot continue.\n"), configfile); OK = false; } DIRRES *director; STORES *store; foreach_res(store, R_STORAGE) { /* tls_require implies tls_enable */ if (store->tls_require) { if (have_tls) { if (store->tls_certfile || store->tls_keyfile) { store->tls_enable = true; } } else { Jmsg(NULL, M_FATAL, 0, _("TLS required but not configured in Bacula.\n")); OK = false; continue; } } tls_needed = store->tls_enable || store->tls_authenticate; if (!store->tls_certfile && tls_needed) { Jmsg(NULL, M_FATAL, 0, _("\"TLS Certificate\" file not defined for Storage \"%s\" in %s.\n"), store->hdr.name, configfile); OK = false; } if (!store->tls_keyfile && tls_needed) { Jmsg(NULL, M_FATAL, 0, _("\"TLS Key\" file not defined for Storage \"%s\" in %s.\n"), store->hdr.name, configfile); OK = false; } if ((!store->tls_ca_certfile && !store->tls_ca_certdir) && tls_needed && store->tls_verify_peer) { Jmsg(NULL, M_FATAL, 0, _("Neither \"TLS CA Certificate File\"" " or \"TLS CA Certificate Dir\" are defined for Storage \"%s\" in %s." " At least one CA certificate store is required" " when using \"TLS Verify Peer\".\n"), store->hdr.name, configfile); OK = false; } /* If everything is well, attempt to initialize our per-resource TLS context */ if (OK && tls_needed) { /* Initialize TLS context: * Args: CA certfile, CA certdir, Certfile, Keyfile, * Keyfile PEM Callback, Keyfile CB Userdata, DHfile, Verify Peer */ store->tls_ctx = new_tls_context(store->tls_ca_certfile, store->tls_ca_certdir, store->tls_certfile, store->tls_keyfile, NULL, NULL, store->tls_dhfile, store->tls_verify_peer); if (!store->tls_ctx) { Jmsg(NULL, M_FATAL, 0, _("Failed to initialize TLS context for Storage \"%s\" in %s.\n"), store->hdr.name, configfile); OK = false; } } store->psk_ctx = new_psk_context(NULL); /* shared key generated by DIR */ /* In this case, we have TLS Require=Yes and TLS not setup and no PSK */ if (OK && tls_needed == false && store->tls_require) { if (!store->psk_ctx) { Jmsg(NULL, M_FATAL, 0, _("Failed to initialize TLS PSK context for Storage \"%s\" in %s.\n"), store->hdr.name, configfile); OK = false; } } } foreach_res(director, R_DIRECTOR) { /* tls_require implies tls_enable */ if (director->tls_require) { if (director->tls_certfile || director->tls_keyfile) { director->tls_enable = true; } } tls_needed = director->tls_enable || director->tls_authenticate; if (!director->tls_certfile && tls_needed) { Jmsg(NULL, M_FATAL, 0, _("\"TLS Certificate\" file not defined for Director \"%s\" in %s.\n"), director->hdr.name, configfile); OK = false; } if (!director->tls_keyfile && tls_needed) { Jmsg(NULL, M_FATAL, 0, _("\"TLS Key\" file not defined for Director \"%s\" in %s.\n"), director->hdr.name, configfile); OK = false; } if ((!director->tls_ca_certfile && !director->tls_ca_certdir) && tls_needed && director->tls_verify_peer) { Jmsg(NULL, M_FATAL, 0, _("Neither \"TLS CA Certificate File\"" " or \"TLS CA Certificate Dir\" are defined for Director \"%s\" in %s." 
" At least one CA certificate store is required" " when using \"TLS Verify Peer\".\n"), director->hdr.name, configfile); OK = false; } /* If everything is well, attempt to initialize our per-resource TLS context */ if (OK && tls_needed) { /* Initialize TLS context: * Args: CA certfile, CA certdir, Certfile, Keyfile, * Keyfile PEM Callback, Keyfile CB Userdata, DHfile, Verify Peer */ director->tls_ctx = new_tls_context(director->tls_ca_certfile, director->tls_ca_certdir, director->tls_certfile, director->tls_keyfile, NULL, NULL, director->tls_dhfile, director->tls_verify_peer); if (!director->tls_ctx) { Jmsg(NULL, M_FATAL, 0, _("Failed to initialize TLS context for Director \"%s\" in %s.\n"), director->hdr.name, configfile); OK = false; } } director->psk_ctx = new_psk_context(director->password); /* In this case, we have TLS Require=Yes and TLS not setup and no PSK */ if (OK && (tls_needed == false && director->tls_require)) { if (!director->psk_ctx) { Jmsg(NULL, M_FATAL, 0, _("Failed to initialize TLS PSK context for Director \"%s\" in %s.\n"), director->hdr.name, configfile); OK = false; } } } /* During the device selection, if we put read-only device first, * they will be selected automatically during a restore, so we * reorder the list to keep the device order, but put read-only * devices first. */ foreach_res(changer, R_AUTOCHANGER) { alist *org_list = changer->device; alist *new_list = New(alist(org_list->size(), not_owned_by_alist)); /* We loop two times to keep the order of the devices * per type (readonly/read-write) */ foreach_alist(device, changer->device) { device->cap_bits |= CAP_AUTOCHANGER; if (device->read_only) { new_list->append(device); } } foreach_alist(device, changer->device) { if (!device->read_only) { new_list->append(device); } } changer->device = new_list; delete org_list; } CLOUD *cloud; /* TODO: Can use a table */ foreach_res(cloud, R_CLOUD) { if (cloud->driver_type == C_S3_DRIVER || cloud->driver_type == C_FILE_DRIVER) { if (cloud->host_name == NULL) { Jmsg(NULL, M_FATAL, 0, _("Failed to initialize Cloud. Hostname not defined for Cloud \"%s\"\n"), cloud->hdr.name); OK = false; } } if (cloud->driver_type == C_WAS_DRIVER || cloud->driver_type == C_S3_DRIVER) { if (cloud->access_key == NULL) { Jmsg(NULL, M_FATAL, 0, _("Failed to initialize Cloud. AccessKey not set for Cloud \"%s\"\n"), cloud->hdr.name); OK = false; } if (cloud->secret_key == NULL) { Jmsg(NULL, M_FATAL, 0, _("Failed to initialize Cloud. SecretKey not set for Cloud \"%s\"\n"), cloud->hdr.name); OK = false; } } } #ifdef SD_DEDUP_SUPPORT DEDUPRES *dedup; foreach_res(dedup, R_DEDUP) { if (dedup->driver_type == D_LEGACY_DRIVER) { if (dedup->dedup_dir == NULL) { Jmsg(NULL, M_FATAL, 0, _("Failed to initialize Dedup Legacy. DedupDirectory not defined for Dedup \"%s\"\n"), dedup->hdr.name); OK = false; } } else if (dedup->driver_type == D_DEDUP2_DRIVER) { if (dedup->dedup_dir == NULL) { Jmsg(NULL, M_FATAL, 0, _("Failed to initialize Dedup 2. DedupDirectory not defined for Dedup \"%s\"\n"), dedup->hdr.name); OK = false; } } } #endif if (OK) { OK = init_autochangers(); } if (OK) { close_msg(NULL); /* close temp message handler */ init_msg(NULL, me->messages); /* open daemon message handler */ set_working_directory(me->working_directory); } return OK; } /* * Remove old .spool files written by me from the working directory. 
*/ static void cleanup_old_files() { DIR* dp; int rc, name_max; int my_name_len = strlen(my_name); int len = strlen(me->working_directory); POOLMEM *cleanup = get_pool_memory(PM_MESSAGE); POOLMEM *basename = get_pool_memory(PM_MESSAGE); POOL_MEM dname(PM_FNAME); regex_t preg1; char prbuf[500]; const int nmatch = 30; regmatch_t pmatch[nmatch]; berrno be; /* Look for .spool files but don't allow spaces */ const char *pat1 = "^[^ ]+\\.spool$"; /* Setup working directory prefix */ pm_strcpy(basename, me->working_directory); if (len > 0 && !IsPathSeparator(me->working_directory[len-1])) { pm_strcat(basename, "/"); } /* Compile regex expressions */ rc = regcomp(&preg1, pat1, REG_EXTENDED); if (rc != 0) { regerror(rc, &preg1, prbuf, sizeof(prbuf)); Pmsg2(000, _("Could not compile regex pattern \"%s\" ERR=%s\n"), pat1, prbuf); goto get_out2; } name_max = pathconf(".", _PC_NAME_MAX); if (name_max < 1024) { name_max = 1024; } if (!(dp = opendir(me->working_directory))) { berrno be; Pmsg2(000, "Failed to open working dir %s for cleanup: ERR=%s\n", me->working_directory, be.bstrerror()); goto get_out1; } while (1) { if (breaddir(dp, dname.addr()) != 0) { break; } /* Exclude any name with ., .., not my_name or containing a space */ if (strcmp(dname.c_str(), ".") == 0 || strcmp(dname.c_str(), "..") == 0 || strncmp(dname.c_str(), my_name, my_name_len) != 0) { Dmsg1(500, "Skipped: %s\n", dname.c_str()); continue; } /* Unlink files that match regex */ if (regexec(&preg1, dname.c_str(), nmatch, pmatch, 0) == 0) { pm_strcpy(cleanup, basename); pm_strcat(cleanup, dname); Dmsg1(500, "Unlink: %s\n", cleanup); unlink(cleanup); } } closedir(dp); get_out1: regfree(&preg1); get_out2: free_pool_memory(cleanup); free_pool_memory(basename); } void dbg_print_devices(FILE *fp) { DEVRES *device; foreach_res(device, R_DEVICE) { if (device->dev) { device->dev->dbg_print(fp); } } } /* * Here we attempt to init and open each device. This is done * once at startup in a separate thread. 
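* Summary of the loop below: for every Device resource we call init_dev(),
* attach a temporary DCR, fire the bsdEventDeviceInit plugin event and run a
* few sanity checks (ControlDevice reachable, MinimumBlockSize not larger than
* MaximumBlockSize).  Devices flagged AlwaysOpen are opened right away with
* first_open_device() after a bsdEventDeviceOpen event; devices with AutoMount
* that opened successfully also get their volume label read.  The temporary
* DCR is freed before moving on to the next device.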
*/ extern "C" void *device_initialization(void *arg) { DEVRES *device; DCR *dcr; JCR *jcr; DEVICE *dev; struct stat statp; pthread_detach(pthread_self()); jcr = new_jcr(sizeof(JCR), stored_free_jcr); new_plugins(jcr); /* instantiate plugins */ jcr->setJobType(JT_SYSTEM); /* Initialize SD start condition variable */ int errstat = pthread_cond_init(&jcr->job_start_wait, NULL); if (errstat != 0) { berrno be; Jmsg1(jcr, M_ABORT, 0, _("Unable to init job cond variable: ERR=%s\n"), be.bstrerror(errstat)); } LockRes(); foreach_res(device, R_DEVICE) { Dmsg1(90, "calling init_dev %s\n", device->hdr.name); dev = init_dev(NULL, device, false, statcollector); Dmsg2(10, "SD init done %s (0x%p)\n", device->hdr.name, dev); if (!dev) { Jmsg1(NULL, M_ERROR, 0, _("Could not initialize SD device \"%s\"\n"), device->hdr.name); continue; } jcr->dcr = dcr = new_dcr(jcr, NULL, dev); generate_plugin_event(jcr, bsdEventDeviceInit, dcr); /* Keep track of important events */ events_send_msg(jcr, "SD0002", EVENTS_TYPE_DAEMON, "*Daemon*", (intptr_t)get_first_port_host_order(me->sdaddrs), "Device initialization %s", device->hdr.name); /* With USB devices, it might not be here when we start */ if (device->control_name && stat(device->control_name, &statp) < 0) { berrno be; Jmsg2(jcr, M_ERROR, 0, _("Unable to stat ControlDevice %s: ERR=%s\n"), device->control_name, be.bstrerror()); } if ((device->lock_command && device->control_name) && !me->plugin_directory) { Jmsg0(jcr, M_ERROR_TERM, 0, _("No plugin directory configured for SAN shared storage\n")); } if (device->min_block_size > device->max_block_size) { Jmsg1(jcr, M_ERROR_TERM, 0, _("MaximumBlockSize must be greater or equal than MinimumBlockSize for Device \"%s\"\n"), dev->print_name()); } #ifdef HAVE_WIN32 if (device->cap_bits & CAP_SYNCONCLOSE) { device->cap_bits &= ~CAP_SYNCONCLOSE; /* Not available on windows */ } #endif /* * Note: be careful setting the slot here. If the drive * is shared storage, the contents can change before * the drive is used. 
*/ if (device->cap_bits & CAP_ALWAYSOPEN) { if (dev->is_autochanger()) { /* If autochanger set slot in dev sturcture */ get_autochanger_loaded_slot(dcr); } Dmsg1(20, "calling first_open_device %s\n", dev->print_name()); if (generate_plugin_event(jcr, bsdEventDeviceOpen, dcr) != bRC_OK) { Jmsg(jcr, M_FATAL, 0, _("generate_plugin_event(bsdEventDeviceOpen) Failed\n")); continue; } if (!first_open_device(dcr)) { Jmsg1(NULL, M_ERROR, 0, _("Could not open device %s\n"), dev->print_name()); Dmsg1(20, "Could not open device %s\n", dev->print_name()); generate_plugin_event(jcr, bsdEventDeviceClose, dcr); free_dcr(dcr); jcr->dcr = NULL; continue; } } else { /* If not always open, we don't know what is in the drive */ dev->clear_slot(); } if (device->cap_bits & CAP_AUTOMOUNT && dev->is_open()) { switch (dev->read_dev_volume_label(dcr)) { case VOL_OK: memcpy(&dev->VolCatInfo, &dcr->VolCatInfo, sizeof(dev->VolCatInfo)); volume_unused(dcr); /* mark volume "released" */ break; default: Jmsg1(NULL, M_WARNING, 0, _("Could not mount device %s\n"), dev->print_name()); break; } } free_dcr(dcr); jcr->dcr = NULL; } UnlockRes(); #ifndef HAVE_WIN32 dbg_add_hook(dbg_print_devices); #endif #ifdef xxx if (jcr->dcr) { Pmsg1(000, "free_dcr=%p\n", jcr->dcr); free_dcr(jcr->dcr); jcr->dcr = NULL; } #endif free_plugins(jcr); free_jcr(jcr); init_done = true; return NULL; } /* Clean up and then exit */ void terminate_stored(int sig) { static bool in_here = false; DEVRES *device; DEDUPRES *dedup; JCR *jcr; if (in_here) { /* prevent loops */ bmicrosleep(2, 0); /* yield */ exit(1); } in_here = true; debug_level = 0; /* turn off any debug */ stop_watchdog(); terminate_collector_threads(); if (sig == SIGTERM || sig == SIGINT) { /* normal shutdown request? or ^C */ /* * This is a normal shutdown request. We wiffle through * all open jobs canceling them and trying to wake * them up so that they will report back the correct * volume status. 
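* In the loop below each job is marked JS_Canceled, its File daemon socket
* (file_bsock) is flagged as timed out and the job thread is sent
* TIMEOUT_SIGNAL; for any device the job has blocked, wait_next_vol and
* wait_device_release are broadcast so that waiting threads notice the
* shutdown.  Console connections (JobId 0) are simply skipped.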
*/ foreach_jcr(jcr) { BSOCK *fd; if (jcr->JobId == 0) { free_jcr(jcr); continue; /* ignore console */ } if (jcr->dcr) { /* Make sure no device remains locked */ generate_plugin_event(jcr, bsdEventDeviceClose, jcr->dcr); } jcr->setJobStatus(JS_Canceled); fd = jcr->file_bsock; if (fd) { fd->set_timed_out(); jcr->my_thread_send_signal(TIMEOUT_SIGNAL); Dmsg1(100, "term_stored killing JobId=%d\n", jcr->JobId); /* ***FIXME*** wiffle through all dcrs */ if (jcr->dcr && jcr->dcr->dev && jcr->dcr->dev->blocked()) { pthread_cond_broadcast(&jcr->dcr->dev->wait_next_vol); Dmsg1(100, "JobId=%u broadcast wait_device_release\n", (uint32_t)jcr->JobId); pthread_cond_broadcast(&wait_device_release); } if (jcr->read_dcr && jcr->read_dcr->dev && jcr->read_dcr->dev->blocked()) { pthread_cond_broadcast(&jcr->read_dcr->dev->wait_next_vol); pthread_cond_broadcast(&wait_device_release); } bmicrosleep(0, 50000); } free_jcr(jcr); } bmicrosleep(0, 500000); /* give them 1/2 sec to clean up */ } if (!test_config) { write_state_file(me->working_directory, "bacula-sd", get_first_port_host_order(me->sdaddrs)); if (make_pid_file) { delete_pid_file(me->pid_directory, "bacula-sd", get_first_port_host_order(me->sdaddrs)); } /* Keep track of important events */ events_send_msg(NULL, "SD0003", EVENTS_TYPE_DAEMON, "*Daemon*", get_first_port_host_order(me->sdaddrs), "Storage shutdown %s (%s)", VERSION, LSMDATE); } Dmsg1(200, "In terminate_stored() sig=%d\n", sig); unload_plugins(); free_volume_lists(); free_daemon_message_queue(); foreach_res(device, R_DEVICE) { Dmsg2(10, "Term device %s %s\n", device->hdr.name, device->device_name); if (device->dev) { device->dev->clear_volhdr(); device->dev->term(NULL); device->dev = NULL; } else { Dmsg2(10, "No dev structure %s %s\n", device->hdr.name, device->device_name); } } foreach_res(dedup, R_DEDUP) { if (dedup->dedupengine) { dedup->dedupengine = NULL; // Should have been closed by the last device } else { Dmsg1(10, "No dev Dedupengine structure %s\n", dedup->hdr.name); } } if (server_tid_valid) { server_tid_valid = false; bnet_stop_thread_server(server_tid); } if (configfile) { free(configfile); configfile = NULL; } if (config) { delete config; config = NULL; } if (chk_dbglvl(10)) { print_memory_pool_stats(); } if (statcollector){ // statcollector->dump(); delete(statcollector); } term_msg(); cleanup_crypto(); term_reservations_lock(); free(res_head); res_head = NULL; close_memory_pool(); lmgr_cleanup_main(); sm_dump(false); /* dump orphaned buffers */ exit(sig); } bacula-15.0.3/src/stored/dedup_interface.h0000644000175000017500000000452514771010173020251 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ #ifndef DEDUP_INTERFACE_H_ #define DEDUP_INTERFACE_H_ bool is_dedup_server_side(DEVICE *dev, int32_t stream, uint64_t stream_len); bool is_dedup_ref(DEV_RECORD *rec, bool lazy); void list_dedupengines(char *cmd, STATUS_PKT *sp, const char *target); void dedup_get_limits(int64_t *nofile, int64_t *memlock); bool dedup_parse_filter(char *fltr); void dedup_filter_record(int verbose, DCR *dcr, DEV_RECORD *rec, char *dedup_msg, int len); /* Interface between DEDUP and SD */ class DedupStoredInterfaceBase { #if 0 protected: virtual void *do_rehydration_thread(void){return NULL;}; virtual int handle_rehydration_command(BSOCK *fd){return -1;}; #endif public: DedupStoredInterfaceBase(JCR *jcr, DedupEngine *dedupengine) {}; virtual ~DedupStoredInterfaceBase() {}; // rehydration virtual int start_rehydration(){return -1;}; virtual void *wait_rehydration(bool emergency=false){return NULL;}; virtual bool wait_flowcontrol_rehydration(int free_rec_count, int timeoutms){return false;}; virtual bool do_flowcontrol_rehydration(int free_rec_count, int retry_timeoutms=250){return false;}; virtual void warn_rehydration_eod() {}; virtual int record_rehydration(DCR *dcr, DEV_RECORD *rec, char *buf, POOLMEM *&errmsg, bool despite_of_error, int *chunk_size){return -1;}; virtual int add_circular_buf(DCR *dcr, DEV_RECORD *rec){return -1;}; virtual void set_checksum_after_rehydration(bool val) {}; virtual void set_use_index_for_recall(bool val) {}; virtual POOLMEM *get_msgbuf() { return NULL; }; virtual void unset_rehydration_srvside() { return; }; virtual bool is_rehydration_srvside() { return false; }; virtual bool is_thread_started() { return false; }; }; #endif /* DEDUP_INTERFACE_H_ */ bacula-15.0.3/src/stored/file_dev.h0000644000175000017500000000425514771010173016705 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * Inspired by vtape.h */ #ifndef __FILE_DEV_ #define __FILE_DEV_ class file_dev : public DEVICE { private: bool modify_fattr(const char *vol_name, int attr, bool set, POOLMEM **error); bool check_for_attr(const char *vol_name, int attr); bool set_fattr(const char *vol_name, int attr, POOLMEM **error); bool clear_fattr(const char *vol_name, int attr, POOLMEM **error); bool append_open_needed(const char *vol_name); bool is_attribute_supported(int attr); protected: virtual void get_volume_fpath(const char *vol_name, POOLMEM **buf); public: file_dev() { }; ~file_dev() { m_fd = -1; }; bool is_eod_valid(DCR *dcr); bool eod(DCR *dcr); bool open_device(DCR *dcr, int omode); const char *print_type(); virtual int device_specific_init(JCR *jcr, DEVRES *device); bool set_append_only(const char *vol_name, POOLMEM **error); bool clear_append_only(const char *vol_name, POOLMEM **error); bool set_immutable(const char *vol_name, POOLMEM **error); bool clear_immutable(const char *vol_name, POOLMEM **error); bool check_volume_protection_time(const char *vol_name); bool check_for_immutable(const char *vol_name); bool check_for_read_only(int fd, const char *vol_name); bool get_os_device_freespace(); bool is_fs_nearly_full(uint64_t threshold); int set_writable(int fd, const char *vol_name, POOLMEM **error); int set_readonly(int fd, const char *vol_name, POOLMEM **error); int set_atime(int fd, const char *vol_name, btime_t val, POOLMEM **error); int use_protect(); }; #endif /* __FILE_DEV_ */ bacula-15.0.3/src/stored/fifo_dev.h0000644000175000017500000000226314771010173016706 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Modified version of tape_dev.h */ #ifndef __FIFO_DEV_ #define __FIFO_DEV_ class fifo_dev : public tape_dev { public: fifo_dev() { }; ~fifo_dev() { }; /* DEVICE virtual functions that we redefine with our fifo code */ bool open_device(DCR *dcr, int omode); boffset_t lseek(DCR *dcr, boffset_t offset, int whence); bool truncate(DCR *dcr); const char *print_type(); virtual int device_specific_init(JCR *jcr, DEVRES *device); bool sync_data(DCR *dcr) { return true; }; // Not implemented in fifo device }; #endif /* __FIFO_DEV_ */ bacula-15.0.3/src/stored/aligned_write.c0000644000175000017500000000152214771010173017732 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * * record_write.c -- Volume (tape/disk) record write functions * * Kern Sibbald, April MMI * added BB02 format October MMII * added aligned format November MMXII */ bacula-15.0.3/src/stored/block.c0000644000175000017500000006607514771010173016225 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * * block.c -- tape block handling functions * * Kern Sibbald, March MMI * added BB02 format October MMII */ #include "bacula.h" #include "stored.h" #ifdef DEBUG_BLOCK_CHECKSUM static const bool debug_block_checksum = true; #else static const bool debug_block_checksum = false; #endif static int debug_io_error = 0; /* # blocks to write before creating I/O error */ #ifdef NO_TAPE_WRITE_TEST static const bool no_tape_write_test = true; #else static const bool no_tape_write_test = false; #endif uint32_t get_len_and_clear_block(DEV_BLOCK *block, DEVICE *dev, uint32_t &pad); uint32_t ser_block_header(DEV_BLOCK *block, bool do_checksum); bool unser_block_header(DCR *dcr, DEVICE *dev, DEV_BLOCK *block); /* * Write a block to the device, with locking and unlocking * * Returns: true on success * : false on failure * */ bool DCR::write_block_to_device(bool final) { bool ok = true; DCR *dcr = this; if (dcr->spooling) { Dmsg0(250, "Write to spool\n"); ok = write_block_to_spool_file(dcr); return ok; } if (!is_dev_locked()) { /* device already locked? */ /* note, do not change this to dcr->rLock */ dev->rLock(false); /* no, lock it */ } if (!check_for_newvol_or_newfile(dcr)) { ok = false; goto bail_out; /* fatal error */ } if (dcr->despooling && dev->device->volume_encryption != ET_NO && dev->crypto_device_ctx!=NULL) { block->dev = dev; } Dmsg1(500, "Write block to dev=%p\n", dcr->dev); if (!write_block_to_dev()) { Dmsg2(40, "*** Failed write_block_to_dev adata=%d block=%p\n", block->adata, block); if (job_canceled(jcr) || jcr->getJobType() == JT_SYSTEM) { ok = false; Dmsg2(40, "cancel=%d or SYSTEM=%d\n", job_canceled(jcr), jcr->getJobType() == JT_SYSTEM); } else { bool was_adata = false; if (was_adata) { dcr->set_ameta(); was_adata = true; } /* Flush any existing JobMedia info */ if (!(ok = dir_create_jobmedia_record(dcr))) { Jmsg(jcr, M_FATAL, 0, _("[SF0201] Error writing JobMedia record to catalog.\n")); } else { Dmsg1(40, "Calling fixup_device was_adata=%d...\n", was_adata); ok = fixup_device_block_write_error(dcr); } if (was_adata) { dcr->set_adata(); } } } if (ok && final && !dir_create_jobmedia_record(dcr)) { Jmsg(jcr, M_FATAL, 0, _("[SF0202] Error writing final JobMedia record to catalog.\n")); } bail_out: if (!dcr->is_dev_locked()) { /* did we lock dev above? 
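* (The matching lock at the top of this function is taken with a raw
* dev->rLock(false) precisely when the DCR does not already hold the device,
* so reaching this point with dcr->is_dev_locked() still false means the lock
* we hold is that raw one and must be released with the dev->Unlock() below,
* not with dcr->dunlock().)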
*/ /* note, do not change this to dcr->dunlock */ dev->Unlock(); /* unlock it now */ } return ok; } /* * Write a block to the device * * Returns: true on success or EOT * false on hard error */ bool DCR::write_block_to_dev() { ssize_t stat = 0; uint32_t wlen; /* length to write */ bool ok = true; DCR *dcr = this; uint64_t checksum; uint32_t pad; /* padding or zeros written */ boffset_t pos; char ed1[50]; if (no_tape_write_test) { empty_block(block); return true; } if (job_canceled(jcr)) { return false; } if (!dev->enabled) { Jmsg1(jcr, M_FATAL, 0, _("[SF0203] Cannot write block. Device is disabled. dev=%s\n"), dev->print_name()); return false; } ASSERT2(block->adata == dev->adata, "Block and dev adata not same"); Dmsg4(200, "fd=%d adata=%d bufp-buf=%d binbuf=%d\n", dev->fd(), block->adata, block->bufp-block->buf, block->binbuf); ASSERT2(block->binbuf == ((uint32_t)(block->bufp - block->buf)), "binbuf badly set"); if (is_block_empty(block)) { /* Does block have data in it? */ Dmsg1(50, "return write_block_to_dev no adata=%d data to write\n", block->adata); return true; } if (dev->at_weot()) { Dmsg1(50, "==== FATAL: At EOM with ST_WEOT. adata=%d.\n", dev->adata); dev->dev_errno = ENOSPC; Jmsg1(jcr, M_FATAL, 0, _("[SF0204] Cannot write block. Device at EOM. dev=%s\n"), dev->print_name()); return false; } if (!dev->can_append()) { dev->dev_errno = EIO; Jmsg1(jcr, M_FATAL, 0, _("[SF0205] Attempt to write on read-only Volume. dev=%s\n"), dev->print_name()); Dmsg1(50, "Attempt to write on read-only Volume. dev=%s\n", dev->print_name()); return false; } if (!dev->is_open()) { Jmsg1(jcr, M_FATAL, 0, _("[SF0206] Attempt to write on closed device=%s\n"), dev->print_name()); Dmsg1(50, "Attempt to write on closed device=%s\n", dev->print_name()); return false; } wlen = get_len_and_clear_block(block, dev, pad); block->block_len = wlen; dev->updateVolCatPadding(pad); checksum = ser_block_header(block, dev->do_checksum()); if (!dev->do_size_checks(dcr, block)) { Dmsg0(50, "Size check triggered. Cannot write block.\n"); return false; } dev->updateVolCatWrites(1); dump_block(dev, block, "before write"); #ifdef DEBUG_BLOCK_ZEROING uint32_t *bp = (uint32_t *)block->buf; if (bp[0] == 0 && bp[1] == 0 && bp[2] == 0 && block->buf[12] == 0) { Jmsg0(jcr, M_ABORT, 0, _("[SA0201] Write block header zeroed.\n")); } #endif /* * If Adata block, we must seek to the correct address */ if (block->adata) { ASSERT(dcr->dev->adata); uint64_t cur = dev->lseek(dcr, 0, SEEK_CUR); /* If we are going to create a hole, record it */ if (block->BlockAddr != cur) { dev->lseek(dcr, block->BlockAddr, SEEK_SET); Dmsg4(100, "Adata seek BlockAddr from %lld to %lld = %lld bytes adata_addr=%lld\n", cur, block->BlockAddr, block->BlockAddr - cur, dev->adata_addr); /* Insanity check */ if (block->BlockAddr > cur) { dev->updateVolCatHoleBytes(block->BlockAddr - cur); } else if (block->BlockAddr < cur) { Pmsg5(000, "Vol=%s cur=%lld BlockAddr=%lld adata=%d block=%p\n", dev->getVolCatName(), cur, block->BlockAddr, block->adata, block); Jmsg3(jcr, M_FATAL, 0, "[SF0207] Bad seek on adata Vol=%s BlockAddr=%lld DiskAddr=%lld. Multiple simultaneous Jobs?\n", dev->getVolCatName(), block->BlockAddr, cur); //Pmsg2(000, "HoleBytes would go negative cur=%lld blkaddr=%lld\n", cur, block->BlockAddr); } } } /* * Do write here, make a somewhat feeble attempt to recover from * I/O errors, or from the OS telling us it is busy. 
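* The loop below retries the write up to three more times when write() fails
* with EBUSY or EIO; on an EBUSY retry it first pauses with bmicrosleep(5, 0)
* and clears the device error state with dev->clrerror(-1) before trying
* again.  Any other failure falls through to the error handling further down.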
*/ int retry = 0; errno = 0; stat = 0; /* ***FIXME**** remove next line debug */ pos = dev->lseek(dcr, 0, SEEK_CUR); do { if (retry > 0 && stat == -1 && errno == EBUSY) { berrno be; Dmsg4(100, "===== write retry=%d stat=%d errno=%d: ERR=%s\n", retry, stat, errno, be.bstrerror()); bmicrosleep(5, 0); /* pause a bit if busy or lots of errors */ dev->clrerror(-1); } stat = dev->write(block->buf_out, (size_t)wlen); Dmsg4(100, "%s write() BlockAddr=%lld wlen=%d Vol=%s\n", block->adata?"Adata":"Ameta", block->BlockAddr, wlen, dev->VolHdr.VolumeName); } while (stat == -1 && (errno == EBUSY || errno == EIO) && retry++ < 3); /* ***FIXME*** remove 2 lines debug */ Dmsg2(100, "Wrote %d bytes at %s\n", wlen, dev->print_addr(ed1, sizeof(ed1), pos)); dump_block(dev, block, "After write"); if (debug_block_checksum) { uint32_t achecksum = ser_block_header(block, dev->do_checksum()); if (checksum != achecksum) { Jmsg2(jcr, M_ERROR, 0, _("[SA0201] Block checksum changed during write: before=%u after=%u\n"), checksum, achecksum); dump_block(dev, block, "with checksum error"); } } #ifdef DEBUG_BLOCK_ZEROING if (bp[0] == 0 && bp[1] == 0 && bp[2] == 0 && block->buf[12] == 0) { Jmsg0(jcr, M_ABORT, 0, _("[SA0202] Write block header zeroed.\n")); } #endif if (debug_io_error) { debug_io_error--; if (debug_io_error == 1) { /* trigger error */ stat = -1; dev->dev_errno = EIO; errno = EIO; debug_io_error = 0; /* turn off trigger */ } } if (stat == (ssize_t)wlen) { if (block->first_block) { // It was a volume label, it has been successfully written // next block should not hold a volume_label and should // be encrypted if encryption is enable block->first_block = false; } } else { /* Some devices simply report EIO when the volume is full. * With a little more thought we may be able to check * capacity and distinguish real errors and EOT * conditions. In any case, we probably want to * simulate an End of Medium. */ if (stat == -1) { berrno be; dev->clrerror(-1); /* saves errno in dev->dev_errno */ if (dev->dev_errno == 0) { dev->dev_errno = ENOSPC; /* out of space */ } if (dev->dev_errno != ENOSPC) { int etype = M_ERROR; if (block->adata) { etype = M_FATAL; } dev->VolCatInfo.VolCatErrors++; Jmsg5(jcr, etype, 0, _("%s Write error at %s on device %s Vol=%s. ERR=%s.\n"), etype==M_FATAL?"[SF0201]":"[SE0201]", dev->print_addr(ed1, sizeof(ed1)), dev->print_name(), dev->getVolCatName(), be.bstrerror()); if (dev->get_tape_alerts(this)) { dev->show_tape_alerts(this, list_long, list_last, alert_callback); } } } else { dev->dev_errno = ENOSPC; /* out of space */ } if (dev->dev_errno == ENOSPC) { dev->update_freespace(); if (dev->is_freespace_ok() && dev->free_space < dev->min_free_space) { dev->set_nospace(); Jmsg(jcr, M_WARNING, 0, _("[SW0201] Out of freespace caused End of Volume \"%s\" at %s on device %s. Write of %u bytes got %d.\n"), dev->getVolCatName(), dev->print_addr(ed1, sizeof(ed1)), dev->print_name(), wlen, stat); } else { dev->clear_nospace(); Jmsg(jcr, M_INFO, 0, _("[SI0202] End of Volume \"%s\" at %s on device %s. Write of %u bytes got %d.\n"), dev->getVolCatName(), dev->print_addr(ed1, sizeof(ed1)), dev->print_name(), wlen, stat); } } if (chk_dbglvl(100)) { berrno be; Dmsg7(90, "==== Write error. 
fd=%d size=%u rtn=%d dev_blk=%d blk_blk=%d errno=%d: ERR=%s\n", dev->fd(), wlen, stat, dev->block_num, block->BlockNumber, dev->dev_errno, be.bstrerror(dev->dev_errno)); } Dmsg0(40, "Calling terminate_writing_volume\n"); ok = terminate_writing_volume(dcr); if (ok) { reread_last_block(dcr); } return false; } /* We successfully wrote the block, now do housekeeping */ Dmsg2(1300, "VolCatBytes=%lld newVolCatBytes=%lld\n", dev->VolCatInfo.VolCatBytes, (dev->VolCatInfo.VolCatBytes+wlen)); if (!dev->setVolCatAdataBytes(block->BlockAddr + wlen)) { dev->updateVolCatBytes(wlen); Dmsg3(200, "AmetaBytes=%lld AdataBytes=%lld Bytes=%lld\n", dev->VolCatInfo.VolCatAmetaBytes, dev->VolCatInfo.VolCatAdataBytes, dev->VolCatInfo.VolCatBytes); } dev->updateVolCatExtraBytes(block->extra_bytes); /* Count bytes stored outside volumes */ dev->updateVolCatBlocks(1); dev->LastBlock = block->BlockNumber; block->BlockNumber++; /* Update dcr values */ if (dev->is_tape()) { dev->EndAddr = dev->get_full_addr(); if (dcr->EndAddr < dev->EndAddr) { dcr->EndAddr = dev->EndAddr; } dev->block_num++; } else { /* Save address of byte just written */ uint64_t addr = dev->file_addr + wlen - 1; if (dev->is_indexed()) { uint64_t full_addr = dev->get_full_addr(addr); if (full_addr < dcr->EndAddr) { Pmsg2(000, "Possible incorrect EndAddr oldEndAddr=%llu newEndAddr=%llu\n", dcr->EndAddr, full_addr); } dcr->EndAddr = full_addr; } if (dev->adata) { /* We really should use file_addr, but I am not sure it is correctly set */ Dmsg3(100, "Set BlockAddr from %lld to %lld adata_addr=%lld\n", block->BlockAddr, block->BlockAddr + wlen, dev->adata_addr); block->BlockAddr += wlen; dev->adata_addr = block->BlockAddr; } else { block->BlockAddr = dev->get_full_addr() + wlen; } } if (dev->is_indexed()) { if (dcr->VolMediaId != dev->VolCatInfo.VolMediaId) { Dmsg7(100, "JobMedia Vol=%s wrote=%d MediaId=%lld FI=%lu LI=%lu StartAddr=%lld EndAddr=%lld Wrote\n", dcr->VolumeName, dcr->WroteVol, dcr->VolMediaId, dcr->VolFirstIndex, dcr->VolLastIndex, dcr->StartAddr, dcr->EndAddr); } dcr->VolMediaId = dev->VolCatInfo.VolMediaId; Dmsg3(150, "VolFirstIndex=%d blockFirstIndex=%d Vol=%s\n", dcr->VolFirstIndex, block->FirstIndex, dcr->VolumeName); if (dcr->VolFirstIndex == 0 && block->FirstIndex > 0) { dcr->VolFirstIndex = block->FirstIndex; } if (block->LastIndex > (int32_t)dcr->VolLastIndex) { dcr->VolLastIndex = block->LastIndex; } dcr->WroteVol = true; /* Update FileMedia records with the block offset, we do it before the BlockAddr update */ dir_create_filemedia_record(dcr); } dev->file_addr += wlen; /* update file address */ dev->update_file_size(wlen); dev->usage += wlen; /* update usage counter */ if (dev->part > 0) { dev->part_size += wlen; } dev->setVolCatInfo(false); /* Needs update */ Dmsg2(1300, "write_block: wrote block %d bytes=%d\n", dev->block_num, wlen); empty_block(block); return true; } /* * Read block with locking * */ bool DCR::read_block_from_device(bool check_block_numbers) { bool ok; Dmsg0(250, "Enter read_block_from_device\n"); dev->rLock(false); ok = read_block_from_dev(check_block_numbers); dev->rUnlock(); Dmsg1(250, "Leave read_block_from_device. 
ok=%d\n", ok); return ok; } static void set_block_position(DCR *dcr, DEVICE *dev, DEV_BLOCK *block) { /* Used also by the Single Item Restore code to locate a particular block */ if (dev->is_tape()) { block->BlockAddr = dev->get_full_addr(); } else if (!dev->adata) { /* TODO: See if we just use !dev->adata for tapes */ /* Note: we only update the DCR values for ameta blocks * because all the indexing (JobMedia) is done with * ameta blocks/records, which may point to adata. */ block->BlockAddr = dev->get_full_addr(); } block->RecNum = 0; } /* * Read the next block into the block structure and unserialize * the block header. For a file, the block may be partially * or completely in the current buffer. * Note: in order for bscan to generate correct JobMedia records * we must be careful to update the EndAddr of the last byte read. */ bool DCR::read_block_from_dev(bool check_block_numbers) { ssize_t stat; int looping; int retry; int status; DCR *dcr = this; boffset_t pos; char ed1[50]; uint32_t data_len; if (job_canceled(jcr)) { Mmsg(dev->errmsg, _("Job failed or canceled.\n")); Dmsg1(000, "%s", dev->errmsg); block->read_len = 0; return false; } if (!dev->enabled) { Mmsg(dev->errmsg, _("[SF023] Cannot read block. Device is disabled. dev=%s\n"), dev->print_name()); Jmsg1(jcr, M_FATAL, 0, "%s", dev->errmsg); return false; } if (dev->at_eot()) { Mmsg(dev->errmsg, _("[SX0201] At EOT: attempt to read past end of Volume.\n")); Dmsg1(000, "%s", dev->errmsg); block->read_len = 0; return false; } looping = 0; if (!dev->is_open()) { Mmsg4(dev->errmsg, _("[SF0206] Attempt to read closed device: fd=%d at file:blk %u:%u on device %s\n"), dev->fd(), dev->file, dev->block_num, dev->print_name()); Jmsg(dcr->jcr, M_FATAL, 0, "%s", dev->errmsg); Pmsg4(000, "Fatal: dev=%p dcr=%p adata=%d bytes=%lld\n", dev, dcr, dev->adata, VolCatInfo.VolCatAdataRBytes); Pmsg1(000, "%s", dev->errmsg); block->read_len = 0; return false; } set_block_position(dcr, dev, block); reread: if (looping > 1) { dev->dev_errno = EIO; Mmsg1(dev->errmsg, _("[SE0202] Block buffer size looping problem on device %s\n"), dev->print_name()); Dmsg1(000, "%s", dev->errmsg); Jmsg(jcr, M_ERROR, 0, "%s", dev->errmsg); block->read_len = 0; return false; } /* See if we must open another part */ if (dev->at_eof() && !dev->open_next_part(dcr)) { if (dev->at_eof()) { /* EOF just seen? */ dev->set_eot(); /* yes, error => EOT */ } return false; } retry = 0; errno = 0; stat = 0; if (dev->adata) { dev->lseek(dcr, dcr->block->BlockAddr, SEEK_SET); } { /* debug block */ pos = dev->lseek(dcr, (boffset_t)0, SEEK_CUR); /* get curr pos */ Dmsg2(200, "Pos for read=%s %lld\n", dev->print_addr(ed1, sizeof(ed1), pos), pos); } data_len = 0; do { retry = 0; do { if ((retry > 0 && stat == -1 && errno == EBUSY)) { berrno be; Dmsg4(100, "===== read retry=%d stat=%d errno=%d: ERR=%s\n", retry, stat, errno, be.bstrerror()); bmicrosleep(10, 0); /* pause a bit if busy or lots of errors */ dev->clrerror(-1); } stat = dev->read(block->buf + data_len, (size_t)(block->buf_len - data_len)); if (stat > 0) { data_len += stat; } } while (stat == -1 && (errno == EBUSY || errno == EINTR || errno == EIO) && retry++ < 3); } while (data_len < block->buf_len && stat > 0 && dev->dev_type == B_FIFO_DEV); Dmsg4(110, "Read() adata=%d vol=%s nbytes=%d pos=%lld\n", block->adata, dev->VolHdr.VolumeName, stat < 0 ? 
stat : data_len, pos); if (stat < 0) { berrno be; dev->clrerror(-1); Dmsg2(90, "Read device fd=%d got: ERR=%s\n", dev->fd(), be.bstrerror()); block->read_len = 0; if (reading_label) { /* Trying to read Volume label */ Mmsg(dev->errmsg, _("[SE0203] The %sVolume=%s on device=%s appears to be unlabeled.%s\n"), dev->adata?"adata ":"", VolumeName, dev->print_name(), dev->is_fs_nearly_full(1048576)?" Warning: The filesystem is nearly full.":""); } else { Mmsg4(dev->errmsg, _("[SE0204] Read error on fd=%d at addr=%s on device %s. ERR=%s.\n"), dev->fd(), dev->print_addr(ed1, sizeof(ed1)), dev->print_name(), be.bstrerror()); } Jmsg(jcr, M_ERROR, 0, "%s", dev->errmsg); if (dev->get_tape_alerts(this)) { dev->show_tape_alerts(this, list_long, list_last, alert_callback); } if (dev->at_eof()) { /* EOF just seen? */ dev->set_eot(); /* yes, error => EOT */ } return false; } stat = data_len; if (stat == 0) { /* Got EOF ! */ pos = dev->lseek(dcr, (boffset_t)0, SEEK_CUR); /* get curr pos */ pos = dev->get_full_addr(pos); if (reading_label) { /* Trying to read Volume label */ Mmsg4(dev->errmsg, _("The %sVolume=%s on device=%s appears to be unlabeled.%s\n"), dev->adata?"adata ":"", VolumeName, dev->print_name(), dev->is_fs_nearly_full(1048576)?" Warning: The filesystem is nearly full.":""); } else { Mmsg4(dev->errmsg, _("Read zero %sbytes Vol=%s at %s on device %s.\n"), dev->adata?"adata ":"", dev->VolCatInfo.VolCatName, dev->print_addr(ed1, sizeof(ed1), pos), dev->print_name()); } block->read_len = 0; Dmsg1(100, "%s", dev->errmsg); if (dev->at_eof()) { /* EOF just seen? */ dev->set_eot(); /* yes, error => EOT */ } dev->set_ateof(); dev->file_addr = 0; dev->EndAddr = pos; if (dcr->EndAddr < dev->EndAddr) { dcr->EndAddr = dev->EndAddr; } Dmsg3(150, "==== Read zero bytes. adata=%d vol=%s at %s\n", dev->adata, dev->VolCatInfo.VolCatName, dev->print_addr(ed1, sizeof(ed1), pos)); return false; /* return eof */ } /* Continue here for successful read */ block->read_len = stat; /* save length read */ if (block->adata) { block->binbuf = block->read_len; block->block_len = block->read_len; } else { if (block->read_len == 80 && (dcr->VolCatInfo.LabelType != B_BACULA_LABEL || dcr->device->label_type != B_BACULA_LABEL)) { /* ***FIXME*** should check label */ Dmsg2(100, "Ignore 80 byte ANSI label at %u:%u\n", dev->file, dev->block_num); dev->clear_eof(); goto reread; /* skip ANSI/IBM label */ } if (block->read_len < BLKHDR2_LENGTH) { dev->dev_errno = EIO; Mmsg3(dev->errmsg, _("[SE0205] Volume data error at %s! Short block of %d bytes on device %s discarded.\n"), dev->print_addr(ed1, sizeof(ed1)), block->read_len, dev->print_name()); Jmsg(jcr, M_WARNING, 0, "%s", dev->errmsg); /* discard block, but continue */ dev->set_short_block(); block->read_len = block->binbuf = 0; Dmsg2(50, "set block=%p binbuf=%d\n", block, block->binbuf); return false; /* return error */ } status = jcr->getJobStatus(); if (!unser_block_header(this, dev, block)) { if (forge_on) { /* Skip the current byte to find a valid block */ dev->file_addr += 1; dev->update_file_size(1); /* Can be canceled at this point... */ if (jcr->is_canceled()) { jcr->forceJobStatus(status); } set_block_position(dcr, dev, block); dev->lseek(dcr, dcr->block->BlockAddr, SEEK_SET); goto reread; } return false; } } /* * If the block is bigger than the buffer, we reposition for * re-reading the block, allocate a buffer of the correct size, * and go re-read. 
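* On tape the reposition is a single backspace-record, dev->bsr(1); on disk it
* is an lseek() back by the number of bytes just read.  The block buffer is
* then reallocated to the block length announced in the header and we jump
* back to the "reread" label; the looping counter limits this recovery to a
* single attempt per block.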
*/ Dmsg3(150, "adata=%d block_len=%d buf_len=%d\n", block->adata, block->block_len, block->buf_len); if (block->block_len > block->buf_len) { dev->dev_errno = EIO; Mmsg2(dev->errmsg, _("[SE0206] Block length %u is greater than buffer %u. Attempting recovery.\n"), block->block_len, block->buf_len); Jmsg(jcr, M_ERROR, 0, "%s", dev->errmsg); Pmsg1(000, "%s", dev->errmsg); /* Attempt to reposition to re-read the block */ if (dev->is_tape()) { Dmsg0(250, "BSR for reread; block too big for buffer.\n"); if (!dev->bsr(1)) { Mmsg(dev->errmsg, "%s", dev->bstrerror()); if (dev->errmsg[0]) { Jmsg(jcr, M_ERROR, 0, "[SE0207] %s", dev->errmsg); } block->read_len = 0; return false; } } else { Dmsg0(250, "Seek to beginning of block for reread.\n"); boffset_t pos = dev->lseek(dcr, (boffset_t)0, SEEK_CUR); /* get curr pos */ pos -= block->read_len; dev->lseek(dcr, pos, SEEK_SET); dev->file_addr = pos; } Mmsg1(dev->errmsg, _("[SI0203] Setting block buffer size to %u bytes.\n"), block->block_len); Jmsg(jcr, M_INFO, 0, "%s", dev->errmsg); Pmsg1(000, "%s", dev->errmsg); /* Set new block length */ dev->max_block_size = block->block_len; block->buf_len = block->block_len; free_memory(block->buf); block->buf = get_memory(block->buf_len); empty_block(block); looping++; goto reread; /* re-read block with correct block size */ } if (block->block_len > block->read_len) { dev->dev_errno = EIO; Mmsg4(dev->errmsg, _("[SE0208] Volume data error at %u:%u! Short block of %d bytes on device %s discarded.\n"), dev->file, dev->block_num, block->read_len, dev->print_name()); Jmsg(jcr, M_WARNING, 0, "%s", dev->errmsg); /* discard block, but continue */ dev->set_short_block(); block->read_len = block->binbuf = 0; return false; /* return error */ } dev->clear_short_block(); dev->clear_eof(); dev->updateVolCatReads(1); dev->updateVolCatReadBytes(block->read_len); /* Update dcr values */ if (dev->is_tape()) { dev->EndAddr = dev->get_full_addr(); if (dcr->EndAddr < dev->EndAddr) { dcr->EndAddr = dev->EndAddr; } dev->block_num++; } else { /* We need to take care about a short block in EndBlock/File * computation */ uint32_t len = MIN(block->read_len, block->block_len); uint64_t addr = dev->get_full_addr() + len - 1; if (dev->is_indexed()) { if (addr > dcr->EndAddr) { dcr->EndAddr = addr; } } dev->EndAddr = addr; } if (dev->is_indexed()) { dcr->VolMediaId = dev->VolCatInfo.VolMediaId; } dev->file_addr += block->read_len; dev->update_file_size(block->read_len); dev->usage += block->read_len; /* update usage counter */ /* * If we read a short block on disk, * seek to beginning of next block. This saves us * from shuffling blocks around in the buffer. Take a * look at this from an efficiency stand point later, but * it should only happen once at the end of each job. * * I've been lseek()ing negative relative to SEEK_CUR for 30 * years now. However, it seems that with the new off_t definition, * it is not possible to seek negative amounts, so we use two * lseek(). One to get the position, then the second to do an * absolute positioning -- so much for efficiency. KES Sep 02. 
*/ Dmsg0(250, "At end of read block\n"); if (block->read_len > block->block_len && !dev->is_tape()) { char ed1[50]; boffset_t pos = dev->lseek(dcr, (boffset_t)0, SEEK_CUR); /* get curr pos */ Dmsg1(250, "Current lseek pos=%s\n", edit_int64(pos, ed1)); pos -= (block->read_len - block->block_len); dev->lseek(dcr, pos, SEEK_SET); Dmsg3(250, "Did lseek pos=%s blk_size=%d rdlen=%d\n", edit_int64(pos, ed1), block->block_len, block->read_len); dev->file_addr = pos; dev->set_file_size(pos); } Dmsg3(150, "Exit read_block read_len=%d block_len=%d binbuf=%d\n", block->read_len, block->block_len, block->binbuf); block->block_read = true; return true; } bacula-15.0.3/src/stored/authenticate.c0000644000175000017500000001176714771010173017607 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Authenticate caller * * Written by Kern Sibbald, October 2000 * */ #include "bacula.h" #include "stored.h" extern STORES *me; /* our Global resource */ const int dbglvl = 50; static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; SDAuthenticateDIR::SDAuthenticateDIR(JCR *jcr): AuthenticateBase(jcr, jcr->dir_bsock, dtSrv, dcSD, dcDIR) { } /* * Authenticate the Director */ bool SDAuthenticateDIR::authenticate_director() { BSOCK *dir = jcr->dir_bsock; DIRRES *director = jcr->director; DecodeRemoteTLSPSKNeed(bsock->tlspsk_remote); /* TLS Requirement */ CalcLocalTLSNeedFromRes(director->tls_enable, director->tls_require, director->tls_authenticate, director->tls_verify_peer, director->tls_allowed_cns, director->tls_ctx, director->tls_psk_enable, director->psk_ctx, director->password); /* Timeout authentication after 10 mins */ StartAuthTimeout(); /* Challenge the director */ if (!ServerCramMD5Authenticate(password)) { goto auth_fatal; } this->auth_success = HandleTLS(); auth_fatal: if (auth_success) { return send_hello_ok(dir); } send_sorry(dir); bmicrosleep(5, 0); return false; } class SDAuthenticateFD: public AuthenticateBase { public: SDAuthenticateFD(JCR *jcr, BSOCK *bsock): AuthenticateBase(jcr, bsock, dtSrv, dcSD, dcFD) { } virtual ~SDAuthenticateFD() {}; int authenticate_filed(int FDVersion); }; int authenticate_filed(JCR *jcr, BSOCK *fd, int FDVersion) { return SDAuthenticateFD(jcr, fd).authenticate_filed(FDVersion); } int SDAuthenticateFD::authenticate_filed(int FDVersion) { BSOCK *fd = bsock; DecodeRemoteTLSPSKNeed(bsock->tlspsk_remote); /* TLS Requirement */ CalcLocalTLSNeedFromRes(me->tls_enable, me->tls_require, me->tls_authenticate, me->tls_verify_peer, me->tls_allowed_cns, me->tls_ctx, me->tls_psk_enable, me->psk_ctx, jcr->sd_auth_key); /* Timeout authentication after 10 mins */ StartAuthTimeout(); /* Challenge the FD */ if (!ServerCramMD5Authenticate(jcr->sd_auth_key)) { goto auth_fatal; } this->auth_success = HandleTLS(); auth_fatal: /* Version 5 of the protocol is a bit special, it is used by both 6.0.0 * Enterprise version and 7.0.x Community version, but do not support the * same level of features. 
As nobody is using the 6.0.0 release, we can * be pretty sure that the FD in version 5 is a community FD. */ if (auth_success && (FDVersion >= 9 || FDVersion == 5)) { send_hello_ok(fd); } return auth_success; } class SDAuthenticateSD: public AuthenticateBase { public: SDAuthenticateSD(JCR *jcr): AuthenticateBase(jcr, jcr->store_bsock, dtCli, dcSD, dcSD) { /* TLS Requirement */ CalcLocalTLSNeedFromRes(me->tls_enable, me->tls_require, me->tls_authenticate, me->tls_verify_peer, me->tls_allowed_cns, me->tls_ctx, me->tls_psk_enable, me->psk_ctx, jcr->sd_auth_key); } virtual ~SDAuthenticateSD() {}; bool authenticate_storagedaemon(); }; bool send_hello_and_authenticate_sd(JCR *jcr, char *Job) { SDAuthenticateSD auth(jcr); /* TODO disable PSK/TLS when the SD talk to itself */ if (!send_hello_sd(jcr, Job, auth.GetTLSPSKLocalNeed())) { return false; } return auth.authenticate_storagedaemon(); } /* * First prove our identity to the Storage daemon, then * make him prove his identity. */ bool SDAuthenticateSD::authenticate_storagedaemon() { BSOCK *sd = jcr->store_bsock; int sd_version = 0; /* Timeout authentication after 10 mins */ StartAuthTimeout(); /* Challenge the FD */ if (!ClientCramMD5Authenticate(jcr->sd_auth_key)) { goto auth_fatal; } this->auth_success = HandleTLS(); if (!auth_success) { goto auth_fatal; } if (sd->recv() <= 0) { auth_success = false; goto auth_fatal; } sscanf(sd->msg, "3000 OK Hello %d", &sd_version); if (sd_version >= 1 && me->comm_compression) { sd->set_compress(); } else { sd->clear_compress(); Dmsg0(050, "*** No FD compression with SD\n"); } /* At this point, we have successfully connected */ auth_fatal: /* Destroy session key */ memset(jcr->sd_auth_key, 0, strlen(jcr->sd_auth_key)); /* Single thread all failures to avoid DOS */ if (!auth_success) { P(mutex); bmicrosleep(6, 0); V(mutex); } return auth_success; } bacula-15.0.3/src/stored/btape.c0000644000175000017500000025711514771010173016223 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * * Bacula Tape manipulation program * * Has various tape manipulation commands -- mostly for * use in determining how tapes really work. * * Kern Sibbald, April MM * * Note, this program reads stored.conf, and will only * talk to devices that are configured. 
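 *
 * Typical invocation (illustrative only; the configuration path and the
 * archive device name are examples, use the ones from your own
 * bacula-sd.conf):
 *
 *    btape -c /opt/bacula/etc/bacula-sd.conf /dev/nst0
 *
 * then run commands such as "test", "speed" or "fill" at the btape
 * prompt. Exactly one archive device name must be given, and it must
 * refer to a tape device defined in the configuration.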
*/ #include "bacula.h" #include "stored.h" #ifdef USE_VTAPE #include "vtape_dev.h" #endif extern bool parse_sd_config(CONFIG *config, const char *configfile, int exit_code); /* External subroutines */ extern void free_config_resources(); /* Exported variables */ int quit = 0; char buf[100000]; int bsize = TAPE_BSIZE; char VolName[MAX_NAME_LENGTH]; /* * If you change the format of the state file, * increment this value */ static uint32_t btape_state_level = 2; DEVICE *dev = NULL; DCR *dcr; DEVRES *device = NULL; int exit_code = 0; #define REC_SIZE 32768 /* Forward referenced subroutines */ static void do_tape_cmds(); static void helpcmd(); static void scancmd(); static void rewindcmd(); static void clearcmd(); static void wrcmd(); static void rrcmd(); static void rbcmd(); static void eodcmd(); static void fillcmd(); static void qfillcmd(); static void statcmd(); static void unfillcmd(); static int flush_block(DEV_BLOCK *block, int dump); static bool quickie_cb(DCR *dcr, DEV_RECORD *rec); static bool compare_blocks(DEV_BLOCK *last_block, DEV_BLOCK *block); static bool my_mount_next_read_volume(DCR *dcr); static void scan_blocks(); static void set_volume_name(const char *VolName, int volnum); static void rawfill_cmd(); static bool open_the_device(); static void autochangercmd(); static bool do_unfill(); /* Static variables */ static CONFIG *config; #define CONFIG_FILE "bacula-sd.conf" char *configfile = NULL; #define MAX_CMD_ARGS 30 static POOLMEM *cmd; static POOLMEM *args; static char *argk[MAX_CMD_ARGS]; static char *argv[MAX_CMD_ARGS]; static int argc; static int quickie_count = 0; static uint64_t write_count = 0; static BSR *bsr = NULL; static int signals = TRUE; static bool ok; static int stop = 0; static uint64_t vol_size; static uint64_t VolBytes; static time_t now; static int32_t file_index; static int end_of_tape = 0; static uint32_t LastBlock = 0; static uint32_t eot_block; static uint32_t eot_block_len; static uint32_t eot_FileIndex; static int dumped = 0; static DEV_BLOCK *last_block1 = NULL; static DEV_BLOCK *last_block2 = NULL; static DEV_BLOCK *last_block = NULL; static DEV_BLOCK *this_block = NULL; static DEV_BLOCK *first_block = NULL; static uint32_t last_file1 = 0; static uint32_t last_file2 = 0; static uint32_t last_file = 0; static uint32_t last_block_num1 = 0; static uint32_t last_block_num2 = 0; static uint32_t last_block_num = 0; static uint32_t BlockNumber = 0; static bool simple = true; static const char *VolumeName = NULL; static int vol_num = 0; static JCR *jcr = NULL; static void usage(); static void terminate_btape(int sig); int get_cmd(const char *prompt); class BtapeAskDirHandler: public BtoolsAskDirHandler { public: BtapeAskDirHandler() {} ~BtapeAskDirHandler() {} bool dir_find_next_appendable_volume(DCR *dcr); bool dir_ask_sysop_to_mount_volume(DCR *dcr, bool /* writing */); bool dir_ask_sysop_to_create_appendable_volume(DCR *dcr); bool dir_create_jobmedia_record(DCR *dcr, bool zero); }; /********************************************************************* * * Bacula tape testing program * */ int main(int margc, char *margv[]) { int ch, i; uint32_t x32, y32; uint64_t x64, y64; char buf[1000]; BtapeAskDirHandler askdir_handler; init_askdir_handler(&askdir_handler); setlocale(LC_ALL, ""); bindtextdomain("bacula", LOCALEDIR); textdomain("bacula"); init_stack_dump(); lmgr_init_thread(); /* Sanity checks */ if (TAPE_BSIZE % B_DEV_BSIZE != 0 || TAPE_BSIZE / B_DEV_BSIZE == 0) { Emsg2(M_ABORT, 0, _("Tape block size (%d) not multiple of system size (%d)\n"), 
TAPE_BSIZE, B_DEV_BSIZE); } if (TAPE_BSIZE != (1 << (ffs(TAPE_BSIZE)-1))) { Emsg1(M_ABORT, 0, _("Tape block size (%d) is not a power of 2\n"), TAPE_BSIZE); } if (sizeof(boffset_t) < 8) { Pmsg1(-1, _("\n\n!!!! Warning large disk addressing disabled. boffset_t=%d should be 8 or more !!!!!\n\n\n"), sizeof(boffset_t)); } x32 = 123456789; bsnprintf(buf, sizeof(buf), "%u", x32); i = bsscanf(buf, "%lu", &y32); if (i != 1 || x32 != y32) { Pmsg3(-1, _("32 bit printf/scanf problem. i=%d x32=%u y32=%u\n"), i, x32, y32); exit(1); } x64 = 123456789; x64 = x64 << 32; x64 += 123456789; bsnprintf(buf, sizeof(buf), "%" llu, x64); i = bsscanf(buf, "%llu", &y64); if (i != 1 || x64 != y64) { Pmsg3(-1, _("64 bit printf/scanf problem. i=%d x64=%" llu " y64=%" llu "\n"), i, x64, y64); exit(1); } printf(_("Tape block granularity is %d bytes.\n"), TAPE_BSIZE); working_directory = "/tmp"; my_name_is(margc, margv, "btape"); init_msg(NULL, NULL); OSDependentInit(); while ((ch = getopt(margc, margv, "b:w:c:d:psv?")) != -1) { switch (ch) { case 'b': /* bootstrap file */ bsr = parse_bsr(NULL, optarg); break; case 'w': working_directory = optarg; break; case 'c': /* specify config file */ if (configfile != NULL) { free(configfile); } configfile = bstrdup(optarg); break; case 'd': /* set debug level */ if (*optarg == 't') { dbg_timestamp = true; } else { debug_level = atoi(optarg); if (debug_level <= 0) { debug_level = 1; } } break; case 'p': forge_on = true; break; case 's': signals = false; break; case 'v': verbose++; break; case '?': default: helpcmd(); exit(0); } } margc -= optind; margv += optind; cmd = get_pool_memory(PM_FNAME); args = get_pool_memory(PM_FNAME); if (signals) { init_signals(terminate_btape); } if (configfile == NULL) { configfile = bstrdup(CONFIG_FILE); } daemon_start_time = time(NULL); config = New(CONFIG()); parse_sd_config(config, configfile, M_ERROR_TERM); setup_me(); load_sd_plugins(me->plugin_directory); /* See if we can open a device */ if (margc == 0) { Pmsg0(000, _("No archive name specified.\n")); usage(); exit(1); } else if (margc != 1) { Pmsg0(000, _("Improper number of arguments specified.\n")); usage(); exit(1); } jcr = setup_jcr("btape", margv[0], bsr, NULL, SD_APPEND); if (!jcr) { exit(1); } dev = jcr->dcr->dev; if (!dev) { exit(1); } if (!dev->is_tape()) { Pmsg0(000, _("btape only works with tape storage.\n")); usage(); exit(1); } dcr = jcr->dcr; if (!open_the_device()) { exit(1); } Dmsg0(200, "Do tape commands\n"); do_tape_cmds(); terminate_btape(exit_code); } static void terminate_btape(int stat) { Dsm_check(200); if (args) { free_pool_memory(args); args = NULL; } if (cmd) { free_pool_memory(cmd); cmd = NULL; } if (bsr) { free_bsr(bsr); } free_jcr(jcr); jcr = NULL; free_volume_lists(); if (dev) { dev->term(dcr); } if (configfile) { free(configfile); } if (config) { delete config; config = NULL; } if (chk_dbglvl(10)) print_memory_pool_stats(); Dmsg1(900, "=== free_block %p\n", this_block); free_block(this_block); this_block = NULL; stop_watchdog(); term_msg(); term_last_jobs_list(); free(res_head); res_head = NULL; close_memory_pool(); /* free memory in pool */ lmgr_cleanup_main(); sm_dump(false); exit(stat); } btime_t total_time=0; uint64_t total_size=0; static void init_total_speed() { total_size = 0; total_time = 0; } static void print_total_speed() { char ec1[50], ec2[50]; uint64_t rate = total_size / total_time; Pmsg2(000, _("Total Volume bytes=%sB. 
Total Write rate = %sB/s\n"), edit_uint64_with_suffix(total_size, ec1), edit_uint64_with_suffix(rate, ec2)); } static void init_speed() { time(&jcr->run_time); /* start counting time for rates */ jcr->JobBytes=0; } static void print_speed(uint64_t bytes) { char ec1[50], ec2[50]; uint64_t rate; now = time(NULL); now -= jcr->run_time; if (now <= 0) { now = 1; /* don't divide by zero */ } total_time += now; total_size += bytes; rate = bytes / now; Pmsg2(000, _("Volume bytes=%sB. Write rate = %sB/s\n"), edit_uint64_with_suffix(bytes, ec1), edit_uint64_with_suffix(rate, ec2)); } /* * Helper that fill a buffer with random data or not */ typedef enum { FILL_RANDOM, FILL_ZERO } fill_mode_t; static void fill_buffer(fill_mode_t mode, char *buf, uint32_t len) { int fd; switch (mode) { case FILL_RANDOM: fd = open("/dev/urandom", O_RDONLY); if (fd != -1) { read(fd, buf, len); close(fd); } else { uint32_t *p = (uint32_t *)buf; srandom(time(NULL)); for (uint32_t i=0; iblock) { dev->new_dcr_blocks(dcr); } dev->rLock(false); Dmsg1(200, "Opening device %s\n", dcr->VolumeName); if (!dev->open_device(dcr, OPEN_READ_WRITE)) { Emsg1(M_FATAL, 0, _("dev open failed: %s\n"), dev->print_errmsg()); ok = false; goto bail_out; } Pmsg1(000, _("open device %s: OK\n"), dev->print_name()); dev->set_append(); /* put volume in append mode */ bail_out: dev->Unlock(); return ok; } void quitcmd() { quit = 1; } /* * Write a label to the tape */ static void labelcmd() { if (VolumeName) { pm_strcpy(cmd, VolumeName); } else { if (!get_cmd(_("Enter Volume Name: "))) { return; } } if (!dev->is_open()) { if (!first_open_device(dcr)) { Pmsg1(0, _("Device open failed. ERR=%s\n"), dev->bstrerror()); } } dev->rewind(dcr); dev->write_volume_label(dcr, cmd, "Default", false,/*no relabel*/ true/* label now */); Pmsg1(-1, _("Wrote Volume label for volume \"%s\".\n"), cmd); } /* * Read the tape label */ static void readlabelcmd() { int64_t save_debug_level = debug_level; int stat; stat = dev->read_dev_volume_label(dcr); switch (stat) { case VOL_NO_LABEL: Pmsg0(0, _("Volume has no label.\n")); break; case VOL_OK: Pmsg0(0, _("Volume label read correctly.\n")); break; case VOL_IO_ERROR: Pmsg1(0, _("I/O error on device: ERR=%s"), dev->bstrerror()); break; case VOL_TYPE_ERROR: Pmsg1(0, _("Volume type error: ERR=%s\n"), dev->print_errmsg()); break; case VOL_NAME_ERROR: Pmsg0(0, _("Volume name error\n")); break; case VOL_CREATE_ERROR: Pmsg1(0, _("Error creating label. ERR=%s"), dev->bstrerror()); break; case VOL_VERSION_ERROR: Pmsg0(0, _("Volume version error.\n")); break; case VOL_LABEL_ERROR: Pmsg0(0, _("Bad Volume label type.\n")); break; default: Pmsg0(0, _("Unknown error.\n")); break; } debug_level = 20; dev->dump_volume_label(); debug_level = save_debug_level; } /* * Load the tape should have prevously been taken * off line, otherwise this command is not necessary. */ static void loadcmd() { if (!load_dev(dev)) { Pmsg1(0, _("Bad status from load. ERR=%s\n"), dev->bstrerror()); } else Pmsg1(0, _("Loaded %s\n"), dev->print_name()); } /* * Rewind the tape. */ static void rewindcmd() { if (!dev->rewind(dcr)) { Pmsg1(0, _("Bad status from rewind. ERR=%s\n"), dev->bstrerror()); dev->clrerror(-1); } else { Pmsg1(0, _("Rewound %s\n"), dev->print_name()); } } /* * Clear any tape error */ static void clearcmd() { dev->clrerror(-1); } /* * Write and end of file on the tape */ static void weofcmd() { int num = 1; if (argc > 1) { num = atoi(argk[1]); } if (num <= 0) { num = 1; } if (!dev->weof(NULL, num)) { Pmsg1(0, _("Bad status from weof. 
ERR=%s\n"), dev->bstrerror()); return; } else { if (num==1) { Pmsg1(0, _("Wrote 1 EOF to %s\n"), dev->print_name()); } else { Pmsg2(0, _("Wrote %d EOFs to %s\n"), num, dev->print_name()); } } } /* Go to the end of the medium -- raw command * The idea was orginally that the end of the Bacula * medium would be flagged differently. This is not * currently the case. So, this is identical to the * eodcmd(). */ static void eomcmd() { if (!dev->eod(dcr)) { Pmsg1(0, "%s", dev->bstrerror()); return; } else { Pmsg0(0, _("Moved to end of medium.\n")); } } /* * Go to the end of the medium (either hardware determined * or defined by two eofs. */ static void eodcmd() { eomcmd(); } /* * Backspace file */ static void bsfcmd() { int num = 1; if (argc > 1) { num = atoi(argk[1]); } if (num <= 0) { num = 1; } if (!dev->bsf(num)) { Pmsg1(0, _("Bad status from bsf. ERR=%s\n"), dev->bstrerror()); } else { Pmsg2(0, _("Backspaced %d file%s.\n"), num, num==1?"":"s"); } } /* * Backspace record */ static void bsrcmd() { int num = 1; if (argc > 1) { num = atoi(argk[1]); } if (num <= 0) { num = 1; } if (!dev->bsr(num)) { Pmsg1(0, _("Bad status from bsr. ERR=%s\n"), dev->bstrerror()); } else { Pmsg2(0, _("Backspaced %d record%s.\n"), num, num==1?"":"s"); } } /* * List device capabilities as defined in the * stored.conf file. */ static void capcmd() { printf(_("Configured device capabilities:\n")); printf("%sEOF ", dev->capabilities & CAP_EOF ? "" : "!"); printf("%sBSR ", dev->capabilities & CAP_BSR ? "" : "!"); printf("%sBSF ", dev->capabilities & CAP_BSF ? "" : "!"); printf("%sFSR ", dev->capabilities & CAP_FSR ? "" : "!"); printf("%sFSF ", dev->capabilities & CAP_FSF ? "" : "!"); printf("%sFASTFSF ", dev->capabilities & CAP_FASTFSF ? "" : "!"); printf("%sBSFATEOM ", dev->capabilities & CAP_BSFATEOM ? "" : "!"); printf("%sEOM ", dev->capabilities & CAP_EOM ? "" : "!"); printf("%sREM ", dev->capabilities & CAP_REM ? "" : "!"); printf("%sRACCESS ", dev->capabilities & CAP_RACCESS ? "" : "!"); printf("%sAUTOMOUNT ", dev->capabilities & CAP_AUTOMOUNT ? "" : "!"); printf("%sLABEL ", dev->capabilities & CAP_LABEL ? "" : "!"); printf("%sANONVOLS ", dev->capabilities & CAP_ANONVOLS ? "" : "!"); printf("%sALWAYSOPEN ", dev->capabilities & CAP_ALWAYSOPEN ? "" : "!"); printf("%sMTIOCGET ", dev->capabilities & CAP_MTIOCGET ? "" : "!"); printf("\n"); printf(_("Device status:\n")); printf("%sOPENED ", dev->is_open() ? "" : "!"); printf("%sTAPE ", dev->is_tape() ? "" : "!"); printf("%sLABEL ", dev->is_labeled() ? "" : "!"); printf("%sMALLOC ", dev->state & ST_MALLOC ? "" : "!"); printf("%sAPPEND ", dev->can_append() ? "" : "!"); printf("%sREAD ", dev->can_read() ? "" : "!"); printf("%sEOT ", dev->at_eot() ? "" : "!"); printf("%sWEOT ", dev->state & ST_WEOT ? "" : "!"); printf("%sEOF ", dev->at_eof() ? "" : "!"); printf("%sNEXTVOL ", dev->state & ST_NEXTVOL ? "" : "!"); printf("%sSHORT ", dev->state & ST_SHORT ? "" : "!"); printf("\n"); printf(_("Device parameters:\n")); printf("Device name: %s\n", dev->dev_name); printf("File=%u block=%u\n", dev->file, dev->block_num); printf("Min block=%u Max block=%u\n", dev->min_block_size, dev->max_block_size); printf(_("Status:\n")); statcmd(); } /* * Test writing larger and larger records. * This is a torture test for records. */ static void rectestcmd() { DEV_BLOCK *save_block; DEV_RECORD *rec; int i, blkno = 0; Pmsg0(0, _("Test writing larger and larger records.\n" "This is a torture test for records.\nI am going to write\n" "larger and larger records. 
It will stop when the record size\n" "plus the header exceeds the block size (by default about 64K)\n")); get_cmd(_("Do you want to continue? (y/n): ")); if (cmd[0] != 'y') { Pmsg0(000, _("Command aborted.\n")); return; } Dsm_check(200); save_block = dcr->block; dcr->block = dev->new_block(dcr); rec = new_record(); for (i=1; i<500000; i++) { rec->data = check_pool_memory_size(rec->data, i); memset(rec->data, i & 0xFF, i); rec->data_len = i; Dsm_check(200); if (write_record_to_block(dcr, rec)) { empty_block(dcr->block); blkno++; Pmsg2(0, _("Block %d i=%d\n"), blkno, i); } else { break; } Dsm_check(200); } free_record(rec); Dmsg0(900, "=== free_blocks\n"); free_block(dcr->block); dcr->block = save_block; /* restore block to dcr */ Dsm_check(200); } /* * This test attempts to re-read a block written by Bacula * normally at the end of the tape. Bacula will then back up * over the two eof marks, backup over the record and reread * it to make sure it is valid. Bacula can skip this validation * if you set "Backward space record = no" */ static bool re_read_block_test() { DEV_BLOCK *block = dcr->block; DEV_RECORD *rec; bool rc = false; int len; if (!(dev->capabilities & CAP_BSR)) { Pmsg0(-1, _("Skipping read backwards test because BSR turned off.\n")); return true; } Pmsg0(-1, _("\n=== Write, backup, and re-read test ===\n\n" "I'm going to write three records and an EOF\n" "then backup over the EOF and re-read the last record.\n" "Bacula does this after writing the last block on the\n" "tape to verify that the block was written correctly.\n\n" "This is not an *essential* feature ...\n\n")); rewindcmd(); empty_block(block); rec = new_record(); rec->data = check_pool_memory_size(rec->data, block->buf_len); len = rec->data_len = block->buf_len-100; memset(rec->data, 1, rec->data_len); if (!write_record_to_block(dcr, rec)) { Pmsg0(0, _("Error writing record to block.\n")); goto bail_out; } if (!dcr->write_block_to_dev()) { Pmsg0(0, _("Error writing block to device.\n")); goto bail_out; } else { Pmsg1(0, _("Wrote first record of %d bytes.\n"), rec->data_len); } memset(rec->data, 2, rec->data_len); if (!write_record_to_block(dcr, rec)) { Pmsg0(0, _("Error writing record to block.\n")); goto bail_out; } if (!dcr->write_block_to_dev()) { Pmsg0(0, _("Error writing block to device.\n")); goto bail_out; } else { Pmsg1(0, _("Wrote second record of %d bytes.\n"), rec->data_len); } memset(rec->data, 3, rec->data_len); if (!write_record_to_block(dcr, rec)) { Pmsg0(0, _("Error writing record to block.\n")); goto bail_out; } if (!dcr->write_block_to_dev()) { Pmsg0(0, _("Error writing block to device.\n")); goto bail_out; } else { Pmsg1(0, _("Wrote third record of %d bytes.\n"), rec->data_len); } weofcmd(); if (dev->has_cap(CAP_TWOEOF)) { weofcmd(); } if (!dev->bsf(1)) { Pmsg1(0, _("Backspace file failed! ERR=%s\n"), dev->bstrerror()); goto bail_out; } if (dev->has_cap(CAP_TWOEOF)) { if (!dev->bsf(1)) { Pmsg1(0, _("Backspace file failed! ERR=%s\n"), dev->bstrerror()); goto bail_out; } } Pmsg0(0, _("Backspaced over EOF OK.\n")); if (!dev->bsr(1)) { Pmsg1(0, _("Backspace record failed! ERR=%s\n"), dev->bstrerror()); goto bail_out; } Pmsg0(0, _("Backspace record OK.\n")); if (!dcr->read_block_from_dev(NO_BLOCK_NUMBER_CHECK)) { Pmsg1(0, _("Read block failed! ERR=%s\n"), dev->print_errmsg()); goto bail_out; } memset(rec->data, 0, rec->data_len); if (!read_record_from_block(dcr, rec)) { berrno be; Pmsg1(0, _("Read block failed! 
ERR=%s\n"), be.bstrerror(dev->dev_errno)); goto bail_out; } for (int i=0; idata[i] != 3) { Pmsg0(0, _("Bad data in record. Test failed!\n")); goto bail_out; } } Pmsg0(0, _("\nBlock re-read correct. Test succeeded!\n")); Pmsg0(-1, _("=== End Write, backup, and re-read test ===\n\n")); rc = true; bail_out: free_record(rec); if (!rc) { Pmsg0(0, _("This is not terribly serious since Bacula only uses\n" "this function to verify the last block written to the\n" "tape. Bacula will skip the last block verification\n" "if you add:\n\n" "Backward Space Record = No\n\n" "to your Storage daemon's Device resource definition.\n")); } return rc; } static bool speed_test_raw(fill_mode_t mode, uint64_t nb_gb, uint32_t nb) { DEV_BLOCK *block = dcr->block; int stat; uint32_t block_num = 0; int my_errno; char ed1[200]; nb_gb *= 1024*1024*1024; /* convert size from nb to GB */ init_total_speed(); fill_buffer(mode, block->buf, block->buf_len); Pmsg3(0, _("Begin writing %i files of %sB with raw blocks of %u bytes.\n"), nb, edit_uint64_with_suffix(nb_gb, ed1), block->buf_len); for (uint32_t j=0; jJobBytes < nb_gb; ) { stat = dev->d_write(dev->fd(), block->buf, block->buf_len); if (stat == (int)block->buf_len) { if ((block_num++ % 500) == 0) { printf("+"); fflush(stdout); } mix_buffer(mode, block->buf, block->buf_len); jcr->JobBytes += stat; } else { my_errno = errno; printf("\n"); berrno be; printf(_("Write failed at block %u. stat=%d ERR=%s\n"), block_num, stat, be.bstrerror(my_errno)); return false; } } printf("\n"); weofcmd(); print_speed(jcr->JobBytes); } print_total_speed(); printf("\n"); return true; } static bool speed_test_bacula(fill_mode_t mode, uint64_t nb_gb, uint32_t nb) { DEV_BLOCK *block = dcr->block; char ed1[200]; DEV_RECORD *rec; uint64_t last_bytes = dev->VolCatInfo.VolCatBytes; uint64_t written=0; nb_gb *= 1024*1024*1024; /* convert size from nb to GB */ init_total_speed(); empty_block(block); rec = new_record(); rec->data = check_pool_memory_size(rec->data, block->buf_len); rec->data_len = block->buf_len-100; fill_buffer(mode, rec->data, rec->data_len); Pmsg3(0, _("Begin writing %i files of %sB with blocks of %u bytes.\n"), nb, edit_uint64_with_suffix(nb_gb, ed1), block->buf_len); for (uint32_t j=0; jwrite_block_to_dev()) { Pmsg0(0, _("\nError writing block to device.\n")); goto bail_out; } if ((block->BlockNumber % 500) == 0) { printf("+"); fflush(stdout); } written += dev->VolCatInfo.VolCatBytes - last_bytes; last_bytes = dev->VolCatInfo.VolCatBytes; mix_buffer(mode, rec->data, rec->data_len); } printf("\n"); weofcmd(); print_speed(written); } print_total_speed(); printf("\n"); free_record(rec); return true; bail_out: free_record(rec); return false; } /* TODO: use UAContext */ static int btape_find_arg(const char *keyword) { for (int i=1; i 0) { file_size = atoi(argv[i]); if (file_size > 100) { Pmsg0(0, _("The file_size is too big, stop this test with Ctrl-c.\n")); } } i = btape_find_arg("nb_file"); if (i > 0) { nb_file = atoi(argv[i]); } if (btape_find_arg("skip_zero") > 0) { do_zero = false; } if (btape_find_arg("skip_random") > 0) { do_random = false; } if (btape_find_arg("skip_raw") > 0) { do_raw = false; } if (btape_find_arg("skip_block") > 0) { do_block = false; } if (do_raw) { dev->rewind(dcr); if (do_zero) { Pmsg0(0, _("Test with zero data, should give the " "maximum throughput.\n")); if (file_size) { ok(speed_test_raw(FILL_ZERO, file_size, nb_file)); } else { ok(speed_test_raw(FILL_ZERO, 1, nb_file)); ok(speed_test_raw(FILL_ZERO, 2, nb_file)); ok(speed_test_raw(FILL_ZERO, 4, 
nb_file)); } } if (do_random) { Pmsg0(0, _("Test with random data, should give the minimum " "throughput.\n")); if (file_size) { ok(speed_test_raw(FILL_RANDOM, file_size, nb_file)); } else { ok(speed_test_raw(FILL_RANDOM, 1, nb_file)); ok(speed_test_raw(FILL_RANDOM, 2, nb_file)); ok(speed_test_raw(FILL_RANDOM, 4, nb_file)); } } } if (do_block) { dev->rewind(dcr); if (do_zero) { Pmsg0(0, _("Test with zero data and bacula block structure.\n")); if (file_size) { ok(speed_test_bacula(FILL_ZERO, file_size, nb_file)); } else { ok(speed_test_bacula(FILL_ZERO, 1, nb_file)); ok(speed_test_bacula(FILL_ZERO, 2, nb_file)); ok(speed_test_bacula(FILL_ZERO, 4, nb_file)); } } if (do_random) { Pmsg0(0, _("Test with random data, should give the minimum " "throughput.\n")); if (file_size) { ok(speed_test_bacula(FILL_RANDOM, file_size, nb_file)); } else { ok(speed_test_bacula(FILL_RANDOM, 1, nb_file)); ok(speed_test_bacula(FILL_RANDOM, 2, nb_file)); ok(speed_test_bacula(FILL_RANDOM, 4, nb_file)); } } } } const int num_recs = 10000; static bool write_two_files() { DEV_BLOCK *block; DEV_RECORD *rec; int len, i, j; int *p; bool rc = false; /* bad return code */ DEVICE *dev = dcr->dev; /* * Set big max_file_size so that write_record_to_block * doesn't insert any additional EOF marks * Do calculation in 64 bits to avoid overflow. */ dev->max_file_size = (uint64_t)2 * (uint64_t)num_recs * (uint64_t)dev->max_block_size; Pmsg2(-1, _("\n=== Write, rewind, and re-read test ===\n\n" "I'm going to write %d records and an EOF\n" "then write %d records and an EOF, then rewind,\n" "and re-read the data to verify that it is correct.\n\n" "This is an *essential* feature ...\n\n"), num_recs, num_recs); block = dcr->block; empty_block(block); rec = new_record(); rec->data = check_pool_memory_size(rec->data, block->buf_len); rec->data_len = block->buf_len-100; len = rec->data_len/sizeof(i); if (!dev->rewind(dcr)) { Pmsg1(0, _("Bad status from rewind. ERR=%s\n"), dev->bstrerror()); goto bail_out; } for (i=1; i<=num_recs; i++) { p = (int *)rec->data; for (j=0; jwrite_block_to_dev()) { Pmsg0(0, _("Error writing block to device.\n")); goto bail_out; } } Pmsg2(0, _("Wrote %d blocks of %d bytes.\n"), num_recs, rec->data_len); weofcmd(); for (i=num_recs+1; i<=2*num_recs; i++) { p = (int *)rec->data; for (j=0; jwrite_block_to_dev()) { Pmsg0(0, _("Error writing block to device.\n")); goto bail_out; } } Pmsg2(0, _("Wrote %d blocks of %d bytes.\n"), num_recs, rec->data_len); weofcmd(); if (dev->has_cap(CAP_TWOEOF)) { weofcmd(); } rc = true; bail_out: free_record(rec); if (!rc) { exit_code = 1; } return rc; } /* * This test writes Bacula blocks to the tape in * several files. It then rewinds the tape and attepts * to read these blocks back checking the data. */ static bool write_read_test() { DEV_BLOCK *block; DEV_RECORD *rec; bool rc = false; int len, i, j; int *p; rec = new_record(); if (!write_two_files()) { goto bail_out; } block = dcr->block; empty_block(block); if (!dev->rewind(dcr)) { Pmsg1(0, _("Bad status from rewind. ERR=%s\n"), dev->bstrerror()); goto bail_out; } else { Pmsg0(0, _("Rewind OK.\n")); } rec->data = check_pool_memory_size(rec->data, block->buf_len); rec->data_len = block->buf_len-100; len = rec->data_len/sizeof(i); /* Now read it back */ for (i=1; i<=2*num_recs; i++) { read_again: if (!dcr->read_block_from_dev(NO_BLOCK_NUMBER_CHECK)) { if (dev_state(dev, ST_EOF)) { Pmsg0(-1, _("Got EOF on tape.\n")); if (i == num_recs+1) { goto read_again; } } Pmsg2(0, _("Read block %d failed! 
ERR=%s\n"), i, dev->print_errmsg()); goto bail_out; } memset(rec->data, 0, rec->data_len); if (!read_record_from_block(dcr, rec)) { berrno be; Pmsg2(0, _("Read record failed. Block %d! ERR=%s\n"), i, be.bstrerror(dev->dev_errno)); goto bail_out; } p = (int *)rec->data; for (j=0; jblock; DEV_RECORD *rec; bool rc = false; int len, j; bool more = true; int recno = 0; int file = 0, blk = 0; int *p; bool got_eof = false; Pmsg0(0, _("Block position test\n")); empty_block(block); rec = new_record(); rec->data = check_pool_memory_size(rec->data, block->buf_len); rec->data_len = block->buf_len-100; len = rec->data_len/sizeof(j); if (!dev->rewind(dcr)) { Pmsg1(0, _("Bad status from rewind. ERR=%s\n"), dev->bstrerror()); goto bail_out; } else { Pmsg0(0, _("Rewind OK.\n")); } while (more) { /* Set up next item to read based on where we are */ /* At each step, recno is what we print for the "block number" * and file, blk are the real positions to go to. */ switch (recno) { case 0: recno = 5; file = 0; blk = 4; break; case 5: recno = 201; file = 0; blk = 200; break; case 201: recno = num_recs; file = 0; blk = num_recs-1; break; case num_recs: recno = num_recs+1; file = 1; blk = 0; break; case num_recs+1: recno = num_recs+601; file = 1; blk = 600; break; case num_recs+601: recno = 2*num_recs; file = 1; blk = num_recs-1; break; case 2*num_recs: more = false; continue; } Pmsg2(-1, _("Reposition to file:block %d:%d\n"), file, blk); uint64_t addr = file; addr = (addr<<32) + blk; if (!dev->reposition(dcr, addr)) { Pmsg0(0, _("Reposition error.\n")); goto bail_out; } read_again: if (!dcr->read_block_from_dev(NO_BLOCK_NUMBER_CHECK)) { if (dev_state(dev, ST_EOF)) { Pmsg0(-1, _("Got EOF on tape.\n")); if (!got_eof) { got_eof = true; goto read_again; } } Pmsg4(0, _("Read block %d failed! file=%d blk=%d. ERR=%s\n\n"), recno, file, blk, dev->print_errmsg()); Pmsg0(0, _("This may be because the tape drive block size is not\n" " set to variable blocking as normally used by Bacula.\n" " Please see the Tape Testing chapter in the manual and \n" " look for using mt with defblksize and setoptions\n" "If your tape drive block size is correct, then perhaps\n" " your SCSI driver is *really* stupid and does not\n" " correctly report the file:block after a FSF. In this\n" " case try setting:\n" " Fast Forward Space File = no\n" " in your Device resource.\n")); goto bail_out; } memset(rec->data, 0, rec->data_len); if (!read_record_from_block(dcr, rec)) { berrno be; Pmsg1(0, _("Read record failed! ERR=%s\n"), be.bstrerror(dev->dev_errno)); goto bail_out; } p = (int *)rec->data; for (j=0; jhas_cap(CAP_TWOEOF)) { weofcmd(); } dev->close(dcr); /* release device */ if (!open_the_device()) { return -1; } rewindcmd(); Pmsg0(0, _("Now moving to end of medium.\n")); eodcmd(); Pmsg2(-1, _("We should be in file 3. I am at file %d. %s\n"), dev->file, dev->file == 3 ? _("This is correct!") : _("This is NOT correct!!!!")); if (dev->file != 3) { return -1; } Pmsg0(-1, _("\nNow the important part, I am going to attempt to append to the tape.\n\n")); wrcmd(); weofcmd(); if (dev->has_cap(CAP_TWOEOF)) { weofcmd(); } rewindcmd(); Pmsg0(-1, _("Done appending, there should be no I/O errors\n\n")); Pmsg0(-1, _("Doing Bacula scan of blocks:\n")); scan_blocks(); Pmsg0(-1, _("End scanning the tape.\n")); Pmsg2(-1, _("We should be in file 4. I am at file %d. %s\n"), dev->file, dev->file == 4 ? 
_("This is correct!") : _("This is NOT correct!!!!")); if (dev->file != 4) { return -2; } return 1; } /* * This test exercises the autochanger */ static int autochanger_test() { POOLMEM *results, *changer; int slot, status, loaded; int timeout = dcr->device->max_changer_wait; int sleep_time = 0; Dmsg1(100, "Max changer wait = %d sec\n", timeout); if (!dev->has_cap(CAP_AUTOCHANGER)) { return 1; } if (!(dcr->device && dcr->device->changer_name && dcr->device->changer_command)) { Pmsg0(-1, _("\nAutochanger enabled, but no name or no command device specified.\n")); return 1; } Pmsg0(-1, _("\nAh, I see you have an autochanger configured.\n" "To test the autochanger you must have a blank tape\n" " that I can write on in Slot 1.\n")); if (!get_cmd(_("\nDo you wish to continue with the Autochanger test? (y/n): "))) { return 0; } if (cmd[0] != 'y' && cmd[0] != 'Y') { return 0; } Pmsg0(-1, _("\n\n=== Autochanger test ===\n\n")); results = get_pool_memory(PM_MESSAGE); changer = get_pool_memory(PM_FNAME); try_again: slot = 1; dcr->VolCatInfo.Slot = slot; /* Find out what is loaded, zero means device is unloaded */ Pmsg0(-1, _("3301 Issuing autochanger \"loaded\" command.\n")); edit_device_codes(dcr, &changer, dcr->device->changer_command, "loaded"); status = run_program(changer, timeout, results); Dmsg3(100, "run_prog: %s stat=%d result=\"%s\"\n", changer, status, results); if (status == 0) { loaded = atoi(results); } else { berrno be; Pmsg1(-1, _("3991 Bad autochanger command: %s\n"), changer); Pmsg2(-1, _("3991 result=\"%s\": ERR=%s\n"), results, be.bstrerror(status)); goto bail_out; } if (loaded) { Pmsg1(-1, _("Slot %d loaded. I am going to unload it.\n"), loaded); } else { Pmsg0(-1, _("Nothing loaded in the drive. OK.\n")); } Dmsg1(100, "Results from loaded query=%s\n", results); if (loaded) { dcr->VolCatInfo.Slot = loaded; /* We are going to load a new tape, so close the device */ dev->close(dcr); Pmsg2(-1, _("3302 Issuing autochanger \"unload %d %d\" command.\n"), loaded, dev->drive_index); edit_device_codes(dcr, &changer, dcr->device->changer_command, "unload"); status = run_program(changer, timeout, results); Pmsg2(-1, _("unload status=%s %d\n"), status==0?_("OK"):_("Bad"), status); if (status != 0) { berrno be; Pmsg1(-1, _("3992 Bad autochanger command: %s\n"), changer); Pmsg2(-1, _("3992 result=\"%s\": ERR=%s\n"), results, be.bstrerror(status)); } } /* * Load the Slot 1 */ slot = 1; dcr->VolCatInfo.Slot = slot; Pmsg2(-1, _("3303 Issuing autochanger \"load %d %d\" command.\n"), slot, dev->drive_index); edit_device_codes(dcr, &changer, dcr->device->changer_command, "load"); Dmsg1(100, "Changer=%s\n", changer); dev->close(dcr); status = run_program(changer, timeout, results); if (status == 0) { Pmsg2(-1, _("3303 Autochanger \"load %d %d\" status is OK.\n"), slot, dev->drive_index); } else { berrno be; Pmsg1(-1, _("3993 Bad autochanger command: %s\n"), changer); Pmsg2(-1, _("3993 result=\"%s\": ERR=%s\n"), results, be.bstrerror(status)); goto bail_out; } if (!open_the_device()) { goto bail_out; } /* * Start with sleep_time 0 then increment by 30 seconds if we get * a failure. */ bmicrosleep(sleep_time, 0); if (!dev->rewind(dcr) || !dev->weof(dcr, 1)) { Pmsg1(0, _("Bad status from rewind. 
ERR=%s\n"), dev->bstrerror()); dev->clrerror(-1); Pmsg0(-1, _("\nThe test failed, probably because you need to put\n" "a longer sleep time in the mtx-script in the load) case.\n" "Adding a 30 second sleep and trying again ...\n")); sleep_time += 30; goto try_again; } else { Pmsg1(0, _("Rewound %s\n"), dev->print_name()); } if (!dev->weof(dcr, 1)) { Pmsg1(0, _("Bad status from weof. ERR=%s\n"), dev->bstrerror()); goto bail_out; } else { Pmsg1(0, _("Wrote EOF to %s\n"), dev->print_name()); } if (sleep_time) { Pmsg1(-1, _("\nThe test worked this time. Please add:\n\n" " sleep %d\n\n" "to your mtx-changer script in the load) case.\n\n"), sleep_time); } else { Pmsg0(-1, _("\nThe test autochanger worked!!\n\n")); } free_pool_memory(changer); free_pool_memory(results); return 1; bail_out: free_pool_memory(changer); free_pool_memory(results); Pmsg0(-1, _("You must correct this error or the Autochanger will not work.\n")); return -2; } static void autochangercmd() { autochanger_test(); } /* * This test assumes that the append test has been done, * then it tests the fsf function. */ static bool fsf_test() { bool set_off = false; Pmsg0(-1, _("\n\n=== Forward space files test ===\n\n" "This test is essential to Bacula.\n\n" "I'm going to write five files then test forward spacing\n\n")); argc = 1; rewindcmd(); wrcmd(); weofcmd(); /* end file 0 */ wrcmd(); wrcmd(); weofcmd(); /* end file 1 */ wrcmd(); wrcmd(); wrcmd(); weofcmd(); /* end file 2 */ wrcmd(); wrcmd(); weofcmd(); /* end file 3 */ wrcmd(); weofcmd(); /* end file 4 */ if (dev->has_cap(CAP_TWOEOF)) { weofcmd(); } test_again: rewindcmd(); Pmsg0(0, _("Now forward spacing 1 file.\n")); if (!dev->fsf(1)) { Pmsg1(0, _("Bad status from fsr. ERR=%s\n"), dev->bstrerror()); goto bail_out; } Pmsg2(-1, _("We should be in file 1. I am at file %d. %s\n"), dev->file, dev->file == 1 ? _("This is correct!") : _("This is NOT correct!!!!")); if (dev->file != 1) { goto bail_out; } Pmsg0(0, _("Now forward spacing 2 files.\n")); if (!dev->fsf(2)) { Pmsg1(0, _("Bad status from fsr. ERR=%s\n"), dev->bstrerror()); goto bail_out; } Pmsg2(-1, _("We should be in file 3. I am at file %d. %s\n"), dev->file, dev->file == 3 ? _("This is correct!") : _("This is NOT correct!!!!")); if (dev->file != 3) { goto bail_out; } rewindcmd(); Pmsg0(0, _("Now forward spacing 4 files.\n")); if (!dev->fsf(4)) { Pmsg1(0, _("Bad status from fsr. ERR=%s\n"), dev->bstrerror()); goto bail_out; } Pmsg2(-1, _("We should be in file 4. I am at file %d. %s\n"), dev->file, dev->file == 4 ? _("This is correct!") : _("This is NOT correct!!!!")); if (dev->file != 4) { goto bail_out; } if (set_off) { Pmsg0(-1, _("The test worked this time. Please add:\n\n" " Fast Forward Space File = no\n\n" "to your Device resource for this drive.\n")); } Pmsg0(-1, "\n"); Pmsg0(0, _("Now forward spacing 1 more file.\n")); if (!dev->fsf(1)) { Pmsg1(0, _("Bad status from fsr. ERR=%s\n"), dev->bstrerror()); } Pmsg2(-1, _("We should be in file 5. I am at file %d. %s\n"), dev->file, dev->file == 5 ? _("This is correct!") : _("This is NOT correct!!!!")); if (dev->file != 5) { goto bail_out; } Pmsg0(-1, _("\n=== End Forward space files test ===\n\n")); return true; bail_out: Pmsg0(-1, _("\nThe forward space file test failed.\n")); if (dev->has_cap(CAP_FASTFSF)) { Pmsg0(-1, _("You have Fast Forward Space File enabled.\n" "I am turning it off then retrying the test.\n")); dev->clear_cap(CAP_FASTFSF); set_off = true; goto test_again; } Pmsg0(-1, _("You must correct this error or Bacula will not work.\n" "Some systems, e.g. 
OpenBSD, require you to set\n" " Use MTIOCGET= no\n" "in your device resource. Use with caution.\n")); return false; } /* * This is a general test of Bacula's functions * needed to read and write the tape. */ static void testcmd() { int stat; if (!write_read_test()) { exit_code = 1; return; } if (!position_test()) { exit_code = 1; return; } stat = append_test(); if (stat == 1) { /* OK get out */ goto all_done; } if (stat == -1) { /* first test failed */ if (dev->has_cap(CAP_EOM) || dev->has_cap(CAP_FASTFSF)) { Pmsg0(-1, _("\nAppend test failed. Attempting again.\n" "Setting \"Hardware End of Medium = no\n" " and \"Fast Forward Space File = no\n" "and retrying append test.\n\n")); dev->clear_cap(CAP_EOM); /* turn off eom */ dev->clear_cap(CAP_FASTFSF); /* turn off fast fsf */ stat = append_test(); if (stat == 1) { Pmsg0(-1, _("\n\nIt looks like the test worked this time, please add:\n\n" " Hardware End of Medium = No\n\n" " Fast Forward Space File = No\n" "to your Device resource in the Storage conf file.\n")); goto all_done; } if (stat == -1) { Pmsg0(-1, _("\n\nThat appears *NOT* to have corrected the problem.\n")); goto failed; } /* Wrong count after append */ if (stat == -2) { Pmsg0(-1, _("\n\nIt looks like the append failed. Attempting again.\n" "Setting \"BSF at EOM = yes\" and retrying append test.\n")); dev->capabilities |= CAP_BSFATEOM; /* backspace on eom */ stat = append_test(); if (stat == 1) { Pmsg0(-1, _("\n\nIt looks like the test worked this time, please add:\n\n" " Hardware End of Medium = No\n" " Fast Forward Space File = No\n" " BSF at EOM = yes\n\n" "to your Device resource in the Storage conf file.\n")); goto all_done; } } } failed: Pmsg0(-1, _("\nAppend test failed.\n\n" "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" "Unable to correct the problem. You MUST fix this\n" "problem before Bacula can use your tape drive correctly\n" "\nPerhaps running Bacula in fixed block mode will work.\n" "Do so by setting:\n\n" "Minimum Block Size = nnn\n" "Maximum Block Size = nnn\n\n" "in your Storage daemon's Device definition.\n" "nnn must match your tape driver's block size, which\n" "can be determined by reading your tape manufacturers\n" "information, and the information on your kernel dirver.\n" "Fixed block sizes, however, are not normally an ideal solution.\n" "\n" "Some systems, e.g. OpenBSD, require you to set\n" " Use MTIOCGET= no\n" "in your device resource. Use with caution.\n")); exit_code = 1; return; } all_done: Pmsg0(-1, _("\nThe above Bacula scan should have output similar to what follows.\n" "Please double check it ...\n" "=== Sample correct output ===\n" "1 block of 64448 bytes in file 1\n" "End of File mark.\n" "2 blocks of 64448 bytes in file 2\n" "End of File mark.\n" "3 blocks of 64448 bytes in file 3\n" "End of File mark.\n" "1 block of 64448 bytes in file 4\n" "End of File mark.\n" "Total files=4, blocks=7, bytes = 451,136\n" "=== End sample correct output ===\n\n" "If the above scan output is not identical to the\n" "sample output, you MUST correct the problem\n" "or Bacula will not be able to write multiple Jobs to \n" "the tape.\n\n")); if (stat == 1) { if (!re_read_block_test()) { exit_code = 1; } } if (!fsf_test()) { /* do fast forward space file test */ exit_code = 1; } autochanger_test(); /* do autochanger test */ } /* Forward space a file */ static void fsfcmd() { int num = 1; if (argc > 1) { num = atoi(argk[1]); } if (num <= 0) { num = 1; } if (!dev->fsf(num)) { Pmsg1(0, _("Bad status from fsf. 
ERR=%s\n"), dev->bstrerror()); return; } if (num == 1) { Pmsg0(0, _("Forward spaced 1 file.\n")); } else { Pmsg1(0, _("Forward spaced %d files.\n"), num); } } /* Forward space a record */ static void fsrcmd() { int num = 1; if (argc > 1) { num = atoi(argk[1]); } if (num <= 0) { num = 1; } if (!dev->fsr(num)) { Pmsg1(0, _("Bad status from fsr. ERR=%s\n"), dev->bstrerror()); return; } if (num == 1) { Pmsg0(0, _("Forward spaced 1 record.\n")); } else { Pmsg1(0, _("Forward spaced %d records.\n"), num); } } /* * Read a Bacula block from the tape */ static void rbcmd() { dev->open_device(dcr, OPEN_READ_ONLY); dcr->read_block_from_dev(NO_BLOCK_NUMBER_CHECK); } /* * Write a Bacula block to the tape */ static void wrcmd() { DEV_BLOCK *block = dcr->block; DEV_RECORD *rec = dcr->rec; int i; if (!dev->is_open()) { open_the_device(); } Dsm_check(200); empty_block(block); if (verbose > 1) { dump_block(dcr->dev, block, "test"); } i = block->buf_len - 100; ASSERT (i > 0); rec->data = check_pool_memory_size(rec->data, i); memset(rec->data, i & 0xFF, i); rec->data_len = i; Dsm_check(200); if (!write_record_to_block(dcr, rec)) { Pmsg0(0, _("Error writing record to block.\n")); goto bail_out; } if (!dcr->write_block_to_dev()) { Pmsg0(0, _("Error writing block to device.\n")); goto bail_out; } else { Pmsg1(0, _("Wrote one record of %d bytes.\n"), i); } Pmsg0(0, _("Wrote block to device.\n")); bail_out: Dsm_check(200); } /* * Read a record from the tape */ static void rrcmd() { char *buf; int stat, len; if (!get_cmd(_("Enter length to read: "))) { return; } len = atoi(cmd); if (len < 0 || len > 1000000) { Pmsg0(0, _("Bad length entered, using default of 1024 bytes.\n")); len = 1024; } buf = (char *)malloc(len); stat = read(dev->fd(), buf, len); if (stat > 0 && stat <= len) { errno = 0; } berrno be; Pmsg3(0, _("Read of %d bytes gives stat=%d. ERR=%s\n"), len, stat, be.bstrerror()); free(buf); } /* * Scan tape by reading block by block. Report what is * on the tape. Note, this command does raw reads, and as such * will not work with fixed block size devices. */ static void scancmd() { int stat; int blocks, tot_blocks, tot_files; int block_size; uint64_t bytes; char ec1[50]; blocks = block_size = tot_blocks = 0; bytes = 0; if (dev->at_eot()) { Pmsg0(0, _("End of tape\n")); return; } dev->update_pos(dcr); tot_files = dev->file; Pmsg1(0, _("Starting scan at file %u\n"), dev->file); for (;;) { if ((stat = read(dev->fd(), buf, sizeof(buf))) < 0) { berrno be; dev->clrerror(-1); Mmsg2(dev->errmsg, _("read error on %s. ERR=%s.\n"), dev->dev_name, be.bstrerror()); Pmsg2(0, _("Bad status from read %d. 
ERR=%s\n"), stat, dev->bstrerror()); if (blocks > 0) { if (blocks==1) { printf(_("1 block of %d bytes in file %d\n"), block_size, dev->file); } else { printf(_("%d blocks of %d bytes in file %d\n"), blocks, block_size, dev->file); } } return; } Dmsg1(200, "read status = %d\n", stat); /* sleep(1); */ if (stat != block_size) { dev->update_pos(dcr); if (blocks > 0) { if (blocks==1) { printf(_("1 block of %d bytes in file %d\n"), block_size, dev->file); } else { printf(_("%d blocks of %d bytes in file %d\n"), blocks, block_size, dev->file); } blocks = 0; } block_size = stat; } if (stat == 0) { /* EOF */ dev->update_pos(dcr); printf(_("End of File mark.\n")); /* Two reads of zero means end of tape */ if (dev->at_eof()) { dev->set_ateot(); } else { dev->set_ateof(); } if (dev->at_eot()) { printf(_("End of tape\n")); break; } } else { /* Got data */ dev->clear_eof(); blocks++; tot_blocks++; bytes += stat; } } dev->update_pos(dcr); tot_files = dev->file - tot_files; printf(_("Total files=%d, blocks=%d, bytes = %s\n"), tot_files, tot_blocks, edit_uint64_with_commas(bytes, ec1)); } /* * Scan tape by reading Bacula block by block. Report what is * on the tape. This function reads Bacula blocks, so if your * Device resource is correctly defined, it should work with * either variable or fixed block sizes. */ static void scan_blocks() { int blocks, tot_blocks, tot_files; uint32_t block_size; uint64_t bytes; DEV_BLOCK *block = dcr->block; char ec1[50]; char buf1[100], buf2[100]; blocks = block_size = tot_blocks = 0; bytes = 0; empty_block(block); dev->update_pos(dcr); tot_files = dev->file; for (;;) { if (!dcr->read_block_from_device(NO_BLOCK_NUMBER_CHECK)) { Dmsg1(100, "!read_block(): ERR=%s\n", dev->bstrerror()); if (dev->at_eot()) { if (blocks > 0) { if (blocks==1) { printf(_("1 block of %d bytes in file %d\n"), block_size, dev->file); } else { printf(_("%d blocks of %d bytes in file %d\n"), blocks, block_size, dev->file); } blocks = 0; } goto bail_out; } if (dev->state & ST_EOF) { if (blocks > 0) { if (blocks==1) { printf(_("1 block of %d bytes in file %d\n"), block_size, dev->file); } else { printf(_("%d blocks of %d bytes in file %d\n"), blocks, block_size, dev->file); } blocks = 0; } printf(_("End of File mark.\n")); continue; } if (dev->state & ST_SHORT) { if (blocks > 0) { if (blocks==1) { printf(_("1 block of %d bytes in file %d\n"), block_size, dev->file); } else { printf(_("%d blocks of %d bytes in file %d\n"), blocks, block_size, dev->file); } blocks = 0; } printf(_("Short block read.\n")); continue; } printf(_("Error reading block. 
ERR=%s\n"), dev->print_errmsg()); goto bail_out; } if (block->block_len != block_size) { if (blocks > 0) { if (blocks==1) { printf(_("1 block of %d bytes in file %d\n"), block_size, dev->file); } else { printf(_("%d blocks of %d bytes in file %d\n"), blocks, block_size, dev->file); } blocks = 0; } block_size = block->block_len; } blocks++; tot_blocks++; bytes += block->block_len; Dmsg7(100, "Blk_blk=%u file,blk=%u,%u blen=%u bVer=%d SessId=%u SessTim=%u\n", block->BlockNumber, dev->file, dev->block_num, block->block_len, block->BlockVer, block->VolSessionId, block->VolSessionTime); if (verbose == 1) { DEV_RECORD *rec = new_record(); read_record_from_block(dcr, rec); Pmsg9(-1, _("Block=%u file,blk=%u,%u blen=%u First rec FI=%s SessId=%u SessTim=%u Strm=%s rlen=%d\n"), block->BlockNumber, dev->file, dev->block_num, block->block_len, FI_to_ascii(buf1, rec->FileIndex), rec->VolSessionId, rec->VolSessionTime, stream_to_ascii(buf2, rec->Stream, rec->FileIndex), rec->data_len); rec->remainder = 0; free_record(rec); } else if (verbose > 1) { dump_block(dcr->dev, block, ""); } } bail_out: tot_files = dev->file - tot_files; printf(_("Total files=%d, blocks=%d, bytes = %s\n"), tot_files, tot_blocks, edit_uint64_with_commas(bytes, ec1)); } static void statcmd() { int64_t debug = debug_level; debug_level = 30; Pmsg2(0, _("Device status: %u. ERR=%s\n"), status_dev(dev), dev->bstrerror()); #ifdef xxxx dump_volume_label(dev); #endif debug_level = debug; } /* * First we label the tape, then we fill * it with data get a new tape and write a few blocks. */ static void fillcmd() { DEV_RECORD rec; DEV_BLOCK *block = dcr->block; char ec1[50], ec2[50]; char buf1[100], buf2[100]; uint64_t write_eof; uint64_t rate; uint32_t min_block_size; int fd; struct tm tm; ok = true; stop = 0; vol_num = 0; last_file = 0; last_block_num = 0; BlockNumber = 0; exit_code = 0; Pmsg1(-1, _("\n" "This command simulates Bacula writing to a tape.\n" "It requires either one or two blank tapes, which it\n" "will label and write.\n\n" "If you have an autochanger configured, it will use\n" "the tapes that are in slots 1 and 2, otherwise, you will\n" "be prompted to insert the tapes when necessary.\n\n" "It will print a status approximately\n" "every 322 MB, and write an EOF every %s. If you have\n" "selected the simple test option, after writing the first tape\n" "it will rewind it and re-read the last block written.\n\n" "If you have selected the multiple tape test, when the first tape\n" "fills, it will ask for a second, and after writing a few more \n" "blocks, it will stop. Then it will begin re-reading the\n" "two tapes.\n\n" "This may take a long time -- hours! 
...\n\n"), edit_uint64_with_suffix(dev->max_file_size, buf1)); get_cmd(_("Do you want to run the simplified test (s) with one tape\n" "or the complete multiple tape (m) test: (s/m) ")); if (cmd[0] == 's') { Pmsg0(-1, _("Simple test (single tape) selected.\n")); simple = true; } else if (cmd[0] == 'm') { Pmsg0(-1, _("Multiple tape test selected.\n")); simple = false; } else { Pmsg0(000, _("Command aborted.\n")); exit_code = 1; return; } Dmsg1(20, "Begin append device=%s\n", dev->print_name()); Dmsg1(20, "MaxVolSize=%s\n", edit_uint64(dev->max_volume_size, ec1)); /* Use fixed block size to simplify read back */ min_block_size = dev->min_block_size; dev->min_block_size = dev->max_block_size; write_eof = dev->max_file_size / REC_SIZE; /*compute when we add EOF*/ set_volume_name("TestVolume1", 1); dir_ask_sysop_to_create_appendable_volume(dcr); dev->set_append(); /* force volume to be relabeled */ /* * Acquire output device for writing. Note, after acquiring a * device, we MUST release it, which is done at the end of this * subroutine. */ Dmsg0(100, "just before acquire_device\n"); dcr->setVolCatName(dcr->VolumeName); if (!acquire_device_for_append(dcr)) { Pmsg0(000, "Could not acquire_device_for_append()\n"); jcr->setJobStatus(JS_ErrorTerminated); exit_code = 1; return; } block = jcr->dcr->block; Dmsg0(100, "Just after acquire_device_for_append\n"); /* * Write Begin Session Record */ if (!write_session_label(dcr, SOS_LABEL)) { jcr->setJobStatus(JS_ErrorTerminated); Jmsg1(jcr, M_FATAL, 0, _("Write session label failed. ERR=%s\n"), dev->bstrerror()); ok = false; } Pmsg0(-1, _("Wrote Start of Session label.\n")); memset(&rec, 0, sizeof(rec)); rec.data = get_memory(100000); /* max record size */ rec.data_len = REC_SIZE; /* * Put some random data in the record */ fill_buffer(FILL_RANDOM, rec.data, rec.data_len); /* * Generate data as if from File daemon, write to device */ jcr->dcr->VolFirstIndex = 0; time(&jcr->run_time); /* start counting time for rates */ (void)localtime_r(&jcr->run_time, &tm); strftime(buf1, sizeof(buf1), "%H:%M:%S", &tm); if (simple) { Pmsg1(-1, _("%s Begin writing Bacula records to tape ...\n"), buf1); } else { Pmsg1(-1, _("%s Begin writing Bacula records to first tape ...\n"), buf1); } for (file_index = 0; ok && !job_canceled(jcr); ) { rec.VolSessionId = jcr->VolSessionId; rec.VolSessionTime = jcr->VolSessionTime; rec.FileIndex = ++file_index; rec.Stream = STREAM_FILE_DATA; rec.maskedStream = STREAM_FILE_DATA; /* Mix up the data just a bit */ mix_buffer(FILL_RANDOM, rec.data, rec.data_len); Dmsg4(250, "before write_rec FI=%d SessId=%d Strm=%s len=%d\n", rec.FileIndex, rec.VolSessionId, stream_to_ascii(buf1, rec.Stream, rec.FileIndex), rec.data_len); while (!write_record_to_block(dcr, &rec)) { /* * When we get here we have just filled a block */ Dmsg2(150, "!write_record_to_block data_len=%d rem=%d\n", rec.data_len, rec.remainder); /* Write block to tape */ if (!flush_block(block, 1)) { Pmsg0(000, _("Flush block failed.\n")); exit_code = 1; break; } /* Every 5000 blocks (approx 322MB) report where we are. */ if ((block->BlockNumber % 5000) == 0) { now = time(NULL); now -= jcr->run_time; if (now <= 0) { now = 1; /* prevent divide error */ } rate = dev->VolCatInfo.VolCatBytes / now; Pmsg5(-1, _("Wrote block=%u, file,blk=%u,%u VolBytes=%s rate=%sB/s\n"), block->BlockNumber, dev->file, dev->block_num, edit_uint64_with_commas(dev->VolCatInfo.VolCatBytes, ec1), edit_uint64_with_suffix(rate, ec2)); } /* Every X blocks (dev->max_file_size) write an EOF. 
*/ if ((block->BlockNumber % write_eof) == 0) { now = time(NULL); (void)localtime_r(&now, &tm); strftime(buf1, sizeof(buf1), "%H:%M:%S", &tm); Pmsg1(-1, _("%s Flush block, write EOF\n"), buf1); flush_block(block, 0); #ifdef needed_xxx dev->weof(dcr, 1); #endif } /* Get out after writing 1000 blocks to the second tape */ if (++BlockNumber > 1000 && stop != 0) { /* get out */ Pmsg0(000, _("Wrote 1000 blocks on second tape. Done.\n")); break; } } if (!ok) { Pmsg0(000, _("Not OK\n")); exit_code = 1; break; } jcr->JobBytes += rec.data_len; /* increment bytes this job */ Dmsg4(190, "write_record FI=%s SessId=%d Strm=%s len=%d\n", FI_to_ascii(buf1, rec.FileIndex), rec.VolSessionId, stream_to_ascii(buf2, rec.Stream, rec.FileIndex), rec.data_len); /* Get out after writing 1000 blocks to the second tape */ if (BlockNumber > 1000 && stop != 0) { /* get out */ char ed1[50]; Pmsg1(-1, "Done writing %s records ...\n", edit_uint64_with_commas(write_count, ed1)); break; } } /* end big for loop */ if (vol_num > 1) { Dmsg0(100, "Write_end_session_label()\n"); /* Create Job status for end of session label */ if (!job_canceled(jcr) && ok) { jcr->setJobStatus(JS_Terminated); } else if (!ok) { Pmsg0(000, _("Job canceled.\n")); jcr->setJobStatus(JS_ErrorTerminated); exit_code = 1; } if (!write_session_label(dcr, EOS_LABEL)) { Pmsg1(000, _("Error writing end session label. ERR=%s\n"), dev->bstrerror()); ok = false; exit_code = 1; } /* Write out final block of this session */ if (!dcr->write_block_to_device()) { Pmsg0(-1, _("Set ok=false after write_block_to_device.\n")); ok = false; exit_code = 1; } Pmsg0(-1, _("Wrote End of Session label.\n")); /* Save last block info for second tape */ last_block_num2 = last_block_num; last_file2 = last_file; Dmsg1(000, "=== free_block %p\n", last_block2); free_block(last_block2); last_block2 = dup_block(last_block); } sprintf(buf, "%s/btape.state", working_directory); fd = open(buf, O_CREAT|O_TRUNC|O_WRONLY, 0640); if (fd >= 0) { write(fd, &btape_state_level, sizeof(btape_state_level)); write(fd, &simple, sizeof(simple)); write(fd, &last_block_num1, sizeof(last_block_num1)); write(fd, &last_block_num2, sizeof(last_block_num2)); write(fd, &last_file1, sizeof(last_file1)); write(fd, &last_file2, sizeof(last_file2)); write(fd, last_block1->buf, last_block1->buf_len); write(fd, last_block2->buf, last_block2->buf_len); write(fd, first_block->buf, first_block->buf_len); close(fd); Pmsg2(0, _("Wrote state file last_block_num1=%d last_block_num2=%d\n"), last_block_num1, last_block_num2); } else { berrno be; Pmsg2(0, _("Could not create state file: %s ERR=%s\n"), buf, be.bstrerror()); exit_code = 1; ok = false; } now = time(NULL); (void)localtime_r(&now, &tm); strftime(buf1, sizeof(buf1), "%H:%M:%S", &tm); if (ok) { if (simple) { Pmsg3(0, _("\n\n%s Done filling tape at %d:%d. Now beginning re-read of tape ...\n"), buf1, jcr->dcr->dev->file, jcr->dcr->dev->block_num); } else { Pmsg3(0, _("\n\n%s Done filling tapes at %d:%d. Now beginning re-read of first tape ...\n"), buf1, jcr->dcr->dev->file, jcr->dcr->dev->block_num); } jcr->dcr->block = block; if (!do_unfill()) { Pmsg0(000, _("do_unfill failed.\n")); exit_code = 1; ok = false; } } else { Pmsg1(000, _("%s: Error during test.\n"), buf1); } dev->min_block_size = min_block_size; free_memory(rec.data); } /* * Read two tapes written by the "fill" command and ensure * that the data is valid. If stop==1 we simulate full read back * of two tapes. If stop==-1 we simply read the last block and * verify that it is correct. 
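 *
 * The reference positions and blocks come from the btape.state file in
 * the working directory, which the fill command wrote in this order:
 * state level, simple flag, the two last block numbers, the two last
 * file numbers, then the raw buffers of the two last blocks and of the
 * first block written to the second tape. unfillcmd() reads the file
 * back in the same order before calling do_unfill().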
*/ static void unfillcmd() { int fd; exit_code = 0; last_block1 = dev->new_block(dcr); last_block2 = dev->new_block(dcr); first_block = dev->new_block(dcr); sprintf(buf, "%s/btape.state", working_directory); fd = open(buf, O_RDONLY); if (fd >= 0) { uint32_t state_level; read(fd, &state_level, sizeof(btape_state_level)); read(fd, &simple, sizeof(simple)); read(fd, &last_block_num1, sizeof(last_block_num1)); read(fd, &last_block_num2, sizeof(last_block_num2)); read(fd, &last_file1, sizeof(last_file1)); read(fd, &last_file2, sizeof(last_file2)); read(fd, last_block1->buf, last_block1->buf_len); read(fd, last_block2->buf, last_block2->buf_len); read(fd, first_block->buf, first_block->buf_len); close(fd); if (state_level != btape_state_level) { Pmsg0(-1, _("\nThe state file level has changed. You must redo\n" "the fill command.\n")); exit_code = 1; return; } } else { berrno be; Pmsg2(-1, _("\nCould not find the state file: %s ERR=%s\n" "You must redo the fill command.\n"), buf, be.bstrerror()); exit_code = 1; return; } if (!do_unfill()) { exit_code = 1; } this_block = NULL; } /* * This is the second part of the fill command. After the tape or * tapes are written, we are called here to reread parts, particularly * the last block. */ static bool do_unfill() { DEV_BLOCK *block = dcr->block; int autochanger; bool rc = false; uint64_t addr; dumped = 0; VolBytes = 0; LastBlock = 0; Pmsg0(000, "Enter do_unfill\n"); dev->set_cap(CAP_ANONVOLS); /* allow reading any volume */ dev->clear_cap(CAP_LABEL); /* don't label anything here */ end_of_tape = 0; time(&jcr->run_time); /* start counting time for rates */ stop = 0; file_index = 0; Dmsg1(900, "=== free_block %p\n", last_block); free_block(last_block); last_block = NULL; last_block_num = last_block_num1; last_file = last_file1; last_block = last_block1; free_restore_volume_list(jcr); jcr->bsr = NULL; bstrncpy(dcr->VolumeName, "TestVolume1|TestVolume2", sizeof(dcr->VolumeName)); create_restore_volume_list(jcr, true); if (jcr->VolList != NULL) { jcr->VolList->Slot = 1; if (jcr->VolList->next != NULL) { jcr->VolList->next->Slot = 2; } } set_volume_name("TestVolume1", 1); if (!simple) { /* Multiple Volume tape */ /* Close device so user can use autochanger if desired */ if (dev->has_cap(CAP_OFFLINEUNMOUNT)) { dev->offline(dcr); } autochanger = autoload_device(dcr, 1, NULL); if (autochanger != 1) { Pmsg1(100, "Autochanger returned: %d\n", autochanger); dev->close(dcr); get_cmd(_("Mount first tape. Press enter when ready: ")); Pmsg0(000, "\n"); } } dev->close(dcr); dev->num_writers = 0; dcr->clear_writing(); if (!acquire_device_for_read(dcr)) { Pmsg1(-1, "%s", dev->print_errmsg()); goto bail_out; } /* * We now have the first tape mounted. * Note, re-reading last block may have caused us to * loose track of where we are (block number unknown). */ Pmsg0(-1, _("Rewinding.\n")); if (!dev->rewind(dcr)) { /* get to a known place on tape */ goto bail_out; } /* Read the first 10000 records */ Pmsg2(-1, _("Reading the first 10000 records from %u:%u.\n"), dev->file, dev->block_num); quickie_count = 0; read_records(dcr, quickie_cb, my_mount_next_read_volume); Pmsg4(-1, _("Reposition from %u:%u to %u:%u\n"), dev->file, dev->block_num, last_file, last_block_num); addr = last_file; addr = (addr << 32) + last_block_num; if (!dev->reposition(dcr, addr)) { Pmsg1(-1, _("Reposition error. 
ERR=%s\n"), dev->bstrerror()); goto bail_out; } Pmsg1(-1, _("Reading block %u.\n"), last_block_num); if (!dcr->read_block_from_device(NO_BLOCK_NUMBER_CHECK)) { Pmsg1(-1, _("Error reading block: ERR=%s\n"), dev->print_errmsg()); goto bail_out; } if (compare_blocks(last_block, block)) { if (simple) { Pmsg0(-1, _("\nThe last block on the tape matches. Test succeeded.\n\n")); rc = true; } else { Pmsg0(-1, _("\nThe last block of the first tape matches.\n\n")); } } if (simple) { goto bail_out; } /* restore info for last block on second Volume */ last_block_num = last_block_num2; last_file = last_file2; last_block = last_block2; /* Multiple Volume tape */ /* Close device so user can use autochanger if desired */ if (dev->has_cap(CAP_OFFLINEUNMOUNT)) { dev->offline(dcr); } set_volume_name("TestVolume2", 2); autochanger = autoload_device(dcr, 1, NULL); if (autochanger != 1) { Pmsg1(100, "Autochanger returned: %d\n", autochanger); dev->close(dcr); get_cmd(_("Mount second tape. Press enter when ready: ")); Pmsg0(000, "\n"); } dev->clear_read(); dcr->clear_writing(); if (!acquire_device_for_read(dcr)) { Pmsg1(-1, "%s", dev->print_errmsg()); goto bail_out; } /* Space to "first" block which is last block not written * on the previous tape. */ Pmsg2(-1, _("Reposition from %u:%u to 0:1\n"), dev->file, dev->block_num); addr = 1; if (!dev->reposition(dcr, addr)) { Pmsg1(-1, _("Reposition error. ERR=%s\n"), dev->bstrerror()); goto bail_out; } Pmsg1(-1, _("Reading block %d.\n"), dev->block_num); if (!dcr->read_block_from_device(NO_BLOCK_NUMBER_CHECK)) { Pmsg1(-1, _("Error reading block: ERR=%s\n"), dev->print_errmsg()); goto bail_out; } if (compare_blocks(first_block, block)) { Pmsg0(-1, _("\nThe first block on the second tape matches.\n\n")); } /* Now find and compare the last block */ Pmsg4(-1, _("Reposition from %u:%u to %u:%u\n"), dev->file, dev->block_num, last_file, last_block_num); addr = last_file; addr = (addr<<32) + last_block_num; if (!dev->reposition(dcr, addr)) { Pmsg1(-1, _("Reposition error. ERR=%s\n"), dev->bstrerror()); goto bail_out; } Pmsg1(-1, _("Reading block %d.\n"), dev->block_num); if (!dcr->read_block_from_device(NO_BLOCK_NUMBER_CHECK)) { Pmsg1(-1, _("Error reading block: ERR=%s\n"), dev->print_errmsg()); goto bail_out; } if (compare_blocks(last_block, block)) { Pmsg0(-1, _("\nThe last block on the second tape matches. Test succeeded.\n\n")); rc = true; } bail_out: free_block(last_block1); free_block(last_block2); free_block(first_block); last_block = first_block = last_block1 = last_block2 = NULL; return rc; } /* Read 10000 records then stop */ static bool quickie_cb(DCR *dcr, DEV_RECORD *rec) { DEVICE *dev = dcr->dev; quickie_count++; if (quickie_count == 10000) { Pmsg2(-1, _("10000 records read now at %d:%d\n"), dev->file, dev->block_num); } return quickie_count < 10000; } static bool compare_blocks(DEV_BLOCK *last_block, DEV_BLOCK *block) { char *p, *q; union { uint32_t CheckSum; uint32_t block_len; }; ser_declare; p = last_block->buf; q = block->buf; unser_begin(q, BLKHDR2_LENGTH); unser_uint32(CheckSum); unser_uint32(block_len); while (q < (block->buf+block_len)) { if (*p == *q) { p++; q++; continue; } Pmsg0(-1, "\n"); dump_block(NULL, last_block, _("Last block written")); Pmsg0(-1, "\n"); dump_block(NULL, block, _("Block read back")); Pmsg1(-1, _("\n\nThe blocks differ at byte %u\n"), p - last_block->buf); Pmsg0(-1, _("\n\n!!!! The last block written and the block\n" "that was read back differ. 
The test FAILED !!!!\n" "This must be corrected before you use Bacula\n" "to write multi-tape Volumes.!!!!\n")); return false; } if (verbose) { dump_block(NULL, last_block, _("Last block written")); dump_block(NULL, block, _("Block read back")); } return true; } /* * Write current block to tape regardless of whether or * not it is full. If the tape fills, attempt to * acquire another tape. */ static int flush_block(DEV_BLOCK *block, int dump) { char ec1[50], ec2[50]; uint64_t rate; DEV_BLOCK *tblock; uint32_t this_file, this_block_num; dev->rLock(false); if (!this_block) { this_block = dev->new_block(dcr); } if (!last_block) { last_block = dev->new_block(dcr); } /* Copy block */ this_file = dev->file; this_block_num = dev->block_num; if (!dcr->write_block_to_dev()) { Pmsg3(000, _("Last block at: %u:%u this_dev_block_num=%d\n"), last_file, last_block_num, this_block_num); if (vol_num == 1) { /* * This is 1st tape, so save first tape info separate * from second tape info */ last_block_num1 = last_block_num; last_file1 = last_file; last_block1 = dup_block(last_block); last_block2 = dup_block(last_block); first_block = dup_block(block); /* first block second tape */ } if (verbose) { Pmsg3(000, _("Block not written: FileIndex=%d blk_block=%u Size=%u\n"), file_index, block->BlockNumber, block->block_len); dump_block(dev, last_block, _("Last block written")); Pmsg0(-1, "\n"); dump_block(dev, block, _("Block not written")); } if (stop == 0) { eot_block = block->BlockNumber; eot_block_len = block->block_len; eot_FileIndex = file_index; stop = 1; } now = time(NULL); now -= jcr->run_time; if (now <= 0) { now = 1; /* don't divide by zero */ } rate = dev->VolCatInfo.VolCatBytes / now; vol_size = dev->VolCatInfo.VolCatBytes; Pmsg4(000, _("End of tape %d:%d. Volume Bytes=%s. Write rate = %sB/s\n"), dev->file, dev->block_num, edit_uint64_with_commas(dev->VolCatInfo.VolCatBytes, ec1), edit_uint64_with_suffix(rate, ec2)); if (simple) { stop = -1; /* stop, but do simplified test */ } else { /* Full test in progress */ if (!fixup_device_block_write_error(jcr->dcr)) { Pmsg1(000, _("Cannot fixup device error. %s\n"), dev->bstrerror()); ok = false; dev->Unlock(); return 0; } BlockNumber = 0; /* start counting for second tape */ } dev->Unlock(); return 1; /* end of tape reached */ } /* Save contents after write so that the header is serialized */ memcpy(this_block->buf, block->buf, this_block->buf_len); /* * Note, we always read/write to block, but we toggle * copying it to one or another of two allocated blocks. * Switch blocks so that the block just successfully written is * always in last_block. */ tblock = last_block; last_block = this_block; this_block = tblock; last_file = this_file; last_block_num = this_block_num; dev->Unlock(); return 1; } /* * First we label the tape, then we fill * it with data get a new tape and write a few blocks. */ static void qfillcmd() { DEV_BLOCK *block = dcr->block; DEV_RECORD *rec = dcr->rec; int i, count; Pmsg0(0, _("Test writing blocks of 64512 bytes to tape.\n")); get_cmd(_("How many blocks do you want to write? 
(1000): ")); count = atoi(cmd); if (count <= 0) { count = 1000; } Dsm_check(200); i = block->buf_len - 100; ASSERT (i > 0); rec->data = check_pool_memory_size(rec->data, i); memset(rec->data, i & 0xFF, i); rec->data_len = i; rewindcmd(); init_speed(); Pmsg1(0, _("Begin writing %d Bacula blocks to tape ...\n"), count); for (i=0; i < count; i++) { if (i % 100 == 0) { printf("+"); fflush(stdout); } if (!write_record_to_block(dcr, rec)) { Pmsg0(0, _("Error writing record to block.\n")); goto bail_out; } if (!dcr->write_block_to_dev()) { Pmsg0(0, _("Error writing block to device.\n")); goto bail_out; } } printf("\n"); print_speed(dev->VolCatInfo.VolCatBytes); weofcmd(); if (dev->has_cap(CAP_TWOEOF)) { weofcmd(); } rewindcmd(); scan_blocks(); bail_out: Dsm_check(200); } /* * Fill a tape using raw write() command */ static void rawfill_cmd() { DEV_BLOCK *block = dcr->block; int stat; uint32_t block_num = 0; uint32_t *p; int my_errno; fill_buffer(FILL_RANDOM, block->buf, block->buf_len); init_speed(); p = (uint32_t *)block->buf; Pmsg1(0, _("Begin writing raw blocks of %u bytes.\n"), block->buf_len); for ( ;; ) { *p = block_num; stat = dev->d_write(dev->fd(), block->buf, block->buf_len); if (stat == (int)block->buf_len) { if ((block_num++ % 100) == 0) { printf("+"); fflush(stdout); } mix_buffer(FILL_RANDOM, block->buf, block->buf_len); jcr->JobBytes += stat; continue; } break; } my_errno = errno; printf("\n"); berrno be; printf(_("Write failed at block %u. stat=%d ERR=%s\n"), block_num, stat, be.bstrerror(my_errno)); print_speed(jcr->JobBytes); weofcmd(); } struct cmdstruct { const char *key; void (*func)(); const char *help; }; static struct cmdstruct commands[] = { {NT_("autochanger"),autochangercmd, _("test autochanger")}, {NT_("bsf"), bsfcmd, _("backspace file")}, {NT_("bsr"), bsrcmd, _("backspace record")}, {NT_("cap"), capcmd, _("list device capabilities")}, {NT_("clear"), clearcmd, _("clear tape errors")}, {NT_("eod"), eodcmd, _("go to end of Bacula data for append")}, {NT_("eom"), eomcmd, _("go to the physical end of medium")}, {NT_("fill"), fillcmd, _("fill tape, write onto second volume")}, {NT_("unfill"), unfillcmd, _("read filled tape")}, {NT_("fsf"), fsfcmd, _("forward space a file")}, {NT_("fsr"), fsrcmd, _("forward space a record")}, {NT_("help"), helpcmd, _("print this command")}, {NT_("label"), labelcmd, _("write a Bacula label to the tape")}, {NT_("load"), loadcmd, _("load a tape")}, {NT_("quit"), quitcmd, _("quit btape")}, {NT_("rawfill"), rawfill_cmd, _("use write() to fill tape")}, {NT_("readlabel"), readlabelcmd, _("read and print the Bacula tape label")}, {NT_("rectest"), rectestcmd, _("test record handling functions")}, {NT_("rewind"), rewindcmd, _("rewind the tape")}, {NT_("scan"), scancmd, _("read() tape block by block to EOT and report")}, {NT_("scanblocks"),scan_blocks, _("Bacula read block by block to EOT and report")}, {NT_("speed"), speed_test, _("[file_size=n(GB)|nb_file=3|skip_zero|skip_random|skip_raw|skip_block] report drive speed")}, {NT_("status"), statcmd, _("print tape status")}, {NT_("test"), testcmd, _("General test Bacula tape functions")}, {NT_("weof"), weofcmd, _("write an EOF on the tape")}, {NT_("wr"), wrcmd, _("write a single Bacula block")}, {NT_("rr"), rrcmd, _("read a single record")}, {NT_("rb"), rbcmd, _("read a single Bacula block")}, {NT_("qfill"), qfillcmd, _("quick fill command")} }; #define comsize (sizeof(commands)/sizeof(struct cmdstruct)) static void do_tape_cmds() { unsigned int i; bool found; while (!quit && get_cmd("*")) { 
Dsm_check(200); found = false; parse_args(cmd, &args, &argc, argk, argv, MAX_CMD_ARGS); for (i=0; i 0 && fstrsch(argk[0], commands[i].key)) { (*commands[i].func)(); /* go execute command */ found = true; break; } if (*cmd && !found) { Pmsg1(0, _("\"%s\" is an invalid command\n"), cmd); } } } static void helpcmd() { unsigned int i; usage(); printf(_("Interactive commands:\n")); printf(_(" Command Description\n ======= ===========\n")); for (i=0; i \n" " -b specify bootstrap file\n" " -c set configuration file to file\n" " -d set debug level to \n" " -dt print timestamp in debug output\n" " -p proceed inspite of I/O errors\n" " -s turn off signals\n" " -w set working directory to dir\n" " -v be verbose\n" " -? print this message.\n" "\n"), 2000, "", VERSION, BDATE); } /* * Get next input command from terminal. This * routine is REALLY primitive, and should be enhanced * to have correct backspacing, etc. */ int get_cmd(const char *prompt) { int i = 0; int ch; fprintf(stdout, "%s", prompt); /* We really should turn off echoing and pretty this * up a bit. */ cmd[i] = 0; while ((ch = fgetc(stdin)) != EOF) { if (ch == '\n') { strip_trailing_junk(cmd); return 1; } else if (ch == 4 || ch == 0xd3 || ch == 0x8) { if (i > 0) { cmd[--i] = 0; } continue; } cmd[i++] = ch; cmd[i] = 0; } quit = 1; return 0; } bool BtapeAskDirHandler::dir_create_jobmedia_record(DCR *dcr, bool zero) { dcr->WroteVol = false; return 1; } bool BtapeAskDirHandler::dir_find_next_appendable_volume(DCR *dcr) { Dmsg1(20, "Enter dir_find_next_appendable_volume. stop=%d\n", stop); return dcr->VolumeName[0] != 0; } bool BtapeAskDirHandler::dir_ask_sysop_to_mount_volume(DCR *dcr, bool /* writing */) { DEVICE *dev = dcr->dev; Dmsg0(20, "Enter dir_ask_sysop_to_mount_volume\n"); if (dcr->VolumeName[0] == 0) { return dir_ask_sysop_to_create_appendable_volume(dcr); } Pmsg1(-1, "%s", dev->print_errmsg()); /* print reason */ if (dcr->VolumeName[0] == 0 || strcmp(dcr->VolumeName, "TestVolume2") == 0) { fprintf(stderr, _("Mount second Volume on device %s and press return when ready: "), dev->print_name()); } else { fprintf(stderr, _("Mount Volume \"%s\" on device %s and press return when ready: "), dcr->VolumeName, dev->print_name()); } dev->close(dcr); getchar(); return true; } bool BtapeAskDirHandler::dir_ask_sysop_to_create_appendable_volume(DCR *dcr) { int autochanger; DEVICE *dev = dcr->dev; Dmsg0(20, "Enter dir_ask_sysop_to_create_appendable_volume\n"); if (stop == 0) { set_volume_name("TestVolume1", 1); } else { set_volume_name("TestVolume2", 2); } /* Close device so user can use autochanger if desired */ if (dev->has_cap(CAP_OFFLINEUNMOUNT)) { dev->offline(dcr); } autochanger = autoload_device(dcr, 1, NULL); if (autochanger != 1) { Pmsg1(100, "Autochanger returned: %d\n", autochanger); fprintf(stderr, _("Mount blank Volume on device %s and press return when ready: "), dev->print_name()); dev->close(dcr); getchar(); Pmsg0(000, "\n"); } labelcmd(); VolumeName = NULL; BlockNumber = 0; return true; } static bool my_mount_next_read_volume(DCR *dcr) { char ec1[50], ec2[50]; uint64_t rate; JCR *jcr = dcr->jcr; DEV_BLOCK *block = dcr->block; Dmsg0(20, "Enter my_mount_next_read_volume\n"); Pmsg2(000, _("End of Volume \"%s\" %d records.\n"), dcr->VolumeName, quickie_count); volume_unused(dcr); /* release current volume */ if (LastBlock != block->BlockNumber) { VolBytes += block->block_len; } LastBlock = block->BlockNumber; now = time(NULL); now -= jcr->run_time; if (now <= 0) { now = 1; } rate = VolBytes / now; Pmsg3(-1, _("Read block=%u, 
VolBytes=%s rate=%sB/s\n"), block->BlockNumber, edit_uint64_with_commas(VolBytes, ec1), edit_uint64_with_suffix(rate, ec2)); if (strcmp(dcr->VolumeName, "TestVolume2") == 0) { end_of_tape = 1; return false; } set_volume_name("TestVolume2", 2); dev->close(dcr); if (!acquire_device_for_read(dcr)) { Pmsg2(0, _("Cannot open Dev=%s, Vol=%s\n"), dev->print_name(), dcr->VolumeName); return false; } return true; /* next volume mounted */ } static void set_volume_name(const char *VolName, int volnum) { DCR *dcr = jcr->dcr; VolumeName = VolName; vol_num = volnum; dev->setVolCatName(VolName); dcr->setVolCatName(VolName); bstrncpy(dcr->VolumeName, VolName, sizeof(dcr->VolumeName)); dcr->VolCatInfo.Slot = volnum; dcr->VolCatInfo.InChanger = true; } bacula-15.0.3/src/stored/cloud_transfer_mgr.c0000644000175000017500000007047614771010173021012 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Bacula Cloud Transfer Manager: * transfer manager wraps around the work queue. * Reports transfer status and error * Reports statistics about current past and future work * Written by Norbert Bizet, May MMXVI * */ #include "cloud_transfer_mgr.h" #include "stored.h" #define ONE_SEC 1000000LL /* number of microseconds in a second */ static const int64_t dbglvl = DT_CLOUD|50; static const char *transfer_state_name[] = {"created", "queued", "process", "done", "error"}; /* constructor * size : the size in bytes of the transfer * funct : function to process * arg : argument passed to the function * cache_fname : cache file name is duplicated in the transfer constructor * volume_name : volume name is duplicated in the transfer constructor * device_name : device name is duplicated in the transfer constructor * part : part index * driver : pointer to the cloud_driver * dcr : pointer to DCR */ transfer::transfer(uint64_t size, transfer_engine* funct, const char *cache_fname, const char *volume_name, const char *device_name, uint32_t part, cloud_driver *driver, uint32_t JobId, DCR *dcr, cloud_proxy *proxy) : m_stat_size(size), m_stat_processed_size(0), m_stat_start(0), m_stat_duration(0), m_stat_eta(0), m_stat_average_rate(0), m_message(NULL), m_state(TRANS_STATE_CREATED), m_mgr(NULL), m_funct(funct), m_wait_timeout_inc_insec(0), m_wait_timeout(time(NULL)), m_debug_retry(true), m_cache_fname(bstrdup(cache_fname)), /* cache fname is duplicated*/ m_volume_name(bstrdup(volume_name)), /* volume name is duplicated*/ m_device_name(bstrdup(device_name)), /* device name is duplicated*/ m_part(part), m_driver(driver), m_job_id(JobId), m_dcr(dcr), m_proxy(proxy), m_res_size(0), m_res_mtime(0), m_workq_elem(NULL), m_use_count(0), m_retry(0), m_cancel(false), m_do_cache_truncate(false), m_restore_bucket(NULL) { pthread_mutex_init(&m_stat_mutex, 0); pthread_mutex_init(&m_mutex, 0); pthread_cond_init(&m_done, NULL); m_message = get_pool_memory(PM_MESSAGE); *m_message = 0; m_restore_bucket = get_pool_memory(PM_MESSAGE); *m_restore_bucket = 0; } /* destructor */ 
transfer::~transfer() { free_pool_memory(m_restore_bucket); free_pool_memory(m_message); pthread_cond_destroy(&m_done); pthread_mutex_destroy(&m_mutex); pthread_mutex_destroy(&m_stat_mutex); free(m_device_name); free(m_volume_name); free(m_cache_fname); if (m_use_count > 0) { ASSERT(FALSE); Dmsg1(dbglvl, "!!!m_use_count = %d\n", m_use_count); } } /* queue this transfer for processing in the manager workq * ret :true if queuing is successful */ bool transfer::queue() { if (!transition(TRANS_STATE_QUEUED)) { return false; } return true; } /* reset processed size */ void transfer::reset_processed_size() { lock_guard lg(m_stat_mutex); m_stat_processed_size = 0; ASSERTD(m_stat_processed_size==0, "invalid locking of processed size"); } /* set absolute value process size */ void transfer::set_processed_size(uint64_t size) { lock_guard lg(m_stat_mutex); m_stat_processed_size = size; m_stat_duration = get_current_btime()-m_stat_start; if (m_stat_duration > 0) { m_stat_average_rate = (m_stat_processed_size*ONE_SEC)/m_stat_duration; } ASSERTD(m_stat_processed_size <= m_stat_size, "increment_processed_size increment too big"); } /* add increment to the current processed size */ void transfer::increment_processed_size(uint64_t increment) { set_processed_size(m_stat_processed_size+increment); } /* opaque function that processes m_funct with m_arg as parameter * depending on m_funct return value, changes state to TRANS_STATE_DONE * or TRANS_STATE_ERROR */ void transfer::proceed() { if (transition(TRANS_STATE_PROCESSED)) { transfer_state state = m_funct(this); if (!transition(state)) { Mmsg1(m_message, _("wrong transition to %s after proceed\n"), transfer_state_name[state]); } } else { Mmsg(m_message, _("wrong transition to TRANS_STATE_PROCESS in proceed review logic\n")); } } int transfer::wait() { lock_guard lg(m_mutex); int stat = 0; while (m_state != TRANS_STATE_DONE && m_state != TRANS_STATE_ERROR) { if ((stat = pthread_cond_wait(&m_done, &m_mutex)) != 0) { return stat; } } return stat; } int transfer::timedwait(const timeval& tv) { lock_guard lg(m_mutex); struct timespec timeout; struct timeval ttv; struct timezone tz; int stat = 0; timeout.tv_sec = tv.tv_sec; timeout.tv_nsec = tv.tv_usec * 1000; gettimeofday(&ttv, &tz); timeout.tv_nsec += ttv.tv_usec * 1000; timeout.tv_sec += ttv.tv_sec; while (m_state != TRANS_STATE_DONE && m_state != TRANS_STATE_ERROR) { if ((stat = pthread_cond_timedwait(&m_done, &m_mutex, &timeout)) != 0) { return stat; } } return stat; } /* place the cancel flag and wait until processing is done */ bool transfer::cancel() { { lock_guard lg(m_mutex); m_cancel = true; } return wait(); } /* checking the cancel status : doesnt request locking */ bool transfer::is_canceled() const { return m_cancel; } uint32_t transfer::append_status(POOL_MEM& msg) { POOLMEM *tmp_msg = get_pool_memory(PM_MESSAGE); char ec[50], ed1[50], ed2[50]; uint32_t ret=0; lock_guard lg(m_stat_mutex); if (m_state > TRANS_STATE_PROCESSED) { if (m_hash64[0]||m_hash64[1]||m_hash64[2]||m_hash64[3]||m_hash64[4]||m_hash64[5]||m_hash64[6]||m_hash64[7]) { ret = Mmsg(tmp_msg,_("%s/part.%-5d state=%-7s %s%s%s%s size=%sB duration=%ds hash=%02x%02x%02x%02x%02x%02x%02x%02x%s%s\n"), m_volume_name, m_part, transfer_state_name[m_state], (m_retry != 0)?"retry=":"", (m_retry != 0)?edit_uint64(m_retry, ed1):"", (m_retry != 0)?"/":"", (m_retry != 0)?edit_uint64(m_driver->max_upload_retries, ed2):"", edit_uint64_with_suffix(m_stat_size, ec), m_stat_duration/ONE_SEC, 
m_hash64[0],m_hash64[1],m_hash64[2],m_hash64[3],m_hash64[4],m_hash64[5],m_hash64[6],m_hash64[7], (strlen(m_message) != 0)?" msg=":"", (strlen(m_message) != 0)?m_message:""); } else { ret = Mmsg(tmp_msg,_("%s/part.%-5d state=%-7s %s%s%s%s size=%sB duration=%ds%s%s\n"), m_volume_name, m_part, transfer_state_name[m_state], (m_retry != 0)?"retry=":"", (m_retry != 0)?edit_uint64(m_retry, ed1):"", (m_retry != 0)?"/":"", (m_retry != 0)?edit_uint64(m_driver->max_upload_retries, ed2):"", edit_uint64_with_suffix(m_stat_size, ec), m_stat_duration/ONE_SEC, (strlen(m_message) != 0)?" msg=":"", (strlen(m_message) != 0)?m_message:""); } pm_strcat(msg, tmp_msg); } else { ret = Mmsg(tmp_msg,_("%s/part.%-5d state=%-7s %s%s%s%s size=%sB eta=%ds%s%s\n"), m_volume_name, m_part, transfer_state_name[m_state], (m_retry != 0)?"retry=":"", (m_retry != 0)?edit_uint64(m_retry, ed1):"", (m_retry != 0)?"/":"", (m_retry != 0)?edit_uint64(m_driver->max_upload_retries, ed2):"", edit_uint64_with_suffix(m_stat_size, ec), m_stat_eta/ONE_SEC, (strlen(m_message) != 0)?" msg=":"", (strlen(m_message) != 0)?m_message:""); pm_strcat(msg, tmp_msg); } free_pool_memory(tmp_msg); return ret; } void transfer::append_api_status(OutputWriter &ow) { lock_guard lg(m_stat_mutex); Dmsg2(dbglvl, "transfer::append_api_status state=%d JobId=%d\n", m_state, m_job_id); if (m_state > TRANS_STATE_PROCESSED) { ow.get_output(OT_START_OBJ, OT_STRING,"device_name", NPRTB(m_device_name), OT_STRING,"volume_name", NPRTB(m_volume_name), OT_INT32, "part", m_part, OT_INT32, "jobid", m_job_id, OT_STRING,"state", (m_state == TRANS_STATE_QUEUED) ? (m_wait_timeout_inc_insec == 0) ? "queued":"waiting" :transfer_state_name[m_state], OT_INT64, "size", m_stat_size, OT_DURATION, "duration", m_stat_duration/ONE_SEC, OT_STRING,"message", NPRTB(m_message), OT_INT32, "retry", m_retry, OT_END_OBJ, OT_END); } else { ow.get_output(OT_START_OBJ, OT_STRING,"device_name", NPRTB(m_device_name), OT_STRING,"volume_name", NPRTB(m_volume_name), OT_INT32, "part", m_part, OT_INT32, "jobid", m_job_id, OT_STRING,"state", (m_state == TRANS_STATE_QUEUED) ? (m_wait_timeout_inc_insec == 0) ? 
"queued":"waiting" :transfer_state_name[m_state], OT_INT64, "size", m_stat_size, OT_INT64, "processed_size", m_stat_processed_size, OT_DURATION, "eta", m_stat_eta/ONE_SEC, OT_STRING,"message", NPRTB(m_message), OT_INT32, "retry", m_retry, OT_END_OBJ, OT_END); } } /* the manager references itself through this function */ void transfer::set_manager(transfer_manager *mgr) { lock_guard lg(m_mutex); m_mgr = mgr; } /* change the state */ bool transfer::transition(transfer_state state) { /* lock state mutex*/ lock_guard lg(m_mutex); /* transition from current state (m_state) to target state (state)*/ bool ret = false; /*impossible transition*/ switch(m_state) { case TRANS_STATE_CREATED: /* CREATED -> QUEUED */ if (state == TRANS_STATE_QUEUED) { /* valid transition*/ ret = true; if (m_mgr) { /*lock manager statistics */ P(m_mgr->m_stat_mutex); /*increment the number of queued transfer*/ if (m_wait_timeout_inc_insec == 0) { /* queued state */ m_mgr->m_stat_nb_transfer_queued++; /*add the current size into manager queued size*/ m_mgr->m_stat_size_queued += m_stat_size; } else { /* pseudo-waiting state */ m_mgr->m_stat_nb_transfer_waiting++; /*add the current size into manager pseudo-waiting size*/ m_mgr->m_stat_size_waiting += m_stat_size; } /*unlock manager statistics */ V(m_mgr->m_stat_mutex); P(m_mgr->m_mutex); m_mgr->add_work(this); V(m_mgr->m_mutex); } } break; case TRANS_STATE_QUEUED: /* QUEUED -> CREATED : back to initial state*/ if (state == TRANS_STATE_CREATED) { /* valid transition*/ ret = true; if (m_mgr) { /*lock manager statistics */ P(m_mgr->m_stat_mutex); /*decrement the number of queued transfer*/ if (m_wait_timeout_inc_insec == 0) { /* queued state */ m_mgr->m_stat_nb_transfer_queued--; /*remove the current size into manager queued size*/ m_mgr->m_stat_size_queued -= m_stat_size; } else { /* pseudo-waiting state */ m_mgr->m_stat_nb_transfer_waiting--; /*remove the current size into manager pseudo-waiting size*/ m_mgr->m_stat_size_waiting -= m_stat_size; } /*unlock manager statistics */ V(m_mgr->m_stat_mutex); P(m_mgr->m_mutex); m_mgr->remove_work(m_workq_elem); V(m_mgr->m_mutex); } } /* QUEUED -> PROCESSED : a worker aquired the transfer*/ if (state == TRANS_STATE_PROCESSED) { /*valid transition*/ ret = true; if (m_mgr) { /*lock manager statistics */ P(m_mgr->m_stat_mutex); /*decrement the number of queued transfer*/ if (m_wait_timeout_inc_insec == 0) { /* queued state */ m_mgr->m_stat_nb_transfer_queued--; /*remove the current size into manager queued size*/ m_mgr->m_stat_size_queued -= m_stat_size; } else { /* pseudo-waiting state */ m_mgr->m_stat_nb_transfer_waiting--; /*remove the current size into manager pseudo-waiting size*/ m_mgr->m_stat_size_waiting -= m_stat_size; } /*increment the number of processed transfer*/ m_mgr->m_stat_nb_transfer_processed++; /*... and add it to the manager processed size*/ m_mgr->m_stat_size_processed += m_stat_size; /*unlock manager statistics */ V(m_mgr->m_stat_mutex); /*transfer starts now*/ P(m_stat_mutex); m_stat_start = get_current_btime(); V(m_stat_mutex); } } break; case TRANS_STATE_PROCESSED: /* PROCESSED -> DONE : Success! 
*/ if (state == TRANS_STATE_DONE) { /*valid transition*/ ret = true; /*transfer stops now : compute transfer duration*/ P(m_stat_mutex); m_stat_duration = get_current_btime()-m_stat_start; if (m_stat_duration > 0) { m_stat_processed_size = m_stat_size; ASSERTD(m_stat_size == m_stat_processed_size, "xfer done before processed size is equal to size."); m_stat_average_rate = (m_stat_size*ONE_SEC)/m_stat_duration; } V(m_stat_mutex); if (m_mgr) { /*lock manager statistics */ P(m_mgr->m_stat_mutex); /* ... from processed to done*/ m_mgr->m_stat_nb_transfer_processed--; m_mgr->m_stat_nb_transfer_done++; m_mgr->m_stat_size_processed -= m_stat_size; m_mgr->m_stat_size_done += m_stat_size; /*add local duration to manager duration */ m_mgr->m_stat_duration_done += m_stat_duration; /*unlock manager statistics */ V(m_mgr->m_stat_mutex); } if (m_proxy) { m_proxy->set(m_volume_name, m_part, m_res_mtime, m_res_size, m_hash64); } /* in both case, success or failure, life keeps going on */ pthread_cond_broadcast(&m_done); } /* PROCESSED -> ERROR : Failure! */ if (state == TRANS_STATE_ERROR) { /*valid transition*/ ret = true; /*transfer stops now, even if in error*/ P(m_stat_mutex); m_stat_duration = get_current_btime()-m_stat_start; V(m_stat_mutex); if (m_mgr) { /*lock manager statistics */ P(m_mgr->m_stat_mutex); /* ... from processed to error*/ m_mgr->m_stat_nb_transfer_processed--; m_mgr->m_stat_nb_transfer_error++; m_mgr->m_stat_size_processed -= m_stat_size; m_mgr->m_stat_size_error += m_stat_size; /*unlock manager statistics */ V(m_mgr->m_stat_mutex); } /* in both case, success or failure, life keeps going on */ pthread_cond_broadcast(&m_done); } /* PROCESSED -> QUEUED */ if (state == TRANS_STATE_QUEUED) { /*valid transition*/ ret = true; if (m_mgr) { /*lock manager statistics */ P(m_mgr->m_stat_mutex); m_mgr->m_stat_nb_transfer_processed--; /*increment the number of queued transfer*/ if (m_wait_timeout_inc_insec == 0) { /* queued state */ m_mgr->m_stat_nb_transfer_queued++; /*add the current size into manager queued size*/ m_mgr->m_stat_size_queued += m_stat_size; } else { /* pseudo-waiting state */ m_mgr->m_stat_nb_transfer_waiting++; /*add the current size into manager queued size*/ m_mgr->m_stat_size_waiting += m_stat_size; } /*remove the current size into manager processed size*/ m_mgr->m_stat_size_processed -= m_stat_size; /*unlock manager statistics */ V(m_mgr->m_stat_mutex); P(m_mgr->m_mutex); m_mgr->add_work(this); V(m_mgr->m_mutex); } /* leave */ pthread_cond_broadcast(&m_done); } break; case TRANS_STATE_DONE: case TRANS_STATE_ERROR: default: ret = false; break; } /* update state when transition is valid*/ if (ret) { m_state = state; } return ret; } void transfer::set_do_cache_truncate(bool do_cache_truncate) { m_do_cache_truncate=do_cache_truncate; } void transfer::set_restore_bucket(const char *restore_bucket) { pm_strcpy(m_restore_bucket, restore_bucket); } void transfer::inc_retry() { lock_guard lg(m_mutex); m_retry++; } int transfer::inc_use_count() { lock_guard lg(m_mutex); return ++m_use_count; } int transfer::dec_use_count() { lock_guard lg(m_mutex); return --m_use_count; } void *transfer_launcher(void *arg) { transfer *t = (transfer *)arg; if (t) { t->proceed(); } return NULL; } /* ----------------------------------------------------------- transfer manager declarations ----------------------------------------------------------- */ /* constructor */ /* nb_workers: maximum number of workers allowed for this manager */ transfer_manager::transfer_manager(uint32_t nb_worker) { transfer 
*item=NULL; m_transfer_list.init(item, &item->link); pthread_mutex_init(&m_stat_mutex, 0); pthread_mutex_init(&m_mutex, 0); workq_init(&m_wq, nb_worker, transfer_launcher); } /* destructor */ transfer_manager::~transfer_manager() { workq_wait_idle(&m_wq); pthread_mutex_destroy(&m_mutex); pthread_mutex_destroy(&m_stat_mutex); } /* create a new or inc-reference a similar transfer. (factory) * ret: transfer* is ref_counted and must be kept, used * and eventually released by caller with release() */ transfer *transfer_manager::get_xfer(uint64_t size, transfer_engine *funct, POOLMEM *cache_fname, const char *volume_name, const char *device_name, uint32_t part, cloud_driver *driver, uint32_t JobId, DCR *dcr, cloud_proxy *proxy) { lock_guard lg (m_mutex); /* do we have a similar transfer on tap? */ transfer *item; foreach_dlist(item, (&m_transfer_list)) { /* this is where "similar transfer" is defined: * same volume_name, same part idx */ if (strcmp(item->m_volume_name, volume_name) == 0 && item->m_part == part) { item->inc_use_count(); return item; } } /* no existing transfer: create a new one */ item = New(transfer(size, funct, cache_fname,/* cache_fname is duplicated in the transfer constructor*/ volume_name, /* volume_name is duplicated in the transfer constructor*/ device_name,/* device_name is duplicated in the transfer constructor*/ part, driver, JobId, dcr, proxy)); ASSERT(item->m_state == TRANS_STATE_CREATED); item->set_manager(this); /* inc use_count once for m_transfer_list insertion */ item->inc_use_count(); m_transfer_list.append(item); /* inc use_count once for caller ref counting */ item->inc_use_count(); return item; } /* does the xfer belong to us? */ bool transfer_manager::owns(transfer *xfer) { lock_guard lg(m_mutex); transfer *item; foreach_dlist(item, (&m_transfer_list)) { /* same address */ if (item == xfer) { return true; } } return false; } /* un-ref transfer and free if ref count goes to zero * caller must NOT use xfer anymore after this has been called */ void transfer_manager::release(transfer *xfer) { if (xfer) { ASSERTD(owns(xfer), "Wrong Manager"); /* wait should have been done already by caller, * but we cannot afford deleting the transfer while it's not completed */ wait(xfer); /* decrement the caller reference */ if (xfer->dec_use_count() == 1) { /* the only ref left is the one from m_transfer_list * time for deletion */ lock_guard lg(m_mutex); m_transfer_list.remove(xfer); xfer->dec_use_count(); delete xfer; } } } /* accessors to xfer->queue */ bool transfer_manager::queue(transfer *xfer) { if (xfer) { ASSERTD(owns(xfer), "Wrong Manager"); return xfer->queue(); } return false; } /* accessors to xfer->wait */ int transfer_manager::wait(transfer *xfer) { if (xfer) { ASSERTD(owns(xfer), "Wrong Manager"); return xfer->wait(); } return 0; } /* accessors to xfer->timedwait */ int transfer_manager::timedwait(transfer *xfer, const timeval& tv) { if (xfer) { ASSERTD(owns(xfer), "Wrong Manager"); return xfer->timedwait(tv); } return 0; } /* accessors to xfer->cancel */ bool transfer_manager::cancel(transfer *xfer) { if (xfer) { ASSERTD(owns(xfer), "Wrong Manager"); return xfer->cancel(); } return false; } /* append a transfer object to this manager */ int transfer_manager::add_work(transfer* t) { return workq_add(&m_wq, t, t ? 
&t->m_workq_elem : NULL, 0); } /* remove associated workq_ele_t from this manager workq */ int transfer_manager::remove_work(workq_ele_t *elem) { return workq_remove(&m_wq, elem); } /* search the transfer list for similar transfer */ bool transfer_manager::find(const char *VolName, uint32_t index) { /* Look in the transfer list if we have a download/upload for the current volume */ lock_guard lg(m_mutex); transfer *item; foreach_dlist(item, (&m_transfer_list)) { if (strcmp(item->m_volume_name, VolName) == 0 && item->m_part == index) { return true; } } return false; } /* Call to this function just before displaying global statistics */ void transfer_manager::update_statistics() { /* lock the manager stats */ lock_guard lg_stat(m_stat_mutex); /* lock the queue so order and chaining cannot be modified */ lock_guard lg(m_mutex); /* recompute global average rate */ transfer *t; uint64_t accumulated_done_average_rate = 0; uint32_t nb_done_accumulated = 0; foreach_dlist(t, &m_transfer_list) { lock_guard lg_xferstatmutex(t->m_stat_mutex); if (t->m_stat_average_rate>0) { accumulated_done_average_rate += t->m_stat_average_rate; t->m_stat_average_rate = 0; ++nb_done_accumulated; } } if (nb_done_accumulated) { m_stat_average_rate = accumulated_done_average_rate / nb_done_accumulated; } /* ETA naive calculation for each element in the queue */ if (m_stat_average_rate != 0) { uint64_t accumulator=0; foreach_dlist(t, &m_transfer_list) { if (t->m_state == TRANS_STATE_QUEUED) { lock_guard lg_xferstatmutex(t->m_stat_mutex); accumulator+= t->m_stat_size-t->m_stat_processed_size; t->m_stat_eta = (accumulator / m_stat_average_rate) * ONE_SEC; } if (t->m_state == TRANS_STATE_PROCESSED) { lock_guard lg_xferstatmutex(t->m_stat_mutex); t->m_stat_eta = ((t->m_stat_size-t->m_stat_processed_size) / m_stat_average_rate) * ONE_SEC; } } /* the manager ETA is the ETA of the last transfer in its workq */ m_stat_eta = (accumulator / m_stat_average_rate) * ONE_SEC; } } /* short status of the transfers */ uint32_t transfer_manager::append_status(POOL_MEM& msg, bool verbose) { update_statistics(); char ec0[30],ec1[30],ec2[30],ec3[30],ec4[30],ec5[30]; POOLMEM *tmp_msg = get_pool_memory(PM_MESSAGE); lock_guard lg_stat(m_stat_mutex); uint32_t ret = Mmsg(tmp_msg, _("(%sB/s) (ETA %d s) " "Queued=%d %sB, Waiting=%d %sB, Processing=%d %sB, Done=%d %sB, Failed=%d %sB\n"), edit_uint64_with_suffix(m_stat_average_rate, ec0), m_stat_eta/ONE_SEC, m_stat_nb_transfer_queued, edit_uint64_with_suffix(m_stat_size_queued, ec1), m_stat_nb_transfer_waiting, edit_uint64_with_suffix(m_stat_size_waiting, ec2), m_stat_nb_transfer_processed, edit_uint64_with_suffix(m_stat_size_processed, ec3), m_stat_nb_transfer_done, edit_uint64_with_suffix(m_stat_size_done, ec4), m_stat_nb_transfer_error, edit_uint64_with_suffix(m_stat_size_error, ec5)); pm_strcat(msg, tmp_msg); if (verbose) { lock_guard lg(m_mutex); if (!m_transfer_list.empty()) { ret += Mmsg(tmp_msg, _("------------------------------------------------------------ details ------------------------------------------------------------\n")); pm_strcat(msg, tmp_msg); } transfer *tpkt; foreach_dlist(tpkt, &m_transfer_list) { ret += tpkt->append_status(msg); } } free_pool_memory(tmp_msg); return ret; } void transfer_manager::append_api_status(OutputWriter &ow, bool verbose) { update_statistics(); lock_guard lg_stat(m_stat_mutex); ow.get_output(OT_START_OBJ, OT_INT64, "average_rate", m_stat_average_rate, OT_DURATION, "eta", m_stat_eta/ONE_SEC, OT_INT64, "nb_transfer_queued", m_stat_nb_transfer_queued, 
OT_INT64, "size_queued", m_stat_size_queued, OT_INT64, "nb_transfer_waiting", m_stat_nb_transfer_waiting, OT_INT64, "size_waiting", m_stat_size_waiting, OT_INT64, "nb_transfer_processed", m_stat_nb_transfer_processed, OT_INT64, "size_processed", m_stat_size_processed, OT_INT64, "nb_transfer_done", m_stat_nb_transfer_done, OT_INT64, "size_done", m_stat_size_done, OT_INT64, "nb_transfer_error", m_stat_nb_transfer_error, OT_INT64, "size_error", m_stat_size_error, OT_INT, "transfers_list_size", m_transfer_list.size(), OT_END); if (verbose) { lock_guard lg(m_mutex); ow.start_list("transfers"); transfer *tpkt; foreach_dlist(tpkt, &m_transfer_list) { tpkt->append_api_status(ow); } ow.end_list(); } ow.get_output(OT_END_OBJ, OT_END); } bacula-15.0.3/src/stored/os.c0000644000175000017500000002512614771010173015544 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * * os.c -- Operating System dependent dev.c routines * * written by, Kern Sibbald, MM * separated from dev.c February 2014 * * Note, this is the device dependent code, and may have * to be modified for each system. */ #include "bacula.h" #include "stored.h" /* Returns file position on tape or -1 */ int32_t DEVICE::get_os_tape_file() { struct mtget mt_stat; if (has_cap(CAP_MTIOCGET) && d_ioctl(m_fd, MTIOCGET, (char *)&mt_stat) == 0) { return mt_stat.mt_fileno; } return -1; } void set_os_device_parameters(DCR *dcr) { DEVICE *dev = dcr->dev; if (strcmp(dev->dev_name, "/dev/null") == 0) { return; /* no use trying to set /dev/null */ } #if defined(HAVE_LINUX_OS) || defined(HAVE_WIN32) struct mtop mt_com; Dmsg0(100, "In set_os_device_parameters\n"); #if defined(MTSETBLK) if (dev->min_block_size == dev->max_block_size && dev->min_block_size == 0) { /* variable block mode */ mt_com.mt_op = MTSETBLK; mt_com.mt_count = 0; Dmsg0(100, "Set block size to zero\n"); if (dev->d_ioctl(dev->fd(), MTIOCTOP, (char *)&mt_com) < 0) { dev->clrerror(MTSETBLK); } } #endif #if defined(MTSETDRVBUFFER) if (getuid() == 0) { /* Only root can do this */ mt_com.mt_op = MTSETDRVBUFFER; mt_com.mt_count = MT_ST_CLEARBOOLEANS; if (!dev->has_cap(CAP_TWOEOF)) { mt_com.mt_count |= MT_ST_TWO_FM; } if (dev->has_cap(CAP_EOM)) { mt_com.mt_count |= MT_ST_FAST_MTEOM; } Dmsg0(100, "MTSETDRVBUFFER\n"); if (dev->d_ioctl(dev->fd(), MTIOCTOP, (char *)&mt_com) < 0) { dev->clrerror(MTSETDRVBUFFER); } } #endif return; #endif #ifdef HAVE_NETBSD_OS struct mtop mt_com; if (dev->min_block_size == dev->max_block_size && dev->min_block_size == 0) { /* variable block mode */ mt_com.mt_op = MTSETBSIZ; mt_com.mt_count = 0; if (dev->d_ioctl(dev->fd(), MTIOCTOP, (char *)&mt_com) < 0) { dev->clrerror(MTSETBSIZ); } /* Get notified at logical end of tape */ mt_com.mt_op = MTEWARN; mt_com.mt_count = 1; if (dev->d_ioctl(dev->fd(), MTIOCTOP, (char *)&mt_com) < 0) { dev->clrerror(MTEWARN); } } return; #endif #if HAVE_FREEBSD_OS || HAVE_OPENBSD_OS struct mtop mt_com; if (dev->min_block_size == dev->max_block_size && dev->min_block_size == 0) { /* 
variable block mode */ mt_com.mt_op = MTSETBSIZ; mt_com.mt_count = 0; if (dev->d_ioctl(dev->fd(), MTIOCTOP, (char *)&mt_com) < 0) { dev->clrerror(MTSETBSIZ); } } #if defined(MTIOCSETEOTMODEL) if (dev->is_fifo()) { return; /* do not do tape stuff */ } uint32_t neof; if (dev->has_cap(CAP_TWOEOF)) { neof = 2; } else { neof = 1; } if (dev->d_ioctl(dev->fd(), MTIOCSETEOTMODEL, (caddr_t)&neof) < 0) { berrno be; dev->dev_errno = errno; /* save errno */ Mmsg2(dev->errmsg, _("Unable to set eotmodel on device %s: ERR=%s\n"), dev->print_name(), be.bstrerror(dev->dev_errno)); Jmsg(dcr->jcr, M_FATAL, 0, dev->errmsg); } #endif return; #endif #ifdef HAVE_SUN_OS struct mtop mt_com; if (dev->min_block_size == dev->max_block_size && dev->min_block_size == 0) { /* variable block mode */ mt_com.mt_op = MTSRSZ; mt_com.mt_count = 0; if (dev->d_ioctl(dev->fd(), MTIOCTOP, (char *)&mt_com) < 0) { dev->clrerror(MTSRSZ); } } return; #endif } bool dev_get_os_pos(DEVICE *dev, struct mtget *mt_stat) { Dmsg0(100, "dev_get_os_pos\n"); return dev->has_cap(CAP_MTIOCGET) && dev->d_ioctl(dev->fd(), MTIOCGET, (char *)mt_stat) == 0 && mt_stat->mt_fileno >= 0; } /* * Return the status of the device. This was meant * to be a generic routine. Unfortunately, it doesn't * seem possible (at least I do not know how to do it * currently), which means that for the moment, this * routine has very little value. * * Returns: status */ uint32_t status_dev(DEVICE *dev) { struct mtget mt_stat; uint32_t stat = 0; if (dev->state & (ST_EOT | ST_WEOT)) { stat |= BMT_EOD; Pmsg0(-20, " EOD"); } if (dev->state & ST_EOF) { stat |= BMT_EOF; Pmsg0(-20, " EOF"); } if (dev->is_tape()) { stat |= BMT_TAPE; Pmsg0(-20,_(" Bacula status:")); Pmsg2(-20,_(" file=%d block=%d\n"), dev->file, dev->block_num); if (dev->d_ioctl(dev->fd(), MTIOCGET, (char *)&mt_stat) < 0) { berrno be; dev->dev_errno = errno; Mmsg2(dev->errmsg, _("ioctl MTIOCGET error on %s. 
ERR=%s.\n"), dev->print_name(), be.bstrerror()); return 0; } Pmsg0(-20, _(" Device status:")); #if defined(HAVE_LINUX_OS) if (GMT_EOF(mt_stat.mt_gstat)) { stat |= BMT_EOF; Pmsg0(-20, " EOF"); } if (GMT_BOT(mt_stat.mt_gstat)) { stat |= BMT_BOT; Pmsg0(-20, " BOT"); } if (GMT_EOT(mt_stat.mt_gstat)) { stat |= BMT_EOT; Pmsg0(-20, " EOT"); } if (GMT_SM(mt_stat.mt_gstat)) { stat |= BMT_SM; Pmsg0(-20, " SM"); } if (GMT_EOD(mt_stat.mt_gstat)) { stat |= BMT_EOD; Pmsg0(-20, " EOD"); } if (GMT_WR_PROT(mt_stat.mt_gstat)) { stat |= BMT_WR_PROT; Pmsg0(-20, " WR_PROT"); } if (GMT_ONLINE(mt_stat.mt_gstat)) { stat |= BMT_ONLINE; Pmsg0(-20, " ONLINE"); } if (GMT_DR_OPEN(mt_stat.mt_gstat)) { stat |= BMT_DR_OPEN; Pmsg0(-20, " DR_OPEN"); } if (GMT_IM_REP_EN(mt_stat.mt_gstat)) { stat |= BMT_IM_REP_EN; Pmsg0(-20, " IM_REP_EN"); } #elif defined(HAVE_WIN32) if (GMT_EOF(mt_stat.mt_gstat)) { stat |= BMT_EOF; Pmsg0(-20, " EOF"); } if (GMT_BOT(mt_stat.mt_gstat)) { stat |= BMT_BOT; Pmsg0(-20, " BOT"); } if (GMT_EOT(mt_stat.mt_gstat)) { stat |= BMT_EOT; Pmsg0(-20, " EOT"); } if (GMT_EOD(mt_stat.mt_gstat)) { stat |= BMT_EOD; Pmsg0(-20, " EOD"); } if (GMT_WR_PROT(mt_stat.mt_gstat)) { stat |= BMT_WR_PROT; Pmsg0(-20, " WR_PROT"); } if (GMT_ONLINE(mt_stat.mt_gstat)) { stat |= BMT_ONLINE; Pmsg0(-20, " ONLINE"); } if (GMT_DR_OPEN(mt_stat.mt_gstat)) { stat |= BMT_DR_OPEN; Pmsg0(-20, " DR_OPEN"); } if (GMT_IM_REP_EN(mt_stat.mt_gstat)) { stat |= BMT_IM_REP_EN; Pmsg0(-20, " IM_REP_EN"); } #endif /* !SunOS && !OSF */ if (dev->has_cap(CAP_MTIOCGET)) { Pmsg2(-20, _(" file=%d block=%d\n"), mt_stat.mt_fileno, mt_stat.mt_blkno); } else { Pmsg2(-20, _(" file=%d block=%d\n"), -1, -1); } } else { stat |= BMT_ONLINE | BMT_BOT; } return stat; } /* * If implemented in system, clear the tape * error status. */ void DEVICE::clrerror(int func) { const char *msg = NULL; char buf[100]; dev_errno = errno; /* save errno */ if (errno == EIO) { VolCatInfo.VolCatErrors++; } if (!is_tape()) { return; } if (errno == ENOTTY || errno == ENOSYS) { /* Function not implemented */ switch (func) { case -1: break; /* ignore message printed later */ case MTWEOF: msg = "WTWEOF"; clear_cap(CAP_EOF); /* turn off feature */ break; #ifdef MTEOM case MTEOM: msg = "WTEOM"; clear_cap(CAP_EOM); /* turn off feature */ break; #endif case MTFSF: msg = "MTFSF"; clear_cap(CAP_FSF); /* turn off feature */ break; case MTBSF: msg = "MTBSF"; clear_cap(CAP_BSF); /* turn off feature */ break; case MTFSR: msg = "MTFSR"; clear_cap(CAP_FSR); /* turn off feature */ break; case MTBSR: msg = "MTBSR"; clear_cap(CAP_BSR); /* turn off feature */ break; case MTREW: msg = "MTREW"; break; #ifdef MTSETBLK case MTSETBLK: msg = "MTSETBLK"; break; #endif #ifdef MTSETDRVBUFFER case MTSETDRVBUFFER: msg = "MTSETDRVBUFFER"; break; #endif #ifdef MTRESET case MTRESET: msg = "MTRESET"; break; #endif #ifdef MTSETBSIZ case MTSETBSIZ: msg = "MTSETBSIZ"; break; #endif #ifdef MTSRSZ case MTSRSZ: msg = "MTSRSZ"; break; #endif #ifdef MTLOAD case MTLOAD: msg = "MTLOAD"; break; #endif #ifdef MTUNLOCK case MTUNLOCK: msg = "MTUNLOCK"; break; #endif case MTOFFL: msg = "MTOFFL"; break; default: bsnprintf(buf, sizeof(buf), _("unknown func code %d"), func); msg = buf; break; } if (msg != NULL) { dev_errno = ENOSYS; Mmsg1(errmsg, _("I/O function \"%s\" not supported on this device.\n"), msg); Emsg0(M_ERROR, 0, errmsg); } } /* * Now we try different methods of clearing the error * status on the drive so that it is not locked for * further operations. 
*/ /* On some systems such as NetBSD, this clears all errors */ get_os_tape_file(); /* Found on Solaris */ #ifdef MTIOCLRERR { d_ioctl(m_fd, MTIOCLRERR); Dmsg0(200, "Did MTIOCLRERR\n"); } #endif /* Typically on FreeBSD */ #ifdef MTIOCERRSTAT { berrno be; /* Read and clear SCSI error status */ union mterrstat mt_errstat; Dmsg2(200, "Doing MTIOCERRSTAT errno=%d ERR=%s\n", dev_errno, be.bstrerror(dev_errno)); d_ioctl(m_fd, MTIOCERRSTAT, (char *)&mt_errstat); } #endif /* Clear Subsystem Exception TRU64 */ #ifdef MTCSE { struct mtop mt_com; mt_com.mt_op = MTCSE; mt_com.mt_count = 1; /* Clear any error condition on the tape */ d_ioctl(m_fd, MTIOCTOP, (char *)&mt_com); Dmsg0(200, "Did MTCSE\n"); } #endif } bacula-15.0.3/src/stored/stored_conf.c0000644000175000017500000014202414771010173017425 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Configuration file parser for Bacula Storage daemon * * Kern Sibbald, March MM */ #include "bacula.h" #include "stored.h" #include "lib/status-pkt.h" /* First and last resource ids */ int32_t r_first = R_FIRST; int32_t r_last = R_LAST; RES_HEAD **res_head; /* We build the current resource here statically, * then move it to dynamic memory */ #if defined(_MSC_VER) extern "C" { // work around visual compiler mangling variables URES res_all; } #else URES res_all; #endif int32_t res_all_size = sizeof(res_all); #ifdef SD_DEDUP_SUPPORT # include "bee_libsd_dedup.h" #else #define dedup_check_storage_resource(a) true #endif static int const dbglvl = 200; /* Definition of records permitted within each * resource with the routine to process the record * information. */ /* * Globals for the Storage daemon. 
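 * Each RES_ITEM row maps one bacula-sd.conf keyword to its parse handler, the
 * field it fills in the resource, and its flags/default.  For reference, a
 * minimal Storage resource exercising a few of these keywords might look like
 * the following (illustrative values only):
 *
 *    Storage {
 *      Name = example-sd                          # ITEM_REQUIRED
 *      SdPort = 9103                              # ITEM_DEFAULT (9103)
 *      WorkingDirectory = "/opt/bacula/working"   # ITEM_REQUIRED
 *      PidDirectory = "/opt/bacula/working"       # ITEM_REQUIRED
 *      MaximumConcurrentJobs = 20                 # ITEM_DEFAULT (20)
 *    }
 *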
* name handler value code flags default_value */ static RES_ITEM store_items[] = { {"Name", store_name, ITEM(res_store.hdr.name), 0, ITEM_REQUIRED, 0}, {"Description", store_str, ITEM(res_store.hdr.desc), 0, 0, 0}, {"SdAddress", store_addresses_address, ITEM(res_store.sdaddrs), 0, ITEM_DEFAULT, 9103}, {"SdAddresses", store_addresses, ITEM(res_store.sdaddrs), 0, ITEM_DEFAULT, 9103}, {"Messages", store_res, ITEM(res_store.messages), R_MSGS, 0, 0}, {"SdPort", store_addresses_port, ITEM(res_store.sdaddrs), 0, ITEM_DEFAULT, 9103}, {"WorkingDirectory", store_dir, ITEM(res_store.working_directory), 0, ITEM_REQUIRED, 0}, {"PidDirectory", store_dir, ITEM(res_store.pid_directory), 0, ITEM_REQUIRED, 0}, {"SubsysDirectory", store_dir, ITEM(res_store.subsys_directory), 0, 0, 0}, {"PluginDirectory", store_dir, ITEM(res_store.plugin_directory), 0, 0, 0}, {"ScriptsDirectory", store_dir, ITEM(res_store.scripts_directory), 0, 0, 0}, #if BEEF {"SsdDirectory", store_dir, ITEM(res_store.ssd_directory), 0, 0, 0}, #endif {"MaximumConcurrentJobs", store_pint32, ITEM(res_store.max_concurrent_jobs), 0, ITEM_DEFAULT, 20}, {"ClientConnectTimeout", store_time, ITEM(res_store.ClientConnectTimeout), 0, ITEM_DEFAULT, 60 * 30}, {"HeartbeatInterval", store_time, ITEM(res_store.heartbeat_interval), 0, ITEM_DEFAULT, 5 * 60}, #if BEEF {"FipsRequire", store_bool, ITEM(res_store.require_fips), 0, 0, 0}, #endif {"TlsAuthenticate", store_bool, ITEM(res_store.tls_authenticate), 0, 0, 0}, {"TlsEnable", store_bool, ITEM(res_store.tls_enable), 0, 0, 0}, {"TlsPskEnable", store_bool, ITEM(res_store.tls_psk_enable), 0, ITEM_DEFAULT, tls_psk_default}, {"TlsRequire", store_bool, ITEM(res_store.tls_require), 0, 0, 0}, {"TlsVerifyPeer", store_bool, ITEM(res_store.tls_verify_peer), 1, ITEM_DEFAULT, 1}, {"TlsCaCertificateFile", store_dir, ITEM(res_store.tls_ca_certfile), 0, 0, 0}, {"TlsCaCertificateDir", store_dir, ITEM(res_store.tls_ca_certdir), 0, 0, 0}, {"TlsCertificate", store_dir, ITEM(res_store.tls_certfile), 0, 0, 0}, {"TlsKey", store_dir, ITEM(res_store.tls_keyfile), 0, 0, 0}, {"TlsDhFile", store_dir, ITEM(res_store.tls_dhfile), 0, 0, 0}, {"TlsAllowedCn", store_alist_str, ITEM(res_store.tls_allowed_cns), 0, 0, 0}, {"ClientConnectWait", store_time, ITEM(res_store.client_wait), 0, ITEM_DEFAULT, 30 * 60}, {"VerId", store_str, ITEM(res_store.verid), 0, 0, 0}, {"CommCompression", store_bool, ITEM(res_store.comm_compression), 0, ITEM_DEFAULT, true}, #ifdef SD_DEDUP_SUPPORT {"DedupDirectory", store_dir, ITEM(res_store.dedup_dir), 0, 0, 0}, {"DedupIndexDirectory", store_dir, ITEM(res_store.dedup_index_dir), 0, 0, 0}, {"DedupCheckHash", store_bool, ITEM(res_store.dedup_check_hash), 0, 0, 0}, {"DedupScrubMaximumBandwidth", store_speed, ITEM(res_store.dedup_scrub_max_bandwidth), 0, ITEM_DEFAULT, 15*1024*1024}, {"MaximumContainerSize", store_size64, ITEM(res_store.max_container_size), 0, 0, 0}, #endif {"EncryptionCommand", store_strname,ITEM(res_store.encryption_command), 0, 0, 0}, {NULL, NULL, {0}, 0, 0, 0} }; /* Directors that can speak to the Storage daemon */ static RES_ITEM dir_items[] = { {"Name", store_name, ITEM(res_dir.hdr.name), 0, ITEM_REQUIRED, 0}, {"Description", store_str, ITEM(res_dir.hdr.desc), 0, 0, 0}, {"Password", store_password, ITEM(res_dir.password), 0, ITEM_REQUIRED, 0}, {"Monitor", store_bool, ITEM(res_dir.monitor), 0, 0, 0}, {"TlsAuthenticate", store_bool, ITEM(res_dir.tls_authenticate), 0, 0, 0}, {"TlsEnable", store_bool, ITEM(res_dir.tls_enable), 0, 0, 0}, {"TlsPskEnable", store_bool, ITEM(res_dir.tls_psk_enable), 0, 
ITEM_DEFAULT, tls_psk_default}, {"TlsRequire", store_bool, ITEM(res_dir.tls_require), 0, 0, 0}, {"TlsVerifyPeer", store_bool, ITEM(res_dir.tls_verify_peer), 1, ITEM_DEFAULT, 1}, {"TlsCaCertificateFile", store_dir, ITEM(res_dir.tls_ca_certfile), 0, 0, 0}, {"TlsCaCertificateDir", store_dir, ITEM(res_dir.tls_ca_certdir), 0, 0, 0}, {"TlsCertificate", store_dir, ITEM(res_dir.tls_certfile), 0, 0, 0}, {"TlsKey", store_dir, ITEM(res_dir.tls_keyfile), 0, 0, 0}, {"TlsDhFile", store_dir, ITEM(res_dir.tls_dhfile), 0, 0, 0}, {"TlsAllowedCn", store_alist_str, ITEM(res_dir.tls_allowed_cns), 0, 0, 0}, {NULL, NULL, {0}, 0, 0, 0} }; /* Device definition */ static RES_ITEM dev_items[] = { {"Name", store_name, ITEM(res_dev.hdr.name), 0, ITEM_REQUIRED, 0}, {"Description", store_str, ITEM(res_dir.hdr.desc), 0, 0, 0}, {"MediaType", store_strname,ITEM(res_dev.media_type), 0, ITEM_REQUIRED, 0}, {"DeviceType", store_devtype,ITEM(res_dev.dev_type), 0, 0, 0}, {"ArchiveDevice", store_strname,ITEM(res_dev.device_name), 0, ITEM_REQUIRED, 0}, {"AlignedDevice", store_strname,ITEM(res_dev.adevice_name), 0, 0, 0}, {"HardwareEndOfFile", store_bit, ITEM(res_dev.cap_bits), CAP_EOF, ITEM_DEFAULT, 1}, {"HardwareEndOfMedium", store_bit, ITEM(res_dev.cap_bits), CAP_EOM, ITEM_DEFAULT, 1}, {"BackwardSpaceRecord", store_bit, ITEM(res_dev.cap_bits), CAP_BSR, ITEM_DEFAULT, 1}, {"BackwardSpaceFile", store_bit, ITEM(res_dev.cap_bits), CAP_BSF, ITEM_DEFAULT, 1}, {"BsfAtEom", store_bit, ITEM(res_dev.cap_bits), CAP_BSFATEOM, ITEM_DEFAULT, 0}, {"TwoEof", store_bit, ITEM(res_dev.cap_bits), CAP_TWOEOF, ITEM_DEFAULT, 0}, {"ForwardSpaceRecord", store_bit, ITEM(res_dev.cap_bits), CAP_FSR, ITEM_DEFAULT, 1}, {"ForwardSpaceFile", store_bit, ITEM(res_dev.cap_bits), CAP_FSF, ITEM_DEFAULT, 1}, {"FastForwardSpaceFile", store_bit, ITEM(res_dev.cap_bits), CAP_FASTFSF, ITEM_DEFAULT, 1}, {"RemovableMedia", store_bit, ITEM(res_dev.cap_bits), CAP_REM, ITEM_DEFAULT, 1}, {"RandomAccess", store_bit, ITEM(res_dev.cap_bits), CAP_RACCESS, 0, 0}, {"AutomaticMount", store_bit, ITEM(res_dev.cap_bits), CAP_AUTOMOUNT, ITEM_DEFAULT, 0}, {"LabelMedia", store_bit, ITEM(res_dev.cap_bits), CAP_LABEL, ITEM_DEFAULT, 0}, {"AlwaysOpen", store_bit, ITEM(res_dev.cap_bits), CAP_ALWAYSOPEN, ITEM_DEFAULT, 1}, {"Autochanger", store_bit, ITEM(res_dev.cap_bits), CAP_AUTOCHANGER, ITEM_DEFAULT, 0}, {"CloseOnPoll", store_bit, ITEM(res_dev.cap_bits), CAP_CLOSEONPOLL, ITEM_DEFAULT, 0}, {"BlockPositioning", store_bit, ITEM(res_dev.cap_bits), CAP_POSITIONBLOCKS, ITEM_DEFAULT, 1}, {"UseMtiocGet", store_bit, ITEM(res_dev.cap_bits), CAP_MTIOCGET, ITEM_DEFAULT, 1}, {"CheckLabels", store_bit, ITEM(res_dev.cap_bits), CAP_CHECKLABELS, ITEM_DEFAULT, 0}, {"RequiresMount", store_bit, ITEM(res_dev.cap_bits), CAP_REQMOUNT, ITEM_DEFAULT, 0}, {"OfflineOnUnmount", store_bit, ITEM(res_dev.cap_bits), CAP_OFFLINEUNMOUNT, ITEM_DEFAULT, 0}, {"BlockChecksum", store_bit, ITEM(res_dev.cap_bits), CAP_BLOCKCHECKSUM, ITEM_DEFAULT, 1}, {"UseLintape", store_bit, ITEM(res_dev.cap_bits), CAP_LINTAPE, ITEM_DEFAULT, 0}, {"Enabled", store_bool, ITEM(res_dev.enabled), 0, ITEM_DEFAULT, 1}, {"AutoSelect", store_bool, ITEM(res_dev.autoselect), 0, ITEM_DEFAULT, 1}, {"ReadOnly", store_bool, ITEM(res_dev.read_only), 0, ITEM_DEFAULT, 0}, {"SetVolumeAppendOnly", store_bool, ITEM(res_dev.set_vol_append_only), 0, ITEM_DEFAULT, 0}, {"SetVolumeImmutable", store_bool, ITEM(res_dev.set_vol_immutable), 0, ITEM_DEFAULT, 0}, {"SetVolumeReadOnly", store_bool, ITEM(res_dev.set_vol_read_only), 0, ITEM_DEFAULT, 0}, {"MinimumVolumeProtectionTime", 
store_time, ITEM(res_dev.min_volume_protection_time), 0, ITEM_DEFAULT, 30*24*60*60}, {"ChangerDevice", store_strname,ITEM(res_dev.changer_name), 0, 0, 0}, {"ControlDevice", store_strname,ITEM(res_dev.control_name), 0, 0, 0}, {"ChangerCommand", store_strname,ITEM(res_dev.changer_command), 0, 0, 0}, {"AlertCommand", store_strname,ITEM(res_dev.alert_command), 0, 0, 0}, {"LockCommand", store_strname,ITEM(res_dev.lock_command), 0, 0, 0}, {"WormCommand", store_strname,ITEM(res_dev.worm_command), 0, 0, 0}, {"MaximumChangerWait", store_time, ITEM(res_dev.max_changer_wait), 0, ITEM_DEFAULT, 5 * 60}, {"MaximumOpenWait", store_time, ITEM(res_dev.max_open_wait), 0, ITEM_DEFAULT, 5 * 60}, {"MaximumNetworkBufferSize", store_pint32, ITEM(res_dev.max_network_buffer_size), 0, 0, 0}, {"VolumePollInterval", store_time, ITEM(res_dev.vol_poll_interval), 0, ITEM_DEFAULT, 5 * 60}, {"MaximumRewindWait", store_time, ITEM(res_dev.max_rewind_wait), 0, ITEM_DEFAULT, 5 * 60}, {"MinimumBlockSize", store_size32, ITEM(res_dev.min_block_size), 0, 0, 0}, {"MaximumBlockSize", store_maxblocksize, ITEM(res_dev.max_block_size), 0, 0, 0}, {"PaddingSize", store_size32, ITEM(res_dev.padding_size), 0, ITEM_DEFAULT, 4096}, {"FileAlignment", store_size32, ITEM(res_dev.file_alignment), 0, ITEM_DEFAULT, 4096}, {"MinimumAlignedSize", store_size32, ITEM(res_dev.min_aligned_size), 0, ITEM_DEFAULT, 4096}, {"MaximumVolumeSize", store_size64, ITEM(res_dev.max_volume_size), 0, 0, 0}, {"MaximumFileSize", store_size64, ITEM(res_dev.max_file_size), 0, ITEM_DEFAULT, 1000000000}, {"MaximumFileIndex", store_size64, ITEM(res_dev.max_file_index), 0, ITEM_DEFAULT, 100000000}, /* ***BEEF*** */ {"VolumeCapacity", store_size64, ITEM(res_dev.volume_capacity), 0, 0, 0}, {"MinimumFeeSpace", store_size64, ITEM(res_dev.min_free_space), 0, ITEM_DEFAULT, 5000000}, {"MaximumConcurrentJobs", store_pint32, ITEM(res_dev.max_concurrent_jobs), 0, 0, 0}, {"SpoolDirectory", store_dir, ITEM(res_dev.spool_directory), 0, 0, 0}, {"MaximumSpoolSize", store_size64, ITEM(res_dev.max_spool_size), 0, 0, 0}, {"MaximumJobSpoolSize", store_size64, ITEM(res_dev.max_job_spool_size), 0, 0, 0}, {"DriveIndex", store_pint32, ITEM(res_dev.drive_index), 0, 0, 0}, {"MaximumPartSize", store_size64, ITEM(res_dev.max_part_size), 0, ITEM_DEFAULT, 0}, {"MaximumVolumeParts", store_pint32, ITEM(res_dev.max_vol_parts_num), 0, ITEM_DEFAULT, 0}, {"MountPoint", store_strname,ITEM(res_dev.mount_point), 0, 0, 0}, {"MountCommand", store_strname,ITEM(res_dev.mount_command), 0, 0, 0}, {"UnmountCommand", store_strname,ITEM(res_dev.unmount_command), 0, 0, 0}, {"WritePartCommand", store_strname,ITEM(res_dev.write_part_command), 0, 0, 0}, {"FreeSpaceCommand", store_strname,ITEM(res_dev.free_space_command), 0, 0, 0}, {"LabelType", store_label, ITEM(res_dev.label_type), 0, 0, 0}, {"Cloud", store_res, ITEM(res_dev.cloud), R_CLOUD, 0, 0}, #ifdef SD_DEDUP_SUPPORT {"Dedupengine", store_res, ITEM(res_dev.dedup), R_DEDUP, 0, 0}, #endif {"SyncOnClose", store_bit, ITEM(res_dev.cap_bits), CAP_SYNCONCLOSE, ITEM_DEFAULT, 1}, {"VolumeEncryption", store_enctype,ITEM(res_dev.volume_encryption), 0, ITEM_DEFAULT, 0}, {NULL, NULL, {0}, 0, 0, 0} }; /* Autochanger definition */ static RES_ITEM changer_items[] = { {"Name", store_name, ITEM(res_changer.hdr.name), 0, ITEM_REQUIRED, 0}, {"Description", store_str, ITEM(res_changer.hdr.desc), 0, 0, 0}, {"Device", store_alist_res, ITEM(res_changer.device), R_DEVICE, ITEM_REQUIRED, 0}, {"ChangerDevice", store_strname, ITEM(res_changer.changer_name), 0, ITEM_REQUIRED, 0}, 
{"ChangerCommand", store_strname, ITEM(res_changer.changer_command), 0, ITEM_REQUIRED, 0}, {"LockCommand", store_strname,ITEM(res_changer.lock_command), 0, 0, 0}, {NULL, NULL, {0}, 0, 0, 0} }; /* Cloud driver definition */ static RES_ITEM cloud_items[] = { {"Name", store_name, ITEM(res_cloud.hdr.name), 0, ITEM_REQUIRED, 0}, {"Description", store_str, ITEM(res_cloud.hdr.desc), 0, 0, 0}, {"Driver", store_cloud_driver, ITEM(res_cloud.driver_type), 0, ITEM_REQUIRED, 0}, {"HostName", store_strname,ITEM(res_cloud.host_name), 0, 0, ITEM_REQUIRED}, // Only S3/File {"BucketName", store_strname,ITEM(res_cloud.bucket_name), 0, ITEM_REQUIRED, 0}, {"Region", store_strname,ITEM(res_cloud.region), 0, 0, 0}, {"AccessKey", store_strname,ITEM(res_cloud.access_key), 0, 0, 0}, {"SecretKey", store_strname,ITEM(res_cloud.secret_key), 0, 0, 0}, {"Protocol", store_protocol,ITEM(res_cloud.protocol), 0, ITEM_DEFAULT, 0}, /* HTTPS */ {"BlobEndpoint", store_strname,ITEM(res_cloud.blob_endpoint), 0, 0, 0}, {"FileEndpoint", store_strname,ITEM(res_cloud.file_endpoint), 0, 0, 0}, {"QueueEndpoint", store_strname,ITEM(res_cloud.queue_endpoint), 0, 0, 0}, {"TableEndpoint", store_strname,ITEM(res_cloud.table_endpoint), 0, 0, 0}, {"EndpointSuffix", store_strname,ITEM(res_cloud.endpoint_suffix), 0, 0, 0}, {"UriStyle", store_uri_style, ITEM(res_cloud.uri_style), 0, ITEM_DEFAULT, 0}, /* VirtualHost */ {"TruncateCache", store_truncate, ITEM(res_cloud.trunc_opt), 0, ITEM_DEFAULT, TRUNC_NO}, {"Upload", store_upload, ITEM(res_cloud.upload_opt), 0, ITEM_DEFAULT, UPLOAD_NO}, {"MaximumConcurrentUploads", store_pint32, ITEM(res_cloud.max_concurrent_uploads), 0, ITEM_DEFAULT, 3}, {"MaximumConcurrentDownloads", store_pint32, ITEM(res_cloud.max_concurrent_downloads), 0, ITEM_DEFAULT, 3}, {"MaximumUploadBandwidth", store_speed, ITEM(res_cloud.upload_limit), 0, 0, 0}, {"MaximumDownloadBandwidth", store_speed, ITEM(res_cloud.download_limit), 0, 0, 0}, {"DriverCommand", store_strname, ITEM(res_cloud.driver_command), 0, 0, 0}, {"TransferPriority", store_transfer_priority, ITEM(res_cloud.transfer_priority), 0, ITEM_DEFAULT, 0}, {"TransferRetention", store_time, ITEM(res_cloud.transfer_retention), 0, ITEM_DEFAULT, 5 * 3600 * 24}, {"StorageClass", store_objects_default_tier, ITEM(res_cloud.objects_default_tier), 0, ITEM_DEFAULT, 0}, {NULL, NULL, {0}, 0, 0, 0} }; /* Message resource */ extern RES_ITEM msgs_items[]; /* Statistics resource */ extern RES_ITEM collector_items[]; /* This is the master resource definition */ /* ATTN the order of the items must match the R_FIRST->R_LAST list in stored_conf.h !!! 
*/ RES_TABLE resources[] = { {"Director", dir_items, R_DIRECTOR}, {"Storage", store_items, R_STORAGE}, {"Device", dev_items, R_DEVICE}, {"Messages", msgs_items, R_MSGS}, {"Autochanger", changer_items, R_AUTOCHANGER}, {"Cloud", cloud_items, R_CLOUD}, {"Statistics", collector_items, R_COLLECTOR}, #ifdef SD_DEDUP_SUPPORT {"Dedupengine", dedup_items, R_DEDUP}, #endif {NULL, NULL, 0} }; /* * Device types * * device type device code = token */ s_kw dev_types[] = { {"File", B_FILE_DEV}, {"Tape", B_TAPE_DEV}, {"Fifo", B_FIFO_DEV}, {"VTape", B_VTAPE_DEV}, {"Vtl", B_VTL_DEV}, {"Aligned", B_ALIGNED_DEV}, #ifdef SD_DEDUP_SUPPORT {"Dedup", B_DEDUP_DEV}, #endif {"Null", B_NULL_DEV}, {"Cloud", B_CLOUD_DEV}, {NULL, 0} }; /* * Device Encryption types * * encryption type encryption code = token */ s_kw enc_types[] = { {"No", ET_NO}, {"Yes", ET_YES}, {"Strong", ET_STRONG}, {NULL, 0} }; /* * Store Encryption Type (None, Enable, Strong) * */ void store_enctype(LEX *lc, RES_ITEM *item, int index, int pass) { bool found = false; lex_get_token(lc, T_NAME); /* Store the label pass 2 so that type is defined */ for (int i=0; enc_types[i].name; i++) { if (strcasecmp(lc->str, enc_types[i].name) == 0) { *(uint32_t *)(item->value) = enc_types[i].token; found = true; break; } } if (!found) { scan_err1(lc, _("Expected a Device Encryption Type keyword, got: %s"), lc->str); } scan_to_eol(lc); set_bit(index, res_all.hdr.item_present); } /* * Store Device Type (File, FIFO, Tape, Cloud, ...) * */ void store_devtype(LEX *lc, RES_ITEM *item, int index, int pass) { bool found = false; lex_get_token(lc, T_NAME); /* Store the label pass 2 so that type is defined */ for (int i=0; dev_types[i].name; i++) { if (strcasecmp(lc->str, dev_types[i].name) == 0) { *(uint32_t *)(item->value) = dev_types[i].token; found = true; break; } } if (!found) { scan_err1(lc, _("Expected a Device Type keyword, got: %s"), lc->str); } scan_to_eol(lc); set_bit(index, res_all.hdr.item_present); } /* * Cloud drivers * * driver driver code */ s_kw cloud_drivers[] = { {"S3", C_S3_DRIVER}, {"File", C_FILE_DRIVER}, #if BEEF {"Azure", C_WAS_DRIVER}, {"Google", C_GOOGLE_DRIVER}, {"Oracle", C_ORACLE_DRIVER}, {"Generic", C_GEN_DRIVER}, {"Swift", C_SWIFT_DRIVER}, #endif {"Amazon", C_AWS_DRIVER}, {NULL, 0} }; /* * Store Device Type (File, FIFO, Tape, Cloud, ...) 
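 * (The heading above is carried over from store_devtype(); the handler
 *  that follows actually parses the Cloud resource "Driver" keyword
 *  against cloud_drivers[]: S3, File, Amazon, plus the drivers guarded
 *  by BEEF.)
 *
 * All the store_xxx() keyword handlers in this file share one pattern:
 * read a T_NAME token, look it up case-insensitively in an s_kw table,
 * store the matching token through item->value, then record the
 * directive as seen.  Condensed shape of that pattern (not a new
 * handler, just the common skeleton):
 *
 *    lex_get_token(lc, T_NAME);
 *    for (int i=0; table[i].name; i++) {
 *       if (strcasecmp(lc->str, table[i].name) == 0) {
 *          *(uint32_t *)(item->value) = table[i].token;
 *          found = true;
 *          break;
 *       }
 *    }
 *    if (!found) { scan_err1(lc, _("Expected ... got: %s"), lc->str); }
 *    scan_to_eol(lc);
 *    set_bit(index, res_all.hdr.item_present);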
* */ void store_cloud_driver(LEX *lc, RES_ITEM *item, int index, int pass) { bool found = false; lex_get_token(lc, T_NAME); /* Store the label pass 2 so that type is defined */ for (int i=0; cloud_drivers[i].name; i++) { if (strcasecmp(lc->str, cloud_drivers[i].name) == 0) { *(uint32_t *)(item->value) = cloud_drivers[i].token; found = true; break; } } if (!found) { scan_err1(lc, _("Expected a Cloud driver keyword, got: %s"), lc->str); } scan_to_eol(lc); set_bit(index, res_all.hdr.item_present); } /* * Cloud Truncate cache options * * Option option code = token */ s_kw trunc_opts[] = { {"No", TRUNC_NO}, {"AfterUpload", TRUNC_AFTER_UPLOAD}, {"AtEndOfJob", TRUNC_AT_ENDOFJOB}, {"ConfDefault", TRUNC_CONF_DEFAULT}, /* only use as a parameter */ {NULL, 0} }; /* * Store Cloud Truncate cache option (AfterUpload, AtEndOfJob, No) * */ void store_truncate(LEX *lc, RES_ITEM *item, int index, int pass) { bool found = false; lex_get_token(lc, T_NAME); /* Store the label pass 2 so that type is defined */ for (int i=0; trunc_opts[i].name; i++) { if (strcasecmp(lc->str, trunc_opts[i].name) == 0) { *(uint32_t *)(item->value) = trunc_opts[i].token; found = true; break; } } if (!found) { scan_err1(lc, _("Expected a Truncate Cache option keyword, got: %s"), lc->str); } scan_to_eol(lc); set_bit(index, res_all.hdr.item_present); } /* * convert string to truncate option * Return true if option matches one of trunc_opts (case insensitive) * */ bool find_truncate_option(const char* truncate, uint32_t& truncate_option) { for (int i=0; trunc_opts[i].name; i++) { if (strcasecmp(truncate, trunc_opts[i].name) == 0) { truncate_option = trunc_opts[i].token; return true; } } return false; } /* * Cloud Upload options * * Option option code = token */ s_kw upload_opts[] = { {"No", UPLOAD_NO}, {"Manual", UPLOAD_NO}, /*identical and preferable to No, No is kept for backward compatibility */ {"EachPart", UPLOAD_EACHPART}, {"AtEndOfJob", UPLOAD_AT_ENDOFJOB}, {NULL, 0} }; /* * Store Cloud Upload option (EachPart, AtEndOfJob, No) * */ void store_upload(LEX *lc, RES_ITEM *item, int index, int pass) { bool found = false; lex_get_token(lc, T_NAME); /* Store the label pass 2 so that type is defined */ for (int i=0; upload_opts[i].name; i++) { if (strcasecmp(lc->str, upload_opts[i].name) == 0) { *(uint32_t *)(item->value) = upload_opts[i].token; found = true; break; } } if (!found) { scan_err1(lc, _("Expected a Cloud Upload option keyword, got: %s"), lc->str); } scan_to_eol(lc); set_bit(index, res_all.hdr.item_present); } /* * Cloud connection protocol options * * Option option code = token */ s_kw proto_opts[] = { {"HTTPS", 0}, {"HTTP", 1}, {NULL, 0} }; /* * Store Cloud connect protocol option (HTTPS, HTTP) * */ void store_protocol(LEX *lc, RES_ITEM *item, int index, int pass) { bool found = false; lex_get_token(lc, T_NAME); /* Store the label pass 2 so that type is defined */ for (int i=0; proto_opts[i].name; i++) { if (strcasecmp(lc->str, proto_opts[i].name) == 0) { *(uint32_t *)(item->value) = proto_opts[i].token; found = true; break; } } if (!found) { scan_err1(lc, _("Expected a Cloud communications protocol option keyword, got: %s"), lc->str); } scan_to_eol(lc); set_bit(index, res_all.hdr.item_present); } /* * Cloud Uri Style options * * Option option code = token */ s_kw uri_opts[] = { {"VirtualHost", 0}, {"Path", 1}, {NULL, 0} }; /* * Store Cloud Uri Style option * */ void store_uri_style(LEX *lc, RES_ITEM *item, int index, int pass) { bool found = false; lex_get_token(lc, T_NAME); /* Store the label pass 2 so that type is 
defined */ for (int i=0; uri_opts[i].name; i++) { if (strcasecmp(lc->str, uri_opts[i].name) == 0) { *(uint32_t *)(item->value) = uri_opts[i].token; found = true; break; } } if (!found) { scan_err1(lc, _("Expected a Cloud Uri Style option keyword, got: %s"), lc->str); } scan_to_eol(lc); set_bit(index, res_all.hdr.item_present); } /* * Store Maximum Block Size, and check it is not greater than MAX_BLOCK_LENGTH * */ void store_maxblocksize(LEX *lc, RES_ITEM *item, int index, int pass) { store_size32(lc, item, index, pass); if (*(uint32_t *)(item->value) > MAX_BLOCK_LENGTH) { scan_err2(lc, _("Maximum Block Size configured value %u is greater than allowed maximum: %u"), *(uint32_t *)(item->value), MAX_BLOCK_LENGTH ); } } /* * Cloud Glacier restore priority level * * Option option code = token */ s_kw restore_prio_opts[] = { {"High", CLOUD_RESTORE_PRIO_HIGH}, {"Medium", CLOUD_RESTORE_PRIO_MEDIUM}, {"Low", CLOUD_RESTORE_PRIO_LOW}, {NULL, 0} }; /* * Cloud Object tier level * * Option option code = token */ s_kw objects_default_tiers[] = { {"S3Standard", S3_STANDARD}, {"S3StandardIA", S3_STANDARD_IA}, {"S3IntelligentTiering", S3_INTELLIGENT_TIERING}, {"S3OneZoneIA", S3_ONE_ZONE_IA}, {"S3GlacierInstantRetrieval", S3_GLACIER_INSTANT_RETRIEVAL}, {"S3GlacierFlexibleRetrieval", S3_GLACIER_FLEXIBLE_RETRIEVAL}, {"S3GlacierDeepArchive", S3_GLACIER_DEEP_ARCHIVE}, {"S3Rrs", S3_RRS}, {NULL, 0} }; /* * Store Cloud restoration priority */ void store_transfer_priority(LEX *lc, RES_ITEM *item, int index, int pass) { bool found = false; lex_get_token(lc, T_NAME); /* Store the label pass 2 so that type is defined */ for (int i=0; restore_prio_opts[i].name; i++) { if (strcasecmp(lc->str, restore_prio_opts[i].name) == 0) { *(uint32_t *)(item->value) = restore_prio_opts[i].token; found = true; break; } } if (!found) { scan_err1(lc, _("Expected a Cloud Restore Priority Style option keyword (High, Medium, Low), got: %s"), lc->str); } scan_to_eol(lc); set_bit(index, res_all.hdr.item_present); } /* * Store Cloud object default tier */ void store_objects_default_tier(LEX *lc, RES_ITEM *item, int index, int pass) { bool found = false; lex_get_token(lc, T_NAME); /* Store the label pass 2 so that type is defined */ for (int i=0; objects_default_tiers[i].name; i++) { if (strcasecmp(lc->str, objects_default_tiers[i].name) == 0) { *(uint32_t *)(item->value) = objects_default_tiers[i].token; found = true; break; } } if (!found) { scan_err1(lc, _("Expected a Cloud Objects default tier, got: %s"), lc->str); } scan_to_eol(lc); set_bit(index, res_all.hdr.item_present); } static void dump_store_resource(STORES *store, void sendit(const char *msg, int len, STATUS_PKT *sp), STATUS_PKT *sp) { OutputWriter ow(sp->api_opts); POOL_MEM tmp; char *p; ow.start_group("resource"); ow.get_output(OT_START_OBJ, OT_STRING, "Name", store->hdr.name, OT_STRING, "Description", store->hdr.desc, OT_STRING, "WorkingDirectory", store->working_directory, OT_STRING, "PidDirectory", store->pid_directory, OT_STRING, "SubsysDirectory", store->subsys_directory, OT_STRING, "PluginDirectory", store->plugin_directory, OT_STRING, "ScriptsDirectory", store->scripts_directory, #if BEEF OT_BOOL, "FIPSRequire", store->require_fips, OT_STRING, "SsdDirectory", store->ssd_directory, #endif OT_INT, "SDPort", get_first_port_host_order(store->sdaddrs), OT_STRING, "SDAddress", get_first_address(store->sdaddrs, tmp.c_str(), PM_MESSAGE), OT_INT64, "HeartbeatInterval", store->heartbeat_interval, OT_INT32, "MaximumConcurrentJobs", store->max_concurrent_jobs, OT_INT64, 
"ClientConnectTimeout", store->ClientConnectTimeout, OT_BOOL, "TLSEnable", store->tls_enable, OT_BOOL, "TLSPSKEnable", store->tls_psk_enable, OT_BOOL, "TLSRequire", store->tls_require, OT_BOOL, "TLSAuthenticate", store->tls_authenticate, OT_BOOL, "TLSVerifyPeer", store->tls_verify_peer, OT_INT64, "ClientConnectWait", store->client_wait, OT_STRING, "VerId", store->verid, OT_BOOL, "CommCommpression", store->comm_compression, #ifdef SD_DEDUP_SUPPORT OT_STRING, "DedupDirectory", store->dedup_dir, OT_STRING, "DedupIndexDirectory", store->dedup_index_dir, OT_BOOL, "DedupCheckHash", store->dedup_check_hash, OT_INT64, "DedupScrupMaximumBandwidth", store->dedup_scrub_max_bandwidth, OT_INT64, "MaximumContainerSize", store->max_container_size, #endif OT_END_OBJ, OT_END); p = ow.end_group("resource"); sendit(p, strlen(p), sp); } /* In order to link with libbaccfg * Empty method is here just be able to link with libbaccfg (see overload in @parse_conf.h). * FileDaemon should use overloaded one, see methodb below. * TODO this should be removed as soon as Director's dump_resource() is updated to work * with STATUS_PKT. * */ void dump_resource(int type, RES *ares, void sendit(void *sock, const char *fmt, ...), void *sock) { /**** NOT USED ****/ } /* Dump contents of resource */ void dump_resource(int type, RES *rres, void sendit(const char *msg, int len, STATUS_PKT *sp), STATUS_PKT *sp) { URES *res = (URES *)rres; char buf[1000]; int recurse = 1, len; POOL_MEM msg(PM_MESSAGE); if (res == NULL) { len = Mmsg(msg, _("Warning: no \"%s\" resource (%d) defined.\n"), res_to_str(type), type); sendit(msg.c_str(), len, sp); return; } Dmsg1(dbglvl, "dump_resource type=%d\n", type); if (type < 0) { /* no recursion */ type = - type; recurse = 0; } switch (type) { case R_DIRECTOR: len = Mmsg(msg, "Director: name=%s\n", res->res_dir.hdr.name); sendit(msg.c_str(), len, sp); break; case R_STORAGE: { STORES *store = (STORES *)rres; dump_store_resource(store, sendit, sp); break; } case R_DEVICE: len = Mmsg(msg, "Device: name=%s MediaType=%s Device=%s LabelType=%d\n", res->res_dev.hdr.name, res->res_dev.media_type, res->res_dev.device_name, res->res_dev.label_type); sendit(msg.c_str(), len, sp); len = Mmsg(msg, " rew_wait=%lld min_bs=%d max_bs=%d chgr_wait=%lld\n", res->res_dev.max_rewind_wait, res->res_dev.min_block_size, res->res_dev.max_block_size, res->res_dev.max_changer_wait); sendit(msg.c_str(), len, sp); len = Mmsg(msg, " max_jobs=%d max_files=%lld max_size=%lld\n", res->res_dev.max_volume_jobs, res->res_dev.max_volume_files, res->res_dev.max_volume_size); len = Mmsg(msg, " min_block_size=%lld max_block_size=%lld\n", res->res_dev.min_block_size, res->res_dev.max_block_size); sendit(msg.c_str(), len, sp); len = Mmsg(msg, " max_file_size=%lld capacity=%lld\n", res->res_dev.max_file_size, res->res_dev.volume_capacity); sendit(msg.c_str(), len, sp); len = Mmsg(msg, " spool_directory=%s\n", NPRT(res->res_dev.spool_directory)); sendit(msg.c_str(), len, sp); len = Mmsg(msg, " max_spool_size=%lld max_job_spool_size=%lld\n", res->res_dev.max_spool_size, res->res_dev.max_job_spool_size); sendit(msg.c_str(), len, sp); if (res->res_dev.worm_command) { len = Mmsg(msg, " worm command=%s\n", res->res_dev.worm_command); sendit(msg.c_str(), len, sp); } if (res->res_dev.changer_res) { len = Mmsg(msg, " changer=%p\n", res->res_dev.changer_res); sendit(msg.c_str(), len, sp); } bstrncpy(buf, " ", sizeof(buf)); if (res->res_dev.cap_bits & CAP_EOF) { bstrncat(buf, "CAP_EOF ", sizeof(buf)); } if (res->res_dev.cap_bits & CAP_BSR) { 
bstrncat(buf, "CAP_BSR ", sizeof(buf)); } if (res->res_dev.cap_bits & CAP_BSF) { bstrncat(buf, "CAP_BSF ", sizeof(buf)); } if (res->res_dev.cap_bits & CAP_FSR) { bstrncat(buf, "CAP_FSR ", sizeof(buf)); } if (res->res_dev.cap_bits & CAP_FSF) { bstrncat(buf, "CAP_FSF ", sizeof(buf)); } if (res->res_dev.cap_bits & CAP_EOM) { bstrncat(buf, "CAP_EOM ", sizeof(buf)); } if (res->res_dev.cap_bits & CAP_REM) { bstrncat(buf, "CAP_REM ", sizeof(buf)); } if (res->res_dev.cap_bits & CAP_RACCESS) { bstrncat(buf, "CAP_RACCESS ", sizeof(buf)); } if (res->res_dev.cap_bits & CAP_AUTOMOUNT) { bstrncat(buf, "CAP_AUTOMOUNT ", sizeof(buf)); } if (res->res_dev.cap_bits & CAP_LABEL) { bstrncat(buf, "CAP_LABEL ", sizeof(buf)); } if (res->res_dev.cap_bits & CAP_ANONVOLS) { bstrncat(buf, "CAP_ANONVOLS ", sizeof(buf)); } if (res->res_dev.cap_bits & CAP_ALWAYSOPEN) { bstrncat(buf, "CAP_ALWAYSOPEN ", sizeof(buf)); } if (res->res_dev.cap_bits & CAP_CHECKLABELS) { bstrncat(buf, "CAP_CHECKLABELS ", sizeof(buf)); } if (res->res_dev.cap_bits & CAP_REQMOUNT) { bstrncat(buf, "CAP_REQMOUNT ", sizeof(buf)); } if (res->res_dev.cap_bits & CAP_OFFLINEUNMOUNT) { bstrncat(buf, "CAP_OFFLINEUNMOUNT ", sizeof(buf)); } bstrncat(buf, "\n", sizeof(buf)); sendit(buf, strlen(buf), sp); /* Send caps string */ if (res->res_dev.cloud) { len = Mmsg(msg, " --->Cloud: name=%s\n", res->res_dev.cloud->hdr.name); sendit(msg.c_str(), len, sp); } break; case R_CLOUD: len = Mmsg(msg, "Cloud: name=%s Driver=%d\n" " HostName=%s\n" " BucketName=%s\n" " AccessKey=%s SecretKey=%s\n" " AuthRegion=%s\n" " Protocol=%d UriStyle=%d\n", res->res_cloud.hdr.name, res->res_cloud.driver_type, res->res_cloud.host_name, res->res_cloud.bucket_name, res->res_cloud.access_key, res->res_cloud.secret_key, res->res_cloud.region, res->res_cloud.protocol, res->res_cloud.uri_style); sendit(msg.c_str(), len, sp); break; case R_DEDUP: len = Mmsg(msg, "Dedup: name=%s Driver=%d\n" " DedupDirectory=%s\n", res->res_dedup.hdr.name, res->res_dedup.driver_type, res->res_dedup.dedup_dir); sendit(msg.c_str(), len, sp); break; case R_AUTOCHANGER: DEVRES *dev; len = Mmsg(msg, "Changer: name=%s Changer_devname=%s\n Changer_cmd=%s\n", res->res_changer.hdr.name, res->res_changer.changer_name, res->res_changer.changer_command); sendit(msg.c_str(), len, sp); foreach_alist(dev, res->res_changer.device) { len = Mmsg(msg, " --->Device: name=%s\n", dev->hdr.name); sendit(msg.c_str(), len, sp); } break; case R_MSGS: len = Mmsg(msg, "Messages: name=%s\n", res->res_msgs.hdr.name); sendit(msg.c_str(), len, sp); if (res->res_msgs.mail_cmd) { len = Mmsg(msg, " mailcmd=%s\n", res->res_msgs.mail_cmd); sendit(msg.c_str(), len, sp); } if (res->res_msgs.operator_cmd) { len = Mmsg(msg, " opcmd=%s\n", res->res_msgs.operator_cmd); sendit(msg.c_str(), len, sp); } break; case R_COLLECTOR: dump_collector_resource(res->res_collector, sendit, sp); break; default: len = Mmsg(msg, _("Warning: unknown resource type %d\n"), type); sendit(msg.c_str(), len, sp); break; } rres = GetNextRes(type, rres); if (recurse && rres) dump_resource(type, rres, sendit, sp); } /* * Free memory of resource. * NB, we don't need to worry about freeing any references * to other resources as they will be freed when that * resource chain is traversed. Mainly we worry about freeing * allocated strings (names). 
*/ void free_resource(RES *sres, int type) { URES *res = (URES *)sres; if (res == NULL) return; /* common stuff -- free the resource name */ if (res->res_dir.hdr.name) { free(res->res_dir.hdr.name); } if (res->res_dir.hdr.desc) { free(res->res_dir.hdr.desc); } switch (type) { case R_DIRECTOR: if (res->res_dir.password) { free(res->res_dir.password); } if (res->res_dir.address) { free(res->res_dir.address); } if (res->res_dir.tls_ctx) { free_tls_context(res->res_dir.tls_ctx); } if (res->res_dir.psk_ctx) { free_psk_context(res->res_dir.psk_ctx); } if (res->res_dir.tls_ca_certfile) { free(res->res_dir.tls_ca_certfile); } if (res->res_dir.tls_ca_certdir) { free(res->res_dir.tls_ca_certdir); } if (res->res_dir.tls_certfile) { free(res->res_dir.tls_certfile); } if (res->res_dir.tls_keyfile) { free(res->res_dir.tls_keyfile); } if (res->res_dir.tls_dhfile) { free(res->res_dir.tls_dhfile); } if (res->res_dir.tls_allowed_cns) { delete res->res_dir.tls_allowed_cns; } break; case R_AUTOCHANGER: if (res->res_changer.changer_name) { free(res->res_changer.changer_name); } if (res->res_changer.changer_command) { free(res->res_changer.changer_command); } if (res->res_changer.lock_command) { free(res->res_changer.lock_command); } if (res->res_changer.device) { delete res->res_changer.device; } rwl_destroy(&res->res_changer.changer_lock); break; case R_STORAGE: if (res->res_store.sdaddrs) { free_addresses(res->res_store.sdaddrs); } if (res->res_store.sddaddrs) { free_addresses(res->res_store.sddaddrs); } if (res->res_store.working_directory) { free(res->res_store.working_directory); } if (res->res_store.pid_directory) { free(res->res_store.pid_directory); } if (res->res_store.subsys_directory) { free(res->res_store.subsys_directory); } if (res->res_store.plugin_directory) { free(res->res_store.plugin_directory); } if (res->res_store.scripts_directory) { free(res->res_store.scripts_directory); } if (res->res_store.ssd_directory) { free(res->res_store.ssd_directory); } if (res->res_store.tls_ctx) { free_tls_context(res->res_store.tls_ctx); } if (res->res_store.psk_ctx) { free_psk_context(res->res_store.psk_ctx); } if (res->res_store.tls_ca_certfile) { free(res->res_store.tls_ca_certfile); } if (res->res_store.tls_ca_certdir) { free(res->res_store.tls_ca_certdir); } if (res->res_store.tls_certfile) { free(res->res_store.tls_certfile); } if (res->res_store.tls_keyfile) { free(res->res_store.tls_keyfile); } if (res->res_store.tls_dhfile) { free(res->res_store.tls_dhfile); } if (res->res_store.tls_allowed_cns) { delete res->res_store.tls_allowed_cns; } if (res->res_store.verid) { free(res->res_store.verid); } if (res->res_store.dedup_dir) { free(res->res_store.dedup_dir); } if (res->res_store.dedup_index_dir) { free(res->res_store.dedup_index_dir); } if (res->res_store.encryption_command) { free(res->res_store.encryption_command); } break; case R_CLOUD: if (res->res_cloud.host_name) { free(res->res_cloud.host_name); } if (res->res_cloud.bucket_name) { free(res->res_cloud.bucket_name); } if (res->res_cloud.access_key) { free(res->res_cloud.access_key); } if (res->res_cloud.secret_key) { free(res->res_cloud.secret_key); } if (res->res_cloud.region) { free(res->res_cloud.region); } if (res->res_cloud.blob_endpoint) { free(res->res_cloud.blob_endpoint); } if (res->res_cloud.file_endpoint) { free(res->res_cloud.file_endpoint); } if (res->res_cloud.queue_endpoint) { free(res->res_cloud.queue_endpoint); } if (res->res_cloud.table_endpoint) { free(res->res_cloud.table_endpoint); } if (res->res_cloud.endpoint_suffix) { 
free(res->res_cloud.endpoint_suffix); } if (res->res_cloud.driver_command) { free(res->res_cloud.driver_command); } break; case R_DEDUP: if (res->res_dedup.dedup_dir) { free(res->res_dedup.dedup_dir); } if (res->res_dedup.dedup_index_dir) { free(res->res_dedup.dedup_index_dir); } if (res->res_dedup.dedup_err_msg) { free(res->res_dedup.dedup_err_msg); } break; case R_DEVICE: if (res->res_dev.media_type) { free(res->res_dev.media_type); } if (res->res_dev.device_name) { free(res->res_dev.device_name); } if (res->res_dev.adevice_name) { free(res->res_dev.adevice_name); } if (res->res_dev.control_name) { free(res->res_dev.control_name); } if (res->res_dev.changer_name) { free(res->res_dev.changer_name); } if (res->res_dev.changer_command) { free(res->res_dev.changer_command); } if (res->res_dev.alert_command) { free(res->res_dev.alert_command); } if (res->res_dev.worm_command) { free(res->res_dev.worm_command); } if (res->res_dev.lock_command) { free(res->res_dev.lock_command); } if (res->res_dev.spool_directory) { free(res->res_dev.spool_directory); } if (res->res_dev.mount_point) { free(res->res_dev.mount_point); } if (res->res_dev.mount_command) { free(res->res_dev.mount_command); } if (res->res_dev.unmount_command) { free(res->res_dev.unmount_command); } if (res->res_dev.write_part_command) { free(res->res_dev.write_part_command); } if (res->res_dev.free_space_command) { free(res->res_dev.free_space_command); } break; case R_MSGS: if (res->res_msgs.mail_cmd) { free(res->res_msgs.mail_cmd); } if (res->res_msgs.operator_cmd) { free(res->res_msgs.operator_cmd); } free_msgs_res((MSGS *)res); /* free message resource */ res = NULL; break; case R_COLLECTOR: free_collector_resource(res->res_collector); break; default: Dmsg1(0, _("Unknown resource type %d\n"), type); break; } /* Common stuff again -- free the resource, recurse to next one */ if (res) { free(res); } } /* Get the size of a resource object */ int get_resource_size(int type) { int size = -1; switch (type) { case R_DIRECTOR: size = sizeof(DIRRES); break; case R_STORAGE: size = sizeof(STORES); break; case R_DEVICE: size = sizeof(DEVRES); break; case R_MSGS: size = sizeof(MSGS); break; case R_AUTOCHANGER: size = sizeof(AUTOCHANGER); break; case R_CLOUD: size = sizeof(CLOUD); break; case R_DEDUP: size = sizeof(DEDUPRES); break; case R_COLLECTOR: size = sizeof(COLLECTOR); break; default: break; } return size; } /* Save the new resource by chaining it into the head list for * the resource. If this is pass 2, we update any resource * or alist pointers. */ bool save_resource(CONFIG *config, int type, RES_ITEM *items, int pass) { URES *res; int rindex = type - r_first; int i, size; int error = 0; /* * Ensure that all required items are present */ for (i=0; items[i].name; i++) { if (items[i].flags & ITEM_REQUIRED) { if (!bit_is_set(i, res_all.res_dir.hdr.item_present)) { Mmsg(config->m_errmsg, _("\"%s\" directive is required in \"%s\" resource, but not found.\n"), items[i].name, resources[rindex].name); return false; } } /* If this triggers, take a look at lib/parse_conf.h */ if (i >= MAX_RES_ITEMS) { Mmsg(config->m_errmsg, _("Too many directives in \"%s\" resource\n"), resources[rindex].name); return false; } } /* During pass 2, we looked up pointers to all the resources * referrenced in the current resource, , now we * must copy their address from the static record to the allocated * record. 
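 *
 * Put differently: on pass 1 the lexer fills the static res_all area and
 * the code at the bottom of this function copies it into a newly
 * allocated record via config->insert_res(); on pass 2 that record
 * already exists, so we find it again by name with GetResWithName() and
 * copy only the resource/alist pointers (messages, tls_allowed_cns,
 * changer device lists, cloud, dedup, collector metrics) from res_all
 * into it.
 *
 *    pass 1:  directives -> res_all (static)  -> insert_res() copy
 *    pass 2:  GetResWithName(type, name)      -> fix up pointer members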
*/ if (pass == 2) { DEVRES *dev; int errstat; switch (type) { /* Resources not containing a resource */ case R_MSGS: case R_CLOUD: case R_DEDUP: break; /* Resources containing a resource or an alist */ case R_DIRECTOR: if ((res = (URES *)GetResWithName(R_DIRECTOR, res_all.res_dir.hdr.name)) == NULL) { Mmsg(config->m_errmsg, _("Cannot find Director resource %s\n"), res_all.res_dir.hdr.name); return false; } res->res_dir.tls_allowed_cns = res_all.res_dir.tls_allowed_cns; break; case R_STORAGE: if ((res = (URES *)GetResWithName(R_STORAGE, res_all.res_dir.hdr.name)) == NULL) { Mmsg(config->m_errmsg, _("Cannot find Storage resource %s\n"), res_all.res_dir.hdr.name); return false; } res->res_store.messages = res_all.res_store.messages; res->res_store.tls_allowed_cns = res_all.res_store.tls_allowed_cns; break; case R_AUTOCHANGER: if ((res = (URES *)GetResWithName(type, res_all.res_changer.hdr.name)) == NULL) { Mmsg(config->m_errmsg, _("Cannot find AutoChanger resource %s\n"), res_all.res_changer.hdr.name); return false; } /* we must explicitly copy the device alist pointer */ res->res_changer.device = res_all.res_changer.device; /* * Now update each device in this resource to point back * to the changer resource. */ foreach_alist(dev, res->res_changer.device) { dev->changer_res = (AUTOCHANGER *)&res->res_changer; } if ((errstat = rwl_init(&res->res_changer.changer_lock, PRIO_SD_ACH_ACCESS)) != 0) { berrno be; Mmsg(config->m_errmsg, _("Unable to init lock for Autochanger=%s: ERR=%s\n"), res_all.res_changer.hdr.name, be.bstrerror(errstat)); return false; } break; case R_DEVICE: if ((res = (URES *)GetResWithName(R_DEVICE, res_all.res_dev.hdr.name)) == NULL) { Mmsg(config->m_errmsg, _("Cannot find Device resource %s\n"), res_all.res_dir.hdr.name); return false; } res->res_dev.cloud = res_all.res_dev.cloud; #ifdef SD_DEDUP_SUPPORT res->res_dev.dedup = res_all.res_dev.dedup; /* If a dedupengine is required but not defined, look for the * "default_legacy_dedupengine" aka the DDE defined in storage */ if (res->res_dev.dev_type == B_DEDUP_DEV && res->res_dev.dedup == NULL) { /* The "Dedupengine" is not specified in this Device resource */ /* If there is only one Dedupengine defined use it */ int i = 0; DEDUPRES *dedup, *prev_dedup = NULL; foreach_res(dedup, R_DEDUP) { prev_dedup = dedup; i++; } if (i == 1) { res->res_dev.dedup = prev_dedup; Dmsg2(100, "Select dedupengine %s for device %s\n", prev_dedup->hdr.name, res_all.res_dir.hdr.name); } else if (i == 0){ Mmsg(config->m_errmsg, _("Cannot find any dedupengine for the device %s\n"), res_all.res_dir.hdr.name); return false; } else { /* i > 1 */ Mmsg(config->m_errmsg, _("Dedupengine unspecified for device %s\n"), res_all.res_dir.hdr.name); return false; } } #endif break; case R_COLLECTOR: if ((res = (URES *)GetResWithName(R_COLLECTOR, res_all.res_collector.hdr.name)) == NULL) { Mmsg(config->m_errmsg, _("Cannot find Statistics resource %s\n"), res_all.res_collector.hdr.name); return false; } res->res_collector.metrics = res_all.res_collector.metrics; // Dmsg2(100, "metrics = 0x%p 0x%p\n", res->res_collector.metrics, res_all.res_collector.metrics); break; default: printf(_("Unknown resource type %d\n"), type); error = 1; break; } if (res_all.res_dir.hdr.name) { free(res_all.res_dir.hdr.name); res_all.res_dir.hdr.name = NULL; } if (res_all.res_dir.hdr.desc) { free(res_all.res_dir.hdr.desc); res_all.res_dir.hdr.desc = NULL; } return true; } /* The following code is only executed on pass 1 */ size = get_resource_size(type); if (size < 0) { printf(_("Unknown 
resource type %d\n"), type); error = 1; } /* Common */ if (!error) { if (!config->insert_res(rindex, size)) { return false; } } if (pass == 1 && type == R_STORAGE) { if (!dedup_check_storage_resource(config)) { return false; } } return true; } bool parse_sd_config(CONFIG *config, const char *configfile, int exit_code) { config->init(configfile, NULL, exit_code, (void *)&res_all, res_all_size, r_first, r_last, resources, &res_head); return config->parse_config(); } bacula-15.0.3/src/stored/device.c0000644000175000017500000002430214771010173016355 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * * Higher Level Device routines. * Knows about Bacula tape labels and such * * NOTE! In general, subroutines that have the word * "device" in the name do locking. Subroutines * that have the word "dev" in the name do not * do locking. Thus if xxx_device() calls * yyy_dev(), all is OK, but if xxx_device() * calls yyy_device(), everything will hang. * Obviously, no zzz_dev() is allowed to call * a www_device() or everything falls apart. * * Concerning the routines dev->rLock()() and block_device() * see the end of this module for details. In general, * blocking a device leaves it in a state where all threads * other than the current thread block when they attempt to * lock the device. They remain suspended (blocked) until the device * is unblocked. So, a device is blocked during an operation * that takes a long time (initialization, mounting a new * volume, ...) locking a device is done for an operation * that takes a short time such as writing data to the * device. * * * Kern Sibbald, MM, MMI * */ #include "bacula.h" /* pull in global headers */ #include "stored.h" /* pull in Storage Deamon headers */ /* Forward referenced functions */ /* * This is the dreaded moment. We either have an end of * medium condition or worse, and error condition. * Attempt to "recover" by obtaining a new Volume. * * Here are a few things to know: * dcr->VolCatInfo contains the info on the "current" tape for this job. * dev->VolCatInfo contains the info on the tape in the drive. * The tape in the drive could have changed several times since * the last time the job used it (jcr->VolCatInfo). * dcr->VolumeName is the name of the current/desired tape in the drive. * * We enter with device locked, and * exit with device locked. * * Note, we are called only from one place in block.c for the daemons. * The btape utility calls it from btape.c. 
* * Returns: true on success * false on failure */ bool fixup_device_block_write_error(DCR *dcr, int retries) { char PrevVolName[MAX_NAME_LENGTH]; DEV_BLOCK *block = dcr->block; DEV_BLOCK *ameta_block = dcr->ameta_block; DEV_BLOCK *adata_block = dcr->adata_block; char b1[30], b2[30]; time_t wait_time; char dt[MAX_TIME_LENGTH]; JCR *jcr = dcr->jcr; DEVICE *dev; int blocked; /* save any previous blocked status */ bool ok = false; bool save_adata = dcr->dev->adata; Enter(100); if (save_adata) { dcr->set_ameta(); /* switch to working with ameta */ } dev = dcr->dev; blocked = dev->blocked(); wait_time = time(NULL); /* * If we are blocked at entry, unblock it, and set our own block status */ if (blocked != BST_NOT_BLOCKED) { unblock_device(dev); } block_device(dev, BST_DOING_ACQUIRE); /* Continue unlocked, but leave BLOCKED */ dev->Unlock(); bstrncpy(PrevVolName, dev->getVolCatName(), sizeof(PrevVolName)); bstrncpy(dev->VolHdr.PrevVolumeName, PrevVolName, sizeof(dev->VolHdr.PrevVolumeName)); /* create temporary block, that will be released at the end, current blocks * have been saved in local DEV_BLOCK above and will be restored before to * leave the function */ dev->new_dcr_blocks(dcr); /* Inform User about end of medium */ Jmsg(jcr, M_INFO, 0, _("End of medium on Volume \"%s\" Bytes=%s Blocks=%s at %s.\n"), PrevVolName, edit_uint64_with_commas(dev->VolCatInfo.VolCatBytes, b1), edit_uint64_with_commas(dev->VolCatInfo.VolCatBlocks, b2), bstrftime(dt, sizeof(dt), time(NULL))); Dmsg1(150, "set_unload dev=%s\n", dev->print_name()); dev->set_unload(); /* Clear DCR Start/End Block/File positions */ dcr->VolFirstIndex = dcr->VolLastIndex = 0; dcr->StartAddr = dcr->EndAddr = 0; dcr->VolMediaId = 0; dcr->WroteVol = false; if (!dcr->mount_next_write_volume()) { dev->free_dcr_blocks(dcr); dcr->block = block; dcr->ameta_block = ameta_block; dcr->adata_block = adata_block; dev->Lock(); goto bail_out; } Dmsg2(150, "must_unload=%d dev=%s\n", dev->must_unload(), dev->print_name()); dev->notify_newvol_in_attached_dcrs(dcr->VolumeName); dev->Lock(); /* lock again */ dev->VolCatInfo.VolCatJobs++; /* increment number of jobs on vol */ if (!dir_update_volume_info(dcr, false, false)) { /* send Volume info to Director */ goto bail_out; } Jmsg(jcr, M_INFO, 0, _("New volume \"%s\" mounted on device %s at %s.\n"), dcr->VolumeName, dev->print_name(), bstrftime(dt, sizeof(dt), time(NULL))); /* * If this is a new tape, the label_blk will contain the * label, so write it now. If this is a previously * used tape, mount_next_write_volume() will return an * empty label_blk, and nothing will be written. */ Dmsg0(190, "write label block to dev\n"); if (!dcr->write_block_to_dev()) { berrno be; Pmsg1(0, _("write_block_to_device Volume label failed. ERR=%s\n"), be.bstrerror(dev->dev_errno)); dev->free_dcr_blocks(dcr); dcr->block = block; dcr->ameta_block = ameta_block; dcr->adata_block = adata_block; goto bail_out; } dev->free_dcr_blocks(dcr); dcr->block = block; dcr->ameta_block = ameta_block; dcr->adata_block = adata_block; /* Clear NewVol now because dir_get_volume_info() already done */ jcr->dcr->NewVol = false; set_new_volume_parameters(dcr); jcr->run_time += time(NULL) - wait_time; /* correct run time for mount wait */ /* Write overflow block to device */ Dmsg0(190, "Write overflow block to dev\n"); if (save_adata) { dcr->set_adata(); /* try to write block we entered with */ } if (!dcr->write_block_to_dev()) { berrno be; Dmsg1(0, _("write_block_to_device overflow block failed. 
ERR=%s\n"), be.bstrerror(dev->dev_errno)); /* Note: recursive call */ if (retries-- <= 0 || !fixup_device_block_write_error(dcr, retries)) { Jmsg2(jcr, M_FATAL, 0, _("Catastrophic error. Cannot write overflow block to device %s. ERR=%s\n"), dev->print_name(), be.bstrerror(dev->dev_errno)); goto bail_out; } } ok = true; bail_out: if (save_adata) { dcr->set_ameta(); /* Do unblock ... on ameta */ } /* * At this point, the device is locked and blocked. * Unblock the device, restore any entry blocked condition, then * return leaving the device locked (as it was on entry). */ unblock_device(dev); if (blocked != BST_NOT_BLOCKED) { block_device(dev, blocked); } if (save_adata) { dcr->set_adata(); /* switch back to what we entered with */ } Leave(100); return ok; /* device locked */ } void set_start_vol_position(DCR *dcr) { DEVICE *dev = dcr->dev; /* Set new start position */ if (dev->is_tape()) { dcr->StartAddr = dcr->EndAddr = dev->get_full_addr(); } else { if (dev->adata) { dev = dcr->ameta_dev; } /* * Note: we only update the DCR values for ameta blocks * because all the indexing (JobMedia) is done with * ameta blocks/records, which may point to adata. */ dcr->StartAddr = dcr->EndAddr = dev->get_full_addr(); } } /* * We have a new Volume mounted, so reset the Volume parameters * concerning this job. The global changes were made earlier * in the dev structure. */ void set_new_volume_parameters(DCR *dcr) { JCR *jcr = dcr->jcr; Dmsg1(40, "set_new_volume_parameters dev=%s\n", dcr->dev->print_name()); if (dcr->NewVol) { while (dcr->VolumeName[0] == 0) { int retries = 5; wait_for_device(dcr, retries); } if (dir_get_volume_info(dcr, dcr->VolumeName, GET_VOL_INFO_FOR_WRITE)) { dcr->dev->clear_wait(); } else { Dmsg1(40, "getvolinfo failed. No new Vol: %s", jcr->errmsg); } } set_new_file_parameters(dcr); jcr->NumWriteVolumes++; dcr->NewVol = false; } /* * We are now in a new Volume file, so reset the Volume parameters * concerning this job. The global changes were made earlier * in the dev structure. */ void set_new_file_parameters(DCR *dcr) { set_start_vol_position(dcr); /* Reset indicies */ Dmsg3(1000, "Reset indices Vol=%s were: FI=%d LI=%d\n", dcr->VolumeName, dcr->VolFirstIndex, dcr->VolLastIndex); dcr->VolFirstIndex = 0; dcr->VolLastIndex = 0; dcr->NewFile = false; dcr->WroteVol = false; } /* * First Open of the device. Expect dev to already be initialized. * * This routine is used only when the Storage daemon starts * and always_open is set, and in the stand-alone utility * routines such as bextract. * * Note, opening of a normal file is deferred to later so * that we can get the filename; the device_name for * a file is the directory only. 
* * Returns: false on failure * true on success */ bool first_open_device(DCR *dcr) { DEVICE *dev = dcr->dev; bool ok = true; Dmsg0(120, "start open_output_device()\n"); if (!dev) { return false; } dev->rLock(false); /* Defer opening files */ if (!dev->is_tape()) { Dmsg0(129, "Device is file, deferring open.\n"); goto bail_out; } Dmsg0(129, "Opening device.\n"); if (!dev->open_device(dcr, OPEN_READ_ONLY)) { Jmsg1(NULL, M_FATAL, 0, _("dev open failed: %s\n"), dev->errmsg); ok = false; goto bail_out; } Dmsg1(129, "open dev %s OK\n", dev->print_name()); bail_out: dev->rUnlock(); return ok; } bacula-15.0.3/src/stored/stored.h0000644000175000017500000000572214771010173016430 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Storage daemon specific defines and includes * */ #ifndef __STORED_H_ #define __STORED_H_ #define STORAGE_DAEMON 1 /* Set to debug Lock() and Unlock() only */ #define DEV_DEBUG_LOCK /* * Set to define all SD locks except Lock() * currently this does not work. More locks * must be converted. */ #define SD_DEBUG_LOCK #ifdef SD_DEBUG_LOCK const int sd_dbglvl = 300; #else const int sd_dbglvl = 300; #endif #ifdef COMMUNITY #undef SD_DEDUP_SUPPORT #endif #ifdef HAVE_WIN32 #undef SD_DEDUP_SUPPORT #endif #ifdef HAVE_MTIO_H #include #else # ifdef HAVE_SYS_MTIO_H # include # else # ifdef HAVE_SYS_TAPE_H # include # else /* Needed for Mac 10.6 (Snow Leopard) */ # include "lib/bmtio.h" # endif # endif #endif #include "lock.h" #include "block.h" #include "record.h" #include "dev.h" #include "stored_conf.h" #include "bsr.h" #include "jcr.h" #include "vol_mgr.h" #include "reserve.h" #include "protos.h" #include "dedup_interface.h" #ifdef HAVE_LIBZ #include /* compression headers */ #else #define uLongf uint32_t #endif #ifdef HAVE_FNMATCH #include #else #include "lib/fnmatch.h" #endif #ifdef HAVE_DIRENT_H #include #endif #include "file_dev.h" #include "tape_dev.h" #include "fifo_dev.h" #include "null_dev.h" #include "vtape_dev.h" #include "cloud_dev.h" #include "aligned_dev.h" #include "store_mngr.h" #ifdef SD_DEDUP_SUPPORT #include "dedup_dev.h" #endif #include "win_file_dev.h" #include "win_tape_dev.h" #include "sd_plugins.h" int breaddir(DIR *dirp, POOLMEM *&d_name); typedef struct { int bacula_storage_config_devices; int bacula_storage_config_autochangers; int bacula_storage_memory_bufs; int bacula_storage_memory_heap; int bacula_storage_memory_maxbufs; int bacula_storage_memory_maxbytes; int bacula_storage_memory_smbytes; } sdstatmetrics_t; /* Daemon globals from stored.c */ extern STORES *me; /* "Global" daemon resource */ extern bool forge_on; /* proceed inspite of I/O errors */ extern pthread_mutex_t device_release_mutex; extern pthread_cond_t wait_device_release; /* wait for any device to be released */ extern bool update_permanent_stats(void *data); extern bstatcollect *statcollector; extern sdstatmetrics_t sdstatmetrics; extern bool got_caps_needed; #endif /* __STORED_H_ */ 
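/*
 * Include-order note (illustrative; this mirrors the .c files in this
 * directory rather than adding anything new): stored.h already pulls in
 * the lock, block, record, device, bsr and configuration headers above,
 * so a storage-daemon source file normally needs only
 *
 *    #include "bacula.h"   // pull in global headers
 *    #include "stored.h"   // pull in Storage daemon headers
 *
 * as device.c and parse_bsr.c do.
 */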
bacula-15.0.3/src/stored/parse_bsr.c0000644000175000017500000006510314771010173017102 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Parse a Bootstrap Records (used for restores) * * Kern Sibbald, June MMII * */ #include "bacula.h" #include "stored.h" static void s_err(const char *file, int line, LEX *lc, const char *msg, ...); static bool add_restore_volume(JCR *jcr, VOL_LIST *vol, bool add_to_read_list); typedef BSR * (ITEM_HANDLER)(LEX *lc, BSR *bsr); static BSR *store_client(LEX *lc, BSR *bsr); static BSR *store_count(LEX *lc, BSR *bsr); static BSR *store_device(LEX *lc, BSR *bsr); static BSR *store_exclude(LEX *lc, BSR *bsr); static BSR *store_fileregex(LEX *lc, BSR *bsr); static BSR *store_findex(LEX *lc, BSR *bsr); static BSR *store_include(LEX *lc, BSR *bsr); static BSR *store_jobid(LEX *lc, BSR *bsr); static BSR *store_joblevel(LEX *lc, BSR *bsr); static BSR *store_job(LEX *lc, BSR *bsr); static BSR *store_jobtype(LEX *lc, BSR *bsr); static BSR *store_mediatype(LEX *lc, BSR *bsr); static BSR *store_nothing(LEX *lc, BSR *bsr); static BSR *store_sessid(LEX *lc, BSR *bsr); static BSR *store_sesstime(LEX *lc, BSR *bsr); static BSR *store_slot(LEX *lc, BSR *bsr); static BSR *store_stream(LEX *lc, BSR *bsr); static BSR *store_voladdr(LEX *lc, BSR *bsr); static BSR *store_volblock(LEX *lc, BSR *bsr); static BSR *store_volfile(LEX *lc, BSR *bsr); static BSR *store_vol(LEX *lc, BSR *bsr); static bool is_fast_rejection_ok(BSR *bsr); static bool is_positioning_ok(BSR *bsr); struct kw_items { const char *name; ITEM_HANDLER *handler; }; /* * List of all keywords permitted in bsr files and their handlers */ struct kw_items items[] = { {"client", store_client}, {"count", store_count}, {"device", store_device}, {"exclude", store_exclude}, {"fileindex", store_findex}, {"fileregex", store_fileregex}, {"include", store_include}, {"jobid", store_jobid}, {"joblevel", store_joblevel}, {"job", store_job}, {"jobtype", store_jobtype}, {"mediatype", store_mediatype}, {"slot", store_slot}, {"storage", store_nothing}, {"stream", store_stream}, {"voladdr", store_voladdr}, {"volblock", store_volblock}, {"volume", store_vol}, {"volfile", store_volfile}, {"volsessionid", store_sessid}, {"volsessiontime", store_sesstime}, {NULL, NULL} }; /* * Create a BSR record */ BSR *new_bsr() { BSR *bsr = (BSR *)malloc(sizeof(BSR)); memset(bsr, 0, sizeof(BSR)); return bsr; } /********************************************************************* * * Parse Bootstrap file * */ BSR *parse_bsr(JCR *jcr, char *fname) { LEX *lc = NULL; int token, i; BSR *root_bsr = new_bsr(); BSR *bsr = root_bsr; Dmsg1(300, "Enter parse_bsf %s\n", fname); if ((lc = lex_open_file(lc, fname, s_err)) == NULL) { berrno be; Emsg2(M_ERROR_TERM, 0, _("Cannot open bootstrap file %s: %s\n"), fname, be.bstrerror()); } lc->caller_ctx = (void *)jcr; while ((token=lex_get_token(lc, T_ALL)) != T_EOF) { Dmsg1(300, "parse got token=%s\n", lex_tok_to_str(token)); if (token == T_EOL) { continue; } for 
(i=0; items[i].name; i++) { if (strcasecmp(items[i].name, lc->str) == 0) { token = lex_get_token(lc, T_ALL); Dmsg1 (300, "in T_IDENT got token=%s\n", lex_tok_to_str(token)); if (token != T_EQUALS) { scan_err1(lc, "expected an equals, got: %s", lc->str); bsr = NULL; break; } Dmsg1(300, "calling handler for %s\n", items[i].name); /* Call item handler */ bsr = items[i].handler(lc, bsr); i = -1; break; } } if (i >= 0) { Dmsg1(300, "Keyword = %s\n", lc->str); scan_err1(lc, "Keyword %s not found", lc->str); bsr = NULL; break; } if (!bsr) { break; } } lc = lex_close_file(lc); Dmsg0(300, "Leave parse_bsf()\n"); if (!bsr) { free_bsr(root_bsr); root_bsr = NULL; } if (root_bsr) { root_bsr->use_fast_rejection = is_fast_rejection_ok(root_bsr); root_bsr->use_positioning = is_positioning_ok(root_bsr); } for (bsr=root_bsr; bsr; bsr=bsr->next) { bsr->root = root_bsr; } return root_bsr; } static BSR *store_client(LEX *lc, BSR *bsr) { int token; BSR_CLIENT *client; for (;;) { token = lex_get_token(lc, T_NAME); if (token == T_ERROR) { return NULL; } client = (BSR_CLIENT *)malloc(sizeof(BSR_CLIENT)); memset(client, 0, sizeof(BSR_CLIENT)); bstrncpy(client->ClientName, lc->str, sizeof(client->ClientName)); /* Add it to the end of the client chain */ if (!bsr->client) { bsr->client = client; } else { BSR_CLIENT *bc = bsr->client; for ( ;bc->next; bc=bc->next) { } bc->next = client; } token = lex_get_token(lc, T_ALL); if (token != T_COMMA) { break; } } return bsr; } static BSR *store_count(LEX *lc, BSR *bsr) { int token; token = lex_get_token(lc, T_PINT32); if (token == T_ERROR) { return NULL; } bsr->count = lc->pint32_val; scan_to_eol(lc); return bsr; } /* Shove the Device name in each Volume in the current bsr */ static BSR *store_device(LEX *lc, BSR *bsr) { int token; token = lex_get_token(lc, T_STRING); if (token == T_ERROR) { return NULL; } if (!bsr->volume) { Emsg1(M_ERROR,0, _("Device \"%s\" in bsr at inappropriate place.\n"), lc->str); return bsr; } BSR_VOLUME *bv; for (bv=bsr->volume; bv; bv=bv->next) { bstrncpy(bv->device, lc->str, sizeof(bv->device)); } return bsr; } static BSR *store_findex(LEX *lc, BSR *bsr) { int token; BSR_FINDEX *findex; for (;;) { token = lex_get_token(lc, T_PINT32_RANGE); if (token == T_ERROR) { return NULL; } findex = (BSR_FINDEX *)malloc(sizeof(BSR_FINDEX)); memset(findex, 0, sizeof(BSR_FINDEX)); findex->findex = lc->pint32_val; findex->findex2 = lc->pint32_val2; /* Add it to the end of the chain */ if (!bsr->FileIndex) { bsr->FileIndex = findex; } else { /* Add to end of chain */ BSR_FINDEX *bs = bsr->FileIndex; for ( ;bs->next; bs=bs->next) { } bs->next = findex; } token = lex_get_token(lc, T_ALL); if (token != T_COMMA) { break; } } return bsr; } static BSR *store_fileregex(LEX *lc, BSR *bsr) { int token; int rc; token = lex_get_token(lc, T_STRING); if (token == T_ERROR) { return NULL; } if (bsr->fileregex) free(bsr->fileregex); bsr->fileregex = bstrdup(lc->str); if (bsr->fileregex_re == NULL) bsr->fileregex_re = (regex_t *)bmalloc(sizeof(regex_t)); rc = regcomp(bsr->fileregex_re, bsr->fileregex, REG_EXTENDED|REG_NOSUB); if (rc != 0) { char prbuf[500]; regerror(rc, bsr->fileregex_re, prbuf, sizeof(prbuf)); Emsg2(M_ERROR, 0, _("REGEX '%s' compile error. 
ERR=%s\n"), bsr->fileregex, prbuf); return NULL; } return bsr; } static BSR *store_jobid(LEX *lc, BSR *bsr) { int token; BSR_JOBID *jobid; for (;;) { token = lex_get_token(lc, T_PINT32_RANGE); if (token == T_ERROR) { return NULL; } jobid = (BSR_JOBID *)malloc(sizeof(BSR_JOBID)); memset(jobid, 0, sizeof(BSR_JOBID)); jobid->JobId = lc->pint32_val; jobid->JobId2 = lc->pint32_val2; /* Add it to the end of the chain */ if (!bsr->JobId) { bsr->JobId = jobid; } else { /* Add to end of chain */ BSR_JOBID *bs = bsr->JobId; for ( ;bs->next; bs=bs->next) { } bs->next = jobid; } token = lex_get_token(lc, T_ALL); if (token != T_COMMA) { break; } } return bsr; } static BSR *store_jobtype(LEX *lc, BSR *bsr) { /* *****FIXME****** */ Pmsg0(-1, _("JobType not yet implemented\n")); return bsr; } static BSR *store_joblevel(LEX *lc, BSR *bsr) { /* *****FIXME****** */ Pmsg0(-1, _("JobLevel not yet implemented\n")); return bsr; } static BSR *store_job(LEX *lc, BSR *bsr) { int token; BSR_JOB *job; for (;;) { token = lex_get_token(lc, T_NAME); if (token == T_ERROR) { return NULL; } job = (BSR_JOB *)malloc(sizeof(BSR_JOB)); memset(job, 0, sizeof(BSR_JOB)); bstrncpy(job->Job, lc->str, sizeof(job->Job)); /* Add it to the end of the client chain */ if (!bsr->job) { bsr->job = job; } else { /* Add to end of chain */ BSR_JOB *bc = bsr->job; for ( ;bc->next; bc=bc->next) { } bc->next = job; } token = lex_get_token(lc, T_ALL); if (token != T_COMMA) { break; } } return bsr; } /* Shove the MediaType in each Volume in the current bsr */ static BSR *store_mediatype(LEX *lc, BSR *bsr) { int token; token = lex_get_token(lc, T_STRING); if (token == T_ERROR) { return NULL; } if (!bsr->volume) { Emsg1(M_ERROR,0, _("MediaType %s in bsr at inappropriate place.\n"), lc->str); return bsr; } BSR_VOLUME *bv; for (bv=bsr->volume; bv; bv=bv->next) { bstrncpy(bv->MediaType, lc->str, sizeof(bv->MediaType)); } return bsr; } static BSR *store_vol(LEX *lc, BSR *bsr) { int token; BSR_VOLUME *volume; char *p, *n; token = lex_get_token(lc, T_STRING); if (token == T_ERROR) { return NULL; } if (bsr->volume) { bsr->next = new_bsr(); bsr->next->prev = bsr; bsr = bsr->next; } /* This may actually be more than one volume separated by a | * If so, separate them. */ for (p=lc->str; p && *p; ) { n = strchr(p, '|'); if (n) { *n++ = 0; } volume = (BSR_VOLUME *)malloc(sizeof(BSR_VOLUME)); memset(volume, 0, sizeof(BSR_VOLUME)); bstrncpy(volume->VolumeName, p, sizeof(volume->VolumeName)); /* Add it to the end of the volume chain */ if (!bsr->volume) { bsr->volume = volume; } else { BSR_VOLUME *bc = bsr->volume; for ( ;bc->next; bc=bc->next) { } bc->next = volume; } p = n; } return bsr; } static bool is_positioning_ok(BSR *bsr) { /* * Every bsr should have a volfile entry and a volblock entry * or a VolAddr * if we are going to use positioning */ for ( ; bsr; bsr=bsr->next) { if (!((bsr->volfile && bsr->volblock) || bsr->voladdr)) { return false; } } return true; } static bool is_fast_rejection_ok(BSR *bsr) { /* * Although, this can be optimized, for the moment, require * all bsrs to have both sesstime and sessid set before * we do fast rejection. 
*/ for ( ; bsr; bsr=bsr->next) { if (!(bsr->sesstime && bsr->sessid)) { return false; } } return true; } static BSR *store_nothing(LEX *lc, BSR *bsr) { int token; token = lex_get_token(lc, T_STRING); if (token == T_ERROR) { return NULL; } return bsr; } /* * Routine to handle Volume start/end file */ static BSR *store_volfile(LEX *lc, BSR *bsr) { int token; BSR_VOLFILE *volfile; for (;;) { token = lex_get_token(lc, T_PINT32_RANGE); if (token == T_ERROR) { return NULL; } volfile = (BSR_VOLFILE *)malloc(sizeof(BSR_VOLFILE)); memset(volfile, 0, sizeof(BSR_VOLFILE)); volfile->sfile = lc->pint32_val; volfile->efile = lc->pint32_val2; /* Add it to the end of the chain */ if (!bsr->volfile) { bsr->volfile = volfile; } else { /* Add to end of chain */ BSR_VOLFILE *bs = bsr->volfile; for ( ;bs->next; bs=bs->next) { } bs->next = volfile; } token = lex_get_token(lc, T_ALL); if (token != T_COMMA) { break; } } return bsr; } /* * Routine to handle Volume start/end Block */ static BSR *store_volblock(LEX *lc, BSR *bsr) { int token; BSR_VOLBLOCK *volblock; for (;;) { token = lex_get_token(lc, T_PINT32_RANGE); if (token == T_ERROR) { return NULL; } volblock = (BSR_VOLBLOCK *)malloc(sizeof(BSR_VOLBLOCK)); memset(volblock, 0, sizeof(BSR_VOLBLOCK)); volblock->sblock = lc->pint32_val; volblock->eblock = lc->pint32_val2; /* Add it to the end of the chain */ if (!bsr->volblock) { bsr->volblock = volblock; } else { /* Add to end of chain */ BSR_VOLBLOCK *bs = bsr->volblock; for ( ;bs->next; bs=bs->next) { } bs->next = volblock; } token = lex_get_token(lc, T_ALL); if (token != T_COMMA) { break; } } return bsr; } /* * Routine to handle Volume start/end address */ static BSR *store_voladdr(LEX *lc, BSR *bsr) { int token; BSR_VOLADDR *voladdr; for (;;) { token = lex_get_token(lc, T_PINT64_RANGE); if (token == T_ERROR) { return NULL; } voladdr = (BSR_VOLADDR *)malloc(sizeof(BSR_VOLADDR)); memset(voladdr, 0, sizeof(BSR_VOLADDR)); voladdr->saddr = lc->pint64_val; voladdr->eaddr = lc->pint64_val2; /* Add it to the end of the chain */ if (!bsr->voladdr) { bsr->voladdr = voladdr; } else { /* Add to end of chain */ BSR_VOLADDR *bs = bsr->voladdr; for ( ;bs->next; bs=bs->next) { } bs->next = voladdr; } token = lex_get_token(lc, T_ALL); if (token != T_COMMA) { break; } } return bsr; } static BSR *store_sessid(LEX *lc, BSR *bsr) { int token; BSR_SESSID *sid; for (;;) { token = lex_get_token(lc, T_PINT32_RANGE); if (token == T_ERROR) { return NULL; } sid = (BSR_SESSID *)malloc(sizeof(BSR_SESSID)); memset(sid, 0, sizeof(BSR_SESSID)); sid->sessid = lc->pint32_val; sid->sessid2 = lc->pint32_val2; /* Add it to the end of the chain */ if (!bsr->sessid) { bsr->sessid = sid; } else { /* Add to end of chain */ BSR_SESSID *bs = bsr->sessid; for ( ;bs->next; bs=bs->next) { } bs->next = sid; } token = lex_get_token(lc, T_ALL); if (token != T_COMMA) { break; } } return bsr; } static BSR *store_sesstime(LEX *lc, BSR *bsr) { int token; BSR_SESSTIME *stime; for (;;) { token = lex_get_token(lc, T_PINT32); if (token == T_ERROR) { return NULL; } stime = (BSR_SESSTIME *)malloc(sizeof(BSR_SESSTIME)); memset(stime, 0, sizeof(BSR_SESSTIME)); stime->sesstime = lc->pint32_val; /* Add it to the end of the chain */ if (!bsr->sesstime) { bsr->sesstime = stime; } else { /* Add to end of chain */ BSR_SESSTIME *bs = bsr->sesstime; for ( ;bs->next; bs=bs->next) { } bs->next = stime; } token = lex_get_token(lc, T_ALL); if (token != T_COMMA) { break; } } return bsr; } static BSR *store_stream(LEX *lc, BSR *bsr) { int token; BSR_STREAM *stream; for (;;) { token 
= lex_get_token(lc, T_INT32); if (token == T_ERROR) { return NULL; } stream = (BSR_STREAM *)malloc(sizeof(BSR_STREAM)); memset(stream, 0, sizeof(BSR_STREAM)); stream->stream = lc->int32_val; /* Add it to the end of the chain */ if (!bsr->stream) { bsr->stream = stream; } else { /* Add to end of chain */ BSR_STREAM *bs = bsr->stream; for ( ;bs->next; bs=bs->next) { } bs->next = stream; } token = lex_get_token(lc, T_ALL); if (token != T_COMMA) { break; } } return bsr; } static BSR *store_slot(LEX *lc, BSR *bsr) { int token; token = lex_get_token(lc, T_PINT32); if (token == T_ERROR) { return NULL; } if (!bsr->volume) { Emsg1(M_ERROR,0, _("Slot %d in bsr at inappropriate place.\n"), lc->pint32_val); return bsr; } bsr->volume->Slot = lc->pint32_val; scan_to_eol(lc); return bsr; } static BSR *store_include(LEX *lc, BSR *bsr) { scan_to_eol(lc); return bsr; } static BSR *store_exclude(LEX *lc, BSR *bsr) { scan_to_eol(lc); return bsr; } void dump_volfile(BSR_VOLFILE *volfile) { if (volfile) { Pmsg2(-1, _("VolFile : %u-%u\n"), volfile->sfile, volfile->efile); dump_volfile(volfile->next); } } void dump_volblock(BSR_VOLBLOCK *volblock) { if (volblock) { Pmsg2(-1, _("VolBlock : %u-%u\n"), volblock->sblock, volblock->eblock); dump_volblock(volblock->next); } } void dump_voladdr(DEVICE *dev, BSR_VOLADDR *voladdr) { if (voladdr) { if (dev) { char ed1[50], ed2[50]; Pmsg2(-1, _("VolAddr : %s-%llu\n"), dev->print_addr(ed1, sizeof(ed1), voladdr->saddr), dev->print_addr(ed2, sizeof(ed2), voladdr->eaddr)); } else { Pmsg2(-1, _("VolAddr : %llu-%llu\n"), voladdr->saddr, voladdr->eaddr); } dump_voladdr(dev, voladdr->next); } } void dump_findex(BSR_FINDEX *FileIndex) { if (FileIndex) { if (FileIndex->findex == FileIndex->findex2) { Pmsg1(-1, _("FileIndex : %u\n"), FileIndex->findex); } else { Pmsg2(-1, _("FileIndex : %u-%u\n"), FileIndex->findex, FileIndex->findex2); } dump_findex(FileIndex->next); } } void dump_jobid(BSR_JOBID *jobid) { if (jobid) { if (jobid->JobId == jobid->JobId2) { Pmsg1(-1, _("JobId : %u\n"), jobid->JobId); } else { Pmsg2(-1, _("JobId : %u-%u\n"), jobid->JobId, jobid->JobId2); } dump_jobid(jobid->next); } } void dump_sessid(BSR_SESSID *sessid) { if (sessid) { if (sessid->sessid == sessid->sessid2) { Pmsg1(-1, _("SessId : %u\n"), sessid->sessid); } else { Pmsg2(-1, _("SessId : %u-%u\n"), sessid->sessid, sessid->sessid2); } dump_sessid(sessid->next); } } void dump_volume(BSR_VOLUME *volume) { if (volume) { Pmsg1(-1, _("VolumeName : %s\n"), volume->VolumeName); Pmsg1(-1, _(" MediaType : %s\n"), volume->MediaType); Pmsg1(-1, _(" Device : %s\n"), volume->device); Pmsg1(-1, _(" Slot : %d\n"), volume->Slot); dump_volume(volume->next); } } void dump_client(BSR_CLIENT *client) { if (client) { Pmsg1(-1, _("Client : %s\n"), client->ClientName); dump_client(client->next); } } void dump_job(BSR_JOB *job) { if (job) { Pmsg1(-1, _("Job : %s\n"), job->Job); dump_job(job->next); } } void dump_sesstime(BSR_SESSTIME *sesstime) { if (sesstime) { Pmsg1(-1, _("SessTime : %u\n"), sesstime->sesstime); dump_sesstime(sesstime->next); } } void dump_bsr(DEVICE *dev, BSR *bsr, bool recurse) { int64_t save_debug = debug_level; debug_level = 1; if (!bsr) { Pmsg0(-1, _("BSR is NULL\n")); debug_level = save_debug; return; } Pmsg1(-1, _("Next : 0x%x\n"), bsr->next); Pmsg1(-1, _("Root bsr : 0x%x\n"), bsr->root); dump_volume(bsr->volume); dump_sessid(bsr->sessid); dump_sesstime(bsr->sesstime); dump_volfile(bsr->volfile); dump_volblock(bsr->volblock); dump_voladdr(dev, bsr->voladdr); dump_client(bsr->client); 
dump_jobid(bsr->JobId); dump_job(bsr->job); dump_findex(bsr->FileIndex); if (bsr->count) { Pmsg1(-1, _("count : %u\n"), bsr->count); Pmsg1(-1, _("found : %u\n"), bsr->found); } Pmsg1(-1, _("done : %s\n"), bsr->done?_("yes"):_("no")); Pmsg1(-1, _("positioning : %d\n"), bsr->use_positioning); Pmsg1(-1, _("fast_reject : %d\n"), bsr->use_fast_rejection); if (recurse && bsr->next) { Pmsg0(-1, "\n"); dump_bsr(dev, bsr->next, true); } debug_level = save_debug; } /********************************************************************* * * Free bsr resources */ static void free_bsr_item(BSR *bsr) { BSR *next; while (bsr) { next = bsr->next; free(bsr); bsr = next; } } /* * Remove a single item from the bsr tree */ void remove_bsr(BSR *bsr) { free_bsr_item((BSR *)bsr->volume); free_bsr_item((BSR *)bsr->client); free_bsr_item((BSR *)bsr->sessid); free_bsr_item((BSR *)bsr->sesstime); free_bsr_item((BSR *)bsr->volfile); free_bsr_item((BSR *)bsr->volblock); free_bsr_item((BSR *)bsr->voladdr); free_bsr_item((BSR *)bsr->JobId); free_bsr_item((BSR *)bsr->job); free_bsr_item((BSR *)bsr->FileIndex); free_bsr_item((BSR *)bsr->JobType); free_bsr_item((BSR *)bsr->JobLevel); if (bsr->fileregex) { bfree(bsr->fileregex); } if (bsr->fileregex_re) { regfree(bsr->fileregex_re); free(bsr->fileregex_re); } if (bsr->attr) { free_attr(bsr->attr); } if (bsr->next) { bsr->next->prev = bsr->prev; } if (bsr->prev) { bsr->prev->next = bsr->next; } free(bsr); } /* * Free all bsrs in chain */ void free_bsr(BSR *bsr) { BSR *next_bsr; while (bsr) { next_bsr = bsr->next; /* Remove (free) current bsr */ remove_bsr(bsr); /* Now get the next one */ bsr = next_bsr; } } /***************************************************************** * Routines for handling volumes */ static VOL_LIST *new_restore_volume() { VOL_LIST *vol; vol = (VOL_LIST *)malloc(sizeof(VOL_LIST)); memset(vol, 0, sizeof(VOL_LIST)); return vol; } /* * Create a list of Volumes (and Slots and Start positions) to be * used in the current restore job. 
*/ void create_restore_volume_list(JCR *jcr, bool add_to_read_list) { char *p, *n; VOL_LIST *vol; /* * Build a list of volumes to be processed */ jcr->NumReadVolumes = 0; jcr->CurReadVolume = 0; if (jcr->bsr) { BSR *bsr = jcr->bsr; if (!bsr->volume || !bsr->volume->VolumeName[0]) { return; } for ( ; bsr; bsr=bsr->next) { BSR_VOLUME *bsrvol; BSR_VOLFILE *volfile; uint32_t sfile = UINT32_MAX; /* Find minimum start file so that we can forward space to it */ for (volfile = bsr->volfile; volfile; volfile=volfile->next) { if (volfile->sfile < sfile) { sfile = volfile->sfile; } } /* Now add volumes for this bsr */ for (bsrvol = bsr->volume; bsrvol; bsrvol=bsrvol->next) { vol = new_restore_volume(); bstrncpy(vol->VolumeName, bsrvol->VolumeName, sizeof(vol->VolumeName)); bstrncpy(vol->MediaType, bsrvol->MediaType, sizeof(vol->MediaType)); bstrncpy(vol->device, bsrvol->device, sizeof(vol->device)); vol->Slot = bsrvol->Slot; vol->start_file = sfile; if (add_restore_volume(jcr, vol, add_to_read_list)) { jcr->NumReadVolumes++; Dmsg2(400, "Added volume=%s mediatype=%s\n", vol->VolumeName, vol->MediaType); } else { Dmsg1(400, "Duplicate volume %s\n", vol->VolumeName); free((char *)vol); } sfile = 0; /* start at beginning of second volume */ } } } else { /* This is the old way -- deprecated */ for (p = jcr->dcr->VolumeName; p && *p; ) { n = strchr(p, '|'); /* volume name separator */ if (n) { *n++ = 0; /* Terminate name */ } vol = new_restore_volume(); bstrncpy(vol->VolumeName, p, sizeof(vol->VolumeName)); bstrncpy(vol->MediaType, jcr->dcr->media_type, sizeof(vol->MediaType)); if (add_restore_volume(jcr, vol, add_to_read_list)) { jcr->NumReadVolumes++; } else { free((char *)vol); } p = n; } } } /* * Add current volume to end of list, only if the Volume * is not already in the list. * * returns: 1 if volume added * 0 if volume already in list */ static bool add_restore_volume(JCR *jcr, VOL_LIST *vol, bool add_to_read_list) { VOL_LIST *next = jcr->VolList; if (add_to_read_list) { /* Add volume to volume manager's read list */ add_read_volume(jcr, vol->VolumeName); } if (!next) { /* list empty ? */ jcr->VolList = vol; /* yes, add volume */ } else { /* Loop through all but last */ for ( ; next->next; next=next->next) { if (strcmp(vol->VolumeName, next->VolumeName) == 0) { /* Save smallest start file */ if (vol->start_file < next->start_file) { next->start_file = vol->start_file; } return false; /* already in list */ } } /* Check last volume in list */ if (strcmp(vol->VolumeName, next->VolumeName) == 0) { if (vol->start_file < next->start_file) { next->start_file = vol->start_file; } return false; /* already in list */ } next->next = vol; /* add volume */ } return true; } void free_restore_volume_list(JCR *jcr) { VOL_LIST *vol = jcr->VolList; VOL_LIST *tmp; for ( ; vol; ) { tmp = vol->next; remove_read_volume(jcr, vol->VolumeName); free(vol); vol = tmp; } jcr->VolList = NULL; } /* * Format a scanner error message */ static void s_err(const char *file, int line, LEX *lc, const char *msg, ...) 
{ JCR *jcr = (JCR *)(lc->caller_ctx); va_list arg_ptr; char buf[MAXSTRING]; va_start(arg_ptr, msg); bvsnprintf(buf, sizeof(buf), msg, arg_ptr); va_end(arg_ptr); if (jcr) { Jmsg(jcr, M_FATAL, 0, _("Bootstrap file error: %s\n" " : Line %d, col %d of file %s\n"), buf, lc->line_no, lc->col_no, lc->fname); } else { e_msg(file, line, M_FATAL, 0, _("Bootstrap file error: %s\n" " : Line %d, col %d of file %s\n"), buf, lc->line_no, lc->col_no, lc->fname); } lc->last_result = -1; } bacula-15.0.3/src/stored/s3_driver.c0000644000175000017500000007777714771010173017046 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Routines for writing to the Cloud using S3 protocol. * NOTE!!! This cloud driver is not compatible with * any disk-changer script for changing Volumes. * It does however work with Bacula Virtual autochangers. * * Written by Kern Sibbald, May MMXVI * */ #include "s3_driver.h" #if BEEF #include "bee_s3_cloud_glacier.h" #else #include "cloud_glacier.h" #endif #include #include #ifdef HAVE_LIBS3 static const int64_t dbglvl = DT_CLOUD|50; static const char *S3Errors[] = { "OK", "InternalError", "OutOfMemory", "Interrupted", "InvalidBucketNameTooLong", "InvalidBucketNameFirstCharacter", "InvalidBucketNameCharacter", "InvalidBucketNameCharacterSequence", "InvalidBucketNameTooShort", "InvalidBucketNameDotQuadNotation", "QueryParamsTooLong", "FailedToInitializeRequest", "MetaDataHeadersTooLong", "BadMetaData", "BadContentType", "ContentTypeTooLong", "BadMD5", "MD5TooLong", "BadCacheControl", "CacheControlTooLong", "BadContentDispositionFilename", "ContentDispositionFilenameTooLong", "BadContentEncoding", "ContentEncodingTooLong", "BadIfMatchETag", "IfMatchETagTooLong", "BadIfNotMatchETag", "IfNotMatchETagTooLong", "HeadersTooLong", "KeyTooLong", "UriTooLong", "XmlParseFailure", "EmailAddressTooLong", "UserIdTooLong", "UserDisplayNameTooLong", "GroupUriTooLong", "PermissionTooLong", "TargetBucketTooLong", "TargetPrefixTooLong", "TooManyGrants", "BadGrantee", "BadPermission", "XmlDocumentTooLarge", "NameLookupError", "FailedToConnect", "ServerFailedVerification", "ConnectionFailed", "AbortedByCallback", "NotSupported", "AccessDenied", "AccountProblem", "AmbiguousGrantByEmailAddress", "BadDigest", "BucketAlreadyExists", "BucketAlreadyOwnedByYou", "BucketNotEmpty", "CredentialsNotSupported", "CrossLocationLoggingProhibited", "EntityTooSmall", "EntityTooLarge", "ExpiredToken", "IllegalVersioningConfigurationException", "IncompleteBody", "IncorrectNumberOfFilesInPostRequest", "InlineDataTooLarge", "ErrorInternalError", "InvalidAccessKeyId", "InvalidAddressingHeader", "InvalidArgument", "InvalidBucketName", "InvalidBucketState", "InvalidDigest", "InvalidEncryptionAlgorithmError", "InvalidLocationConstraint", "InvalidObjectState", "InvalidPart", "InvalidPartOrder", "InvalidPayer", "InvalidPolicyDocument", "InvalidRange", "InvalidRequest", "InvalidSecurity", "InvalidSOAPRequest", "InvalidStorageClass", "InvalidTargetBucketForLogging", 
"InvalidToken", "InvalidURI", "KeyTooLong", "MalformedACLError", "MalformedPOSTRequest", "MalformedXML", "MaxMessageLengthExceeded", "MaxPostPreDataLengthExceededError", "MetadataTooLarge", "MethodNotAllowed", "MissingAttachment", "MissingContentLength", "MissingRequestBodyError", "MissingSecurityElement", "MissingSecurityHeader", "NoLoggingStatusForKey", "NoSuchBucket", "NoSuchKey", "NoSuchLifecycleConfiguration", "NoSuchUpload", "NoSuchVersion", "NotImplemented", "NotSignedUp", "NotSuchBucketPolicy", "OperationAborted", "PermanentRedirect", "PreconditionFailed", "Redirect", "RestoreAlreadyInProgress", "RequestIsNotMultiPartContent", "RequestTimeout", "RequestTimeTooSkewed", "RequestTorrentOfBucketError", "SignatureDoesNotMatch", "ServiceUnavailable", "SlowDown", "TemporaryRedirect", "TokenRefreshRequired", "TooManyBuckets", "UnexpectedContent", "UnresolvableGrantByEmailAddress", "UserKeyMustBeSpecified", "QuotaExceeded", "Unknown", "HttpErrorMovedTemporarily", "HttpErrorBadRequest", "HttpErrorForbidden", "HttpErrorNotFound", "HttpErrorConflict", "HttpErrorUnknown", "Undefined" }; #define S3ErrorsSize (sizeof(S3Errors)/sizeof(char *)) typedef cloud_glacier *(*newGlacierDriver_t)(void); struct glacier_driver_item { const char *name; void *handle; newGlacierDriver_t newDriver; cloud_glacier *ptr; /*static instance used for bcloud only. Driver uses its own instance.*/ bool builtin; bool loaded; }; #ifndef RTLD_NOW #define RTLD_NOW 2 #endif glacier_driver_item glacier_item = {0}; static cloud_glacier *load_glacier_driver(const char* plugin_directory) { if (!glacier_item.newDriver) { POOL_MEM fname(PM_FNAME); Mmsg(fname, "%s/bacula-sd-cloud-glacier-s3-driver-%s%s", plugin_directory, VERSION, ".so"); glacier_item.handle = dlopen(fname.c_str(), RTLD_NOW); if (glacier_item.handle) { glacier_item.newDriver = (newGlacierDriver_t)dlsym(glacier_item.handle, "BaculaCloudGlacier"); if (!glacier_item.newDriver) { dlclose(glacier_item.handle); return NULL; } } } if (glacier_item.newDriver) { return glacier_item.newDriver(); } return NULL; } static void unload_drivers() { if (glacier_item.handle) { dlclose(glacier_item.handle); } } #ifdef __cplusplus extern "C" { #endif cloud_driver *BaculaCloudDriver() { return New(s3_driver); } /* Needed for bcloud utility (backdoor glacier driver pre-loading) */ void BaculaInitGlacier(const char* plugin_directory) { glacier_item.ptr = load_glacier_driver(plugin_directory); } #ifdef __cplusplus } #endif /* * Our Bacula context for s3_xxx callbacks * NOTE: only items needed for particular callback are set */ class bacula_ctx { public: cancel_callback *cancel_cb; transfer *xfer; POOLMEM *&errMsg; ilist *parts; alist *aparts; int isTruncated; char* nextMarker; int64_t obj_len; const char *caller; FILE *infile; FILE *outfile; alist *volumes; S3Status status; bwlimit *limit; /* Used to control the bandwidth */ cleanup_cb_type *cleanup_cb; cleanup_ctx_type *cleanup_ctx; bool isRestoring; bacula_ctx(POOLMEM *&err) : cancel_cb(NULL), xfer(NULL), errMsg(err), parts(NULL), aparts(NULL), isTruncated(0), nextMarker(NULL), obj_len(0), caller(NULL), infile(NULL), outfile(NULL), volumes(NULL), status(S3StatusOK), limit(NULL), cleanup_cb(NULL), cleanup_ctx(NULL), isRestoring(false) { /* reset error message (necessary in case of retry) */ errMsg[0] = 0; } bacula_ctx(transfer *t) : cancel_cb(NULL), xfer(t), errMsg(t->m_message), parts(NULL), aparts(NULL), isTruncated(0), nextMarker(NULL), obj_len(0), caller(NULL), infile(NULL), outfile(NULL), volumes(NULL), status(S3StatusOK), 
limit(NULL), cleanup_cb(NULL), cleanup_ctx(NULL), isRestoring(false) { /* reset error message (necessary in case of retry) */ errMsg[0] = 0; } }; /* Imported functions */ const char *mode_to_str(int mode); /* Forward referenced functions */ /* Const and Static definitions */ static S3Status responsePropertiesCallback( const S3ResponseProperties *properties, void *callbackData); static void responseCompleteCallback( S3Status status, const S3ErrorDetails *oops, void *callbackData); S3ResponseHandler responseHandler = { &responsePropertiesCallback, &responseCompleteCallback };
static S3Status responsePropertiesCallback( const S3ResponseProperties *properties, void *callbackData) { bacula_ctx *ctx = (bacula_ctx *)callbackData; ASSERT(ctx); if (ctx->xfer && properties) { if (properties->contentLength > 0) { ctx->xfer->m_res_size = properties->contentLength; } if (properties->lastModified > 0) { ctx->xfer->m_res_mtime = properties->lastModified; } if (properties->restore) { const char *c = strchr( properties->restore, '"' ); c++; /* 't' stands for true, anything else is considered false */ ctx->isRestoring = (*c=='t'); } } return S3StatusOK; }
static void responseCompleteCallback( S3Status status, const S3ErrorDetails *oops, void *callbackCtx) { bacula_ctx *ctx = (bacula_ctx *)callbackCtx; const char *msg = NULL; Enter(dbglvl); if (ctx) { ctx->status = status; /* return completion status */ } if (status < 0 || status > S3ErrorsSize) { status = (S3Status)S3ErrorsSize; } if (oops) { msg = oops->message; } if (!msg) { msg = S3Errors[status]; } if ((status != S3StatusOK) && ctx->errMsg) { POOL_MEM tmp; Mmsg(tmp, " %s %s ERR=%s", ctx->caller, S3Errors[status], msg); pm_strcat(ctx->errMsg, tmp); if (oops->furtherDetails) { pm_strcat(ctx->errMsg, " "); pm_strcat(ctx->errMsg, oops->furtherDetails); } if (oops->curlError) { pm_strcat(ctx->errMsg, " "); pm_strcat(ctx->errMsg, oops->curlError); } for (int i=0; i<oops->extraDetailsCount; ++i) { pm_strcat(ctx->errMsg, " "); pm_strcat(ctx->errMsg, oops->extraDetails[i].name); pm_strcat(ctx->errMsg, " : "); pm_strcat(ctx->errMsg, oops->extraDetails[i].value); } } Leave(dbglvl); return; }
bool xfer_cancel_cb(void *arg) { transfer *xfer = (transfer*)arg; if (xfer) { return xfer->is_canceled(); } return false; }; s3_driver::s3_driver() : m_glacier_driver(NULL) { s3ctx = {0}; }; s3_driver::~s3_driver() { if (m_glacier_driver) { delete m_glacier_driver; } };
static int putObjectCallback(int buf_len, char *buf, void *callbackCtx) { bacula_ctx *ctx = (bacula_ctx *)callbackCtx; ssize_t rbytes = 0; int read_len; if (ctx->xfer->is_canceled()) { POOL_MEM msg; Mmsg(msg, _("Job cancelled.\n")); pm_strcat(ctx->errMsg, msg); return -1; } if (ctx->obj_len) { read_len = (ctx->obj_len > buf_len) ?
buf_len : ctx->obj_len; rbytes = fread(buf, 1, read_len, ctx->infile); Dmsg6(dbglvl, "%s xfer=part.%lu thread=%lu rbytes=%d bufsize=%u remlen=%lu\n", ctx->caller, ctx->xfer->m_part, pthread_self(), rbytes, buf_len, ctx->obj_len); if (rbytes <= 0) { berrno be; POOL_MEM msg; Mmsg(msg, "%s Error reading input file: ERR=%s\n", ctx->caller, be.bstrerror()); pm_strcat(ctx->errMsg, msg); goto get_out; } ctx->obj_len -= rbytes; ctx->xfer->increment_processed_size(rbytes); if (ctx->limit) { ctx->limit->control_bwlimit(rbytes); } } get_out: return rbytes; } S3PutObjectHandler putObjectHandler = { responseHandler, &putObjectCallback }; /* * Put a cache object into the cloud */ S3Status s3_driver::put_object(transfer *xfer, const char *cache_fname, const char *cloud_fname) { Enter(dbglvl); bacula_ctx ctx(xfer); ctx.limit = upload_limit.use_bwlimit() ? &upload_limit : NULL; struct stat statbuf; if (lstat(cache_fname, &statbuf) == -1) { berrno be; Mmsg2(ctx.errMsg, "Failed to stat file %s. ERR=%s\n", cache_fname, be.bstrerror()); goto get_out; } ctx.obj_len = statbuf.st_size; if (!(ctx.infile = bfopen(cache_fname, "r"))) { berrno be; Mmsg2(ctx.errMsg, "Failed to open input file %s. ERR=%s\n", cache_fname, be.bstrerror()); goto get_out; } ctx.caller = "S3_put_object"; S3_put_object(&s3ctx, cloud_fname, ctx.obj_len, NULL, NULL, 0, &putObjectHandler, &ctx); get_out: if (ctx.infile) { fclose(ctx.infile); /* input file */ } /* no error so far -> retrieve uploaded part info */ if (ctx.errMsg[0] == 0) { ilist parts; if (get_one_cloud_volume_part(cloud_fname, &parts, ctx.errMsg)) { /* only one part is returned */ cloud_part *p = (cloud_part *)parts.get(parts.last_index()); if (p) { xfer->m_res_size = p->size; xfer->m_res_mtime = p->mtime; bmemzero(xfer->m_hash64, 64); } } } else { Dmsg1(dbglvl, "put_object ERROR: %s\n", ctx.errMsg); } Leave(dbglvl); return ctx.status; } static S3Status getObjectDataCallback(int buf_len, const char *buf, void *callbackCtx) { bacula_ctx *ctx = (bacula_ctx *)callbackCtx; ssize_t wbytes; Enter(dbglvl); if (ctx->xfer->is_canceled()) { POOL_MEM msg; Mmsg(msg, _("Job cancelled.\n")); pm_strcat(ctx->errMsg, msg); Leave(dbglvl); return S3StatusAbortedByCallback; } /* Write buffer to output file */ wbytes = fwrite(buf, 1, buf_len, ctx->outfile); if (wbytes < 0) { berrno be; POOL_MEM msg; Mmsg(msg, "%s Error writing output file: ERR=%s\n", ctx->caller, be.bstrerror()); pm_strcat(ctx->errMsg, msg); Leave(dbglvl); return S3StatusAbortedByCallback; } ctx->xfer->increment_processed_size(wbytes); if (ctx->limit) { ctx->limit->control_bwlimit(wbytes); } Leave(dbglvl); return ((wbytes < buf_len) ? S3StatusAbortedByCallback : S3StatusOK); } int s3_driver::get_cloud_object(transfer *xfer, const char *cloud_fname, const char *cache_fname) { int64_t ifModifiedSince = -1; int64_t ifNotModifiedSince = -1; const char *ifMatch = 0; const char *ifNotMatch = 0; uint64_t startByte = 0; uint64_t byteCount = 0; bacula_ctx ctx(xfer); ctx.limit = download_limit.use_bwlimit() ? 
&download_limit : NULL; bool retry = false; Enter(dbglvl); /* Initialize handlers */ S3GetConditions getConditions = { ifModifiedSince, ifNotModifiedSince, ifMatch, ifNotMatch }; S3GetObjectHandler getObjectHandler = { { &responsePropertiesCallback, &responseCompleteCallback }, &getObjectDataCallback }; /* see if cache file already exists */ struct stat buf; if (lstat(cache_fname, &buf) == -1) { ctx.outfile = bfopen(cache_fname, "w"); } else { /* Exists so truncate and write from beginning */ ctx.outfile = bfopen(cache_fname, "r+"); } if (!ctx.outfile) { berrno be; Mmsg2(ctx.errMsg, "Could not open cache file %s. ERR=%s\n", cache_fname, be.bstrerror()); goto get_out; } ctx.caller = "S3_get_object"; S3_get_object(&s3ctx, cloud_fname, &getConditions, startByte, byteCount, NULL, 0, &getObjectHandler, &ctx); /* Archived objects (in GLACIER or DEEP_ARCHIVE) will return InvalidObjectStateError */ retry = (ctx.status == S3StatusErrorInvalidObjectState); if (retry) { restore_cloud_object(xfer, cloud_fname); } if (fclose(ctx.outfile) < 0) { berrno be; Mmsg2(ctx.errMsg, "Error closing cache file %s: %s\n", cache_fname, be.bstrerror()); } get_out: if (retry) { Leave(dbglvl); return CLOUD_DRIVER_COPY_PART_TO_CACHE_RETRY; } if (ctx.errMsg[0] == 0) { Leave(dbglvl); return CLOUD_DRIVER_COPY_PART_TO_CACHE_OK; } else { Leave(dbglvl); return CLOUD_DRIVER_COPY_PART_TO_CACHE_ERROR; } } bool s3_driver::move_cloud_part(const char *VolumeName, uint32_t apart, const char *to, cancel_callback *cancel_cb, POOLMEM *&err, int& exists) { POOLMEM *cloud_fname = get_pool_memory(PM_FNAME); cloud_fname[0] = 0; make_cloud_filename(cloud_fname, VolumeName, apart); POOLMEM *dest_cloud_fname = get_pool_memory(PM_FNAME); dest_cloud_fname[0] = 0; add_vol_and_part(dest_cloud_fname, VolumeName, to); int64_t lastModifiedReturn=0LL; bacula_ctx ctx(err); ctx.caller = "S3_copy_object"; Dmsg3(dbglvl, "%s trying to move %s to %s\n", ctx.caller, cloud_fname, dest_cloud_fname); S3_copy_object(&s3ctx, //bucketContext cloud_fname, //key NULL, //destinationBucket -> same dest_cloud_fname, // destinationKey NULL, // putProperties &lastModifiedReturn, 0, //eTagReturnSize NULL, //eTagReturn NULL, // requestContext=NULL -> process now 0, // timeoutMs &responseHandler, // handler &ctx //callbackData ); free_pool_memory(dest_cloud_fname); free_pool_memory(cloud_fname); if (ctx.status == S3StatusOK) { exists = true; Mmsg(err, "%s", to); /* remove source part */ Dmsg3(dbglvl, "%s move sucessful trying to unlink %s", ctx.caller, cloud_fname, dest_cloud_fname); ctx.caller = "S3_delete_object"; S3_delete_object(&s3ctx, cloud_fname, NULL, 0, &responseHandler, &ctx); if (ctx.status != S3StatusOK) { /* error message should have been filled within response cb */ return false; } else { Dmsg1(dbglvl, "move_cloud_part: Unlink sucessful for file %s.\n", cloud_fname); } return true; } else if (ctx.status == S3StatusXmlParseFailure) { /* source doesn't exist. OK. 
*/ exists = false; err[0] = 0; return true; } else { return (err[0] == 0); } } /* * libs3 callback for clean_cloud_volume() */ static S3Status partsAndCopieslistBucketCallback( int isTruncated, const char *nextMarker, int numObj, const S3ListBucketContent *object, int commonPrefixesCount, const char **commonPrefixes, void *callbackCtx) { bacula_ctx *ctx = (bacula_ctx *)callbackCtx; cleanup_ctx_type *cleanup_ctx = ctx->cleanup_ctx; cleanup_cb_type *cb = ctx->cleanup_cb; Enter(dbglvl); for (int i = 0; (cleanup_ctx && (i < numObj)); i++) { const S3ListBucketContent *obj = &(object[i]); if (obj && cb(obj->key, cleanup_ctx)) { ctx->aparts->append(bstrdup(obj->key)); Dmsg1(dbglvl, "partsAndCopieslistBucketCallback: %s retrieved\n", obj->key); } if (ctx->cancel_cb && ctx->cancel_cb->fct && ctx->cancel_cb->fct(ctx->cancel_cb->arg)) { POOL_MEM msg; Mmsg(msg, _("Job cancelled.\n")); pm_strcat(ctx->errMsg, msg); Leave(dbglvl); return S3StatusAbortedByCallback; } } ctx->isTruncated = isTruncated; if (ctx->nextMarker) { bfree_and_null(ctx->nextMarker); } if (isTruncated && numObj>0) { /* Workaround a bug with nextMarker */ const S3ListBucketContent *obj = &(object[numObj-1]); ctx->nextMarker = bstrdup(obj->key); } Leave(dbglvl); return S3StatusOK; } S3ListBucketHandler partsAndCopiesListBucketHandler = { responseHandler, &partsAndCopieslistBucketCallback }; /* remove part* from volume instead of part.* in truncate. Intended to remove copies created when part is overwritten */ /* If legit parts are still present in the volume, they will be deleted, except part.1 */ bool s3_driver::clean_cloud_volume(const char *VolumeName, cleanup_cb_type *cb, cleanup_ctx_type *context, cancel_callback *cancel_cb, POOLMEM *&err) { Enter(dbglvl); if (strlen(VolumeName) == 0) { pm_strcpy(err, "Invalid argument"); Leave(dbglvl); return false; } alist parts; bacula_ctx ctx(err); ctx.cancel_cb = cancel_cb; ctx.aparts = &parts; ctx.isTruncated = 1; /* pass into the while loop at least once */ ctx.caller = "S3_list_bucket"; ctx.cleanup_cb = cb; ctx.cleanup_ctx = context; while (ctx.isTruncated!=0) { ctx.isTruncated = 0; S3_list_bucket(&s3ctx, VolumeName, ctx.nextMarker, NULL, 0, NULL, 0, &partsAndCopiesListBucketHandler, &ctx); Dmsg4(dbglvl, "clean_cloud_volume isTruncated=%d, nextMarker=%s, nbparts=%d, err=%s\n", ctx.isTruncated, ctx.nextMarker, ctx.aparts->size(), ctx.errMsg?ctx.errMsg:"None"); if (ctx.status != S3StatusOK) { pm_strcpy(err, S3Errors[ctx.status]); bfree_and_null(ctx.nextMarker); Leave(dbglvl); return false; } } bfree_and_null(ctx.nextMarker); char *part = NULL; foreach_alist(part, &parts) { if (cancel_cb && cancel_cb->fct && cancel_cb->fct(cancel_cb->arg)) { Mmsg(err, _("Job cancelled.\n")); Leave(dbglvl); return false; } /* don't forget to specify the volume name is the object path */ Dmsg1(dbglvl, "Object to cleanup: %s\n", part); ctx.caller = "S3_delete_object"; S3_delete_object(&s3ctx, part, NULL, 0, &responseHandler, &ctx); if (ctx.status != S3StatusOK) { /* error message should have been filled within response cb */ Leave(dbglvl); return false; } else { Dmsg2(dbglvl, "clean_cloud_volume for %s: Unlink file %s.\n", VolumeName, part); } } Leave(dbglvl); return true; } /* * Not thread safe */ bool s3_driver::truncate_cloud_volume(const char *VolumeName, ilist *trunc_parts, cancel_callback *cancel_cb, POOLMEM *&err) { Enter(dbglvl); bacula_ctx ctx(err); int last_index = (int)trunc_parts->last_index(); POOLMEM *cloud_fname = get_pool_memory(PM_FNAME); for (int i=1; (i<=last_index); i++) { if 
(!trunc_parts->get(i)) { continue; } if (cancel_cb && cancel_cb->fct && cancel_cb->fct(cancel_cb->arg)) { Mmsg(err, _("Job cancelled.\n")); goto get_out; } /* don't forget to specify the volume name is the object path */ make_cloud_filename(cloud_fname, VolumeName, i); Dmsg1(dbglvl, "Object to truncate: %s\n", cloud_fname); ctx.caller = "S3_delete_object"; S3_delete_object(&s3ctx, cloud_fname, NULL, 0, &responseHandler, &ctx); if (ctx.status != S3StatusOK) { /* error message should have been filled within response cb */ goto get_out; } } get_out: free_pool_memory(cloud_fname); bfree_and_null(ctx.nextMarker); Leave(dbglvl); return (err[0] == 0); } void s3_driver::make_cloud_filename(POOLMEM *&filename, const char *VolumeName, uint32_t apart) { filename[0] = 0; add_vol_and_part(filename, VolumeName, "part", apart); Dmsg1(dbglvl, "make_cloud_filename: %s\n", filename); } bool s3_driver::retry_put_object(S3Status status, int retry) { if (S3_status_is_retryable(status)) { Dmsg2(dbglvl, "retry copy_cache_part_to_cloud() status=%s %d\n", S3_get_status_name(status), retry); bmicrosleep((max_upload_retries - retry + 1) * 3, 0); /* Wait more and more after each retry */ return true; } return false; } /* * Copy a single cache part to the cloud */ bool s3_driver::copy_cache_part_to_cloud(transfer *xfer) { Enter(dbglvl); POOLMEM *cloud_fname = get_pool_memory(PM_FNAME); make_cloud_filename(cloud_fname, xfer->m_volume_name, xfer->m_part); uint32_t retry = max_upload_retries; S3Status status = S3StatusOK; do { /* when the driver decide to retry, it must reset the processed size */ xfer->reset_processed_size(); status = put_object(xfer, xfer->m_cache_fname, cloud_fname); if (status != S3StatusOK) { xfer->inc_retry(); } --retry; } while (retry_put_object(status, retry) && (retry>0)); free_pool_memory(cloud_fname); Leave(dbglvl); return (status == S3StatusOK); } /* * Copy a single object (part) from the cloud to the cache */ int s3_driver::copy_cloud_part_to_cache(transfer *xfer) { Enter(dbglvl); POOLMEM *cloud_fname = get_pool_memory(PM_FNAME); make_cloud_filename(cloud_fname, xfer->m_volume_name, xfer->m_part); int rtn = get_cloud_object(xfer, cloud_fname, xfer->m_cache_fname); free_pool_memory(cloud_fname); Leave(dbglvl); return rtn; } bool s3_driver::restore_cloud_object(transfer *xfer, const char *cloud_fname) { if (m_glacier_driver) { return m_glacier_driver->restore_cloud_object(xfer,cloud_fname); } return false; } bool s3_driver::is_waiting_on_server(transfer *xfer) { Enter(dbglvl); POOL_MEM cloud_fname(PM_FNAME); make_cloud_filename(cloud_fname.addr(), xfer->m_volume_name, xfer->m_part); if (m_glacier_driver) { Leave(dbglvl); return m_glacier_driver->is_waiting_on_server(xfer, cloud_fname.addr()); } Leave(dbglvl); return false; } /* * NOTE: See the SD Cloud resource in stored_conf.h */ bool s3_driver::init(CLOUD *cloud, POOLMEM *&err) { S3Status status; if (cloud->host_name == NULL) { Mmsg1(err, "Failed to initialize S3 Cloud. ERR=Hostname not set in cloud resource %s\n", cloud->hdr.name); return false; } if (cloud->access_key == NULL) { Mmsg1(err, "Failed to initialize S3 Cloud. ERR=AccessKey not set in cloud resource %s\n", cloud->hdr.name); return false; } if (cloud->secret_key == NULL) { Mmsg1(err, "Failed to initialize S3 Cloud. 
ERR=SecretKey not set in cloud resource %s\n", cloud->hdr.name); return false; } /* Setup bucket context for S3 lib */ s3ctx.hostName = cloud->host_name; s3ctx.bucketName = cloud->bucket_name; s3ctx.protocol = (S3Protocol)cloud->protocol; s3ctx.uriStyle = (S3UriStyle)cloud->uri_style; s3ctx.accessKeyId = cloud->access_key; s3ctx.secretAccessKey = cloud->secret_key; s3ctx.authRegion = cloud->region; if ((status = S3_initialize("s3", S3_INIT_ALL, s3ctx.hostName)) != S3StatusOK) { Mmsg1(err, "Failed to initialize S3 lib. ERR=%s\n", S3_get_status_name(status)); return false; } #if BEEF /*load glacier */ if (me) { if (m_glacier_driver) { delete m_glacier_driver; m_glacier_driver = NULL; } m_glacier_driver = load_glacier_driver(me->plugin_directory); if (m_glacier_driver) { if (!m_glacier_driver->init(cloud, err)) { return false; } } } #endif return true; } bool s3_driver::start_of_job(POOLMEM *&msg) { if (msg) { Mmsg(msg, _("Using S3 cloud driver Host=%s Bucket=%s"), s3ctx.hostName, s3ctx.bucketName); } return true; } bool s3_driver::end_of_job(POOLMEM *&msg) { return true; } bool s3_driver::term(POOLMEM *&msg) { unload_drivers(); S3_deinitialize(); return true; } /* * libs3 callback for get_num_cloud_volume_parts_list() */ static S3Status partslistBucketCallback( int isTruncated, const char *nextMarker, int numObj, const S3ListBucketContent *object, int commonPrefixesCount, const char **commonPrefixes, void *callbackCtx) { bacula_ctx *ctx = (bacula_ctx *)callbackCtx; Enter(dbglvl); for (int i = 0; ctx->parts && (i < numObj); i++) { const S3ListBucketContent *obj = &(object[i]); const char *ext=strstr(obj->key, "part."); if (obj && ext!=NULL) { cloud_part *part = (cloud_part*) malloc(sizeof(cloud_part)); part->index = atoi(&(ext[5])); part->mtime = obj->lastModified; part->size = obj->size; bmemzero(part->hash64, 64); ctx->parts->put(part->index, part); Dmsg1(dbglvl, "partslistBucketCallback: part.%d retrieved\n", part->index); } } ctx->isTruncated = isTruncated; if (ctx->nextMarker) { bfree_and_null(ctx->nextMarker); } if (isTruncated && numObj>0) { /* Workaround a bug with nextMarker */ const S3ListBucketContent *obj = &(object[numObj-1]); ctx->nextMarker = bstrdup(obj->key); } Leave(dbglvl); if (ctx->cancel_cb && ctx->cancel_cb->fct && ctx->cancel_cb->fct(ctx->cancel_cb->arg)) { POOL_MEM msg; Mmsg(msg, _("Job cancelled.\n")); pm_strcat(ctx->errMsg, msg); Leave(dbglvl); return S3StatusAbortedByCallback; } Leave(dbglvl); return S3StatusOK; } S3ListBucketHandler partslistBucketHandler = { responseHandler, &partslistBucketCallback }; bool s3_driver::get_cloud_volume_parts_list(const char* VolumeName, ilist *parts, cancel_callback *cancel_cb, POOLMEM *&err) { Enter(dbglvl); if (!parts || strlen(VolumeName) == 0) { pm_strcpy(err, "Invalid argument"); Leave(dbglvl); return false; } bacula_ctx ctx(err); ctx.cancel_cb = cancel_cb; ctx.parts = parts; ctx.isTruncated = 1; /* pass into the while loop at least once */ ctx.caller = "S3_list_bucket"; while (ctx.isTruncated!=0) { ctx.isTruncated = 0; S3_list_bucket(&s3ctx, VolumeName, ctx.nextMarker, NULL, 0, NULL, 0, &partslistBucketHandler, &ctx); Dmsg4(dbglvl, "get_cloud_volume_parts_list isTruncated=%d, nextMarker=%s, nbparts=%d, err=%s\n", ctx.isTruncated, ctx.nextMarker, ctx.parts->size(), ctx.errMsg?ctx.errMsg:"None"); if (ctx.status != S3StatusOK) { pm_strcpy(err, S3Errors[ctx.status]); bfree_and_null(ctx.nextMarker); Leave(dbglvl); return false; } } bfree_and_null(ctx.nextMarker); Leave(dbglvl); return true; } bool 
s3_driver::get_one_cloud_volume_part(const char* part_path_name, ilist *parts, POOLMEM *&err) { Enter(dbglvl); if (!parts || strlen(part_path_name) == 0) { pm_strcpy(err, "Invalid argument"); Leave(dbglvl); return false; } bacula_ctx ctx(err); ctx.parts = parts; ctx.isTruncated = 0; /* ignore truncation in this case */ ctx.caller = "S3_list_bucket"; /* S3 documentation claims the parts will be returned in binary order */ /* so part.1 < part.11 b.e. This assumed, the first part retrieved is the one we seek */ S3_list_bucket(&s3ctx, part_path_name, ctx.nextMarker, NULL, 1, NULL, 0, &partslistBucketHandler, &ctx); Dmsg4(dbglvl, "get_one_cloud_volume_part isTruncated=%d, nextMarker=%s, nbparts=%d, err=%s\n", ctx.isTruncated, ctx.nextMarker, ctx.parts->size(), ctx.errMsg?ctx.errMsg:"None"); if (ctx.status != S3StatusOK) { pm_strcpy(err, S3Errors[ctx.status]); bfree_and_null(ctx.nextMarker); Leave(dbglvl); return false; } bfree_and_null(ctx.nextMarker); Leave(dbglvl); return true; } /* * libs3 callback for get_cloud_volumes_list() */ static S3Status volumeslistBucketCallback( int isTruncated, const char *nextMarker, int numObj, const S3ListBucketContent *object, int commonPrefixesCount, const char **commonPrefixes, void *callbackCtx) { bacula_ctx *ctx = (bacula_ctx *)callbackCtx; Enter(dbglvl); for (int i = 0; ctx->volumes && (i < commonPrefixesCount); i++) { char *cp = bstrdup(commonPrefixes[i]); cp[strlen(cp)-1] = 0; ctx->volumes->append(cp); } ctx->isTruncated = isTruncated; if (ctx->nextMarker) { bfree_and_null(ctx->nextMarker); } if (isTruncated && numObj>0) { /* Workaround a bug with nextMarker */ const S3ListBucketContent *obj = &(object[numObj-1]); ctx->nextMarker = bstrdup(obj->key); } if (ctx->cancel_cb && ctx->cancel_cb->fct && ctx->cancel_cb->fct(ctx->cancel_cb->arg)) { POOL_MEM msg; Mmsg(msg, _("Job cancelled.\n")); pm_strcat(ctx->errMsg, msg); Leave(dbglvl); return S3StatusAbortedByCallback; } Leave(dbglvl); return S3StatusOK; } S3ListBucketHandler volumeslistBucketHandler = { responseHandler, &volumeslistBucketCallback }; bool s3_driver::get_cloud_volumes_list(alist *volumes, cancel_callback *cancel_cb, POOLMEM *&err) { Enter(dbglvl); if (!volumes) { pm_strcpy(err, "Invalid argument"); Leave(dbglvl); return false; } bacula_ctx ctx(err); ctx.volumes = volumes; ctx.cancel_cb = cancel_cb; ctx.isTruncated = 1; /* pass into the while loop at least once */ ctx.caller = "S3_list_bucket"; while (ctx.isTruncated!=0) { ctx.isTruncated = 0; S3_list_bucket(&s3ctx, NULL, ctx.nextMarker, "/", 0, NULL, 0, &volumeslistBucketHandler, &ctx); if (ctx.status != S3StatusOK) { break; } } bfree_and_null(ctx.nextMarker); Leave(dbglvl); return (err[0] == 0); } #endif /* HAVE_LIBS3 */ bacula-15.0.3/src/stored/bextract.c0000644000175000017500000006017514771010173016742 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * * Dumb program to extract files from a Bacula backup. * * Kern E. 
Sibbald, MM * */ #include "bacula.h" #include "stored.h" #include "ch.h" #include "findlib/find.h" #ifdef HAVE_ZSTD #include <zstd.h> ZSTD_DCtx *ZSTD_decompress_workset = NULL; #endif #ifdef HAVE_LZO #include <lzo/lzoconf.h> #include <lzo/lzo1x.h> #endif extern bool parse_sd_config(CONFIG *config, const char *configfile, int exit_code); static void do_extract(char *fname); static bool record_cb(DCR *dcr, DEV_RECORD *rec); static DEVICE *dev = NULL; static DCR *dcr; static BFILE bfd; static JCR *jcr; static FF_PKT *ff; static BSR *bsr = NULL; static bool extract = false; static int non_support_data = 0; static long total = 0; static ATTR *attr; static POOLMEM *curr_fname; static char *where; static uint64_t num_errors = 0; static uint64_t num_records = 0; static uint32_t num_files = 0; static uint32_t compress_buf_size = 70000; static POOLMEM *compress_buf; static int prog_name_msg = 0; static int win32_data_msg = 0; static char *VolumeName = NULL; static char *wbuf; /* write buffer address */ static uint32_t wsize; /* write size */ static uint64_t fileAddr = 0; /* file write address */ static CONFIG *config; #define CONFIG_FILE "bacula-sd.conf" char *configfile = NULL; bool skip_extract = false;
static void usage() { fprintf(stderr, _( PROG_COPYRIGHT "\n%sVersion: %s (%s)\n\n" "Usage: bextract <options> <bacula-archive-device-name> <directory-to-store-files>\n" " -b <file> specify a bootstrap file\n" " -c <file> specify a Storage configuration file\n" " -d <nn> set debug level to <nn>\n" " -dt print timestamp in debug output\n" " -T send debug traces to trace file (stored in /tmp)\n" " -e <file> exclude list\n" " -i <file> include list\n" " -p proceed in spite of I/O errors\n" " -t read data from volume, do not write anything\n" " -v verbose\n" " -V <volumes> specify Volume names (separated by |)\n" " -? print this message\n\n"), 2000, BDEMO, VERSION, BDATE); exit(1); }
int main (int argc, char *argv[]) { int ch; FILE *fd; char line[1000]; bool got_inc = false; BtoolsAskDirHandler askdir_handler; init_askdir_handler(&askdir_handler); setlocale(LC_ALL, ""); bindtextdomain("bacula", LOCALEDIR); textdomain("bacula"); init_stack_dump(); lmgr_init_thread(); working_directory = "/tmp"; my_name_is(argc, argv, "bextract"); init_msg(NULL, NULL); /* setup message handler */ OSDependentInit(); ff = init_find_files(); binit(&bfd); while ((ch = getopt(argc, argv, "Ttb:c:d:e:i:pvV:?")) != -1) { switch (ch) { case 't': skip_extract = true; break; case 'b': /* bootstrap file */ bsr = parse_bsr(NULL, optarg); break; case 'T': /* Send debug to trace file */ set_trace(1); break; case 'c': /* specify config file */ if (configfile != NULL) { free(configfile); } configfile = bstrdup(optarg); break; case 'd': /* debug level */ if (*optarg == 't') { dbg_timestamp = true; } else { char *p; /* We probably find a tag list -d 10,sql,bvfs */ if ((p = strchr(optarg, ',')) != NULL) { *p = 0; } debug_level = atoi(optarg); if (debug_level <= 0) { debug_level = 1; } if (p) { debug_parse_tags(p+1, &debug_level_tags); } } break; case 'e': /* exclude list */ if ((fd = bfopen(optarg, "rb")) == NULL) { berrno be; Pmsg2(0, _("Could not open exclude file: %s, ERR=%s\n"), optarg, be.bstrerror()); exit(1); } while (fgets(line, sizeof(line), fd) != NULL) { strip_trailing_junk(line); Dmsg1(900, "add_exclude %s\n", line); add_fname_to_exclude_list(ff, line); } fclose(fd); break; case 'i': /* include list */ if ((fd = bfopen(optarg, "rb")) == NULL) { berrno be; Pmsg2(0, _("Could not open include file: %s, ERR=%s\n"), optarg, be.bstrerror()); exit(1); } while (fgets(line, sizeof(line), fd) != NULL) { strip_trailing_junk(line); Dmsg1(900, "add_include %s\n", line);
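      /*
       * Illustrative sketch, not part of the original source: the -i and -e
       * list files read here contain one path per line.  A hypothetical
       * invocation restoring /etc and /home/user from an SD device named
       * "FileStorage" into /tmp/restore (all file, volume and device names
       * below are examples only):
       *
       *   cat >include.lst <<EOF
       *   /etc
       *   /home/user
       *   EOF
       *   bextract -c /opt/bacula/etc/bacula-sd.conf -b restore.bsr \
       *            -i include.lst -V Vol001 FileStorage /tmp/restore
       */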
add_fname_to_include_list(ff, 0, line); } fclose(fd); got_inc = true; break; case 'p': forge_on = true; break; case 'v': verbose++; break; case 'V': /* Volume name */ VolumeName = optarg; break; case '?': default: usage(); } /* end switch */ } /* end while */ argc -= optind; argv += optind; if (argc != 2) { Pmsg0(0, _("Wrong number of arguments: \n")); usage(); } if (configfile == NULL) { configfile = bstrdup(CONFIG_FILE); } config = New(CONFIG()); parse_sd_config(config, configfile, M_ERROR_TERM); setup_me(); load_sd_plugins(me->plugin_directory); if (!got_inc) { /* If no include file, */ add_fname_to_include_list(ff, 0, "/"); /* include everything */ } where = argv[1]; do_extract(argv[0]); if (bsr) { free_bsr(bsr); } if (prog_name_msg) { Pmsg1(000, _("%d Program Name and/or Program Data Stream records ignored.\n"), prog_name_msg); } if (win32_data_msg) { Pmsg1(000, _("%d Win32 data or Win32 gzip data stream records. Ignored.\n"), win32_data_msg); } #ifdef HAVE_ZSTD ZSTD_freeDCtx(ZSTD_decompress_workset); #endif term_include_exclude_files(ff); term_find_files(ff); return 0; } static void do_extract(char *devname) { char ed1[50]; struct stat statp; enable_backup_privileges(NULL, 1); jcr = setup_jcr("bextract", devname, bsr, VolumeName, SD_READ, true/*read dedup data*/, 10/*retry count*/); if (!jcr) { exit(1); } dev = jcr->read_dcr->dev; if (!dev) { exit(1); } dcr = jcr->read_dcr; /* Make sure where directory exists and that it is a directory */ if (stat(where, &statp) < 0) { berrno be; Emsg2(M_ERROR_TERM, 0, _("Cannot stat %s. It must exist. ERR=%s\n"), where, be.bstrerror()); } if (!S_ISDIR(statp.st_mode)) { Emsg1(M_ERROR_TERM, 0, _("%s must be a directory.\n"), where); } free(jcr->where); jcr->where = bstrdup(where); attr = new_attr(jcr); compress_buf = get_memory(compress_buf_size); curr_fname = get_pool_memory(PM_FNAME); *curr_fname = 0; if (jcr->dedup && skip_extract) { /* We want to force the checksum verification for blocks that come from DDE */ jcr->dedup->set_checksum_after_rehydration(true); } read_records(dcr, record_cb, mount_next_read_volume); /* If output file is still open, it was the last one in the * archive since we just hit an end of file, so close the file. */ if (is_bopen(&bfd)) { set_attributes(jcr, attr, &bfd); } dev->free_dedup_rehydration_interface(dcr); // disconnect from the DDE release_device(dcr); free_attr(attr); free_jcr(jcr); dev->term(NULL); free_pool_memory(curr_fname); printf(_("%u files restored.\n"), num_files); if (num_errors) { printf(_("Found %s error%s\n"), edit_uint64(num_errors, ed1), num_errors>1? 
"s":""); } return; } static bool store_data(int32_t stream, BFILE *bfd, char *data, const int32_t length) { bool skip_win32 = false; /* This is a workaround of VSS Plugin backups that were incorrectly * generating WIN32 streams with the offset option */ if (attr->data_stream == STREAM_WIN32_DATA && stream & STREAM_BIT_OFFSETS) { set_portable_backup(bfd); skip_win32 = true; /* handle data as regular files */ Dmsg0(50, "Found incorrect stream for WIN32 data\n"); } if (is_win32_stream(attr->data_stream) && !have_win32_api() && !skip_win32) { set_portable_backup(bfd); if (!processWin32BackupAPIBlock(bfd, data, length)) { berrno be; Emsg2(M_ERROR_TERM, 0, _("Write error on %s: %s\n"), attr->ofname, be.bstrerror()); return false; } } else if (bwrite(bfd, data, length) != (ssize_t)length) { berrno be; Emsg2(M_ERROR_TERM, 0, _("Write error on %s: %s\n"), attr->ofname, be.bstrerror()); return false; } return true; } /* * Called here for each record from read_records() */ static bool record_cb(DCR *dcr, DEV_RECORD *rec) { int stat, ret=true; JCR *jcr = dcr->jcr; char ed1[50]; bool restoredatap = false; POOLMEM *orgdata = NULL; uint32_t orgdata_len = 0; if (rec->FileIndex < 0) { return true; /* we don't want labels */ } /* Do rehydration */ if (rec->Stream & STREAM_BIT_DEDUPLICATION_DATA) { if (!jcr->read_dcr->dev->setup_dedup_rehydration_interface(jcr->read_dcr)) { Jmsg0(jcr, M_FATAL, 0, _("Cannot do rehydration, device is not dedup aware\n")); return false; } int size; bool despite_of_error = forge_on; int err = jcr->dedup->record_rehydration(dcr, rec, jcr->dedup->get_msgbuf(), jcr->errmsg, despite_of_error, &size); if (err) { Jmsg(jcr, M_ERROR, 0, _("Got rehydration error at file=%d record=%s fname=%s volsessionid=%d volsessiontime=%d Msg=%s"), num_files, edit_uint64(num_records, ed1), curr_fname, rec->VolSessionId, rec->VolSessionTime, jcr->errmsg); if (err == -1) { /* cannot read data from DDE */ Jmsg(jcr, M_FATAL, 0, "%s", jcr->errmsg); return false; } else { jcr->JobStatus = JS_Running; /* We want to continue */ num_errors++; } } /* Keep old pointers and restore them when at the end, * after this point, we need to use goto bail_out */ orgdata = rec->data; orgdata_len = rec->data_len; restoredatap = true; rec->data = jcr->dedup->get_msgbuf(); rec->data_len = size; } /* In this mode, we do not create any file on disk, just read * everything from the volume. */ if (skip_extract) { switch (rec->maskedStream) { case STREAM_UNIX_ATTRIBUTES: case STREAM_UNIX_ATTRIBUTES_EX: if (!unpack_attributes_record(jcr, rec->Stream, rec->data, rec->data_len, attr)) { Emsg0(M_ERROR_TERM, 0, _("Cannot continue.\n")); } if (verbose) { attr->data_stream = decode_stat(attr->attr, &attr->statp, sizeof(attr->statp), &attr->LinkFI); build_attr_output_fnames(jcr, attr); print_ls_output(jcr, attr); } pm_strcpy(curr_fname, attr->fname); num_files++; break; } num_records++; /* We display some progress information if verbose not set or set to 2 */ if (verbose != 1 && (num_records % 200000) == 0L) { fprintf(stderr, "\rfiles=%d records=%s\n", num_files, edit_uint64(num_records, ed1)); } ret = true; goto bail_out; } /* File Attributes stream */ switch (rec->maskedStream) { case STREAM_UNIX_ATTRIBUTES: case STREAM_UNIX_ATTRIBUTES_EX: case STREAM_UNIX_ATTRIBUTE_UPDATE: /* If extracting, it was from previous stream, so * close the output file. 
*/ if (extract) { if (!is_bopen(&bfd)) { Emsg0(M_ERROR, 0, _("Logic error output file should be open but is not.\n")); } set_attributes(jcr, attr, &bfd); extract = false; } if (!unpack_attributes_record(jcr, rec->Stream, rec->data, rec->data_len, attr)) { Emsg0(M_ERROR_TERM, 0, _("Cannot continue.\n")); } /* Keep the name of the current file if we find a bad block */ pm_strcpy(curr_fname, attr->fname); if (file_is_included(ff, attr->fname) && !file_is_excluded(ff, attr->fname)) { attr->data_stream = decode_stat(attr->attr, &attr->statp, sizeof(attr->statp), &attr->LinkFI); if (!is_restore_stream_supported(attr->data_stream)) { if (!non_support_data++) { Jmsg(jcr, M_ERROR, 0, _("%s stream not supported on this Client.\n"), stream_to_ascii(attr->data_stream)); } extract = false; goto bail_out; } build_attr_output_fnames(jcr, attr); if (attr->type == FT_DELETED) { /* TODO: choose the right fname/ofname */ Jmsg(jcr, M_INFO, 0, _("%s was deleted.\n"), attr->fname); extract = false; goto bail_out; } extract = false; stat = create_file(jcr, attr, &bfd, REPLACE_ALWAYS); switch (stat) { case CF_ERROR: case CF_SKIP: break; case CF_EXTRACT: extract = true; print_ls_output(jcr, attr); num_files++; fileAddr = 0; break; case CF_CREATED: set_attributes(jcr, attr, &bfd); print_ls_output(jcr, attr); num_files++; fileAddr = 0; break; } } break; case STREAM_RESTORE_OBJECT: case STREAM_PLUGIN_OBJECT: case STREAM_PLUGIN_META_BLOB: case STREAM_PLUGIN_META_CATALOG: /* nothing to do */ break; /* Data stream and extracting */ case STREAM_FILE_DATA: case STREAM_SPARSE_DATA: case STREAM_WIN32_DATA: if (extract) { if (is_offset_stream(rec->Stream)) { ser_declare; uint64_t faddr; wbuf = rec->data + OFFSET_FADDR_SIZE; wsize = rec->data_len - OFFSET_FADDR_SIZE; ser_begin(rec->data, OFFSET_FADDR_SIZE); unser_uint64(faddr); if (fileAddr != faddr) { fileAddr = faddr; if (blseek(&bfd, (boffset_t)fileAddr, SEEK_SET) < 0) { berrno be; Emsg3(M_ERROR_TERM, 0, _("Seek error Addr=%llu on %s: %s\n"), fileAddr, attr->ofname, be.bstrerror()); } } } else { wbuf = rec->data; wsize = rec->data_len; } total += wsize; Dmsg2(8, "Write %u bytes, total=%u\n", wsize, total); store_data(rec->Stream, &bfd, wbuf, wsize); fileAddr += wsize; } break; /* GZIP data stream */ case STREAM_GZIP_DATA: case STREAM_SPARSE_GZIP_DATA: case STREAM_WIN32_GZIP_DATA: #ifdef HAVE_LIBZ if (extract) { uLong compress_len = compress_buf_size; int stat = Z_BUF_ERROR; if (is_offset_stream(rec->Stream)) { ser_declare; uint64_t faddr; char ec1[50]; wbuf = rec->data + OFFSET_FADDR_SIZE; wsize = rec->data_len - OFFSET_FADDR_SIZE; ser_begin(rec->data, OFFSET_FADDR_SIZE); unser_uint64(faddr); if ((rec->Stream & STREAM_BIT_OFFSETS) == 0 && fileAddr != faddr) { fileAddr = faddr; if (blseek(&bfd, (boffset_t)fileAddr, SEEK_SET) < 0) { berrno be; Emsg3(M_ERROR, 0, _("Seek to %s error on %s: ERR=%s\n"), edit_uint64(fileAddr, ec1), attr->ofname, be.bstrerror()); extract = false; goto bail_out; } } } else { wbuf = rec->data; wsize = rec->data_len; } while (compress_len < 10000000 && (stat=uncompress((Byte *)compress_buf, &compress_len, (const Byte *)wbuf, (uLong)wsize)) == Z_BUF_ERROR) { /* The buffer size is too small, try with a bigger one */ compress_len = 2 * compress_len; compress_buf = check_pool_memory_size(compress_buf, compress_len); } if (stat != Z_OK) { Emsg1(M_ERROR, 0, _("Uncompression error. 
ERR=%d\n"), stat); extract = false; goto bail_out; } Dmsg2(100, "Write uncompressed %d bytes, total before write=%d\n", compress_len, total); store_data(rec->Stream, &bfd, compress_buf, compress_len); total += compress_len; fileAddr += compress_len; Dmsg2(100, "Compress len=%d uncompressed=%d\n", rec->data_len, compress_len); } #else if (extract) { Emsg0(M_ERROR, 0, _("GZIP data stream found, but GZIP not configured!\n")); extract = false; goto bail_out; } #endif break; /* Compressed data stream */ case STREAM_COMPRESSED_DATA: case STREAM_SPARSE_COMPRESSED_DATA: case STREAM_WIN32_COMPRESSED_DATA: if (extract) { uint32_t comp_magic, comp_len; uint16_t comp_level, comp_version; if (is_offset_stream(rec->Stream)) { ser_declare; uint64_t faddr; char ec1[50]; wbuf = rec->data + OFFSET_FADDR_SIZE; wsize = rec->data_len - OFFSET_FADDR_SIZE; ser_begin(rec->data, OFFSET_FADDR_SIZE); unser_uint64(faddr); if ((rec->Stream & STREAM_BIT_OFFSETS) == 0 && fileAddr != faddr) { fileAddr = faddr; if (blseek(&bfd, (boffset_t)fileAddr, SEEK_SET) < 0) { berrno be; Emsg3(M_ERROR, 0, _("Seek to %s error on %s: ERR=%s\n"), edit_uint64(fileAddr, ec1), attr->ofname, be.bstrerror()); extract = false; goto bail_out; } } } else { wbuf = rec->data; wsize = rec->data_len; } /* read compress header */ unser_declare; unser_begin(wbuf, sizeof(comp_stream_header)); unser_uint32(comp_magic); unser_uint32(comp_len); unser_uint16(comp_level); unser_uint16(comp_version); Dmsg4(200, "Compressed data stream found: magic=0x%x, len=%d, level=%d, ver=0x%x\n", comp_magic, comp_len, comp_level, comp_version); /* version check */ if (comp_version != COMP_HEAD_VERSION) { Emsg1(M_ERROR, 0, _("Compressed header version error. version=0x%x\n"), comp_version); ret = false; goto bail_out; } /* size check */ if (comp_len + sizeof(comp_stream_header) != wsize) { Emsg2(M_ERROR, 0, _("Compressed header size error. comp_len=%d, msglen=%d\n"), comp_len, wsize); ret = false; goto bail_out; } switch(comp_magic) { #ifdef HAVE_LZO case COMPRESS_LZO1X: { int r; lzo_uint compress_len = compress_buf_size; const unsigned char *cbuf = (const unsigned char*) wbuf + sizeof(comp_stream_header); int real_compress_len = wsize - sizeof(comp_stream_header); Dmsg2(200, "Comp_len=%d msglen=%d\n", compress_len, wsize); while ((r=lzo1x_decompress_safe(cbuf, real_compress_len, (unsigned char *)compress_buf, &compress_len, NULL)) == LZO_E_OUTPUT_OVERRUN) { /* The buffer size is too small, try with a bigger one */ compress_len = 2 * compress_len; compress_buf = check_pool_memory_size(compress_buf, compress_len); } if (r != LZO_E_OK) { Emsg1(M_ERROR, 0, _("LZO uncompression error. 
ERR=%d\n"), r); extract = false; goto bail_out; } Dmsg2(100, "Write uncompressed %d bytes, total before write=%d\n", compress_len, total); store_data(rec->Stream, &bfd, compress_buf, compress_len); total += compress_len; fileAddr += compress_len; Dmsg2(100, "Compress len=%d uncompressed=%d\n", rec->data_len, compress_len); break; } #endif #ifdef HAVE_ZSTD case COMPRESS_ZSTD: { if (!ZSTD_decompress_workset) { ZSTD_decompress_workset = ZSTD_createDCtx(); } size_t compress_len = compress_buf_size; const unsigned char*cbuf = (const unsigned char*) wbuf + sizeof(comp_stream_header); size_t real_compress_len = wsize - sizeof(comp_stream_header); Dmsg2(200, "Comp_len=%d msglen=%d\n", compress_len, wsize); unsigned long long rSize = ZSTD_getFrameContentSize(cbuf, real_compress_len); if (rSize == ZSTD_CONTENTSIZE_ERROR || rSize == ZSTD_CONTENTSIZE_UNKNOWN) { Emsg1(M_ERROR, 0, _("ZSTD uncompression error. ERR=%d\n"), rSize); extract = false; goto bail_out; } compress_buf = check_pool_memory_size(compress_buf, rSize); rSize = ZSTD_decompressDCtx(ZSTD_decompress_workset, (unsigned char *)compress_buf, rSize, cbuf, real_compress_len); if (rSize == ZSTD_CONTENTSIZE_ERROR || rSize == ZSTD_CONTENTSIZE_UNKNOWN) { Emsg1(M_ERROR, 0, _("ZSTD uncompression error. ERR=%d\n"), rSize); goto bail_out; } Dmsg2(100, "Write uncompressed %d bytes, total before write=%d\n", (int)rSize, total); store_data(rec->Stream, &bfd, compress_buf, rSize); total += rSize; fileAddr += rSize; Dmsg2(100, "Compress len=%d uncompressed=%d\n", rec->data_len, rSize); } break; #endif default: Emsg1(M_ERROR, 0, _("Compression algorithm 0x%x found, but not supported!\n"), comp_magic); extract = false; goto bail_out; } } break; case STREAM_MD5_DIGEST: case STREAM_SHA1_DIGEST: case STREAM_SHA256_DIGEST: case STREAM_SHA512_DIGEST: case STREAM_XXHASH64_DIGEST: case STREAM_XXH3_64_DIGEST: case STREAM_XXH3_128_DIGEST: break; case STREAM_SIGNED_DIGEST: case STREAM_ENCRYPTED_SESSION_DATA: // TODO landonf: Investigate crypto support in the storage daemon break; case STREAM_FILEEVENT: // Nothing to do in particular break; case STREAM_PROGRAM_NAMES: case STREAM_PROGRAM_DATA: if (!prog_name_msg) { Pmsg0(000, _("Got Program Name or Data Stream. Ignored.\n")); prog_name_msg++; } break; case STREAM_PLUGIN_NAME: /* Just ignore the plugin name */ break; default: /* If extracting, weird stream (not 1 or 2), close output file anyway */ if (extract) { if (!is_bopen(&bfd)) { Emsg0(M_ERROR, 0, _("Logic error output file should be open but is not.\n")); } set_attributes(jcr, attr, &bfd); extract = false; } Jmsg(jcr, M_ERROR, 0, _("Unexpected stream=\"%s\" ignored. This shouldn't happen!\n"), stream_to_ascii(rec->Stream)); break; } /* end switch */ bail_out: if (restoredatap) { rec->data = orgdata; rec->data_len = orgdata_len; } return ret; } bacula-15.0.3/src/stored/record_read.c0000644000175000017500000002750214771010173017374 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * * record_read.c -- Volume (tape/disk) record read functions * * Kern Sibbald, April MMI * added BB02 format October MMII * */ #include "bacula.h" #include "stored.h" /* Imported subroutines */ static const int read_dbglvl = 200|DT_VOLUME; static const int dbgep = 400|DT_VOLUME; /* debug execution path */ /* * Read the header record */ static bool read_header(DCR *dcr, DEV_BLOCK *block, DEV_RECORD *rec) { ser_declare; uint32_t VolSessionId; uint32_t VolSessionTime; int32_t FileIndex; int32_t Stream; uint32_t rhl; char buf1[100], buf2[100]; Dmsg0(dbgep, "=== rpath 1 read_header\n"); ASSERT2(!block->adata, "Block is adata. Wrong!"); /* Clear state flags */ rec->state_bits = 0; if (block->dev->is_tape()) { rec->state_bits |= REC_ISTAPE; } rec->Addr = ((DEVICE *)block->dev)->EndAddr; /* * Get the header. There is always a full header, * otherwise we find it in the next block. */ Dmsg4(read_dbglvl, "adata=%d Block=%d Ver=%d block_len=%u\n", block->adata, block->BlockNumber, block->BlockVer, block->block_len); if (block->BlockVer == 1) { rhl = RECHDR1_LENGTH; } if (block->BlockVer == 2) { rhl = RECHDR2_LENGTH; } else { rhl = RECHDR3_LENGTH; } if (rec->remlen >= rhl) { Dmsg0(dbgep, "=== rpath 2 begin unserial header\n"); Dmsg4(read_dbglvl, "read_header: remlen=%d data_len=%d rem=%d blkver=%d\n", rec->remlen, rec->data_len, rec->remainder, block->BlockVer); unser_begin(block->bufp, WRITE_RECHDR_LENGTH); if (block->BlockVer == 1) { unser_uint32(VolSessionId); unser_uint32(VolSessionTime); } else { VolSessionId = block->VolSessionId; VolSessionTime = block->VolSessionTime; } unser_int32(FileIndex); unser_int32(Stream); unser_uint32(rec->data_bytes); if (dcr->dev->have_adata_header(dcr, rec, FileIndex, Stream, VolSessionId)) { return true; } block->bufp += rhl; block->binbuf -= rhl; rec->remlen -= rhl; /* If we are looking for more (remainder!=0), we reject anything * where the VolSessionId and VolSessionTime don't agree */ if (rec->remainder && (rec->VolSessionId != VolSessionId || rec->VolSessionTime != VolSessionTime)) { rec->state_bits |= REC_NO_MATCH; Dmsg0(read_dbglvl, "remainder and VolSession doesn't match\n"); Dmsg0(dbgep, "=== rpath 4 VolSession no match\n"); return false; /* This is from some other Session */ } /* if Stream is negative, it means that this is a continuation * of a previous partially written record. */ if (Stream < 0) { /* continuation record? */ Dmsg0(dbgep, "=== rpath 5 negative stream\n"); Dmsg1(read_dbglvl, "Got negative Stream => continuation. 
remainder=%d\n", rec->remainder); rec->state_bits |= REC_CONTINUATION; if (!rec->remainder) { /* if we didn't read previously */ Dmsg0(dbgep, "=== rpath 6 no remainder\n"); rec->data_len = 0; /* return data as if no continuation */ } else if (rec->Stream != -Stream) { Dmsg0(dbgep, "=== rpath 7 wrong cont stream\n"); rec->state_bits |= REC_NO_MATCH; return false; /* This is from some other Session */ } rec->Stream = -Stream; /* set correct Stream */ } else { /* Regular record */ Dmsg0(dbgep, "=== rpath 8 normal stream\n"); rec->Stream = Stream; rec->data_len = 0; /* transfer to beginning of data */ } rec->maskedStream = rec->Stream & STREAMMASK_TYPE; rec->VolSessionId = VolSessionId; rec->VolSessionTime = VolSessionTime; rec->FileIndex = FileIndex; if (FileIndex > 0) { Dmsg0(dbgep, "=== rpath 9 FileIndex>0\n"); if (block->FirstIndex == 0) { Dmsg0(dbgep, "=== rpath 10 FirstIndex\n"); block->FirstIndex = FileIndex; } block->LastIndex = rec->FileIndex; } Dmsg6(read_dbglvl, "read_header: FI=%s SessId=%d Strm=%s len=%u rec->remlen=%d data_len=%d\n", FI_to_ascii(buf1, rec->FileIndex), rec->VolSessionId, stream_to_ascii(buf2, rec->Stream, rec->FileIndex), rec->data_bytes, rec->remlen, rec->data_len); } else { Dmsg0(dbgep, "=== rpath 11a block out of records\n"); /* * No more records in this block because the number * of remaining bytes are less than a record header * length, so return empty handed, but indicate that * he must read again. By returning, we allow the * higher level routine to fetch the next block and * then reread. */ Dmsg0(read_dbglvl, "read_header: End of block\n"); rec->state_bits |= (REC_NO_HEADER | REC_BLOCK_EMPTY); empty_block(block); /* mark block empty */ return false; } /* Sanity check */ if (rec->data_bytes >= MAX_BLOCK_LENGTH) { Dmsg0(dbgep, "=== rpath 11b maxlen too big\n"); /* * Something is wrong, force read of next block, abort * continuing with this block. */ rec->state_bits |= (REC_NO_HEADER | REC_BLOCK_EMPTY); empty_block(block); Jmsg2(dcr->jcr, M_WARNING, 0, _("Sanity check failed. maxlen=%d datalen=%d. Block discarded.\n"), MAX_BLOCK_LENGTH, rec->data_bytes); return false; } rec->data = check_pool_memory_size(rec->data, rec->data_len+rec->data_bytes); rec->rstate = st_data; return true; } /* * We have just read a header, now read the data into the record. * Note, if we do not read the full record, we will return to * read the next header, which will then come back here later * to finish reading the full record. */ static void read_data(DEV_BLOCK *block, DEV_RECORD *rec) { char buf1[100], buf2[100]; Dmsg2(dbgep, "=== rpath 22 read_data remlen=%ld data_bytes=%ld\n", rec->remlen, rec->data_bytes); ASSERT2(!block->adata, "Block is adata. Wrong!"); /* * At this point, we have read the header, now we * must transfer as much of the data record as * possible taking into account: 1. A partial * data record may have previously been transferred, * 2. The current block may not contain the whole data * record. 
*/ if (rec->remlen >= rec->data_bytes) { Dmsg0(dbgep, "=== rpath 23 full record\n"); /* Got whole record */ memcpy(rec->data+rec->data_len, block->bufp, rec->data_bytes); block->bufp += rec->data_bytes; block->binbuf -= rec->data_bytes; rec->data_len += rec->data_bytes; rec->remainder = 0; Dmsg6(190, "Rdata full adata=%d FI=%s SessId=%d Strm=%s len=%d block=%p\n", block->adata, FI_to_ascii(buf1, rec->FileIndex), rec->VolSessionId, stream_to_ascii(buf2, rec->Stream, rec->FileIndex), rec->data_len, block); } else { Dmsg0(dbgep, "=== rpath 24 partial record\n"); /* Partial record */ memcpy(rec->data+rec->data_len, block->bufp, rec->remlen); block->bufp += rec->remlen; block->binbuf -= rec->remlen; rec->data_len += rec->remlen; rec->remainder = 1; /* partial record transferred */ Dmsg1(read_dbglvl, "read_data: partial xfered=%d\n", rec->data_len); rec->state_bits |= (REC_PARTIAL_RECORD | REC_BLOCK_EMPTY); } } /* * Read a Record from the block * Returns: false if nothing read or if the continuation record does not match. * In both of these cases, a block read must be done. * true if at least the record header was read, this * routine may have to be called again with a new * block if the entire record was not read. */ bool read_record_from_block(DCR *dcr, DEV_RECORD *rec) { bool save_adata = dcr->dev->adata; bool rtn; bool firstcall=true; Dmsg0(dbgep, "=== rpath 1 Enter read_record_from block\n"); /* We return from a previous call with aligned volume, we did not read * the data on disk, let's read the next ameta record. */ if (rec->rstate == st_header_only) { rec->remainder = 0; rec->rstate = st_header; } /* Update the Record number only if we have a new record */ if (rec->remainder == 0) { rec->RecNum = dcr->block->RecNum; rec->VolumeName = dcr->CurrentVol->VolumeName; /* From JCR::VolList, freed at the end */ rec->Addr = rec->StartAddr = dcr->block->BlockAddr; } rec->BlockVer = dcr->block->BlockVer; /* needed for unser_volume_label() */ rec->blkh_options = dcr->block->blkh_options; /* needed for unser_volume_label() */ /* We read the next record */ dcr->block->RecNum++; for ( ;; ) { switch (rec->rstate) { case st_none: dump_block(dcr->dev, dcr->ameta_block, "st_none"); case st_header: Dmsg0(dbgep, "=== rpath 33 st_header\n"); dcr->set_ameta(); rec->remlen = dcr->block->binbuf; /* Note read_header sets rec->rstate on return true */ if (!read_header(dcr, dcr->block, rec)) { /* sets state */ Dmsg0(dbgep, "=== rpath 34 failed read header\n"); Dmsg0(read_dbglvl, "read_header returned EOF.\n"); goto fail_out; } continue; case st_header_only: /* We come from the st_cont_adata_rechdr state and we may read * or not the data associated */ Dmsg0(dbgep, "=== rpath 37 st_header_only\n"); goto get_out; case st_data: Dmsg0(dbgep, "=== rpath 37 st_data\n"); read_data(dcr->block, rec); rec->rstate = st_header; /* next pass look for a header */ goto get_out; case st_adata_blkhdr: dcr->set_adata(); dcr->dev->read_adata_block_header(dcr); rec->rstate = st_header; continue; case st_adata_rechdr: Dmsg1(dbgep, "=== rpath 35 st_adata_rechdr RecNum=%d\n", dcr->block->RecNum); if (!dcr->dev->read_adata_record_header(dcr, dcr->block, rec, &firstcall)) { /* sets state */ Dmsg0(dbgep, "=== rpath 36 failed read_adata rechdr\n"); Dmsg0(100, "read_link returned EOF.\n"); goto fail_out; } continue; case st_adata: switch (dcr->dev->read_adata(dcr, rec)) { case -1: goto fail_out; case 0: continue; case 1: goto get_out; } default: Dmsg0(dbgep, "=== rpath 50 default\n"); Dmsg0(0, "======= In default !!!!!\n"); Pmsg1(190, "Read: 
unknown state=%d\n", rec->rstate); goto fail_out; } } get_out: char buf1[100], buf2[100]; Dmsg6(read_dbglvl, "read_rec return: FI=%s Strm=%s len=%d rem=%d remainder=%d Num=%d\n", FI_to_ascii(buf1, rec->FileIndex), stream_to_ascii(buf2, rec->Stream, rec->FileIndex), rec->data_len, rec->remlen, rec->remainder, rec->RecNum); rtn = true; goto out; fail_out: rec->rstate = st_none; rtn = false; out: if (save_adata) { dcr->set_adata(); } else { dcr->set_ameta(); } return rtn; } bacula-15.0.3/src/stored/reserve.c0000644000175000017500000013127414771010173016600 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Drive reservation functions for Storage Daemon * * Written by Kern Sibbald, MM * * Split from job.c and acquire.c June 2005 * */ #include "bacula.h" #include "stored.h" const int dbglvl = 150; static brwlock_t reservation_lock; int reservations_lock_count = 0; /* Forward referenced functions */ static int can_reserve_drive(DCR *dcr, RCTX &rctx); static bool is_vol_in_autochanger(RCTX &rctx, VOLRES *vol); static bool reserve_device_for_append(DCR *dcr, RCTX &rctx); static bool reserve_device_for_read(DCR *dcr); static bool use_device_cmd(JCR *jcr); static int reserve_device(RCTX &rctx); static void pop_reserve_messages(JCR *jcr); static void queue_reserve_message(JCR *jcr); static int is_pool_ok(DCR *dcr); //void switch_device(DCR *dcr, DEVICE *dev); /* Requests from the Director daemon */ static char use_storage[] = "use storage=%127s media_type=%127s " "pool_name=%127s pool_type=%127s append=%d copy=%d stripe=%d wait=%d\n"; static char use_device[] = "use device=%127s\n"; /* Responses sent to Director daemon */ static char OK_device[] = "3000 OK use device device=%s protect=%d encrypt=%d\n"; static char NO_device[] = "3924 Device \"%s\" not in SD Device" " resources or no matching Media Type or is disabled.\n"; static char BAD_use[] = "3913 Bad use command: %s\n"; /* * This allows a given thread to recursively call lock_reservations. * It must, of course, call unlock_... the same number of times. */ void init_reservations_lock() { int errstat; if ((errstat=rwl_init(&reservation_lock)) != 0) { berrno be; Emsg1(M_ABORT, 0, _("Unable to initialize reservation lock. ERR=%s\n"), be.bstrerror(errstat)); } init_vol_list_lock(); } /* This applies to a drive and to Volumes */ void _lock_reservations(const char *file, int line) { int errstat; reservations_lock_count++; if ((errstat=rwl_writelock_p(&reservation_lock, file, line)) != 0) { berrno be; Emsg2(M_ABORT, 0, "rwl_writelock failure. stat=%d: ERR=%s\n", errstat, be.bstrerror(errstat)); } } void _unlock_reservations() { int errstat; reservations_lock_count--; if ((errstat=rwl_writeunlock(&reservation_lock)) != 0) { berrno be; Emsg2(M_ABORT, 0, "rwl_writeunlock failure. 
stat=%d: ERR=%s\n", errstat, be.bstrerror(errstat)); } } void term_reservations_lock() { rwl_destroy(&reservation_lock); term_vol_list_lock(); } void DCR::clear_reserved() { if (m_reserved) { m_reserved = false; dev->dec_reserved(); Dmsg3(dbglvl, "Dec reserve=%d writers=%d dev=%s\n", dev->num_reserved(), dev->num_writers, dev->print_name()); if (dev->num_reserved() == 0) { dev->reserved_pool_name[0] = 0; } } } void DCR::set_reserved_for_append() { if (dev->num_reserved() == 0) { bstrncpy(dev->reserved_pool_name, pool_name, sizeof(dev->reserved_pool_name)); Dmsg1(dbglvl, "Set reserve pool: %s\n", pool_name); } m_reserved = true; dev->set_append_reserve(); dev->inc_reserved(); Dmsg3(dbglvl, "Inc reserve=%d writers=%d dev=%s\n", dev->num_reserved(), dev->num_writers, dev->print_name()); } void DCR::set_reserved_for_read() { /* Called for each volume read, no need to increment each time */ if (!m_reserved) { m_reserved = true; dev->set_read_reserve(); dev->inc_reserved(); Dmsg2(dbglvl, "Inc reserve=%d dev=%s\n", dev->num_reserved(), dev->print_name()); } } /* * Remove any reservation from a drive and tell the system * that the volume is unused at least by us. */ void DCR::unreserve_device(bool locked) { if (!locked) { dev->Lock(); } if (is_reserved()) { clear_reserved(); reserved_volume = false; /* If we set read mode in reserving, remove it */ if (dev->can_read()) { remove_read_volume(jcr, this->VolumeName); dev->clear_read(); } if (dev->num_writers < 0) { Jmsg1(jcr, M_ERROR, 0, _("Hey! num_writers=%d!!!!\n"), dev->num_writers); dev->num_writers = 0; } if (dev->num_reserved() == 0 && dev->num_writers == 0) { generate_plugin_event(jcr, bsdEventDeviceClose, this); volume_unused(this); } } if (!locked) { dev->Unlock(); } } bool use_cmd(JCR *jcr) { /* * Get the device, media, and pool information */ if (!use_device_cmd(jcr)) { jcr->setJobStatus(JS_ErrorTerminated); memset(jcr->sd_auth_key, 0, strlen(jcr->sd_auth_key)); return false; } return true; } /* * Storage search options */ struct store_opts_t { bool PreferMountedVols; bool exact_match; bool autochanger_only; bool try_low_use_drive; bool any_drive; }; /* *** Old pre-8.8 algorithm *** not used here * jcr->PreferMountedVols = true * MntVol exact chgonly lowuse any * 1 * {true, true, false, false, false}, * 2 * {true, false, true, false, false}, * 3 * {true, false, true, false, true}, * jcr->PreferMountedVols = false * MntVol exact chgonly lowuse any * 4 * {false, false, true, false, false}, * 5 * {false, false, true, true, false}, * low instantaneous use * * 6 * {false, false, false, true, false}, * 7 * {true, true, false, false, false}, * 8 * {true, false, true, false, false}, * 9 * {true, false, true, false, true} *** End old pre-8.8 algorithm *** */ store_opts_t store_opts[] = { /* jcr->PreferMountedVols = true */ /* MntVol exact chgonly lowuse any */ /* 1 */ {true, true, false, true, false}, /* low global use */ /* 2 */ {true, false, true, true, false}, /* 3 */ {true, false, true, false, true}, /* 4 */ {true, true, false, true, false}, /* 5 */ {false, true, false, false, false}, /* 6 */ {false, false, false, false, false}, /* 7 */ {true, false, false, false, true}, /* jcr->PreferMountedVols = false */ /* MntVol exact chgonly lowuse any */ /* 8 */ {false, false, true, true, false}, /* 9 */ {false, false, true, false, false}, /* 10 */ {false, false, false, true, false}, /* 11 */ {true, true, false, true, true}, /* 12 */ {true, false, true, true, false}, /* 13 */ {true, false, true, false, true} }; static void set_options(RCTX &rctx, int 
num) { rctx.PreferMountedVols = store_opts[num-1].PreferMountedVols; rctx.exact_match = store_opts[num-1].exact_match; rctx.autochanger_only = store_opts[num-1].autochanger_only; rctx.try_low_use_drive = store_opts[num-1].try_low_use_drive; rctx.any_drive = store_opts[num-1].any_drive; rctx.low_use_drive = NULL; } static void prt_options(RCTX &rctx, int num) { Dmsg6(dbglvl, "Inx=%d mntVol=%d exact=%d chgonly=%d low_use=%d any=%d\n", num, rctx.PreferMountedVols, rctx.exact_match, rctx.autochanger_only, rctx.try_low_use_drive, rctx.any_drive); } /* * We get the following type of information: * * use storage=xxx media_type=yyy pool_name=xxx pool_type=yyy append=1 copy=0 strip=0 * use device=zzz * use device=aaa * use device=bbb * use storage=xxx media_type=yyy pool_name=xxx pool_type=yyy append=0 copy=0 strip=0 * use device=bbb * */ static bool use_device_cmd(JCR *jcr) { POOL_MEM store_name, dev_name, media_type, pool_name, pool_type; BSOCK *dir = jcr->dir_bsock; int32_t append; bool ok; int32_t Copy, Stripe, Wait; DIRSTORE *store; RCTX rctx; alist *dirstore; memset(&rctx, 0, sizeof(RCTX)); rctx.jcr = jcr; /* * If there are multiple devices, the director sends us * use_device for each device that it wants to use. */ dirstore = New(alist(10, not_owned_by_alist)); jcr->reserve_msgs = New(alist(10, not_owned_by_alist)); do { Dmsg1(dbglvl, "<dird: %s", dir->msg); ok = sscanf(dir->msg, use_storage, store_name.c_str(), media_type.c_str(), pool_name.c_str(), pool_type.c_str(), &append, &Copy, &Stripe, &Wait) == 8; if (!ok) { break; } if (append) { jcr->write_store = dirstore; } else { jcr->read_store = dirstore; } rctx.append = append; unbash_spaces(store_name); unbash_spaces(media_type); unbash_spaces(pool_name); unbash_spaces(pool_type); store = new DIRSTORE; dirstore->append(store); memset(store, 0, sizeof(DIRSTORE)); store->device = New(alist(10)); bstrncpy(store->name, store_name, sizeof(store->name)); bstrncpy(store->media_type, media_type, sizeof(store->media_type)); bstrncpy(store->pool_name, pool_name, sizeof(store->pool_name)); bstrncpy(store->pool_type, pool_type, sizeof(store->pool_type)); store->append = append; /* Now get all devices */ while (dir->recv() >= 0) { Dmsg1(dbglvl, "<dird: %s", dir->msg); ok = sscanf(dir->msg, use_device, dev_name.c_str()) == 1; if (!ok) { break; } unbash_spaces(dev_name); store->device->append(bstrdup(dev_name.c_str())); } } while (ok && dir->recv() >= 0); #ifdef xxxx /* Developers debug code */ char *device_name; if (debug_level >= dbglvl) { foreach_alist(store, dirstore) { Dmsg5(dbglvl, "Storage=%s media_type=%s pool=%s pool_type=%s append=%d\n", store->name, store->media_type, store->pool_name, store->pool_type, store->append); foreach_alist(device_name, store->device) { Dmsg1(dbglvl, " Device=%s\n", device_name); } } } #endif init_jcr_device_wait_timers(jcr); jcr->dcr = new_dcr(jcr, NULL, NULL, !rctx.append); /* get a dcr */ if (!jcr->dcr) { BSOCK *dir = jcr->dir_bsock; dir->fsend(_("3939 Could not get dcr\n")); Dmsg1(dbglvl, ">dird: %s", dir->msg); ok = false; } /* * At this point, we have a list of all the Director's Storage * resources indicated for this Job, which include Pool, PoolType, * storage name, and Media type. * Then for each of the Storage resources, we have a list of * device names that were given. * * Wiffle through Storage resources sent to us and find one that can do the backup. 
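 *
 * Added example (the storage and device names below are invented): after the
 * parsing loop above, dirstore typically looks like
 *
 *    DIRSTORE[0]: name="File1" media_type="FileType" pool_name="Default" append=1
 *                 device = { "FileChgr1-Dev1", "FileChgr1-Dev2" }
 *    DIRSTORE[1]: name="File2" media_type="FileType" pool_name="Default" append=0
 *                 device = { "FileChgr2-Dev1" }
 *
 * and the code below simply walks that list until one of the named devices can
 * actually be reserved.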
*/ if (ok) { int wait_for_device_retries = 0; int repeat = 0; bool fail = false; rctx.notify_dir = true; /* Put new dcr in proper location */ if (rctx.append) { rctx.jcr->dcr = jcr->dcr; } else { rctx.jcr->read_dcr = jcr->dcr; } lock_reservations(); for ( ; !fail && !job_canceled(jcr) && !jcr->is_incomplete(); ) { int i; pop_reserve_messages(jcr); rctx.suitable_device = false; rctx.have_volume = false; rctx.VolumeName[0] = 0; rctx.any_drive = false; if (jcr->PreferMountedVols) { for (i=1; i<=7; i++) { set_options(rctx, i); prt_options(rctx, i); if ((ok = find_suitable_device_for_job(jcr, rctx))) { break; } } } else { for (i=8; i<=13; i++) { set_options(rctx, i); prt_options(rctx, i); if ((ok = find_suitable_device_for_job(jcr, rctx))) { break; } } } if (ok) { break; } if (!Wait) { /* Director does some kind of 'quick round' now, does not want to wait for device to become available. * If none of the devices is ready now, director will probably do a second use_device call, * this time with 'wait' set to 1 */ break; } /* Keep reservations locked *except* during wait_for_device() */ unlock_reservations(); /* * The idea of looping on repeat a few times it to ensure * that if there is some subtle timing problem between two * jobs, we will simply try again, and most likely succeed. * This can happen if one job reserves a drive or finishes using * a drive at the same time a second job wants it. */ if (repeat++ > 1) { /* try algorithm 3 times */ bmicrosleep(30, 0); /* wait a bit */ Dmsg1(dbglvl, "repeat reserve algorithm JobId=%d\n", jcr->JobId); } else if (!rctx.suitable_device || !wait_for_any_device(jcr, wait_for_device_retries)) { Dmsg0(dbglvl, "Fail. !suitable_device || !wait_for_device\n"); fail = true; } lock_reservations(); dir->signal(BNET_HEARTBEAT); /* Inform Dir that we are alive */ } unlock_reservations(); if (!ok) { /* * If we get here, there are no suitable devices available, which * means nothing configured. If a device is suitable but busy * with another Volume, we will not come here. */ dir->fsend(NO_device, dev_name.c_str()); Dmsg1(dbglvl, ">dird: %s", dir->msg); } } else { unbash_spaces(dir->msg); pm_strcpy(jcr->errmsg, dir->msg); Jmsg(jcr, M_FATAL, 0, _("Failed command: %s\n"), jcr->errmsg); dir->fsend(BAD_use, jcr->errmsg); Dmsg1(dbglvl, ">dird: %s", dir->msg); } release_reserve_messages(jcr); return ok; } /* * Search for a device suitable for this job. * Note, this routine sets sets rctx.suitable_device if any * device exists within the SD. The device may not be actually * useable. * It also returns if it finds a useable device. */ bool find_suitable_device_for_job(JCR *jcr, RCTX &rctx) { bool ok = false; DIRSTORE *store; char *device_name; alist *dirstore; DCR *dcr = jcr->dcr; if (rctx.append) { dirstore = jcr->write_store; } else { dirstore = jcr->read_store; } Dmsg5(dbglvl, "Start find_suit_dev PrefMnt=%d exact=%d suitable=%d chgronly=%d any=%d\n", rctx.PreferMountedVols, rctx.exact_match, rctx.suitable_device, rctx.autochanger_only, rctx.any_drive); /* * If the appropriate conditions of this if are met, namely that * we are appending and the user wants mounted drive (or we * force try a mounted drive because they are all busy), we * start by looking at all the Volumes in the volume list. 
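 *
 * Summary sketch of the flow below (added for clarity; the code is authoritative):
 *
 *    phase 1: only when appending with PreferMountedVols set, walk a copy of the
 *             in-use Volume list and try to reserve the drive that already holds
 *             a usable Volume (dup_vol_list / reserve_device);
 *    phase 2: otherwise walk every storage and device name the Director sent,
 *             via search_res_for_device(), until reserve_device() returns 1.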
*/ if (!is_vol_list_empty() && rctx.append && rctx.PreferMountedVols) { dlist *temp_vol_list; VOLRES *vol = NULL; temp_vol_list = dup_vol_list(jcr); /* Look through reserved volumes for one we can use */ Dmsg0(dbglvl, "look for vol in vol list\n"); foreach_dlist(vol, temp_vol_list) { if (!vol->dev) { Dmsg1(dbglvl, "vol=%s no dev\n", vol->vol_name); continue; } bstrncpy(dcr->VolumeName, vol->vol_name, sizeof(dcr->VolumeName)); /* Check with Director if this Volume is OK */ if (!dir_get_volume_info(dcr, dcr->VolumeName, GET_VOL_INFO_FOR_WRITE)) { continue; } Dmsg1(dbglvl, "vol=%s OK for this job\n", vol->vol_name); foreach_alist(store, dirstore) { int stat; rctx.store = store; foreach_alist(device_name, store->device) { /* Found a device, try to use it */ rctx.device_name = device_name; rctx.device = vol->dev->device; if (vol->dev->read_only) { continue; } if (vol->dev->is_autochanger()) { Dmsg1(dbglvl, "vol=%s is in changer\n", vol->vol_name); if (!is_vol_in_autochanger(rctx, vol) || !vol->dev->autoselect || !vol->dev->enabled) { continue; } } else if (strcmp(device_name, vol->dev->device->hdr.name) != 0) { Dmsg2(dbglvl, "device=%s not suitable want %s\n", vol->dev->device->hdr.name, device_name); continue; } bstrncpy(rctx.VolumeName, vol->vol_name, sizeof(rctx.VolumeName)); rctx.have_volume = true; /* Try reserving this device and volume */ Dmsg2(dbglvl, "Try reserve vol=%s on device=%s\n", rctx.VolumeName, device_name); stat = reserve_device(rctx); if (stat == 1) { /* found available device */ Dmsg1(dbglvl, "Device reserved=%s\n", device_name); ok = true; } else { /* Error or no suitable device found */ Dmsg0(dbglvl, "No suitable device found.\n"); rctx.have_volume = false; rctx.VolumeName[0] = 0; } } if (ok) { break; } } /* end of loop over storages */ if (ok) { break; } } /* end for loop over reserved volumes */ Dmsg0(dbglvl, "lock volumes\n"); free_temp_vol_list(temp_vol_list); } /* End test for vol_list, ... */ if (ok) { Dmsg1(dbglvl, "OK dev found. Vol=%s from in-use vols list\n", rctx.VolumeName); return true; } /* * No reserved volume we can use, so now search for an available device. * * For each storage device that the user specified, we * search and see if there is a resource for that device. */ foreach_alist(store, dirstore) { rctx.store = store; foreach_alist(device_name, store->device) { int stat; rctx.device_name = device_name; stat = search_res_for_device(rctx); if (stat == 1) { /* found available device */ Dmsg1(dbglvl, "available device found=%s\n", device_name); ok = true; break; } else if (stat == 0) { /* device busy */ Dmsg1(dbglvl, "No usable device=%s, busy: not use\n", device_name); } else { /* otherwise error */ Dmsg0(dbglvl, "No usable device found.\n"); } } if (ok) { break; } } if (ok) { Dmsg1(dbglvl, "OK dev found. Vol=%s\n", rctx.VolumeName); } else { Dmsg0(dbglvl, "Leave find_suit_dev: no dev found.\n"); } return ok; } /* * Search for a particular storage device with particular storage * characteristics (MediaType). 
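 *
 * Usage sketch (added; mirrors how the caller above treats the result):
 *
 *    stat = search_res_for_device(rctx);
 *    stat == 1   a drive was reserved and its dcr is attached to the job
 *    stat == 0   a matching drive exists but is busy, the caller may wait and retry
 *    stat == -1  nothing matched (or a fatal problem), waiting will not help
 *
 * Autochanger resources are scanned first, then plain Device resources unless
 * rctx.autochanger_only is set.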
*/ int search_res_for_device(RCTX &rctx) { AUTOCHANGER *changer; int stat; Dmsg1(dbglvl, "search res for %s\n", rctx.device_name); /* Look through Autochangers first */ foreach_res(changer, R_AUTOCHANGER) { Dmsg1(dbglvl, "Try match changer res=%s\n", changer->hdr.name); /* Find resource, and make sure we were able to open it */ if (strcmp(rctx.device_name, changer->hdr.name) == 0) { /* Try each device in this AutoChanger */ foreach_alist(rctx.device, changer->device) { Dmsg1(dbglvl, "Top try changer device %s\n", rctx.device->hdr.name); if (rctx.store->append && rctx.device->read_only) { continue; } if (!rctx.device->dev) { Dmsg1(dbglvl, "Device %s not initialized correctly\n", rctx.device->hdr.name); continue; } if (!rctx.device->autoselect) { Dmsg1(dbglvl, "Device %s not autoselect skipped.\n", rctx.device->hdr.name); continue; /* device is not available */ } if (rctx.try_low_use_drive) { if (!rctx.low_use_drive) { rctx.low_use_drive = rctx.device->dev; Dmsg2(dbglvl, "Set low_use usage=%lld drv=%s\n", rctx.low_use_drive->usage, rctx.low_use_drive->print_name()); } else if ((rctx.low_use_drive->usage > rctx.device->dev->usage) || (rctx.low_use_drive->usage == rctx.device->dev->usage && rctx.low_use_drive->num_reserved() > rctx.device->dev->num_reserved())) { rctx.low_use_drive = rctx.device->dev; Dmsg2(dbglvl, "Reset low_use usage=%lld drv=%s\n", rctx.low_use_drive->usage, rctx.low_use_drive->print_name()); } else { Dmsg2(dbglvl, "Skip low_use usage=%lld drv=%s\n", rctx.low_use_drive->usage, rctx.low_use_drive->print_name()); } } else { Dmsg2(dbglvl, "try reserve vol=%s on device=%s\n", rctx.VolumeName, rctx.device->hdr.name); stat = reserve_device(rctx); if (stat != 1) { /* try another device */ continue; } /* Debug code */ if (rctx.store->append) { Dmsg2(dbglvl, "Device %s reserved=%d for append.\n", rctx.device->hdr.name, rctx.jcr->dcr->dev->num_reserved()); } else { Dmsg2(dbglvl, "Device %s reserved=%d for read.\n", rctx.device->hdr.name, rctx.jcr->read_dcr->dev->num_reserved()); } return stat; } } /* If found a drive try to reserve it */ if (rctx.try_low_use_drive && rctx.low_use_drive) { rctx.device = rctx.low_use_drive->device; Dmsg2(dbglvl, "Try reserve vol=%s on device=%s\n", rctx.VolumeName, rctx.device->hdr.name); stat = reserve_device(rctx); if (stat == 1) { /* Debug code */ if (rctx.store->append) { Dmsg3(dbglvl, "JobId=%d device %s reserved=%d for append.\n", rctx.jcr->JobId, rctx.device->hdr.name, rctx.jcr->dcr->dev->num_reserved()); } else { Dmsg3(dbglvl, "JobId=%d device %s reserved=%d for read.\n", rctx.jcr->JobId, rctx.device->hdr.name, rctx.jcr->read_dcr->dev->num_reserved()); } } else { Dmsg2(dbglvl, "Reserve for %s failed for JobId=%d.\n", rctx.store->append ? 
"append" : "read", rctx.jcr->JobId); } return stat; } } } /* Now if requested look through regular devices */ if (!rctx.autochanger_only) { foreach_res(rctx.device, R_DEVICE) { Dmsg1(dbglvl, "Try match res=%s\n", rctx.device->hdr.name); /* Find resource, and make sure we were able to open it */ if (strcmp(rctx.device_name, rctx.device->hdr.name) == 0) { Dmsg2(dbglvl, "Try reserve vol=%s on device=%s\n", rctx.VolumeName, rctx.device->hdr.name); stat = reserve_device(rctx); if (stat != 1) { /* try another device */ continue; } /* Debug code */ if (rctx.store->append) { Dmsg2(dbglvl, "Device %s reserved=%d for append.\n", rctx.device->hdr.name, rctx.jcr->dcr->dev->num_reserved()); } else { Dmsg2(dbglvl, "Device %s reserved=%d for read.\n", rctx.device->hdr.name, rctx.jcr->read_dcr->dev->num_reserved()); } return stat; } } } return -1; /* nothing found */ } /* * Walk through the autochanger resources and check if * the volume is in one of them. * * Returns: true if volume is in device * false otherwise */ static bool is_vol_in_autochanger(RCTX &rctx, VOLRES *vol) { AUTOCHANGER *changer = vol->dev->device->changer_res; /* Find resource, and make sure we were able to open it */ if (changer && strcmp(rctx.device_name, changer->hdr.name) == 0) { Dmsg1(dbglvl, "Found changer device %s\n", vol->dev->device->hdr.name); return true; } Dmsg1(dbglvl, "Incorrect changer device %s\n", changer->hdr.name); return false; } /* * Try to reserve a specific device. * * Returns: 1 -- OK, have DCR * 0 -- must wait * -1 -- fatal error */ static int reserve_device(RCTX &rctx) { bool ok; DCR *dcr; const int name_len = MAX_NAME_LENGTH; /* Make sure MediaType is OK */ Dmsg2(dbglvl, "chk MediaType device=%s request=%s\n", rctx.device->media_type, rctx.store->media_type); if (strcmp(rctx.device->media_type, rctx.store->media_type) != 0) { return -1; } /* Make sure device exists -- i.e. 
we can stat() it */ if (!rctx.device->dev) { rctx.device->dev = init_dev(rctx.jcr, rctx.device); } if (!rctx.device->dev) { if (rctx.device->changer_res) { Jmsg(rctx.jcr, M_WARNING, 0, _("\n" " Device \"%s\" in changer \"%s\" requested by DIR could not be opened or does not exist.\n"), rctx.device->hdr.name, rctx.device_name); } else { Jmsg(rctx.jcr, M_WARNING, 0, _("\n" " Device \"%s\" requested by DIR could not be opened or does not exist.\n"), rctx.device_name); } return -1; /* no use waiting */ } else if (!rctx.device->dev->enabled) { if (verbose) { Jmsg(rctx.jcr, M_WARNING, 0, _("\n" " Device \"%s\" requested by DIR is disabled.\n"), rctx.device_name); } return -1; /* no use waiting */ } rctx.suitable_device = true; Dmsg1(dbglvl, "try reserve %s\n", rctx.device->hdr.name); if (rctx.store->append) { dcr = new_dcr(rctx.jcr, rctx.jcr->dcr, rctx.device->dev, SD_APPEND); } else { dcr = new_dcr(rctx.jcr, rctx.jcr->read_dcr, rctx.device->dev, SD_READ); } if (!dcr) { BSOCK *dir = rctx.jcr->dir_bsock; dir->fsend(_("3926 Could not get dcr for device: %s\n"), rctx.device_name); Dmsg1(dbglvl, ">dird: %s", dir->msg); return -1; } bstrncpy(dcr->pool_name, rctx.store->pool_name, name_len); bstrncpy(dcr->pool_type, rctx.store->pool_type, name_len); bstrncpy(dcr->media_type, rctx.store->media_type, name_len); bstrncpy(dcr->dev_name, rctx.device_name, name_len); if (rctx.store->append) { Dmsg2(dbglvl, "call reserve for append: have_vol=%d vol=%s\n", rctx.have_volume, rctx.VolumeName); ok = reserve_device_for_append(dcr, rctx); if (!ok) { goto bail_out; } rctx.jcr->dcr = dcr; Dmsg5(dbglvl, "Reserved=%d dev_name=%s mediatype=%s pool=%s ok=%d\n", dcr->dev->num_reserved(), dcr->dev_name, dcr->media_type, dcr->pool_name, ok); Dmsg4(dbglvl, "Vol=%s num_writers=%d, reserved=%d have_vol=%d\n", rctx.VolumeName, dcr->dev->num_writers, dcr->dev->num_reserved(), rctx.have_volume); if (rctx.have_volume) { Dmsg0(dbglvl, "Call reserve_volume for append.\n"); if (reserve_volume(dcr, rctx.VolumeName)) { Dmsg1(dbglvl, "Reserved vol=%s\n", rctx.VolumeName); } else { Dmsg1(dbglvl, "Could not reserve vol=%s\n", rctx.VolumeName); goto bail_out; } } else { dcr->any_volume = true; Dmsg0(dbglvl, "no vol, call find_next_appendable_vol.\n"); if (dir_find_next_appendable_volume(dcr)) { bstrncpy(rctx.VolumeName, dcr->VolumeName, sizeof(rctx.VolumeName)); rctx.have_volume = true; Dmsg1(dbglvl, "looking for Volume=%s\n", rctx.VolumeName); if (!dcr->can_i_use_volume() || !is_pool_ok(dcr)) { rctx.have_volume = false; rctx.VolumeName[0] = 0; dcr->unreserve_device(false); goto bail_out; } } else { dcr->dev->clear_wait(); Dmsg0(dbglvl, "No next volume found\n"); rctx.have_volume = false; rctx.VolumeName[0] = 0; /* * If there is at least one volume that is valid and in use, * but we get here, check if we are running with prefers * non-mounted drives. In that case, we have selected a * non-used drive and our one and only volume is mounted * elsewhere, so we bail out and retry using that drive. */ if (dcr->found_in_use() && !rctx.PreferMountedVols) { rctx.PreferMountedVols = true; if (dcr->VolumeName[0]) { dcr->unreserve_device(false); } goto bail_out; } /* * Note. Under some circumstances, the Director can hand us * a Volume name that is not the same as the one on the current * drive, and in that case, the call above to find the next * volume will fail because in attempting to reserve the Volume * the code will realize that we already have a tape mounted, * and it will fail. 
This *should* only happen if there are * writers, thus the following test. In that case, we simply * bail out, and continue waiting, rather than plunging on * and hoping that the operator can resolve the problem. */ if (dcr->dev->num_writers != 0) { if (dcr->VolumeName[0]) { dcr->unreserve_device(false); } goto bail_out; } } } } else { ok = reserve_device_for_read(dcr); if (ok) { rctx.jcr->read_dcr = dcr; Dmsg5(dbglvl, "Read reserved=%d dev_name=%s mediatype=%s pool=%s ok=%d\n", dcr->dev->num_reserved(), dcr->dev_name, dcr->media_type, dcr->pool_name, ok); } } if (!ok) { goto bail_out; } if (rctx.notify_dir) { POOL_MEM dev_name; BSOCK *dir = rctx.jcr->dir_bsock; int protect = 0, encrypt = 0; if (rctx.device->set_vol_immutable || rctx.device->set_vol_read_only) { protect = 1; } if (rctx.device->volume_encryption) { encrypt = 1; } pm_strcpy(dev_name, rctx.device->hdr.name); bash_spaces(dev_name); ok = dir->fsend(OK_device, dev_name.c_str(), protect, encrypt); /* Return real device name */ Dmsg1(dbglvl, ">dird: %s", dir->msg); if (!ok) { dcr->unreserve_device(false); } } else { ok = true; } return ok ? 1 : -1; bail_out: rctx.have_volume = false; rctx.VolumeName[0] = 0; Dmsg0(dbglvl, "Not OK.\n"); return 0; } /* * We reserve the device for appending by incrementing * num_reserved(). We do virtually all the same work that * is done in acquire_device_for_append(), but we do * not attempt to mount the device. This routine allows * the DIR to reserve multiple devices before *really* * starting the job. It also permits the SD to refuse * certain devices (not up, ...). * * Note, in reserving a device, if the device is for the * same pool and the same pool type, then it is acceptable. * The Media Type has already been checked. If we are * the first to reserve the device, we put the pool * name and pool type in the device record. */ static bool reserve_device_for_append(DCR *dcr, RCTX &rctx) { JCR *jcr = dcr->jcr; DEVICE *dev = dcr->dev; bool ok = false; ASSERT2(dcr, "No dcr in reserve_device_for_append!"); if (job_canceled(jcr)) { return false; } dev->Lock(); /* If device is being read or reserved for read, we cannot write it */ if (dev->can_read() || dev->is_reserved_for_read()) { Mmsg(jcr->errmsg, _("3603 JobId=%u %s device %s is busy reading.\n"), jcr->JobId, dev->print_type(), dev->print_name()); queue_reserve_message(jcr); Dmsg1(dbglvl, "Failed: %s", jcr->errmsg); goto bail_out; } /* If device is unmounted, we are out of luck */ if (dev->is_device_unmounted()) { Mmsg(jcr->errmsg, _("3604 JobId=%u %s device %s is BLOCKED due to user unmount.\n"), jcr->JobId, dev->print_type(), dev->print_name()); queue_reserve_message(jcr); Dmsg1(dbglvl, "Failed: %s", jcr->errmsg); goto bail_out; } Dmsg2(dbglvl, "reserve_append %s device is %s\n", dev->print_type(), dev->print_name()); /* Now do detailed tests ... */ if (can_reserve_drive(dcr, rctx) != 1) { Dmsg0(dbglvl, "can_reserve_drive!=1\n"); goto bail_out; } /* Note: on failure this returns jcr->errmsg properly edited */ if (generate_plugin_event(jcr, bsdEventDeviceTryOpen, dcr) != bRC_OK) { queue_reserve_message(jcr); goto bail_out; } dcr->set_reserved_for_append(); ok = true; bail_out: dev->Unlock(); return ok; } /* * We "reserve" the drive by setting the ST_READ bit. No one else * should touch the drive until that is cleared. * This allows the DIR to "reserve" the device before actually * starting the job. 
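 *
 * Added sketch of the checks below (the 3601/3602 codes are the queued reserve messages):
 *
 *    dev->Lock();
 *    is_device_unmounted()                   -> queue 3601 and refuse
 *    is_busy()                               -> queue 3602 and refuse
 *    plugin bsdEventDeviceTryOpen != bRC_OK  -> refuse
 *    otherwise clear_append(), set_reserved_for_read() and succeed
 *    dev->Unlock();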
*/ static bool reserve_device_for_read(DCR *dcr) { DEVICE *dev = dcr->dev; JCR *jcr = dcr->jcr; bool ok = false; ASSERT2(dcr, "No dcr in reserve_device_for_read!"); if (job_canceled(jcr)) { return false; } dev->Lock(); if (dev->is_device_unmounted()) { Mmsg(jcr->errmsg, _("3601 JobId=%u %s device %s is BLOCKED due to user unmount.\n"), jcr->JobId, dev->print_type(), dev->print_name()); queue_reserve_message(jcr); Dmsg1(dbglvl, "Device %s is BLOCKED due to user unmount.\n", dev->print_name()); goto bail_out; } if (dev->is_busy()) { Mmsg(jcr->errmsg, _("3602 JobId=%u %s device %s is busy (already reading/writing)." " read=%d, writers=%d reserved=%d\n"), jcr->JobId, dev->print_type(), dev->print_name(), dev->state & ST_READ?1:0, dev->num_writers, dev->num_reserved()); queue_reserve_message(jcr); Dmsg4(dbglvl, "Device %s is busy ST_READ=%d num_writers=%d reserved=%d.\n", dev->print_name(), dev->state & ST_READ?1:0, dev->num_writers, dev->num_reserved()); goto bail_out; } /* Note: on failure this returns jcr->errmsg properly edited */ if (generate_plugin_event(jcr, bsdEventDeviceTryOpen, dcr) != bRC_OK) { queue_reserve_message(jcr); goto bail_out; } dev->clear_append(); dcr->set_reserved_for_read(); ok = true; bail_out: dev->Unlock(); return ok; } static bool is_max_jobs_ok(DCR *dcr) { DEVICE *dev = dcr->dev; JCR *jcr = dcr->jcr; Dmsg6(dbglvl, "MaxJobs=%d VolCatJobs=%d writers=%d reserves=%d Status=%s Vol=%s\n", dcr->VolCatInfo.VolCatMaxJobs, dcr->VolCatInfo.VolCatJobs, dev->num_writers, dev->num_reserved(), dcr->VolCatInfo.VolCatStatus, dcr->VolumeName); /* Limit max concurrent jobs on this drive */ if (dev->max_concurrent_jobs > 0 && (int)dev->max_concurrent_jobs <= (dev->num_writers + dev->num_reserved())) { /* Max Concurrent Jobs depassed or already reserved */ Mmsg(jcr->errmsg, _("3609 JobId=%u Max concurrent jobs=%d exceeded on %s device %s.\n"), (uint32_t)jcr->JobId, dev->max_concurrent_jobs, dev->print_type(), dev->print_name()); queue_reserve_message(jcr); Dmsg1(dbglvl, "Failed: %s", jcr->errmsg); return false; } if (strcmp(dcr->VolCatInfo.VolCatStatus, "Recycle") == 0) { return true; } if (!dev->allow_maxbytes_concurrency(dcr)) { queue_reserve_message(jcr); Dmsg1(dbglvl, "reserve dev failed: %s", jcr->errmsg); return false; /* wait */ } if (dcr->VolCatInfo.VolCatMaxJobs > 0 && (int)dcr->VolCatInfo.VolCatMaxJobs <= (dev->num_writers + dev->num_reserved())) { /* Max Job Vols depassed or already reserved */ Mmsg(jcr->errmsg, _("3611 JobId=%u Volume max jobs=%d exceeded on %s device %s.\n"), (uint32_t)jcr->JobId, dcr->VolCatInfo.VolCatMaxJobs, dev->print_type(), dev->print_name()); queue_reserve_message(jcr); Dmsg1(dbglvl, "reserve dev failed: %s", jcr->errmsg); return false; /* wait */ } return true; } static int is_pool_ok(DCR *dcr) { DEVICE *dev = dcr->dev; JCR *jcr = dcr->jcr; if (dev->num_writers >= 0) { /* Now check if we want the same Pool and pool type */ if (strcmp(dev->pool_name, dcr->pool_name) == 0 && strcmp(dev->pool_type, dcr->pool_type) == 0) { /* OK, compatible device */ Dmsg1(dbglvl, "OK dev: %s pool matches\n", dev->print_name()); return 1; } } else if (dev->num_reserved() > 0) { if (strcmp(dev->reserved_pool_name, dcr->pool_name) == 0) { /* OK, compatible device */ Dmsg1(dbglvl, "OK dev: %s pool matches\n", dev->print_name()); return 1; } } /* Drive Pool not suitable for us */ Mmsg(jcr->errmsg, _( "3608 JobId=%u wants Pool=\"%s\" but have Pool=\"%s\" nreserve=%d on %s device %s.\n"), (uint32_t)jcr->JobId, dcr->pool_name, dev->pool_name, dev->num_reserved(), 
dev->print_type(), dev->print_name()); Dmsg1(dbglvl, "Failed: %s", jcr->errmsg); queue_reserve_message(jcr); return 0; } /* * Returns: 1 if drive can be reserved * 0 if we should wait * -1 on error or impossibility */ static int can_reserve_drive(DCR *dcr, RCTX &rctx) { DEVICE *dev = dcr->dev; JCR *jcr = dcr->jcr; Dmsg5(dbglvl, "PrefMnt=%d exact=%d suitable=%d chgronly=%d any=%d\n", rctx.PreferMountedVols, rctx.exact_match, rctx.suitable_device, rctx.autochanger_only, rctx.any_drive); /* Check for max jobs on this Volume */ if (!is_max_jobs_ok(dcr)) { return 0; } /* setting any_drive overrides PreferMountedVols flag */ if (!rctx.any_drive) { /* * When PreferMountedVols is set, we keep track of the * drive in use that has the least number of writers, then if * no unmounted drive is found, we try that drive. This * helps spread the load to the least used drives. */ if (rctx.try_low_use_drive && dev == rctx.low_use_drive && is_pool_ok(dcr)) { Dmsg2(dbglvl, "OK dev=%s == low_drive=%s.\n", dev->print_name(), rctx.low_use_drive->print_name()); bstrncpy(dev->pool_name, dcr->pool_name, sizeof(dev->pool_name)); bstrncpy(dev->pool_type, dcr->pool_type, sizeof(dev->pool_type)); return 1; } /* If he wants a free drive, but this one is busy, no go */ if (!rctx.PreferMountedVols && dev->is_busy()) { Mmsg(jcr->errmsg, _("3605 JobId=%u wants free drive but %s device %s is busy.\n"), jcr->JobId, dev->print_type(), dev->print_name()); queue_reserve_message(jcr); Dmsg1(dbglvl, "Failed: %s", jcr->errmsg); return 0; } /* Check for prefer mounted volumes */ if (rctx.PreferMountedVols && !dev->vol && dev->is_tape()) { Mmsg(jcr->errmsg, _("3606 JobId=%u prefers mounted drives, but %s device %s has no Volume.\n"), jcr->JobId, dev->print_type(), dev->print_name()); queue_reserve_message(jcr); Dmsg1(dbglvl, "Failed: %s", jcr->errmsg); return 0; /* No volume mounted */ } /* Check for exact Volume name match */ /* ***FIXME*** for Disk, we can accept any volume that goes with this * drive. 
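 *
 * Added example (volume names are invented): with exact_match set, the test below
 * only accepts the drive when the wanted Volume is already tied to it, either in
 * the drive header or in the reserved volume pointer:
 *
 *    want "Vol0001", VolHdr "Vol0001"                     -> OK
 *    want "Vol0001", VolHdr "",  dev->vol == "Vol0001"    -> OK
 *    want "Vol0001", VolHdr "Vol0002"                     -> queue 3607, return 0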
*/ if (rctx.exact_match && rctx.have_volume) { bool ok; Dmsg5(dbglvl, "PrefMnt=%d exact=%d suitable=%d chgronly=%d any=%d\n", rctx.PreferMountedVols, rctx.exact_match, rctx.suitable_device, rctx.autochanger_only, rctx.any_drive); Dmsg4(dbglvl, "have_vol=%d have=%s resvol=%s want=%s\n", rctx.have_volume, dev->VolHdr.VolumeName, dev->vol?dev->vol->vol_name:"*none*", rctx.VolumeName); ok = strcmp(dev->VolHdr.VolumeName, rctx.VolumeName) == 0 || (dev->vol && strcmp(dev->vol->vol_name, rctx.VolumeName) == 0); if (!ok) { Mmsg(jcr->errmsg, _("3607 JobId=%u wants Vol=\"%s\" drive has Vol=\"%s\" on %s device %s.\n"), jcr->JobId, rctx.VolumeName, dev->VolHdr.VolumeName, dev->print_type(), dev->print_name()); queue_reserve_message(jcr); Dmsg3(dbglvl, "not OK: dev have=%s resvol=%s want=%s\n", dev->VolHdr.VolumeName, dev->vol?dev->vol->vol_name:"*none*", rctx.VolumeName); return 0; } if (!dcr->can_i_use_volume()) { return 0; /* fail if volume on another drive */ } } } /* Check for unused autochanger drive */ if (rctx.autochanger_only && !dev->is_busy() && dev->VolHdr.VolumeName[0] == 0 && is_pool_ok(dcr)) { /* Device is available but not yet reserved, reserve it for us */ Dmsg1(dbglvl, "OK Res Unused autochanger %s.\n", dev->print_name()); bstrncpy(dev->pool_name, dcr->pool_name, sizeof(dev->pool_name)); bstrncpy(dev->pool_type, dcr->pool_type, sizeof(dev->pool_type)); return 1; /* reserve drive */ } /* * Handle the case that there are no writers */ if (dev->num_writers == 0) { /* Now check if there are any reservations on the drive */ if (dev->num_reserved()) { return is_pool_ok(dcr); } else if (dev->can_append()) { if (is_pool_ok(dcr)) { return 1; } else { /* Changing pool, unload old tape if any in drive */ Dmsg0(dbglvl, "OK dev: num_writers=0, not reserved, pool change, unload changer\n"); /* ***FIXME*** use set_unload() */ unload_autochanger(dcr, -1); } } /* Device is available but not yet reserved, reserve it for us */ Dmsg1(dbglvl, "OK Dev avail reserved %s\n", dev->print_name()); bstrncpy(dev->pool_name, dcr->pool_name, sizeof(dev->pool_name)); bstrncpy(dev->pool_type, dcr->pool_type, sizeof(dev->pool_type)); return 1; /* reserve drive */ } /* * Check if the device is in append mode with writers (i.e. * available if pool is the same). */ if (dev->can_append() || dev->num_writers > 0 || dev->num_reserved() > 0) { return is_pool_ok(dcr); } else { Pmsg1(000, _("Logic error!!!! JobId=%u Should not get here.\n"), (int)jcr->JobId); Mmsg(jcr->errmsg, _("3910 JobId=%u Logic error!!!! %s device %s Should not get here.\n"), jcr->JobId, dev->print_type(), dev->print_name()); queue_reserve_message(jcr); Jmsg0(jcr, M_FATAL, 0, _("Logic error!!!! Should not get here.\n")); return -1; /* error, should not get here */ } Mmsg(jcr->errmsg, _("3911 JobId=%u failed reserve %s device %s.\n"), jcr->JobId, dev->print_type(), dev->print_name()); queue_reserve_message(jcr); Dmsg1(dbglvl, "Failed: No reserve %s\n", dev->print_name()); return 0; } /* * Queue a reservation error or failure message for this jcr */ static void queue_reserve_message(JCR *jcr) { int i; alist *msgs; char *msg; jcr->lock(); msgs = jcr->reserve_msgs; if (!msgs) { goto bail_out; } /* * Look for duplicate message. 
If found, do * not insert */ for (i=msgs->size()-1; i >= 0; i--) { msg = (char *)msgs->get(i); if (!msg) { goto bail_out; } /* Comparison based on 4 digit message number */ if (strncmp(msg, jcr->errmsg, 4) == 0) { goto bail_out; } } /* Message unique, so insert it */ jcr->reserve_msgs->push(bstrdup(jcr->errmsg)); bail_out: jcr->unlock(); } /* * Send any reservation messages queued for this jcr */ void send_drive_reserve_messages(JCR *jcr, void sendit(const char *msg, int len, void *sarg), void *arg) { int i; alist *msgs; char *msg; jcr->lock(); msgs = jcr->reserve_msgs; if (!msgs || msgs->size() == 0) { goto bail_out; } for (i=msgs->size()-1; i >= 0; i--) { msg = (char *)msgs->get(i); if (msg) { sendit(" ", 3, arg); sendit(msg, strlen(msg), arg); } else { break; } } bail_out: jcr->unlock(); } /* * Pop and release any reservations messages */ static void pop_reserve_messages(JCR *jcr) { alist *msgs; char *msg; jcr->lock(); msgs = jcr->reserve_msgs; if (!msgs) { goto bail_out; } while ((msg = (char *)msgs->pop())) { free(msg); } bail_out: jcr->unlock(); } /* * Also called from acquire.c */ void release_reserve_messages(JCR *jcr) { pop_reserve_messages(jcr); jcr->lock(); if (!jcr->reserve_msgs) { goto bail_out; } delete jcr->reserve_msgs; jcr->reserve_msgs = NULL; bail_out: jcr->unlock(); } bacula-15.0.3/src/stored/win_file_dev.h0000644000175000017500000000202114771010173017547 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Inspired by vtape.h */ #ifndef __WIN_FILE_DEV_H_ #define __WIN_FILE_DEV_H_ class win_file_dev : public file_dev { public: win_file_dev() { }; ~win_file_dev() { m_fd = -1; }; virtual int device_specific_init(JCR *jcr, DEVRES *device) { // Don't forget to do capabilities |= CAP_LSEEK in device_specific_init() return file_dev::device_specific_init(jcr, device); } }; #endif bacula-15.0.3/src/stored/win_tape_dev.h0000644000175000017500000000257314771010173017575 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * Inspired by vtape.h */ #ifndef __WIN_TAPE_DEV_H_ #define __WIN_TAPE_DEV_H_ class win_tape_dev : public DEVICE { public: win_tape_dev() { }; ~win_tape_dev() { }; /* interface from DEVICE */ int d_close(int); int d_open(const char *pathname, int flags); int d_ioctl(int fd, ioctl_req_t request, char *op=NULL); ssize_t d_read(int, void *buffer, size_t count); ssize_t d_write(int, const void *buffer, size_t count); boffset_t lseek(DCR *dcr, off_t offset, int whence) { return -1; }; boffset_t lseek(int fd, off_t offset, int whence); bool open_device(DCR *dcr, int omode); int tape_op(struct mtop *mt_com); int tape_get(struct mtget *mt_com); int tape_pos(struct mtpos *mt_com); }; #endif /* __WIN_TAPE_DEV_H_ */ bacula-15.0.3/src/stored/file_dev.c0000644000175000017500000007717514771010173016703 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * * file_dev.c -- low level operations on file devices * * written by, Kern Sibbald, MM * separated from dev.c February 2014 * */ #include "bacula.h" #include "stored.h" #ifdef HAVE_LINUX_OS #include <linux/fs.h> /* FS_IOC_GETFLAGS and FS_*_FL flags; header name inferred from usage below */ #endif static const int dbglvl = 100; /* Imported functions */ const char *mode_to_str(int mode); int file_dev::use_protect() { if (device->set_vol_immutable || device->set_vol_read_only) { return 1; } return 0; } /* default primitives are designed for file */ int DEVICE::d_open(const char *pathname, int flags) { return ::open(pathname, flags | O_CLOEXEC); } int DEVICE::d_close(int fd) { return ::close(fd); } int DEVICE::d_ioctl(int fd, ioctl_req_t request, char *mt_com) { #ifdef HAVE_WIN32 return -1; #else return ::ioctl(fd, request, mt_com); #endif } ssize_t DEVICE::d_read(int fd, void *buffer, size_t count) { return ::read(fd, buffer, count); } ssize_t DEVICE::d_write(int fd, const void *buffer, size_t count) { return ::write(fd, buffer, count); } /* Rewind file device */ bool DEVICE::rewind(DCR *dcr) { Enter(dbglvl); Dmsg3(400, "rewind res=%d fd=%d %s\n", num_reserved(), m_fd, print_name()); state &= ~(ST_EOT|ST_EOF|ST_WEOT); /* remove EOF/EOT flags */ block_num = file = 0; file_size = 0; file_addr = 0; if (m_fd < 0) { Mmsg1(errmsg, _("Rewind failed: device %s is not open.\n"), print_name()); Leave(dbglvl); return false; } if (is_file()) { if (lseek(dcr, (boffset_t)0, SEEK_SET) < 0) { berrno be; dev_errno = errno; Mmsg2(errmsg, _("lseek error on %s. ERR=%s.\n"), print_name(), be.bstrerror()); Leave(dbglvl); return false; } } Leave(dbglvl); return true; } /* * Reposition the device to file, block * Returns: false on failure * true on success */ bool DEVICE::reposition(DCR *dcr, uint64_t raddr) { if (!is_open()) { dev_errno = EBADF; Mmsg0(errmsg, _("Bad call to reposition. Device not open\n")); Emsg0(M_FATAL, 0, errmsg); return false; } Dmsg1(100, "===== lseek to %llu\n", raddr); if (lseek(dcr, (boffset_t)raddr, SEEK_SET) == (boffset_t)-1) { berrno be; dev_errno = errno; Mmsg2(errmsg, _("lseek error on %s. 
ERR=%s.\n"), print_name(), be.bstrerror()); return false; } file_addr = raddr; return true; } /* Seek to specified place */ boffset_t DEVICE::lseek(DCR *dcr, boffset_t offset, int whence) { #if defined(HAVE_WIN32) return ::_lseeki64(m_fd, (__int64)offset, whence); #else return ::lseek(m_fd, offset, whence); #endif } /* * Open a file device. For Aligned type we open both Volumes */ bool file_dev::open_device(DCR *dcr, int omode) { POOL_MEM archive_name(PM_FNAME); struct stat sp; Enter(dbglvl); if (DEVICE::open_device(dcr, omode)) { Leave(dbglvl); return true; } omode = openmode; get_autochanger_loaded_slot(dcr); /* * Handle opening of File Autochanger */ pm_strcpy(archive_name, dev_name); /* * If this is a virtual autochanger (i.e. changer_res != NULL) * we simply use the device name, assuming it has been * appropriately setup by the "autochanger". */ if (!device->changer_res || device->changer_command[0] == 0 || strcmp(device->changer_command, "/dev/null") == 0) { if (VolCatInfo.VolCatName[0] == 0) { Mmsg(errmsg, _("Could not open file device %s. No Volume name given.\n"), print_name()); if (dcr->jcr) { pm_strcpy(dcr->jcr->errmsg, errmsg); } clear_opened(); Leave(dbglvl); return false; } /* If not /dev/null concatenate VolumeName */ if (!is_null()) { if (!IsPathSeparator(archive_name.c_str()[strlen(archive_name.c_str())-1])) { pm_strcat(archive_name, "/"); } pm_strcat(archive_name, getVolCatName()); } } mount(1); /* do mount if required */ set_mode(omode); /* Check if volume needs to be opened with O_APPEND */ int append = append_open_needed(getVolCatName()) ? O_APPEND : 0; /* If creating file, give 0640 permissions */ Dmsg3(100, "open disk: mode=%s open(%s, 0x%x, 0640)\n", mode_to_str(omode), archive_name.c_str(), mode); /* Use system open() */ if ((m_fd = ::open(archive_name.c_str(), mode|O_CLOEXEC|append, 0640)) < 0) { /* Open may fail if we want to write to the Immutable volume */ if ((errno == EACCES || errno == EPERM) && use_protect()) { bool immutable = check_for_immutable(getVolCatName()); bool readonly = check_for_read_only(-1, getVolCatName()); Dmsg3(DT_VOLUME|40, "volume=%s immutable=%d readonly=%d\n", getVolCatName(), immutable, readonly); if (immutable || readonly) { /* Volume has immutable flag set, we need to clear it */ if (!check_volume_protection_time(getVolCatName())) { /* Volume cannot be reused yet */ MmsgD1(dbglvl, errmsg, _("Cannot open Volume %s for writing/truncating, " "because Minimum Volume Protection Time hasn't expired yet\n"), getVolCatName()); } else { bool tryopen = false; /* Flag can be cleared, volume can probably be reused */ if (immutable && clear_immutable(getVolCatName(), &errmsg)) { tryopen = true; } if (readonly && set_writable(-1, getVolCatName(), &errmsg) == 0) { tryopen = true; } if (tryopen) { /* It should be now possible to open the device with desired mode */ if ((m_fd = ::open(archive_name.c_str(), mode|O_CLOEXEC|append, 0640)) < 0) { berrno be; dev_errno = errno; MmsgD3(40, errmsg, _("Could not open(%s,%s,0640): ERR=%s\n"), archive_name.c_str(), mode_to_str(omode), be.bstrerror()); } // else open is ok } else { // Cannot clear the flags berrno be; MmsgD2(40, errmsg, _("Could not clear volume protection on %s ERR=%s\n"), getVolCatName(), be.bstrerror()); } } } else { // not immutable or readonly, other issue berrno be; dev_errno = errno; MmsgD3(40, errmsg, _("Could not open(%s,%s,0640): ERR=%s\n"), archive_name.c_str(), mode_to_str(omode), be.bstrerror()); } } else { // Not a permission issue on this device berrno be; dev_errno = errno; 
MmsgD3(40, errmsg, _("Could not open(%s,%s,0640): ERR=%s\n"), archive_name.c_str(), mode_to_str(omode), be.bstrerror()); } } if (m_fd >= 0) { /* Open is OK, now let device get control */ Dmsg2(40, "Did open(%s,%s,0640)\n", archive_name.c_str(), mode_to_str(omode)); device_specific_open(dcr); dev_errno = 0; file = 0; file_addr = 0; /* Refresh the underline device id */ if (fstat(m_fd, &sp) == 0) { devno = sp.st_dev; } } else { if (dcr->jcr) { pm_strcpy(dcr->jcr->errmsg, errmsg); } } Dmsg1(100, "open dev: disk fd=%d opened\n", m_fd); state |= preserve; /* reset any important state info */ Leave(dbglvl); return m_fd >= 0; } int file_dev::set_writable(int fd, const char *vol_name, POOLMEM **error) { POOL_MEM fname; get_volume_fpath(vol_name, fname.handle()); /* Need write access only for the storage daemon user We can extend to the * group, but in many case, it's a system group, so all users with tape/disk * group can have access */ int ret = bchmod(fd, fname.c_str(), 0600); if (ret < 0) { berrno be; MmsgD1(DT_VOLUME|50, error, _("Unable to change permission to 0600. ERR=%s\n"), be.bstrerror()); } return ret; } int file_dev::set_readonly(int fd, const char *vol_name, POOLMEM **error) { POOL_MEM fname; get_volume_fpath(vol_name, fname.handle()); /* Mark the file -r-------- */ int ret = bchmod(fd, fname.c_str(), 0400); if (ret < 0) { berrno be; MmsgD1(DT_VOLUME|50, error, _("Unable to change permission to 0400. ERR=%s\n"), be.bstrerror()); } return ret; } int file_dev::set_atime(int fd, const char *vol_name, btime_t val, POOLMEM **error) { struct stat sp; int ret; POOL_MEM fname; get_volume_fpath(vol_name, fname.handle()); if (bstat(fd, fname.c_str(), &sp) < 0) { berrno be; MmsgD2(DT_VOLUME|50, error, _("Unable to stat %s. ERR=%s\n"), fname.c_str(), be.bstrerror()); return -1; } ret = set_own_time(fd, fname.c_str(), val, sp.st_mtime); if (ret < 0) { berrno be; MmsgD2(DT_VOLUME|50, error, _("Unable to set atime/mtime to %s. ERR=%s\n"), fname.c_str(), be.bstrerror()); } return ret; } /* * Truncate a volume. If this is aligned disk, we * truncate both volumes. */ bool DEVICE::truncate(DCR *dcr) { struct stat st; DEVICE *dev = this; Dmsg1(100, "truncate %s\n", print_name()); switch (dev_type) { case B_VTL_DEV: case B_VTAPE_DEV: case B_TAPE_DEV: /* maybe we should rewind and write and eof ???? */ return true; /* we don't really truncate tapes */ default: break; } Dmsg2(100, "Truncate adata=%d fd=%d\n", dev->adata, dev->m_fd); /* Need to clear the APPEND flag before truncating */ if (dev->device->set_vol_append_only) { if (!clear_append_only(dcr->VolumeName, &errmsg)) { Mmsg2(errmsg, _("Unable to clear append_only flag for volume %s on device %s.\n"), dcr->VolumeName, print_name()); return false; } } if (dev->device->set_vol_read_only) { if (set_writable(dev->m_fd, dcr->VolumeName, &errmsg) < 0) { Mmsg3(errmsg, _("Unable to set write permission for volume %s on device %s. %s\n"), dcr->VolumeName, print_name(), errmsg); return false; } } if (ftruncate(dev->m_fd, 0) != 0) { berrno be; Mmsg2(errmsg, _("Unable to truncate device %s. ERR=%s\n"), print_name(), be.bstrerror()); return false; } /* * Check for a successful ftruncate() and issue a work-around for devices * (mostly cheap NAS) that don't support truncation. * Workaround supplied by Martin Schmid as a solution to bug #1011. * 1. close file * 2. delete file * 3. open new file with same mode * 4. change ownership to original */ if (fstat(dev->m_fd, &st) != 0) { berrno be; Mmsg2(errmsg, _("Unable to stat device %s. 
ERR=%s\n"), print_name(), be.bstrerror()); return false; } if (st.st_size != 0) { /* ftruncate() didn't work */ POOL_MEM archive_name(PM_FNAME); pm_strcpy(archive_name, dev_name); if (!IsPathSeparator(archive_name.c_str()[strlen(archive_name.c_str())-1])) { pm_strcat(archive_name, "/"); } pm_strcat(archive_name, dcr->VolumeName); if (dev->is_adata()) { pm_strcat(archive_name, ADATA_EXTENSION); } Mmsg2(errmsg, _("Device %s doesn't support ftruncate(). Recreating file %s.\n"), print_name(), archive_name.c_str()); /* Close file and blow it away */ ::close(dev->m_fd); ::unlink(archive_name.c_str()); /* Recreate the file -- of course, empty */ dev->set_mode(CREATE_READ_WRITE); if ((dev->m_fd = ::open(archive_name.c_str(), mode|O_CLOEXEC, st.st_mode)) < 0) { berrno be; dev_errno = errno; Mmsg2(errmsg, _("Could not reopen: %s, ERR=%s\n"), archive_name.c_str(), be.bstrerror()); Dmsg1(40, "reopen failed: %s", errmsg); Emsg0(M_FATAL, 0, errmsg); return false; } /* Reset proper owner */ chown(archive_name.c_str(), st.st_uid, st.st_gid); } return true; } /* * (Un)mount the device */ bool DEVICE::mount_file(int mount, int dotimeout) { POOL_MEM ocmd(PM_FNAME); POOLMEM *results; DIR* dp; char *icmd; POOL_MEM dname(PM_FNAME); int status, tries, name_max, count; berrno be; Dsm_check(200); if (mount) { icmd = device->mount_command; } else { icmd = device->unmount_command; } clear_freespace_ok(); edit_mount_codes(ocmd, icmd); Dmsg2(100, "mount_file: cmd=%s mounted=%d\n", ocmd.c_str(), !!is_mounted()); if (dotimeout) { /* Try at most 10 times to (un)mount the device. This should perhaps be configurable. */ tries = 10; } else { tries = 1; } results = get_memory(4000); /* If busy retry each second */ Dmsg1(100, "mount_file run_prog=%s\n", ocmd.c_str()); while ((status = run_program_full_output(ocmd.c_str(), max_open_wait/2, results)) != 0) { /* Doesn't work with internationalization (This is not a problem) */ if (mount && fnmatch("*is already mounted on*", results, 0) == 0) { break; } if (!mount && fnmatch("* not mounted*", results, 0) == 0) { break; } if (tries-- > 0) { /* Sometimes the device cannot be mounted because it is already mounted. * Try to unmount it, then remount it */ if (mount) { Dmsg1(400, "Trying to unmount the device %s...\n", print_name()); mount_file(0, 0); } bmicrosleep(1, 0); continue; } Dmsg5(100, "Device %s cannot be %smounted. stat=%d result=%s ERR=%s\n", print_name(), (mount ? "" : "un"), status, results, be.bstrerror(status)); Mmsg(errmsg, _("Device %s cannot be %smounted. ERR=%s\n"), print_name(), (mount ? "" : "un"), be.bstrerror(status)); /* * Now, just to be sure it is not mounted, try to read the filesystem. */ name_max = pathconf(".", _PC_NAME_MAX); if (name_max < 1024) { name_max = 1024; } if (!(dp = opendir(device->mount_point))) { berrno be; dev_errno = errno; Dmsg3(100, "mount_file: failed to open dir %s (dev=%s), ERR=%s\n", device->mount_point, print_name(), be.bstrerror()); goto get_out; } count = 0; while (1) { if (breaddir(dp, dname.addr()) != 0) { dev_errno = EIO; Dmsg2(129, "mount_file: failed to find suitable file in dir %s (dev=%s)\n", device->mount_point, print_name()); break; } if ((strcmp(dname.c_str(), ".")) && (strcmp(dname.c_str(), "..")) && (strcmp(dname.c_str(), ".keep"))) { count++; /* dname.c_str() != ., .. or .keep (Gentoo-specific) */ break; } else { Dmsg2(129, "mount_file: ignoring %s in %s\n", dname.c_str(), device->mount_point); } } closedir(dp); Dmsg1(100, "mount_file: got %d files in the mount point (not counting ., .. 
and .keep)\n", count); if (count > 0) { /* If we got more than ., .. and .keep */ /* there must be something mounted */ if (mount) { Dmsg1(100, "Did Mount by count=%d\n", count); break; } else { /* An unmount request. We failed to unmount - report an error */ set_mounted(true); free_pool_memory(results); Dmsg0(200, "== error mount=1 wanted unmount\n"); return false; } } get_out: set_mounted(false); free_pool_memory(results); Dmsg0(200, "============ mount=0\n"); Dsm_check(200); return false; } set_mounted(mount); /* set/clear mounted flag */ free_pool_memory(results); /* Do not check free space when unmounting */ Dmsg1(200, "============ mount=%d\n", mount); return true; } /* * Check if the current position on the volume corresponds to * what is in the catalog. * */ bool file_dev::is_eod_valid(DCR *dcr) { JCR *jcr = dcr->jcr; if (has_cap(CAP_LSEEK)) { char ed1[50], ed2[50]; boffset_t ameta_size, adata_size, size; ameta_size = lseek(dcr, (boffset_t)0, SEEK_END); adata_size = get_adata_size(dcr); size = ameta_size + adata_size; if (VolCatInfo.VolCatAmetaBytes == (uint64_t)ameta_size && VolCatInfo.VolCatAdataBytes == (uint64_t)adata_size) { if (is_aligned()) { Jmsg(jcr, M_INFO, 0, _("Ready to append to end of Volumes \"%s\"" " ameta size=%s adata size=%s\n"), dcr->VolumeName, edit_uint64_with_commas(VolCatInfo.VolCatAmetaBytes, ed1), edit_uint64_with_commas(VolCatInfo.VolCatAdataBytes, ed2)); } else { Jmsg(jcr, M_INFO, 0, _("Ready to append to end of Volume \"%s\"" " size=%s\n"), dcr->VolumeName, edit_uint64_with_commas(VolCatInfo.VolCatAmetaBytes, ed1)); } } else if ((uint64_t)ameta_size >= VolCatInfo.VolCatAmetaBytes && (uint64_t)adata_size >= VolCatInfo.VolCatAdataBytes) { if ((uint64_t)ameta_size != VolCatInfo.VolCatAmetaBytes) { Jmsg(jcr, M_WARNING, 0, _("For Volume \"%s\":\n" " The sizes do not match! Metadata Volume=%s Catalog=%s\n" " Correcting Catalog\n"), dcr->VolumeName, edit_uint64_with_commas(ameta_size, ed1), edit_uint64_with_commas(VolCatInfo.VolCatAmetaBytes, ed2)); } if ((uint64_t)adata_size != VolCatInfo.VolCatAdataBytes) { Jmsg(jcr, M_WARNING, 0, _("For aligned Volume \"%s\":\n" " Aligned sizes do not match! Aligned Volume=%s Catalog=%s\n" " Correcting Catalog\n"), dcr->VolumeName, edit_uint64_with_commas(adata_size, ed1), edit_uint64_with_commas(VolCatInfo.VolCatAdataBytes, ed2)); } VolCatInfo.VolCatAmetaBytes = ameta_size; VolCatInfo.VolCatAdataBytes = adata_size; VolCatInfo.VolCatBytes = size; VolCatInfo.VolCatFiles = (uint32_t)(size >> 32); if (!dir_update_volume_info(dcr, false, true)) { Jmsg(jcr, M_WARNING, 0, _("Error updating Catalog\n")); dcr->mark_volume_in_error(); return false; } } else { Mmsg(jcr->errmsg, _("Bacula cannot write on disk Volume \"%s\" because: " "The sizes do not match! Volume=%s Catalog=%s\n"), dcr->VolumeName, edit_uint64_with_commas(size, ed1), edit_uint64_with_commas(VolCatInfo.VolCatBytes, ed2)); Jmsg(jcr, M_ERROR, 0, "%s", jcr->errmsg); Dmsg1(100, "%s", jcr->errmsg); dcr->mark_volume_in_error(); return false; } } return true; } /* Check if attribute is supported for current platform */ bool file_dev::is_attribute_supported(int attr) { int supported = false; switch (attr) { #ifdef HAVE_APPEND_FL case FS_APPEND_FL: supported = true; break; #endif // HAVE_APPEND_FL #ifdef HAVE_IMMUTABLE_FL case FS_IMMUTABLE_FL: supported = true; break; #endif // HAVE_IMMUTABLE_FL default: break; } Dmsg2(DT_VOLUME|50, "File attribute: 0x%08x %s supported\n", attr, supported ? 
"is" : "is not"); return supported; } /* Get full volume path (archive dir + volume name) */ void file_dev::get_volume_fpath(const char *vol_name, POOLMEM **fname) { pm_strcpy(fname, dev_name); if (!IsPathSeparator((*fname)[strlen(*fname)-1])) { pm_strcat(fname, "/"); } pm_strcat(fname, vol_name); Dmsg1(DT_VOLUME|250, "Full volume path built: %s\n", *fname); } /* Check if volume can be reused or not yet. * Used in the truncate path. * This method is based on the 'MinimumVolumeProtection' time directive, * current system time is compared against m_time of volume file for immutable flag * * For the read-only (on NetApp Snaplock for example), we check a_time * * @return true if volume can be reused * @return false if volume's protection time hasn't expired yet, * hence volume cannot be reused now */ bool file_dev::check_volume_protection_time(const char *vol_name) { if (!device->set_vol_immutable && !device->set_vol_read_only) { Dmsg1(DT_VOLUME|50, "SetVolumeImmutable/SetVolumeReadOnly turned off for volume: %s\n", vol_name); return true; } struct stat sp; POOL_MEM fname(PM_FNAME); if (device->min_volume_protection_time == 0) { Dmsg1(DT_VOLUME|50, _("Immutable flag cannot be cleared for volume: %s, " "because Minimum Volume Protection Time is set to 0\n"), vol_name); Mmsg(errmsg, _("Immutable/ReadOnly flag cannot be cleared for volume: %s, " "because Minimum Volume Protection Time is set to 0\n"), vol_name); return false; } get_volume_fpath(vol_name, fname.handle()); if (stat(fname.c_str(), &sp)) { if (errno == ENOENT) { Dmsg1(DT_VOLUME|50, "Protection time is ok for volume %s, because it does not exist yet\n", fname.c_str()); /* Volume does not exist at all so we can just proceed */ return true; } /* We have an error otherwise */ berrno be; Dmsg2(DT_VOLUME|50, "Failed to stat %s, ERR=%s", fname.c_str(), be.bstrerror()); Mmsg2(errmsg, "Failed to stat %s, ERR=%s", fname.c_str(), be.bstrerror()); return false; } /* Check if enough time elapsed since last file's modification and compare it with current */ time_t expiration_time; time_t now = time(NULL); if (device->set_vol_immutable) { expiration_time = sp.st_mtime + device->min_volume_protection_time; } else { // ReadOnly, we check both and we take the biggest one expiration_time = MAX(sp.st_atime, sp.st_mtime + device->min_volume_protection_time); } if (expiration_time > now) { char dt[50], dt2[50]; bstrftime(dt, sizeof(dt), expiration_time); bstrftime(dt2, sizeof(dt2), now); Mmsg1(errmsg, _("Immutable/ReadOnly flag cannot be cleared for volume: %s, " "because Minimum Volume Protection Time hasn't expired yet.\n"), vol_name); Dmsg3(DT_VOLUME|50, "Immutable/ReadOnly flag cannot be cleared for volume: %s, " "because:\nexpiration time: %s\nnow: %s\n", vol_name, dt, dt2); return false; } Dmsg1(DT_VOLUME|50, "Immutable/ReadOnly flag can be cleared for volume: %s\n", vol_name); return true; } #ifdef HAVE_FS_IOC_GETFLAGS bool file_dev::check_for_attr(const char *vol_name, int attr) { int tmp_fd, ioctl_ret; union { int get_attr; char garbadge[16]; // be sure that buggy ioctl will not overwrite usefull data }; bool ret = false; POOL_MEM fname(PM_FNAME); if (!is_attribute_supported(attr)) { errno = ENOSYS; return ret; } get_volume_fpath(vol_name, fname.handle()); if ((tmp_fd = d_open(fname.c_str(), O_RDONLY|O_CLOEXEC)) < 0) { berrno be; Dmsg2(DT_VOLUME|50, "Failed to open %s, ERR=%s\n", fname.c_str(), be.bstrerror()); return ret; } ioctl_ret = d_ioctl(tmp_fd, FS_IOC_GETFLAGS, (char *)&get_attr); if (ioctl_ret < 0) { berrno be; Dmsg2(DT_VOLUME|50, 
"Failed to get attributes for %s, ERR=%s\n", fname.c_str(), be.bstrerror()); } else { ret = get_attr & attr; const char *msg_str = ret ? "set" : "not set"; Dmsg3(DT_VOLUME|50, "Attribute: 0x%08x is %s for volume: %s\n", attr, msg_str, fname.c_str()); } d_close(tmp_fd); return ret; } #else bool file_dev::check_for_attr(const char *vol_name, int attr) { Dmsg2(DT_VOLUME|50, "Returning from mocked check_for_attr() for volume: %s, attr: 0x%08x\n", vol_name, attr); return true; } #endif // HAVE_FS_IOC_GETFLAGS #ifdef HAVE_FS_IOC_SETFLAGS bool file_dev::modify_fattr(const char *vol_name, int attr, bool set, POOLMEM **error) { bool ret = false; int tmp_fd, ioctl_ret; union { int get_attr; char garbadge1[16]; // be sure that buggy ioctl will not overwrite usefull data }; union { int set_attr; char garbadge2[16]; // be sure that buggy ioctl will not overwrite usefull data }; const char *msg_str = set ? "set" : "cleared"; POOL_MEM fname(PM_FNAME); if (!got_caps_needed) { MmsgD1(DT_VOLUME|50, error, _("Early return from modify_fattr for volume %s, do not have caps needed\n"), vol_name); return false; /* We cannot set needed attributes, no work here */ } if (!is_attribute_supported(attr)) { MmsgD2(DT_VOLUME|50, error, _("File attribute 0x%0x is not supported for volume %s\n"), attr, vol_name); return ret; } get_volume_fpath(vol_name, fname.handle()); if ((tmp_fd = d_open(fname.c_str(), O_RDONLY|O_CLOEXEC)) < 0) { berrno be; MmsgD2(DT_VOLUME|50, error, _("Failed to open %s, ERR=%s"), fname.c_str(), be.bstrerror()); return false; } ioctl_ret = d_ioctl(tmp_fd, FS_IOC_GETFLAGS, (char *)&get_attr); if (ioctl_ret < 0) { berrno be; MmsgD2(DT_VOLUME|50, error, _("Failed to get attributes for %s, ERR=%s"), fname.c_str(), be.bstrerror()); goto bail_out; } /* The flag might be already present, so, no need to set or clear it */ if (set && (get_attr & attr)) { ret = true; goto bail_out; } if (!set && (get_attr & attr) == 0) { ret = true; goto bail_out; } if (set) { /* Add new attribute to the currently set ones */ set_attr = get_attr | attr; } else { /* Inverse the desired attribute and later and it with the current state * so that we clear only desired flag and do not touch all the rest */ int rev_mask = ~attr; set_attr = get_attr & rev_mask; } ioctl_ret = d_ioctl(tmp_fd, FS_IOC_SETFLAGS, (char *)&set_attr); if (ioctl_ret < 0) { berrno be; if (set) { MmsgD3(DT_VOLUME|50, error, _("Failed to set 0x%0x attribute for %s, err: %d\n"), attr, fname.c_str(), errno); } else { MmsgD3(DT_VOLUME|50, error, _("Failed to clear 0x%0x attribute for %s, err: %d\n"), attr, fname.c_str(), errno); } goto bail_out; } Dmsg3(DT_VOLUME|50, "Attribute: 0x%08x was %s for volume: %s\n", attr, msg_str, fname.c_str()); ret = true; bail_out: if (tmp_fd >= 0) { d_close(tmp_fd); } return ret; } #else bool file_dev::modify_fattr(const char *vol_name, int attr, bool set, POOLMEM **error) { MmsgD3(DT_VOLUME|50, error, _("Returning from mocked modify_fattr() for volume: %s, attr: 0x%08x, set: %d\n"), vol_name, attr, set); return false; } #endif // HAVE_FS_IOC_SETFLAGS bool file_dev::set_fattr(const char *vol_name, int attr, POOLMEM **error) { return modify_fattr(vol_name, attr, true, error); } bool file_dev::clear_fattr(const char *vol_name, int attr, POOLMEM **error) { return modify_fattr(vol_name, attr, false, error); } #ifdef HAVE_APPEND_FL bool file_dev::append_open_needed(const char *vol_name) { return check_for_attr(vol_name, FS_APPEND_FL); } bool file_dev::set_append_only(const char *vol_name, POOLMEM **error) { return set_fattr(vol_name, 
FS_APPEND_FL, error); } bool file_dev::clear_append_only(const char *vol_name, POOLMEM **error) { return clear_fattr(vol_name, FS_APPEND_FL, error); } #else bool file_dev::append_open_needed(const char *vol_name) { Dmsg1(DT_VOLUME|50, "Returning from mocked append_open_needed() for volume: %s\n", vol_name); return false; } bool file_dev::set_append_only(const char *vol_name, POOLMEM **error) { MmsgD1(DT_VOLUME|50, error, _("Returning from mocked set_append_only() for volume: %s\n"), vol_name); return false; } bool file_dev::clear_append_only(const char *vol_name, POOLMEM **error) { MmsgD1(DT_VOLUME|50, error, _("Returning from mocked clear_append_only() for volume: %s\n"), vol_name); return false; } #endif // HAVE_APPEND_FL #ifdef HAVE_IMMUTABLE_FL bool file_dev::set_immutable(const char *vol_name, POOLMEM **error) { return set_fattr(vol_name, FS_IMMUTABLE_FL, error); } bool file_dev::clear_immutable(const char *vol_name, POOLMEM **error) { return clear_fattr(vol_name, FS_IMMUTABLE_FL, error); } bool file_dev::check_for_immutable(const char* vol_name) { return check_for_attr(vol_name, FS_IMMUTABLE_FL); } #else bool file_dev::set_immutable(const char *vol_name, POOLMEM **error) { MmsgD1(DT_VOLUME|50, error, _("Returning from mocked set_immutable() for volume: %s\n"), vol_name); return false; } bool file_dev::clear_immutable(const char *vol_name, POOLMEM **error) { MmsgD1(DT_VOLUME|50, error, _("Returning from mocked clear_immutable() for volume: %s\n"), vol_name); return false; } bool file_dev::check_for_immutable(const char* vol_name) { Dmsg1(DT_VOLUME|50, _("Returning from mocked check_for_immutable() for volume: %s\n"), vol_name); return true; } #endif // HAVE_IMMUTABLE_FL bool file_dev::check_for_read_only(int fd, const char *vol) { if (!device->set_vol_read_only) { return false; // Feature not used } struct stat sp; POOL_MEM fname; get_volume_fpath(vol, fname.handle()); if (bstat(fd, fname.c_str(), &sp) < 0) { return false; // Not found, no problem? } if ((sp.st_mode & 07777) == S_IRUSR) { return true; } return false; } bool check_for_immutable(const char *vol_name); /* * Position device to end of medium (end of data) * Returns: true on succes * false on error */ bool file_dev::eod(DCR *dcr) { boffset_t pos; Enter(100); if (m_fd < 0) { dev_errno = EBADF; Mmsg1(errmsg, _("Bad call to eod. Device %s not open\n"), print_name()); Dmsg1(100, "%s", errmsg); return false; } if (at_eot()) { Leave(100); return true; } clear_eof(); /* remove EOF flag */ block_num = file = 0; file_size = 0; file_addr = 0; if (is_fifo()) { Leave(100); return true; } pos = lseek(dcr, (boffset_t)0, SEEK_END); Dmsg1(200, "====== Seek to %lld\n", pos); if (pos >= 0) { update_pos(dcr); set_eot(); Leave(100); return true; } dev_errno = errno; berrno be; Mmsg2(errmsg, _("lseek error on %s. 
ERR=%s.\n"), print_name(), be.bstrerror()); Dmsg1(100, "%s", errmsg); Leave(100); return false; } const char *file_dev::print_type() { return "File"; } int file_dev::device_specific_init(JCR *jcr, DEVRES *device) { // Called by child to get the CAP_LSEEK capabilities |= CAP_LSEEK; return 0; } bool file_dev::is_fs_nearly_full(uint64_t threshold) { uint64_t freeval, totalval; get_freespace(&freeval, &totalval); if (totalval > 0) { if (freeval < threshold) { return true; } } return false; } bool file_dev::get_os_device_freespace() { int64_t freespace, totalspace; if (fs_get_free_space(dev_name, &freespace, &totalspace) == 0) { set_freespace(freespace, totalspace, 0, true); Mmsg(errmsg, ""); return true; } else { set_freespace(0, 0, 0, false); /* No valid freespace */ } return false; } bacula-15.0.3/src/stored/dev.c0000644000175000017500000011742614771010173015706 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * * dev.c -- low level operations on device (storage device) * * written by, Kern Sibbald, MM * * NOTE!!!! None of these routines are reentrant. You must * use dev->rLock() and dev->Unlock() at a higher level, * or use the xxx_device() equivalents. By moving the * thread synchronization to a higher level, we permit * the higher level routines to "seize" the device and * to carry out operations without worrying about who * set what lock (i.e. race conditions). * * Note, this is the device dependent code, and may have * to be modified for each system, but is meant to * be as "generic" as possible. * * The purpose of this code is to develop a SIMPLE Storage * daemon. More complicated coding (double buffering, writer * thread, ...) is left for a later version. */ /* * Handling I/O errors and end of tape conditions are a bit tricky. * This is how it is currently done when writing. * On either an I/O error or end of tape, * we will stop writing on the physical device (no I/O recovery is * attempted at least in this daemon). The state flag will be sent * to include ST_EOT, which is ephemeral, and ST_WEOT, which is * persistent. Lots of routines clear ST_EOT, but ST_WEOT is * cleared only when the problem goes away. Now when ST_WEOT * is set all calls to write_block_to_device() call the fix_up * routine. In addition, all threads are blocked * from writing on the tape by calling lock_dev(), and thread other * than the first thread to hit the EOT will block on a condition * variable. The first thread to hit the EOT will continue to * be able to read and write the tape (he sort of tunnels through * the locking mechanism -- see lock_dev() for details). * * Now presumably somewhere higher in the chain of command * (device.c), someone will notice the EOT condition and * get a new tape up, get the tape label read, and mark * the label for rewriting. Then this higher level routine * will write the unwritten buffer to the new volume. * Finally, he will release * any blocked threads by doing a broadcast on the condition * variable. 
At that point, we should be totally back in * business with no lost data. */ #include "bacula.h" #include "stored.h" #ifndef O_NONBLOCK #define O_NONBLOCK 0 #endif static const int dbglvl = 150; /* Imported functions and variables */ extern void set_os_device_parameters(DCR *dcr); extern bool dev_get_os_pos(DEVICE *dev, struct mtget *mt_stat); extern uint32_t status_dev(DEVICE *dev); /* Forward referenced functions */ const char *mode_to_str(int mode); DEVICE *m_init_dev(JCR *jcr, DEVRES *device, bool adata); /* To open device readonly when appropriate, like for bls for dedup devices */ int device_default_open_mode = omd_rdonly; /* * Device specific initialization. */ int DEVICE::device_specific_init(JCR *jcr, DEVRES *device) { return 0; // OK } int DEVICE::device_specific_close(DCR *dcr) { int ret = 0; if (m_fd >= 0) { ret = d_close(m_fd) ; clear_opened(); } return ret; } /* * Initialize the device with the operating system and * initialize buffer pointers. * * Returns: true device already open * false device setup but not open * * Note, for a tape, the VolName is the name we give to the * volume (not really used here), but for a file, the * VolName represents the name of the file to be created/opened. * In the case of a file, the full name is the device name * (archive_name) with the VolName concatenated. * * This is generic common code. It should be called prior to any * device specific code. Note! This does not open anything. */ bool DEVICE::open_device(DCR *dcr, int omode) { Enter(dbglvl); preserve = 0; ASSERT2(!adata, "Attempt to open adata dev"); if (is_open()) { if (openmode == omode) { Leave(dbglvl); return true; } else { Dmsg1(200, "Close fd=%d for mode change in open().\n", m_fd); device_specific_close(dcr); preserve = state & (ST_LABEL|ST_APPEND|ST_READ); } } openmode = omode; if (dcr) { dcr->setVolCatName(dcr->VolumeName); VolCatInfo = dcr->VolCatInfo; /* structure assign */ } state &= ~(ST_NOSPACE|ST_LABEL|ST_APPEND|ST_READ|ST_EOT|ST_WEOT|ST_EOF); label_type = B_BACULA_LABEL; if (openmode == OPEN_READ_WRITE && has_cap(CAP_STREAM)) { openmode = OPEN_WRITE_ONLY; } Leave(dbglvl); return false; } void DEVICE::set_mode(int new_mode) { switch (new_mode) { case CREATE_READ_WRITE: mode = O_CREAT | O_RDWR | O_BINARY; break; case OPEN_READ_WRITE: mode = O_RDWR | O_BINARY; break; case OPEN_READ_ONLY: mode = O_RDONLY | O_BINARY; break; case OPEN_WRITE_ONLY: mode = O_WRONLY | O_BINARY; break; default: Jmsg0(NULL, M_ABORT, 0, _("Illegal mode given to open dev.\n")); } } /* * Called to indicate that we have just read an * EOF from the device. */ void DEVICE::set_ateof() { set_eof(); file_addr = 0; set_file_size(0); block_num = 0; } /* * Called to indicate we are now at the end of the tape, and * writing is not possible. */ void DEVICE::set_ateot() { /* Make tape effectively read-only */ Dmsg0(200, "==== Set AtEof\n"); state |= (ST_EOF|ST_EOT|ST_WEOT); clear_append(); } /* * Set the position of the device -- only for files * For other devices, there is no generic way to do it. * Returns: true on succes * false on error */ bool DEVICE::update_pos(DCR *dcr) { boffset_t pos; bool ok = true; if (!is_open()) { dev_errno = EBADF; Mmsg0(errmsg, _("Bad device call. Device not open\n")); Emsg1(M_FATAL, 0, "%s", errmsg); return false; } if (is_file()) { file = 0; file_addr = 0; pos = lseek(dcr, (boffset_t)0, SEEK_CUR); if (pos < 0) { berrno be; dev_errno = errno; Pmsg1(000, _("Seek error: ERR=%s\n"), be.bstrerror()); Mmsg2(errmsg, _("lseek error on %s. 
ERR=%s.\n"), print_name(), be.bstrerror()); ok = false; } else { file_addr = pos; block_num = (uint32_t)pos; file = (uint32_t)(pos >> 32); } } return ok; } void DEVICE::set_slot(int32_t slot) { m_slot = slot; if (vol) vol->clear_slot(); } void DEVICE::clear_slot() { m_slot = -1; if (vol) vol->set_slot(-1); } /* * Set to unload the current volume in the drive */ void DEVICE::set_unload() { if (!m_unload && VolHdr.VolumeName[0] != 0) { m_unload = true; notify_newvol_in_attached_dcrs(NULL); } } /* * Clear volume header */ void DEVICE::clear_volhdr() { Dmsg1(100, "Clear volhdr vol=%s\n", VolHdr.VolumeName); memset(&VolHdr, 0, sizeof(VolHdr)); setVolCatInfo(false); } void DEVICE::set_volcatinfo_from_dcr(DCR *dcr) { VolCatInfo = dcr->VolCatInfo; } bool DEVICE::sync_data(DCR *dcr) { int ret=true; #ifndef HAVE_WIN32 if (!has_cap(CAP_SYNCONCLOSE)) { return true; } if (!is_open()) { Dmsg2(200, "device %s closed vol=%s\n", print_name(), VolHdr.VolumeName); return true; /* already closed */ } while ((ret = fsync(m_fd)) < 0 && errno == EINTR) { bmicrosleep(0, 5000); } if (ret < 0) { berrno be; dev_errno = errno; Mmsg(errmsg, _("Error syncing volume \"%s\" on device %s. ERR=%s."), VolHdr.VolumeName, print_name(), be.bstrerror()); return false; } ret = true; #endif return ret; } /* * Close the device * Can enter with dcr==NULL */ bool DEVICE::close(DCR *dcr) { bool ok = true; Dmsg5(40, "close_dev vol=%s fd=%d dev=%p adata=%d dev=%s\n", VolHdr.VolumeName, m_fd, this, adata, print_name()); offline_or_rewind(dcr); if (!is_open()) { Dmsg2(200, "device %s already closed vol=%s\n", print_name(), VolHdr.VolumeName); return true; /* already closed */ } switch (dev_type) { case B_VTL_DEV: case B_VTAPE_DEV: case B_TAPE_DEV: unlock_door(); /* Fall through wanted */ default: if (d_close(m_fd) != 0) { berrno be; dev_errno = errno; Mmsg(errmsg, _("Error closing volume \"%s\" device %s. ERR=%s.\n"), VolHdr.VolumeName, print_name(), be.bstrerror()); ok = false; } break; } unmount(1); /* do unmount if required */ /* Clean up device packet so it can be reused */ clear_opened(); /* * Only when SAN shared storage configured, * when the device is unloaded, the slot information * can be incorrect, so clear the slot. */ /* ***BEEF ***/ if (is_tape() && device->lock_command && device->control_name) { clear_slot(); } state &= ~(ST_LABEL|ST_READ|ST_APPEND|ST_EOT|ST_WEOT|ST_EOF| ST_NOSPACE|ST_MOUNTED|ST_MEDIA|ST_SHORT); label_type = B_BACULA_LABEL; file = block_num = 0; set_file_size(0); file_addr = 0; EndFile = EndBlock = 0; openmode = 0; clear_volhdr(); memset(&VolCatInfo, 0, sizeof(VolCatInfo)); if (tid) { stop_thread_timer(tid); tid = 0; } return ok; } /* * If timeout, wait until the mount command returns 0. * If !timeout, try to mount the device only once. */ bool DEVICE::mount(int timeout) { Enter(dbglvl); if (!is_mounted() && device->mount_command) { Leave(dbglvl); return mount_file(1, timeout); } Leave(dbglvl); return true; } /* * Unmount the device * If timeout, wait until the unmount command returns 0. * If !timeout, try to unmount the device only once. 
*/ bool DEVICE::unmount(int timeout) { Enter(dbglvl); if (is_mounted() && requires_mount() && device->unmount_command) { Leave(dbglvl); return mount_file(0, timeout); } Leave(dbglvl); return true; } /* * Edit codes into (Un)MountCommand, Write(First)PartCommand * %% = % * %a = archive device name * %e = erase (set if cannot mount and first part) * %n = part number * %m = mount point * %v = last part name * * omsg = edited output message * imsg = input string containing edit codes (%x) * */ void DEVICE::edit_mount_codes(POOL_MEM &omsg, const char *imsg) { const char *p; const char *str; char add[20]; POOL_MEM archive_name(PM_FNAME); omsg.c_str()[0] = 0; Dmsg1(800, "edit_mount_codes: %s\n", imsg); for (p=imsg; *p; p++) { if (*p == '%') { switch (*++p) { case '%': str = "%"; break; case 'a': str = dev_name; break; case 'e': str = "0"; /* ***FIXME*** this may be useful for Cloud */ #ifdef xxx if (num_dvd_parts == 0) { if (truncating || blank_dvd) { str = "2"; } else { str = "1"; } } else { str = "0"; } #endif break; case 'n': bsnprintf(add, sizeof(add), "%d", part); str = add; break; case 'm': str = device->mount_point; break; default: add[0] = '%'; add[1] = *p; add[2] = 0; str = add; break; } } else { add[0] = *p; add[1] = 0; str = add; } Dmsg1(1900, "add_str %s\n", str); pm_strcat(omsg, (char *)str); Dmsg1(1800, "omsg=%s\n", omsg.c_str()); } } /* return the last timer interval (ms) * or 0 if something goes wrong */ btime_t DEVICE::get_timer_count() { btime_t temp = last_timer; last_timer = get_current_btime(); temp = last_timer - temp; /* get elapsed time */ return (temp>0)?temp:0; /* take care of skewed clock */ } /* read from fd */ ssize_t DEVICE::read(void *buf, size_t len) { ssize_t read_len; ssize_t stat_read_len = 0; get_timer_count(); read_len = d_read(m_fd, buf, len); last_tick = get_timer_count(); DevReadTime += last_tick; VolCatInfo.VolReadTime += last_tick; if (read_len > 0) { /* skip error */ DevReadBytes += read_len; stat_read_len = read_len; } collector_update_add2_value_int64(devstatcollector, devstatmetrics.bacula_storage_device_readbytes, stat_read_len, devstatmetrics.bacula_storage_device_readtime, last_tick); return read_len; } /* write to fd */ ssize_t DEVICE::write(const void *buf, size_t len) { ssize_t write_len; ssize_t stat_write_len = 0; get_timer_count(); write_len = d_write(m_fd, buf, len); last_tick = get_timer_count(); DevWriteTime += last_tick; VolCatInfo.VolWriteTime += last_tick; if (write_len > 0) { /* skip error */ DevWriteBytes += write_len; stat_write_len = write_len; } collector_update_add2_value_int64(devstatcollector, devstatmetrics.bacula_storage_device_writebytes, stat_write_len, devstatmetrics.bacula_storage_device_writetime, last_tick); return write_len; } /* to make the difference with an "real" auto changer */ bool DEVICE::is_virtual_autochanger() const { return device->changer_command && (device->changer_command[0] == 0 || strcmp(device->changer_command, "/dev/null") == 0); } /* Return the resource name for the device */ const char *DEVICE::name() const { return device->hdr.name; } uint32_t DEVICE::get_file() { if (is_tape()) { return file; } else { uint64_t bytes = VolCatInfo.VolCatAdataBytes + VolCatInfo.VolCatAmetaBytes; return (uint32_t)(bytes >> 32); } } uint32_t DEVICE::get_block_num() { if (is_tape()) { return block_num; } else { return VolCatInfo.VolCatAdataBlocks + VolCatInfo.VolCatAmetaBlocks; } } /* * Walk through all attached jcrs indicating the volume has changed * Note: If you have the new VolumeName, it is passed here, * otherwise 
pass a NULL. */ void DEVICE::notify_newvol_in_attached_dcrs(const char *newVolumeName) { Dmsg2(140, "Notify dcrs of vol change. oldVolume=%s NewVolume=%s\n", getVolCatName(), newVolumeName?newVolumeName:"*None*"); Lock_dcrs(); DCR *mdcr; foreach_dlist(mdcr, attached_dcrs) { if (mdcr->jcr->JobId == 0) { continue; /* ignore console */ } mdcr->NewVol = true; mdcr->NewFile = true; if (newVolumeName && mdcr->VolumeName != newVolumeName) { bstrncpy(mdcr->VolumeName, newVolumeName, sizeof(mdcr->VolumeName)); Dmsg2(140, "Set NewVol=%s in JobId=%d\n", mdcr->VolumeName, mdcr->jcr->JobId); } } Unlock_dcrs(); } /* * Walk through all attached jcrs indicating the File has changed */ void DEVICE::notify_newfile_in_attached_dcrs() { Dmsg1(140, "Notify dcrs of file change. Volume=%s\n", getVolCatName()); Lock_dcrs(); DCR *mdcr; foreach_dlist(mdcr, attached_dcrs) { if (mdcr->jcr->JobId == 0) { continue; /* ignore console */ } Dmsg1(140, "Notify JobI=%d\n", mdcr->jcr->JobId); mdcr->NewFile = true; } Unlock_dcrs(); } /* * Free memory allocated for the device * Can enter with dcr==NULL */ void DEVICE::term(DCR *dcr) { Dmsg1(900, "term dev: %s\n", print_name()); if (!dcr) { d_close(m_fd); } else { close(dcr); } if (dev_name) { free_memory(dev_name); dev_name = NULL; } if (adev_name) { free_memory(adev_name); adev_name = NULL; } if (prt_name) { free_memory(prt_name); prt_name = NULL; } if (errmsg) { free_pool_memory(errmsg); errmsg = NULL; } pthread_mutex_destroy(&m_mutex); pthread_cond_destroy(&wait); pthread_cond_destroy(&wait_next_vol); pthread_mutex_destroy(&spool_mutex); pthread_mutex_destroy(&freespace_mutex); if (attached_dcrs) { delete attached_dcrs; attached_dcrs = NULL; } /* We let the DEVRES pointer if not our device */ if (device && device->dev == this) { device->dev = NULL; } if (crypto_device_ctx) { block_cipher_context_free(crypto_device_ctx); crypto_device_ctx = NULL; } delete this; } /* Get freespace values */ void DEVICE::get_freespace(uint64_t *freeval, uint64_t *totalval) { if (get_os_device_freespace()) { P(freespace_mutex); if (is_freespace_ok()) { *freeval = free_space; *totalval = total_space; } else { *freeval = *totalval = 0; } V(freespace_mutex); } } /* Set freespace values */ void DEVICE::set_freespace(uint64_t freeval, uint64_t totalval, int errnoval, bool valid) { P(freespace_mutex); free_space = freeval; total_space = totalval; free_space_errno = errnoval; if (valid) { set_freespace_ok(); } else { clear_freespace_ok(); } V(freespace_mutex); } /* Convenient function that return true only if the device back-end is a * filesystem that its nearly full. 
(the free space is below the given threshold) */ bool DEVICE::is_fs_nearly_full(uint64_t threshold) { return false; } static const char *modes[] = { "CREATE_READ_WRITE", "OPEN_READ_WRITE", "OPEN_READ_ONLY", "OPEN_WRITE_ONLY" }; const char *mode_to_str(int mode) { static char buf[100]; if (mode < 1 || mode > 4) { bsnprintf(buf, sizeof(buf), "BAD mode=%d", mode); return buf; } return modes[mode-1]; } void DEVICE::setVolCatName(const char *name) { bstrncpy(VolCatInfo.VolCatName, name, sizeof(VolCatInfo.VolCatName)); setVolCatInfo(false); } void DEVICE::setVolCatStatus(const char *status) { bstrncpy(VolCatInfo.VolCatStatus, status, sizeof(VolCatInfo.VolCatStatus)); setVolCatInfo(false); } void DEVICE::updateVolCatBytes(uint64_t bytes) { Lock_VolCatInfo(); VolCatInfo.VolCatAmetaBytes += bytes; VolCatInfo.VolCatBytes += bytes; VolCatInfo.BytesWritten += bytes; /* We keep the count of written bytes since the last catalog update */ setVolCatInfo(false); Unlock_VolCatInfo(); } /* Used by subclass like dedup to take account of bytes going into dedup. * The accounting is expected to be done in VolCatAdataBytes & VolCatBytes */ void DEVICE::updateVolCatExtraBytes(uint64_t bytes) { } /* * Note, for us a hole is bytes skipped. The OS * may have a different idea of a hole due to * the filesystem block size. */ void DEVICE::updateVolCatHoleBytes(uint64_t hole) { return; } void DEVICE::updateVolCatPadding(uint64_t padding) { Lock_VolCatInfo(); VolCatInfo.VolCatAmetaPadding += padding; VolCatInfo.VolCatPadding += padding; setVolCatInfo(false); Unlock_VolCatInfo(); } void DEVICE::updateVolCatBlocks(uint32_t blocks) { Lock_VolCatInfo(); VolCatInfo.VolCatAmetaBlocks += blocks; VolCatInfo.VolCatBlocks += blocks; setVolCatInfo(false); Unlock_VolCatInfo(); } void DEVICE::updateVolCatWrites(uint32_t writes) { Lock_VolCatInfo(); VolCatInfo.VolCatAmetaWrites += writes; VolCatInfo.VolCatWrites += writes; setVolCatInfo(false); Unlock_VolCatInfo(); } void DEVICE::updateVolCatReads(uint32_t reads) { Lock_VolCatInfo(); VolCatInfo.VolCatAmetaReads += reads; VolCatInfo.VolCatReads += reads; setVolCatInfo(false); Unlock_VolCatInfo(); } void DEVICE::updateVolCatReadBytes(uint64_t bytes) { Lock_VolCatInfo(); VolCatInfo.VolCatAmetaRBytes += bytes; VolCatInfo.VolCatRBytes += bytes; setVolCatInfo(false); Unlock_VolCatInfo(); } void DEVICE::set_nospace() { state |= ST_NOSPACE; } void DEVICE::clear_nospace() { state &= ~ST_NOSPACE; } /* Put device in append mode */ void DEVICE::set_append() { state &= ~(ST_NOSPACE|ST_READ|ST_EOT|ST_EOF|ST_WEOT); /* remove EOF/EOT flags */ state |= ST_APPEND; } /* Clear append mode */ void DEVICE::clear_append() { state &= ~ST_APPEND; } /* Put device in read mode */ void DEVICE::set_read() { state &= ~(ST_APPEND|ST_EOT|ST_EOF|ST_WEOT); /* remove EOF/EOT flags */ state |= ST_READ; } /* Clear read mode */ void DEVICE::clear_read() { state &= ~ST_READ; } /* * Get freespace using OS calls * TODO: See if it's working with mount commands */ bool DEVICE::get_os_device_freespace() { return false; } /* Update the free space on the device */ bool DEVICE::update_freespace() { POOL_MEM ocmd(PM_FNAME); POOLMEM* results; char* icmd; char* p; uint64_t free, total; char ed1[50]; bool ok = false; int status; berrno be; if (!is_file()) { Mmsg(errmsg, ""); return true; } /* The device must be mounted in order for freespace to work */ if (requires_mount()) { mount(1); } if (get_os_device_freespace()) { Dmsg4(20, "get_os_device_freespace: free_space=%s freespace_ok=%d free_space_errno=%d have_media=%d\n", 
edit_uint64(free_space, ed1), !!is_freespace_ok(), free_space_errno, !!have_media()); return true; } icmd = device->free_space_command; if (!icmd) { set_freespace(0, 0, 0, false); Dmsg2(20, "ERROR: update_free_space_dev: free_space=%s, free_space_errno=%d (!icmd)\n", edit_uint64(free_space, ed1), free_space_errno); Mmsg(errmsg, _("No FreeSpace command defined.\n")); return false; } edit_mount_codes(ocmd, icmd); Dmsg1(20, "update_freespace: cmd=%s\n", ocmd.c_str()); results = get_pool_memory(PM_MESSAGE); Dmsg1(20, "Run freespace prog=%s\n", ocmd.c_str()); status = run_program_full_output(ocmd.c_str(), max_open_wait/2, results); Dmsg2(20, "Freespace status=%d result=%s\n", status, results); /* Should report "1223232 12323232\n" "free total\n" */ if (status == 0) { free = str_to_int64(results) * 1024; p = results; if (skip_nonspaces(&p)) { total = str_to_int64(p) * 1024; } else { total = 0; } Dmsg1(400, "Free space program run: Freespace=%s\n", results); if (free >= 0) { set_freespace(free, total, 0, true); /* have valid freespace */ Mmsg(errmsg, ""); ok = true; } } else { set_freespace(0, 0, EPIPE, false); /* no valid freespace */ Mmsg2(errmsg, _("Cannot run free space command. Results=%s ERR=%s\n"), results, be.bstrerror(status)); dev_errno = free_space_errno; Dmsg4(20, "Cannot get free space on device %s. free_space=%s, " "free_space_errno=%d ERR=%s\n", print_name(), edit_uint64(free_space, ed1), free_space_errno, errmsg); } free_pool_memory(results); Dmsg4(20, "leave update_freespace: free_space=%s freespace_ok=%d free_space_errno=%d have_media=%d\n", edit_uint64(free_space, ed1), !!is_freespace_ok(), free_space_errno, !!have_media()); return ok; } bool DEVICE::weof(DCR */*dcr*/, int num) { Dmsg1(129, "=== weof_dev=%s\n", print_name()); if (!is_open()) { dev_errno = EBADF; Mmsg1(errmsg, _("Bad call to weof_dev. Device %s not open\n"), print_name()); Emsg0(M_FATAL, 0, errmsg); return false; } if (!can_append()) { Mmsg1(errmsg, _("Attempt to WEOF on non-appendable Volume %s\n"), VolHdr.VolumeName); Emsg0(M_FATAL, 0, errmsg); return false; } set_file_size(0); return true; } /* * Very basic functions -- no device specific code. * Returns: true on succes * false on error */ bool DEVICE::eod(DCR *dcr) { bool ok = true; Enter(dbglvl); if (m_fd < 0) { dev_errno = EBADF; Mmsg1(errmsg, _("Bad call to eod. 
Device %s not open\n"), print_name()); Dmsg1(100, "%s", errmsg); Leave(dbglvl); return false; } if (at_eot()) { Leave(100); return true; } clear_eof(); /* remove EOF flag */ block_num = file = 0; set_file_size(0); file_addr = 0; Leave(100); return ok; } bool DEVICE::is_eod_valid(DCR *dcr) { return true; } bool DEVICE::open_next_part(DCR */*dcr*/) { return true; } bool DEVICE::close_part(DCR */*dcr*/) { return true; } DEVICE *DEVICE::get_dev(DCR */*dcr*/) { return this; } uint32_t DEVICE::get_hi_addr() { return (uint32_t)(file_addr >> 32); } uint32_t DEVICE::get_hi_addr(boffset_t addr) { return (uint32_t)(addr >> 32); } uint32_t DEVICE::get_low_addr() { return (uint32_t)(file_addr); } uint32_t DEVICE::get_low_addr(boffset_t addr) { return (uint32_t)(addr); } uint64_t DEVICE::get_full_addr() { return file_addr; } uint64_t DEVICE::get_full_addr(boffset_t addr) { return addr; } uint64_t DEVICE::get_full_addr(uint32_t hi, uint32_t low) { return ((uint64_t)hi)<<32 | (uint64_t)low; } /* Note: this subroutine is not in the class */ uint64_t get_full_addr(uint32_t hi, uint32_t low) { return ((uint64_t)hi)<<32 | (uint64_t)low; } /* Print the file address */ char *DEVICE::print_addr(char *buf, int32_t buf_len) { buf[0] = 0; bsnprintf(buf, buf_len, "%llu", get_full_addr()); return buf; } char *DEVICE::print_addr(char *buf, int32_t buf_len, boffset_t addr) { buf[0] = 0; bsnprintf(buf, buf_len, "%llu", addr); return buf; } bool DEVICE::do_size_checks(DCR *dcr, DEV_BLOCK *block) { JCR *jcr = dcr->jcr; if (is_pool_size_reached(dcr, true)) { if (!dir_get_pool_info(dcr, &VolCatInfo)) { Dmsg0(50, "Error updating volume info.\n"); } } if (is_user_volume_size_reached(dcr, true)) { Dmsg0(50, "Calling terminate_writing_volume\n"); terminate_writing_volume(dcr); reread_last_block(dcr); /* Only used on tapes */ dev_errno = ENOSPC; return false; } /* * Limit maximum File size on volume to user specified value. * In practical terms, this means to put an EOF mark on * a tape after every X bytes. This effectively determines * how many index records we have (JobMedia). If you set * max_file_size too small, it will cause a lot of shoe-shine * on very fast modern tape (LTO-3 and above). */ if ((max_file_size > 0) && (file_size+block->binbuf) >= max_file_size) { set_file_size(0); /* reset file size */ if (!weof(dcr, 1)) { /* write eof */ Dmsg0(50, "WEOF error in max file size.\n"); Jmsg(jcr, M_FATAL, 0, _("Unable to write EOF. ERR=%s\n"), bstrerror()); Dmsg0(40, "Calling terminate_writing_volume\n"); terminate_writing_volume(dcr); dev_errno = ENOSPC; return false; } if (!do_new_file_bookkeeping(dcr)) { /* Error message already sent */ return false; } } return true; } bool DEVICE::get_tape_alerts(DCR *dcr) { return true; } void DEVICE::show_tape_alerts(DCR *dcr, alert_list_type type, alert_list_which which, alert_cb alert_callback) { return; } int DEVICE::delete_alerts() { return 0; } // TODO: fill metrics description void DEVICE::register_metrics(bstatcollect *collector) { POOL_MEM met(PM_NAME); devstatcollector = collector; if (!collector){ return; } Dmsg2(100, "DEVICE::register_metrics called. 
0x%p collector=0x%p\n", this, collector); Mmsg(met, "bacula.storage.%s.device.%s.readbytes", me->hdr.name, name()); devstatmetrics.bacula_storage_device_readbytes = devstatcollector->registration(met.c_str(), METRIC_INT, METRIC_UNIT_BYTE, (char*)"The number of bytes read from device."); Mmsg(met, "bacula.storage.%s.device.%s.readtime", me->hdr.name, name()); devstatmetrics.bacula_storage_device_readtime = devstatcollector->registration(met.c_str(), METRIC_INT, METRIC_UNIT_MSEC, (char*)"Time spent reading from device."); Mmsg(met, "bacula.storage.%s.device.%s.readspeed", me->hdr.name, name()); devstatmetrics.bacula_storage_device_readspeed = devstatcollector->registration(met.c_str(), METRIC_FLOAT, METRIC_UNIT_BYTESEC, (char*)"Device read throughput."); Mmsg(met, "bacula.storage.%s.device.%s.writespeed", me->hdr.name, name()); devstatmetrics.bacula_storage_device_writespeed = devstatcollector->registration(met.c_str(), METRIC_FLOAT, METRIC_UNIT_BYTESEC, (char*)"Device write throughput."); Mmsg(met, "bacula.storage.%s.device.%s.status", me->hdr.name, name()); devstatmetrics.bacula_storage_device_status = devstatcollector->registration_bool(met.c_str(), METRIC_UNIT_STATUS, enabled, (char*)"Show if device is enabled (True/1) or disabled (False/0)."); Mmsg(met, "bacula.storage.%s.device.%s.writebytes", me->hdr.name, name()); devstatmetrics.bacula_storage_device_writebytes = devstatcollector->registration(met.c_str(), METRIC_INT, METRIC_UNIT_BYTE, (char*)"The number of bytes written to device."); Mmsg(met, "bacula.storage.%s.device.%s.writetime", me->hdr.name, name()); devstatmetrics.bacula_storage_device_writetime = devstatcollector->registration(met.c_str(), METRIC_INT, METRIC_UNIT_MSEC, (char*)"Time spent writing to device."); /* total and free space metrics registration */ Mmsg(met, "bacula.storage.%s.device.%s.freespace", me->hdr.name, name()); devstatmetrics.bacula_storage_device_freespace = devstatcollector->registration(met.c_str(), METRIC_INT, METRIC_UNIT_BYTE, (char*)"The size of available space of the disk storage for device (could be shared)."); Mmsg(met, "bacula.storage.%s.device.%s.totalspace", me->hdr.name, name()); devstatmetrics.bacula_storage_device_totalspace = devstatcollector->registration(met.c_str(), METRIC_INT, METRIC_UNIT_BYTE, (char*)"The size of the disk storage for device (could be shared)."); }; bool DEVICE::get_tape_worm(DCR *dcr) { return false; } int DEVICE::use_volume_encryption() { return device->volume_encryption != ET_NO; }; bool DEVICE::load_encryption_key(DCR *dcr, const char *operation, const char *volume_name, uint32_t *enc_cipher_key_size, unsigned char *enc_cipher_key, uint32_t *master_keyid_size, unsigned char *master_keyid) { JCR *jcr = dcr->jcr; enum { op_none, op_label, op_read }; bool ok = true; // No error Dmsg4(100, "load_encryption_key %s %s enc=%ld ver=%d\n", operation, volume_name, device->volume_encryption, VolHdr.BlockVer); int op = op_none; if (0 == strcmp(operation, "LABEL")) { op = op_label; } else if (0 == strcmp(operation, "READ")) { op = op_read; } /* don't use encryption if volume encryption is not enable or we are reading * (aka not recycling) a BB02 volume */ if (device->volume_encryption == ET_NO && (op != op_label && (VolHdr.blkh_options & BLKHOPT_ENCRYPT_VOL))) { /* we expect an error later */ Jmsg1(jcr, M_WARNING, 0, _("Trying to read encrypted volume \"%s\" on an un-encrypted device\n"), volume_name); } if (device->volume_encryption == ET_NO || (op != op_label && !(VolHdr.blkh_options & BLKHOPT_ENCRYPT_VOL))) { return true; /* 
No error */ } if (me->encryption_command == NULL || me->encryption_command[0] == '\0') { if (jcr == NULL || !jcr->is_job_canceled()) { Jmsg0(jcr, M_FATAL, 0, _("The \"Encryption Command\" is empty, impossible to call the key-manager program\n")); } return false; /* error */ } POOLMEM *encrypt_program = get_pool_memory(PM_FNAME); POOL_MEM results(PM_MESSAGE); POOL_MEM err_msg(PM_MESSAGE); POOL_MEM envv; edit_device_codes(dcr, &encrypt_program, me->encryption_command, "load"); char *envp[5]; Mmsg(envv, "OPERATION=%s", operation); envp[0] = bstrdup(envv.c_str()); Mmsg(envv, "VOLUME_NAME=%s", volume_name); envp[1] = bstrdup(envv.c_str()); if (op == op_read && enc_cipher_key != NULL && *enc_cipher_key_size > 0) { char buf[2*MAX_ENC_CIPHER_KEY_LEN]; // for the base64 encoded enc_cipherkey bin_to_base64_pad(buf, sizeof(buf), (char *)enc_cipher_key, *enc_cipher_key_size); Mmsg(envv, "ENC_CIPHER_KEY=%s", buf); } else { Mmsg(envv, "ENC_CIPHER_KEY="); } envp[2] = bstrdup(envv.c_str()); if (op == op_read && master_keyid != NULL && *master_keyid_size > 0) { char buf[2*MAX_MASTERKEY_ID_LEN]; // for the base64 encoded masterkey_id bin_to_base64_pad(buf, sizeof(buf), (char *)master_keyid, *master_keyid_size); Mmsg(envv, "MASTER_KEYID=%s", buf); } else { Mmsg(envv, "MASTER_KEYID="); } envp[3] = bstrdup(envv.c_str()); envp[4] = NULL; Dmsg3(60, "Run keymanager op=%s volume=%s %s\n", operation, volume_name, encrypt_program); for (char **p=envp; *p!=NULL; p++) Dmsg1(200, "keymanager query %s\n", *p); uint32_t timeout = 60; int status = run_program_full_output(encrypt_program, timeout, results.addr(), envp); free_pool_memory(encrypt_program); for (unsigned i = 0; i < sizeof(envp)/sizeof(char *)-1; i++) { bfree(envp[i]); } block_cipher_type cipher = BLOCK_CIPHER_NONE; const char *cipher_name = "undefined"; int cipher_key_size = 0; /* from the cipher */ char *in_cipher_key = NULL; int in_cipher_key_size = 0; char *in_enc_cipher_key = NULL; int in_enc_cipher_key_size = 0; int in_master_keyid_size = 0; char *in_master_keyid = NULL; char *comment = NULL; char keybuf[4096], enckeybuf[4096], masterkeyidbuf[4096]; if (status == 0) { if (chk_dbglvl(200)) { /* display the response of the key manager */ char *s = results.c_str(); while (*s != '\0') { char *e = strchr(s, '\n'); if (e != NULL) { char c = *e; *e = '\0'; Dmsg1(200, "keymanager response %s\n", s); *e = c; s = e + 1; } } } /* iterate line to retrieve field */ char *p = results.c_str(); char *fieldname = NULL; char *value = NULL; char *end; while (*p != '\0') { while (isspace(*p)) p++; fieldname = p; /* start of the fieldname */ while (isalnum(*p) || *p == '_') p++; if (fieldname == p) { /* fieldname is empty, EOF */ break; } if (isblank(*p) || *p=='=' || *p==':') { end = p; } else { Dmsg1(10, "keymanager response format mismatch at %d\n", p-results.c_str()); Mmsg(err_msg, "line format mismatch"); break; } while (isblank(*p)) p++; /* skip blank just before the separator */ if (*p != '=' && *p != ':') { Dmsg2(10, "keymanager response wrong separator %d %d\n", p-results.c_str(), *p); Mmsg(err_msg, "wrong separator"); break; } p++; /* skip the separator */ while (isblank(*p)) p++; /* skip blank just after the separator */ *end = '\0'; /* end the fieldname */ value = p; /* start of the value */ while (*p && *p != '\n') p++; if (*p != '\0') { *p = '\0'; p++; } Dmsg3(200, "keymanager response fieldname=%s value=\"%s\" pos=%d\n", fieldname, value, p-results.c_str()); if (0==strcmp("error", fieldname)) { Mmsg(err_msg, "got error message: \"%s\"", value); break; } else 
if (0==strcmp("volume_name", fieldname)) { /* ignore */ } else if (0==strcmp("comment", fieldname)) { comment=value; /* ignore */ } else if (0==strcmp("cipher", fieldname)) { cipher_name = value; if (0==strcasecmp("AES_128_XTS", value)) { cipher = BLOCK_CIPHER_AES_128_XTS; cipher_key_size = 32; } else if (0==strcasecmp("AES_256_XTS", value)) { cipher = BLOCK_CIPHER_AES_256_XTS; cipher_key_size = 64; } else if (0==strcasecmp("NULL", value)) { cipher = BLOCK_CIPHER_NULL; cipher_key_size = 16; } else { cipher_key_size = 0; Mmsg(err_msg, "unknown cipher: \"%s\"", value); break; } } else if (0==strcmp("cipher_key", fieldname)) { in_cipher_key = value; } else if (0==strcmp("enc_cipher_key", fieldname)) { in_enc_cipher_key = value; } else if (0==strcmp("master_keyid", fieldname)) { in_master_keyid = value; } } if (err_msg.c_str()[0] == '\0') { /* no error, check that we have all the parameter we need */ if (cipher == BLOCK_CIPHER_NONE) { Mmsg(err_msg, "cipher is missing"); } else if (in_cipher_key == NULL) { Mmsg(err_msg, "key is missing"); } } if (err_msg.c_str()[0] == '\0' && in_cipher_key != NULL) { /* no error, check that we can decode the key */ in_cipher_key_size = base64_to_bin(keybuf, sizeof(keybuf), in_cipher_key, strlen(in_cipher_key)); if (cipher_key_size != in_cipher_key_size) { Mmsg(err_msg, "Wrong cipher key size for \"%s\" expect %d, got %d", cipher_name, cipher_key_size, in_cipher_key_size); } } if (err_msg.c_str()[0] == '\0' && in_enc_cipher_key != NULL) { /* no error, check that we can decode the key */ in_enc_cipher_key_size = base64_to_bin(enckeybuf, sizeof(enckeybuf), in_enc_cipher_key, strlen(in_enc_cipher_key)); } if (err_msg.c_str()[0] == '\0' && in_master_keyid != NULL) { /* no error, check that we can decode the master_keyid*/ in_master_keyid_size = base64_to_bin(masterkeyidbuf, sizeof(masterkeyidbuf), in_master_keyid, strlen(in_master_keyid)); } } else { /* status != 0 the script returned an error code */ berrno be; be.set_errno(status); Mmsg(err_msg, "the key-manager returned an error see in key-manager log file, code=%d ERR=%s", status, be.bstrerror()); } if (crypto_device_ctx != NULL) { block_cipher_context_free(crypto_device_ctx); crypto_device_ctx = NULL; } if (err_msg.c_str()[0] == '\0' && in_enc_cipher_key_size > 0) { if (in_enc_cipher_key_size > MAX_ENC_CIPHER_KEY_LEN) { Mmsg(err_msg, "encrypted key is too large"); } else { *enc_cipher_key_size = in_enc_cipher_key_size; memcpy(enc_cipher_key, enckeybuf, *enc_cipher_key_size); } } if (err_msg.c_str()[0] == '\0' && in_master_keyid_size > 0) { if (in_master_keyid_size > MAX_MASTERKEY_ID_LEN) { Mmsg(err_msg, "masterkey id is too large"); } else { *master_keyid_size = in_master_keyid_size; memcpy(master_keyid, masterkeyidbuf, *master_keyid_size); } } if (err_msg.c_str()[0] == '\0') { /* initialize the crypto context */ crypto_device_ctx = block_cipher_context_new(cipher); block_cipher_init_key(crypto_device_ctx, (unsigned char*)keybuf); Jmsg(jcr, M_INFO, 0, _("3305 LoadEncryptionKey for Volume \"%s\", status is OK.\n"), dcr->VolumeName); if (comment!=NULL) { /* ignored for now */ } Dmsg1(60, "load encryption key for volume %s OK\n", dcr->VolumeName); } if (err_msg.c_str()[0] != '\0') { Dmsg2(10, "load encryption key for volume %s Err=%s\n", dcr->VolumeName, err_msg.c_str()); #if 0 Jmsg(jcr, M_FATAL, 0, _("3992 Bad LoadEncryptionKey %s Volume \"%s\": " "ERR=%s\n"), operation, dcr->VolumeName, err_msg.c_str()); #endif if (jcr != NULL) { Mmsg(jcr->errmsg, _("3992 Bad LoadEncryptionKey %s Volume \"%s\": " "ERR=%s\n"), 
operation, dcr->VolumeName, err_msg.c_str()); } ok = false; } else { /* crypto key successfully loaded */ if (op == op_label) { } } return ok; } bacula-15.0.3/src/stored/bscan.c0000644000175000017500000014543714771010173016221 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * * Program to scan a Bacula Volume and compare it with * the catalog and optionally synchronize the catalog * with the tape. * * Kern E. Sibbald, December 2001 */ #include "bacula.h" #include "stored.h" #include "findlib/find.h" #include "cats/cats.h" extern bool parse_sd_config(CONFIG *config, const char *configfile, int exit_code); /* Forward referenced functions */ static void do_scan(void); static bool record_cb(DCR *dcr, DEV_RECORD *rec); static int create_file_attributes_record(JCR *mjcr, ATTR *attrs, DEV_RECORD *rec); static int create_media_record(BDB *db, MEDIA_DBR *mr, VOLUME_LABEL *vl); static bool update_media_record(BDB *db, MEDIA_DBR *mr); static int create_pool_record(BDB *db, POOL_DBR *pr); static JCR *create_job_record(BDB *db, JOB_DBR *mr, SESSION_LABEL *label, DEV_RECORD *rec); static int update_job_record(BDB *db, JOB_DBR *mr, SESSION_LABEL *elabel, DEV_RECORD *rec); static int create_client_record(BDB *db, CLIENT_DBR *cr); static int create_fileset_record(BDB *db, FILESET_DBR *fsr); static int create_jobmedia_record(BDB *db, JCR *jcr); static JCR *create_jcr(JOB_DBR *jr, DEV_RECORD *rec, uint32_t JobId); static int update_digest_record(BDB *db, char *digest, DEV_RECORD *rec, int type); /* Local variables */ static DEVICE *dev = NULL; static BDB *db; static JCR *bjcr; /* jcr for bscan */ static BSR *bsr = NULL; static MEDIA_DBR mr; static POOL_DBR pr; static JOB_DBR jr; static CLIENT_DBR cr; static FILESET_DBR fsr; static ATTR_DBR ar; static FILE_DBR fr; static SESSION_LABEL label; static SESSION_LABEL elabel; static ATTR *attr; static time_t lasttime = 0; static const char *db_driver = "NULL"; static const char *db_name = "bacula"; static const char *db_user = "bacula"; static const char *db_password = ""; static const char *db_host = NULL; static const char *db_ssl_mode = NULL; static const char *db_ssl_key = NULL; static const char *db_ssl_cert = NULL; static const char *db_ssl_ca = NULL; static const char *db_ssl_capath = NULL; static const char *db_ssl_cipher = NULL; static int db_port = 0; static const char *wd = NULL; static bool update_db = false; static bool update_vol_info = false; static bool list_records = false; static int ignored_msgs = 0; static uint64_t currentVolumeSize; static int last_pct = -1; static bool showProgress = false; static int num_jobs = 0; static int num_pools = 0; static int num_media = 0; static int num_files = 0; static int num_plugin_objects = 0; static CONFIG *config; #define CONFIG_FILE "bacula-sd.conf" char *configfile = NULL; static void usage() { fprintf(stderr, _( PROG_COPYRIGHT "\n%sVersion: %s (%s)\n\n" "Usage: bscan [ options ] \n" " -b bootstrap specify a bootstrap file\n" " -c specify 
configuration file\n" " -d set debug level to \n" " -dt print timestamp in debug output\n" " -m update media info in database\n" " -D specify the driver database name (default NULL)\n" " -n specify the database name (default bacula)\n" " -u specify database user name (default bacula)\n" " -P specify database password (default none)\n" " -h specify database host (default NULL)\n" " -t specify database port (default 0)\n" " -p proceed inspite of I/O errors\n" " -r list records\n" " -s synchronize or store in database\n" " -S show scan progress periodically\n" " -v verbose\n" " -V specify Volume names (separated by |)\n" " -w specify working directory (default from conf file)\n" " -? print this message\n\n"), 2001, BDEMO, VERSION, BDATE); exit(1); } int main (int argc, char *argv[]) { int ch; struct stat stat_buf; char *VolumeName = NULL; BtoolsAskDirHandler askdir_handler; init_askdir_handler(&askdir_handler); setlocale(LC_ALL, ""); bindtextdomain("bacula", LOCALEDIR); textdomain("bacula"); init_stack_dump(); lmgr_init_thread(); my_name_is(argc, argv, "bscan"); init_msg(NULL, NULL); OSDependentInit(); while ((ch = getopt(argc, argv, "b:c:d:D:h:o:k:e:a:mn:pP:rsSt:u:vV:w:?")) != -1) { switch (ch) { case 'S' : showProgress = true; break; case 'b': bsr = parse_bsr(NULL, optarg); break; case 'c': /* specify config file */ if (configfile != NULL) { free(configfile); } configfile = bstrdup(optarg); break; case 'D': db_driver = optarg; break; case 'd': /* debug level */ if (*optarg == 't') { dbg_timestamp = true; } else { char *p; /* We probably find a tag list -d 10,sql,bvfs */ if ((p = strchr(optarg, ',')) != NULL) { *p = 0; } debug_level = atoi(optarg); if (debug_level <= 0) { debug_level = 1; } if (p) { debug_parse_tags(p+1, &debug_level_tags); } } break; case 'h': db_host = optarg; break; case 'o': db_ssl_mode = optarg; break; case 'k': db_ssl_key = optarg; break; case 'e': db_ssl_cert = optarg; break; case 'a': db_ssl_ca = optarg; break; case 't': db_port = atoi(optarg); break; case 'm': update_vol_info = true; break; case 'n': db_name = optarg; break; case 'u': db_user = optarg; break; case 'P': db_password = optarg; break; case 'p': forge_on = true; break; case 'r': list_records = true; break; case 's': update_db = true; break; case 'v': verbose++; break; case 'V': /* Volume name */ VolumeName = optarg; break; case 'w': wd = optarg; break; case '?': default: usage(); } } argc -= optind; argv += optind; if (argc != 1) { Pmsg0(0, _("Wrong number of arguments: \n")); usage(); } if (configfile == NULL) { configfile = bstrdup(CONFIG_FILE); } config = New(CONFIG()); parse_sd_config(config, configfile, M_ERROR_TERM); setup_me(); load_sd_plugins(me->plugin_directory); /* Check if -w option given, otherwise use resource for working directory */ if (wd) { working_directory = wd; } else if (!me->working_directory) { Emsg1(M_ERROR_TERM, 0, _("No Working Directory defined in %s. Cannot continue.\n"), configfile); } else { working_directory = me->working_directory; } /* Check that working directory is good */ if (stat(working_directory, &stat_buf) != 0) { Emsg1(M_ERROR_TERM, 0, _("Working Directory: %s not found. Cannot continue.\n"), working_directory); } if (!S_ISDIR(stat_buf.st_mode)) { Emsg1(M_ERROR_TERM, 0, _("Working Directory: %s is not a directory. 
Cannot continue.\n"), working_directory); } bjcr = setup_jcr("bscan", argv[0], bsr, VolumeName, SD_READ); if (!bjcr) { exit(1); } dev = bjcr->read_dcr->dev; if (showProgress) { char ed1[50]; struct stat sb; fstat(dev->fd(), &sb); currentVolumeSize = sb.st_size; Pmsg1(000, _("First Volume Size = %s\n"), edit_uint64(currentVolumeSize, ed1)); } //TODO 'mult_db_connections' arg could be passed from commandline if needed db = db_init_database(NULL, db_driver, db_name, db_user, db_password, db_host, db_port, NULL, db_ssl_mode, db_ssl_key, db_ssl_cert, db_ssl_ca, db_ssl_capath, db_ssl_cipher, true, false); if (!db || !db_open_database(NULL, db)) { Pmsg2(000, _("Could not open Catalog \"%s\", database \"%s\".\n"), db_driver, db_name); if (db) { Jmsg(NULL, M_FATAL, 0, _("%s"), db_strerror(db)); Pmsg1(000, "%s", db_strerror(db)); db_close_database(NULL, db); } Jmsg(NULL, M_ERROR_TERM, 0, _("Could not open Catalog \"%s\", database \"%s\".\n"), db_driver, db_name); } Dmsg0(200, "Database opened\n"); if (verbose) { Pmsg2(000, _("Using Database: %s, User: %s\n"), db_name, db_user); } do_scan(); if (update_db) { printf("Records added or updated in the catalog:\n%7d Media\n%7d Pool\n%7d Job\n%7d File\n%7d PluginObjects\n", num_media, num_pools, num_jobs, num_files, num_plugin_objects); } else { printf("Records would have been added or updated in the catalog:\n%7d Media\n%7d Pool\n%7d Job\n%7d File\n" "%7d Plugin Objects\n", num_media, num_pools, num_jobs, num_files, num_plugin_objects); } bjcr->read_dcr->dev->free_dedup_rehydration_interface(bjcr->read_dcr); db_close_database(bjcr, db); free_jcr(bjcr); // Notice that one jcr remains 'open' for every SOS_LABEL that don't have a // matching EOS_LABEL (jcr created by create_job_record()) dev->term(NULL); return 0; } /* * We are at the end of reading a Volume. Now, we simulate handling * the end of writing a Volume by wiffling through the attached * jcrs creating jobmedia records. */ static bool bscan_mount_next_read_volume(DCR *dcr) { DEVICE *dev = dcr->dev; DCR *mdcr; Dmsg1(100, "Walk attached jcrs. Volume=%s\n", dev->getVolCatName()); foreach_dlist(mdcr, dev->attached_dcrs) { JCR *mjcr = mdcr->jcr; Dmsg1(100, "========== JobId=%u ========\n", mjcr->JobId); if (mjcr->JobId == 0) { continue; } if (verbose) { Pmsg1(000, _("Create JobMedia for Job %s\n"), mjcr->Job); } mdcr->StartAddr = dcr->StartAddr; mdcr->EndAddr = dcr->EndAddr; mdcr->VolMediaId = dcr->VolMediaId; mjcr->read_dcr->VolLastIndex = dcr->VolLastIndex; if( mjcr->bscan_insert_jobmedia_records ) { if (!create_jobmedia_record(db, mjcr)) { Pmsg2(000, _("Could not create JobMedia record for Volume=%s Job=%s\n"), dev->getVolCatName(), mjcr->Job); } } } update_media_record(db, &mr); /* Now let common read routine get up next tape. Note, * we call mount_next... with bscan's jcr because that is where we * have the Volume list, but we get attached. 
*/ bool stat = mount_next_read_volume(dcr); if (showProgress) { char ed1[50]; struct stat sb; fstat(dev->fd(), &sb); currentVolumeSize = sb.st_size; Pmsg1(000, _("First Volume Size = %s\n"), edit_uint64(currentVolumeSize, ed1)); } return stat; } static void do_scan() { attr = new_attr(bjcr); bmemset(&ar, 0, sizeof(ar)); bmemset(&pr, 0, sizeof(pr)); bmemset(&jr, 0, sizeof(jr)); bmemset(&cr, 0, sizeof(cr)); bmemset(&fsr, 0, sizeof(fsr)); bmemset(&fr, 0, sizeof(fr)); /* Detach bscan's jcr as we are not a real Job on the tape */ read_records(bjcr->read_dcr, record_cb, bscan_mount_next_read_volume); if (update_db) { db_write_batch_file_records(bjcr); /* used by bulk batch file insert */ } free_attr(attr); } /* * Returns: true if OK * false if error */ static bool record_cb(DCR *dcr, DEV_RECORD *rec) { JCR *mjcr; char ec1[30]; DEVICE *dev = dcr->dev; JCR *bjcr = dcr->jcr; DEV_BLOCK *block = dcr->block; POOL_MEM sql_buffer; db_int64_ctx jmr_count; FILEEVENT_DBR fevent; char digest[BASE64_SIZE(CRYPTO_DIGEST_MAX_SIZE)]; if (rec->data_len > 0) { mr.VolBytes += rec->data_len + WRITE_RECHDR_LENGTH; /* Accumulate Volume bytes */ if (showProgress && currentVolumeSize > 0) { int pct = (mr.VolBytes * 100) / currentVolumeSize; if (pct != last_pct) { fprintf(stdout, _("done: %d%%\n"), pct); fflush(stdout); last_pct = pct; } } } if (list_records) { if (is_offset_stream(rec->Stream)) { Pmsg7(000, _("Record: SessId=%u SessTim=%u FileIndex=%d Stream=%d len=%u offset=%lld num=%d\n"), rec->VolSessionId, rec->VolSessionTime, rec->FileIndex, rec->Stream, rec->data_len, rec->FileOffset, rec->RecNum); } else { Pmsg5(000, _("Record: SessId=%u SessTim=%u FileIndex=%d Stream=%d len=%u\n"), rec->VolSessionId, rec->VolSessionTime, rec->FileIndex, rec->Stream, rec->data_len); } } /* * Check for Start or End of Session Record * */ if (rec->FileIndex < 0) { bool save_update_db = update_db; if (verbose > 1) { dump_label_record(dev, rec, 1, false); } switch (rec->FileIndex) { case PRE_LABEL: Pmsg0(000, _("Volume is prelabeled. This tape cannot be scanned.\n")); return false; break; case VOL_LABEL: unser_volume_label(dev, rec); /* Check Pool info */ bstrncpy(pr.Name, dev->VolHdr.PoolName, sizeof(pr.Name)); bstrncpy(pr.PoolType, dev->VolHdr.PoolType, sizeof(pr.PoolType)); num_pools++; if (db_get_pool_numvols(bjcr, db, &pr)) { if (verbose) { Pmsg1(000, _("Pool record for %s found in DB.\n"), pr.Name); } } else { if (!update_db) { Pmsg1(000, _("VOL_LABEL: Pool record not found for Pool: %s\n"), pr.Name); } create_pool_record(db, &pr); } if (strcmp(pr.PoolType, dev->VolHdr.PoolType) != 0) { Pmsg2(000, _("VOL_LABEL: PoolType mismatch. 
DB=%s Vol=%s\n"), pr.PoolType, dev->VolHdr.PoolType); return true; } else if (verbose) { Pmsg1(000, _("Pool type \"%s\" is OK.\n"), pr.PoolType); } /* Check Media Info */ bmemset(&mr, 0, sizeof(mr)); bstrncpy(mr.VolumeName, dev->VolHdr.VolumeName, sizeof(mr.VolumeName)); mr.PoolId = pr.PoolId; mr.VolEncrypted = !!(dev->VolHdr.blkh_options & BLKHOPT_ENCRYPT_VOL); mr.VolType = dev->dev_type; num_media++; if (db_get_media_record(bjcr, db, &mr)) { if (verbose) { Pmsg1(000, _("Media record for %s found in DB.\n"), mr.VolumeName); } /* Clear out some volume statistics that will be updated */ mr.VolJobs = mr.VolFiles = mr.VolBlocks = 0; mr.VolBytes = rec->data_len + 20; /* If the volume is Purged or Recycled and we create JobMedia, the * catalog will contain incorrect information after the recycling */ if (strcmp(mr.VolStatus, "Purged") == 0 || strcmp(mr.VolStatus, "Recycled") == 0) { bstrncpy(mr.VolStatus, "Archive", sizeof(mr.VolStatus)); if (verbose) { Pmsg1(000, _("Media Status record for %s updated to Archive.\n"), mr.VolumeName); } } mr.VolRetention = MAX(mr.VolRetention, 365 * 3600 * 24); /* 1 year at least */ } else { if (!update_db) { Pmsg1(000, _("VOL_LABEL: Media record not found for Volume: %s\n"), mr.VolumeName); } bstrncpy(mr.MediaType, dev->VolHdr.MediaType, sizeof(mr.MediaType)); create_media_record(db, &mr, &dev->VolHdr); } if (strcmp(mr.MediaType, dev->VolHdr.MediaType) != 0) { Pmsg2(000, _("VOL_LABEL: MediaType mismatch. DB=%s Vol=%s\n"), mr.MediaType, dev->VolHdr.MediaType); return true; /* ignore error */ } else if (verbose) { Pmsg1(000, _("Media type \"%s\" is OK.\n"), mr.MediaType); } /* Reset some DCR variables */ foreach_dlist(dcr, dev->attached_dcrs) { dcr->VolFirstIndex = dcr->FileIndex = 0; dcr->StartAddr = dcr->EndAddr = 0; dcr->VolMediaId = 0; } Pmsg1(000, _("VOL_LABEL: OK for Volume: %s\n"), mr.VolumeName); break; case SOS_LABEL: if (bsr && rec->match_stat < 1) { /* Skipping record, because does not match BSR filter and we don't want to * create job records outside of the BSR specifications */ break; } mr.VolJobs++; num_jobs++; if (ignored_msgs > 0) { Pmsg1(000, _("%d \"errors\" ignored before first Start of Session record.\n"), ignored_msgs); ignored_msgs = 0; } unser_session_label(&label, rec); bmemset(&jr, 0, sizeof(jr)); bstrncpy(jr.Job, label.Job, sizeof(jr.Job)); if (db_get_job_record(bjcr, db, &jr)) { /* Job record already exists in DB */ update_db = false; /* don't change db in create_job_record */ if (verbose) { Pmsg1(000, _("SOS_LABEL: Found Job record for Job: %s\n"), jr.Job); } } else { /* Must create a Job record in DB */ if (!update_db) { Pmsg1(000, _("SOS_LABEL: Job record not found for Job: %s\n"), jr.Job); } } /* Create Client record if not already there */ bstrncpy(cr.Name, label.ClientName, sizeof(cr.Name)); create_client_record(db, &cr); jr.ClientId = cr.ClientId; /* process label, if Job record exists don't update db */ mjcr = create_job_record(db, &jr, &label, rec); dcr = mjcr->read_dcr; update_db = save_update_db; jr.PoolId = pr.PoolId; mjcr->start_time = jr.StartTime; mjcr->setJobLevel(jr.JobLevel); mjcr->client_name = get_pool_memory(PM_FNAME); pm_strcpy(mjcr->client_name, label.ClientName); mjcr->fileset_name = get_pool_memory(PM_FNAME); pm_strcpy(mjcr->fileset_name, label.FileSetName); bstrncpy(dcr->pool_type, label.PoolType, sizeof(dcr->pool_type)); bstrncpy(dcr->pool_name, label.PoolName, sizeof(dcr->pool_name)); /* Look for existing Job Media records for this job. If there are any, no new ones need be created. 
This may occur if File Retention has expired before Job Retention, or if the volume has already been bscan'd */ Mmsg(sql_buffer, "SELECT count(*) from JobMedia where JobId=%d", jr.JobId); db_sql_query(db, sql_buffer.c_str(), db_int64_handler, &jmr_count); if( jmr_count.value > 0 ) { //FIELD NAME TO BE DEFINED/CONFIRMED (maybe a struct?) mjcr->bscan_insert_jobmedia_records = false; } else { mjcr->bscan_insert_jobmedia_records = true; } if (rec->VolSessionId != jr.VolSessionId) { Pmsg3(000, _("SOS_LABEL: VolSessId mismatch for JobId=%u. DB=%d Vol=%d\n"), jr.JobId, jr.VolSessionId, rec->VolSessionId); return true; /* ignore error */ } if (rec->VolSessionTime != jr.VolSessionTime) { Pmsg3(000, _("SOS_LABEL: VolSessTime mismatch for JobId=%u. DB=%d Vol=%d\n"), jr.JobId, jr.VolSessionTime, rec->VolSessionTime); return true; /* ignore error */ } if (jr.PoolId != pr.PoolId) { Pmsg3(000, _("SOS_LABEL: PoolId mismatch for JobId=%u. DB=%d Vol=%d\n"), jr.JobId, jr.PoolId, pr.PoolId); return true; /* ignore error */ } break; case EOS_LABEL: if (bsr && rec->match_stat < 1) { /* Skipping record, because does not match BSR filter and we don't want to * create job records outside of the BSR specifications */ break; } unser_session_label(&elabel, rec); /* Create FileSet record */ bstrncpy(fsr.FileSet, label.FileSetName, sizeof(fsr.FileSet)); bstrncpy(fsr.MD5, label.FileSetMD5, sizeof(fsr.MD5)); create_fileset_record(db, &fsr); jr.FileSetId = fsr.FileSetId; mjcr = get_jcr_by_session(rec->VolSessionId, rec->VolSessionTime); if (!mjcr) { Pmsg2(000, _("Could not find SessId=%d SessTime=%d for EOS record.\n"), rec->VolSessionId, rec->VolSessionTime); break; } /* Do the final update to the Job record */ update_job_record(db, &jr, &elabel, rec); mjcr->end_time = jr.EndTime; mjcr->JobStatus = JS_Terminated; /* Create JobMedia record */ mjcr->read_dcr->VolLastIndex = dcr->VolLastIndex; if( mjcr->bscan_insert_jobmedia_records ) { create_jobmedia_record(db, mjcr); } free_dcr(mjcr->read_dcr); mjcr->dec_use_count(); /* Decrease reference counter increased by get_jcr_by_session call */ free_jcr(mjcr); break; case EOM_LABEL: break; case EOT_LABEL: /* end of all tapes */ /* * Wiffle through all jobs still open and close * them. */ if (update_db) { DCR *mdcr; /* Temporary list needed for freeing dcr's outsisde of foreach_dlist loop */ alist dcrs_to_delete(10, false); foreach_dlist(mdcr, dev->attached_dcrs) { JCR *mjcr = mdcr->jcr; if (!mjcr || mjcr->JobId == 0) { continue; } jr.JobId = mjcr->JobId; /* Mark Job as Error Terimined */ jr.JobStatus = JS_ErrorTerminated; jr.JobFiles = mjcr->JobFiles; jr.JobBytes = mjcr->JobBytes; jr.VolSessionId = mjcr->VolSessionId; jr.VolSessionTime = mjcr->VolSessionTime; jr.JobTDate = (utime_t)mjcr->start_time; jr.ClientId = mjcr->ClientId; if (!db_update_job_end_record(bjcr, db, &jr)) { Pmsg1(0, _("Could not update job record. ERR=%s\n"), db_strerror(db)); } mjcr->read_dcr = NULL; dcrs_to_delete.append(mdcr); free_jcr(mjcr); } /* Now free dcr's if needed */ foreach_alist(mdcr, &dcrs_to_delete) { free_dcr(mdcr); } } mr.VolFiles = (uint32_t)(rec->Addr >> 32); mr.VolBlocks = (uint32_t)rec->Addr; mr.VolBytes += mr.VolBlocks * WRITE_BLKHDR_LENGTH; /* approx. */ mr.VolMounts++; update_media_record(db, &mr); Pmsg3(0, _("End of all Volumes. 
VolFiles=%u VolBlocks=%u VolBytes=%s\n"), mr.VolFiles, mr.VolBlocks, edit_uint64_with_commas(mr.VolBytes, ec1)); break; case STREAM_PLUGIN_NAME: break; default: break; } /* end switch */ return true; } mjcr = get_jcr_by_session(rec->VolSessionId, rec->VolSessionTime); if (!mjcr) { if (mr.VolJobs > 0) { Pmsg2(000, _("Could not find Job for SessId=%d SessTime=%d record.\n"), rec->VolSessionId, rec->VolSessionTime); } else { ignored_msgs++; } return true; } dcr = mjcr->read_dcr; if (dcr->VolFirstIndex == 0) { dcr->VolFirstIndex = block->FirstIndex; } /* File Attributes stream */ switch (rec->maskedStream) { case STREAM_UNIX_ATTRIBUTES: case STREAM_UNIX_ATTRIBUTES_EX: case STREAM_UNIX_ATTRIBUTE_UPDATE: if (!unpack_attributes_record(bjcr, rec->Stream, rec->data, rec->data_len, attr)) { Emsg0(M_ERROR_TERM, 0, _("Cannot continue.\n")); } if (verbose > 1) { decode_stat(attr->attr, &attr->statp, sizeof(attr->statp), &attr->LinkFI); build_attr_output_fnames(bjcr, attr); print_ls_output(bjcr, attr); } fr.JobId = mjcr->JobId; fr.FileId = 0; num_files++; if (verbose && (num_files & 0x7FFF) == 0) { char ed1[30], ed2[30], ed3[30]; Pmsg3(000, _("%s file records. At addr=%s bytes=%s\n"), edit_uint64_with_commas(num_files, ed1), edit_uint64_with_commas(rec->Addr, ed2), edit_uint64_with_commas(mr.VolBytes, ed3)); } mjcr = get_jcr_by_session(rec->VolSessionId, rec->VolSessionTime); if (!mjcr) { Pmsg2(000, _("Could not find SessId=%d SessTime=%d for File record.\n"), rec->VolSessionId, rec->VolSessionTime); break; } if (mjcr->bscan_files_purged || mjcr->bscan_created) { // We need to insert file records etiher because job that has files purged or bscan created it if (!create_file_attributes_record(mjcr, attr, rec)) { Jmsg2(mjcr, M_ERROR, 0, _("Failed to insert record for file: %s. err: %s\n"), attr->fname, db_strerror(db)); } } mjcr->dec_use_count(); /* Decrease reference counter increased by get_jcr_by_session call */ break; case STREAM_RESTORE_OBJECT: { char *buf = rec->data; ROBJECT_DBR ro; bmemset(&ro, 0, sizeof(ro)); ro.Stream = rec->maskedStream; parse_restore_object_string(&buf, &ro); // Need to get new jobId if possible mjcr = get_jcr_by_session(rec->VolSessionId, rec->VolSessionTime); if (!mjcr) { Pmsg2(000, _("Could not find SessId=%d SessTime=%d for RestoreObject record.\n"), rec->VolSessionId, rec->VolSessionTime); break; } if (mjcr->bscan_created) { // bscan created this job record so we need to recreate objects as well if (update_db) { /* Send it */ Pmsg1(0, _("RESTORE_OBJECT: Inserting Restore Object \"%s\" into the catalog\n"), ro.object_name); // Create record with updated jobid ro.JobId = mjcr->JobId; if (!db_create_restore_object_record(mjcr, db, &ro)) { Jmsg1(mjcr, M_FATAL, 0, _("Restore object create error. 
%s"), db_strerror(db)); } } } else { Pmsg1(0, _("RESTORE_OBJECT: Found Restore Object \"%s\" on the volume\n"), ro.object_name); } mjcr->dec_use_count(); /* Decrease reference counter increased by get_jcr_by_session call */ break; } case STREAM_PLUGIN_META_BLOB: case STREAM_PLUGIN_META_CATALOG: { meta_pkt mp(rec->data); //TODO We probably don't want to log every single metadata record Pmsg4(0, _("Plugin metadata of type: %d len: %ld buf: %.*s\n"), mp.type, mp.buf_len, mp.buf_len, mp.buf); break; } case STREAM_PLUGIN_OBJECT: { OBJECT_DBR obj_r; char *buf = rec->data; num_plugin_objects++; if (!obj_r.parse_plugin_object_string(&buf)) { Pmsg0(000, _("Failed to parse plugin object!\n")); break; } // Need to get new jobId if possible mjcr = get_jcr_by_session(rec->VolSessionId, rec->VolSessionTime); if (!mjcr) { Pmsg2(000, _("Could not find SessId=%d SessTime=%d for PluginObject record.\n"), rec->VolSessionId, rec->VolSessionTime); break; } if (mjcr->bscan_created) { // bscan created this job record so we need to recreate objects as well if (update_db) { /* Send it */ Pmsg1(0, _("PLUGIN_OBJECT: Inserting Plugin Object (ObjectName: %s) into the catalog\n"), obj_r.ObjectName); obj_r.JobId = mjcr->JobId; // Create record with updated jobid if (!db_create_object_record(mjcr, db, &obj_r)) { Jmsg1(mjcr, M_FATAL, 0, _("Plugin object create error. %s"), db_strerror(db)); } } } else { Pmsg1(0, _("PLUGIN_OBJECT: Found Plugin Object (ObjectName: %s) on the volume\n"), obj_r.ObjectName); } mjcr->dec_use_count(); /* Decrease reference counter increased by get_jcr_by_session call */ break; } /* Data stream */ case STREAM_WIN32_DATA: case STREAM_FILE_DATA: case STREAM_SPARSE_DATA: case STREAM_MACOS_FORK_DATA: case STREAM_ENCRYPTED_FILE_DATA: case STREAM_ENCRYPTED_WIN32_DATA: case STREAM_ENCRYPTED_MACOS_FORK_DATA: /* * For encrypted stream, this is an approximation. * The data must be decrypted to know the correct length. 
*/ mjcr->JobBytes += rec->data_len; if (rec->maskedStream == STREAM_SPARSE_DATA) { mjcr->JobBytes -= sizeof(uint64_t); } break; case STREAM_GZIP_DATA: case STREAM_COMPRESSED_DATA: case STREAM_ENCRYPTED_FILE_GZIP_DATA: case STREAM_ENCRYPTED_FILE_COMPRESSED_DATA: case STREAM_ENCRYPTED_WIN32_GZIP_DATA: case STREAM_ENCRYPTED_WIN32_COMPRESSED_DATA: /* No correct, we should (decrypt and) expand it done using JCR */ mjcr->JobBytes += rec->data_len; break; case STREAM_SPARSE_GZIP_DATA: case STREAM_SPARSE_COMPRESSED_DATA: mjcr->JobBytes += rec->data_len - sizeof(uint64_t); /* No correct, we should expand it */ break; /* Win32 GZIP stream */ case STREAM_WIN32_GZIP_DATA: case STREAM_WIN32_COMPRESSED_DATA: mjcr->JobBytes += rec->data_len; break; case STREAM_MD5_DIGEST: bin_to_base64(digest, sizeof(digest), (char *)rec->data, CRYPTO_DIGEST_MD5_SIZE, true); if (verbose > 1) { Pmsg1(000, _("Got MD5 record: %s\n"), digest); } update_digest_record(db, digest, rec, CRYPTO_DIGEST_MD5); break; case STREAM_SHA1_DIGEST: bin_to_base64(digest, sizeof(digest), (char *)rec->data, CRYPTO_DIGEST_SHA1_SIZE, true); if (verbose > 1) { Pmsg1(000, _("Got SHA1 record: %s\n"), digest); } update_digest_record(db, digest, rec, CRYPTO_DIGEST_SHA1); break; case STREAM_SHA256_DIGEST: bin_to_base64(digest, sizeof(digest), (char *)rec->data, CRYPTO_DIGEST_SHA256_SIZE, true); if (verbose > 1) { Pmsg1(000, _("Got SHA256 record: %s\n"), digest); } update_digest_record(db, digest, rec, CRYPTO_DIGEST_SHA256); break; case STREAM_SHA512_DIGEST: bin_to_base64(digest, sizeof(digest), (char *)rec->data, CRYPTO_DIGEST_SHA512_SIZE, true); if (verbose > 1) { Pmsg1(000, _("Got SHA512 record: %s\n"), digest); } update_digest_record(db, digest, rec, CRYPTO_DIGEST_SHA512); break; case STREAM_XXHASH64_DIGEST: bin_to_base64(digest, sizeof(digest), (char *)rec->data, CRYPTO_DIGEST_XXHASH64_SIZE, true); if (verbose > 1) { Pmsg1(000, _("Got XXHASH64 record: %s\n"), digest); } update_digest_record(db, digest, rec, CRYPTO_DIGEST_XXHASH64); break; case STREAM_XXH3_64_DIGEST: bin_to_base64(digest, sizeof(digest), (char *)rec->data, CRYPTO_DIGEST_XXH3_64_SIZE, true); if (verbose > 1) { Pmsg1(000, _("Got XXH3_64 record: %s\n"), digest); } update_digest_record(db, digest, rec, CRYPTO_DIGEST_XXH3_64); break; case STREAM_XXH3_128_DIGEST: bin_to_base64(digest, sizeof(digest), (char *)rec->data, CRYPTO_DIGEST_XXH3_128_SIZE, true); if (verbose > 1) { Pmsg1(000, _("Got XXH3_128 record: %s\n"), digest); } update_digest_record(db, digest, rec, CRYPTO_DIGEST_XXH3_128); break; case STREAM_ENCRYPTED_SESSION_DATA: // TODO landonf: Investigate crypto support in bscan if (verbose > 1) { Pmsg0(000, _("Got signed digest record\n")); } break; case STREAM_SIGNED_DIGEST: // TODO landonf: Investigate crypto support in bscan if (verbose > 1) { Pmsg0(000, _("Got signed digest record\n")); } break; case STREAM_PROGRAM_NAMES: if (verbose) { Pmsg1(000, _("Got Prog Names Stream: %s\n"), rec->data); } break; case STREAM_PROGRAM_DATA: if (verbose > 1) { Pmsg0(000, _("Got Prog Data Stream record.\n")); } break; case STREAM_FILEEVENT: if (verbose > 1) { Pmsg0(000, _("Got FileEvent Stream record.\n")); } if (!fevent.unpack(rec->Stream, rec->data, rec->data_len)) { Emsg0(M_ERROR, 0, _("Unable to decode FileEvent.\n")); break; } mjcr = get_jcr_by_session(rec->VolSessionId, rec->VolSessionTime); if (!mjcr) { Pmsg2(000, _("Could not find SessId=%d SessTime=%d for File record.\n"), rec->VolSessionId, rec->VolSessionTime); break; } fevent.SourceJobId = mjcr->JobId; fevent.JobId = 
edit_int64(mjcr->JobId, ec1); fevent.FileIndex = mjcr->JobFiles; if (!db_create_fileevent_record(mjcr, mjcr->db, &fevent)) { Jmsg2(mjcr, M_ERROR, 0, _("Failed to insert FileEvent record for file: %d. err: %s\n"), fevent.FileIndex, db_strerror(db)); } mjcr->dec_use_count(); /* Decrease reference counter increased by get_jcr_by_session call */ break; case STREAM_UNIX_ACCESS_ACL: /* Deprecated Standard ACL attributes on UNIX */ case STREAM_UNIX_DEFAULT_ACL: /* Deprecated Default ACL attributes on UNIX */ case STREAM_HFSPLUS_ATTRIBUTES: case STREAM_XACL_AIX_TEXT: case STREAM_XACL_DARWIN_ACCESS: case STREAM_XACL_FREEBSD_DEFAULT: case STREAM_XACL_FREEBSD_ACCESS: case STREAM_XACL_HPUX_ACL_ENTRY: case STREAM_XACL_IRIX_DEFAULT: case STREAM_XACL_IRIX_ACCESS: case STREAM_XACL_LINUX_DEFAULT: case STREAM_XACL_LINUX_ACCESS: case STREAM_XACL_TRU64_DEFAULT: case STREAM_XACL_TRU64_DEFAULT_DIR: case STREAM_XACL_TRU64_ACCESS: case STREAM_XACL_SOLARIS_POSIX: case STREAM_XACL_SOLARIS_NFS4: case STREAM_XACL_AFS_TEXT: case STREAM_XACL_AIX_AIXC: case STREAM_XACL_AIX_NFS4: case STREAM_XACL_FREEBSD_NFS4: case STREAM_XACL_HURD_DEFAULT: case STREAM_XACL_HURD_ACCESS: /* Ignore Unix ACL attributes */ break; case STREAM_XACL_HURD_XATTR: case STREAM_XACL_IRIX_XATTR: case STREAM_XACL_TRU64_XATTR: case STREAM_XACL_AIX_XATTR: case STREAM_XACL_OPENBSD_XATTR: case STREAM_XACL_SOLARIS_SYS_XATTR: case STREAM_XACL_SOLARIS_XATTR: case STREAM_XACL_DARWIN_XATTR: case STREAM_XACL_FREEBSD_XATTR: case STREAM_XACL_LINUX_XATTR: case STREAM_XACL_NETBSD_XATTR: /* Ignore Unix Extended attributes */ break; case STREAM_PLUGIN_NAME: break; default: Pmsg2(0, _("Unexpected stream type!!! stream=\"%s\" len=%i\n"), stream_to_ascii(rec->Stream), rec->data_len); break; } free_jcr(mjcr); /* got from get_jcr_by_session() above */ return true; } /* * Free the Job Control Record if no one is still using it. * Called from main free_jcr() routine in src/lib/jcr.c so * that we can do our Director specific cleanup of the jcr. */ static void bscan_free_jcr(JCR *jcr) { Dmsg0(200, "Start bscan free_jcr\n"); free_bsock(jcr->file_bsock); free_bsock(jcr->store_bsock); if (jcr->RestoreBootstrap) { bfree_and_null(jcr->RestoreBootstrap); } if (jcr->read_dcr) { free_dcr(jcr->read_dcr); jcr->read_dcr = NULL; } /* Flush any left file records from batch session */ if (!db_write_batch_file_records(jcr)) { Emsg0(M_ERROR, 0, _("Failed to flush file records!\n")); } /* Close thej jcr's db connections */ if (jcr->db_batch) { if (!db_write_batch_file_records(bjcr)) { /* used by bulk batch file insert */ Jmsg0(jcr, M_ERROR, 0, _("Failed to flush file records!\n")); } db_close_database(jcr, jcr->db_batch); jcr->db_batch = NULL; } if (jcr->db) { db_close_database(jcr, jcr->db); jcr->db = NULL; } Dmsg0(200, "End bscan free_jcr\n"); } /* * We got a File Attributes record on the tape. Now, lookup the Job * record, and then create the attributes record. 
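 * Note that the attribute record is only written to the catalog when -s (update_db) is in effect; otherwise we just count the file and update the DCR FileIndex bookkeeping.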
*/ static int create_file_attributes_record(JCR *mjcr, ATTR *attrs, DEV_RECORD *rec) { DCR *dcr = mjcr->read_dcr; ar.fname = attrs->fname; ar.link = attrs->lname; ar.ClientId = mjcr->ClientId; ar.JobId = mjcr->JobId; ar.Stream = rec->Stream; ar.DeltaSeq = attrs->delta_seq; if (attrs->type == FT_DELETED) { ar.FileIndex = 0; } else { ar.FileIndex = rec->FileIndex; } ar.attr = attrs->attr; if (dcr->VolFirstIndex == 0) { dcr->VolFirstIndex = rec->FileIndex; } dcr->FileIndex = rec->FileIndex; mjcr->JobFiles++; if (!update_db) { return 1; } if (!db_create_attributes_record(mjcr, mjcr->db, &ar)) { Pmsg1(0, _("Could not create File Attributes record. ERR=%s\n"), db_strerror(mjcr->db)); return 0; } mjcr->FileId = ar.FileId; if (verbose > 1) { Pmsg1(000, _("Created File record: %s\n"), attrs->fname); } return 1; } /* * For each Volume we see, we create a Medium record */ static int create_media_record(BDB *db, MEDIA_DBR *mr, VOLUME_LABEL *vl) { struct date_time dt; struct tm tm; /* We mark Vols as Archive to keep them from being re-written */ bstrncpy(mr->VolStatus, "Archive", sizeof(mr->VolStatus)); mr->VolRetention = 365 * 3600 * 24; /* 1 year */ mr->Enabled = 1; if (vl->VerNum >= 11) { mr->set_first_written = true; /* Save FirstWritten during update_media */ mr->FirstWritten = btime_to_utime(vl->write_btime); mr->LabelDate = btime_to_utime(vl->label_btime); /* Set label date in the catalog */ mr->set_label_date = true; } else { /* DEPRECATED DO NOT USE */ dt.julian_day_number = vl->write_date; dt.julian_day_fraction = vl->write_time; tm_decode(&dt, &tm); mr->FirstWritten = mktime(&tm); dt.julian_day_number = vl->label_date; dt.julian_day_fraction = vl->label_time; tm_decode(&dt, &tm); mr->LabelDate = mktime(&tm); } lasttime = mr->LabelDate; if (mr->VolJobs == 0) { mr->VolJobs = 1; } if (mr->VolMounts == 0) { mr->VolMounts = 1; } if (!update_db) { return 1; } if (!db_create_media_record(bjcr, db, mr)) { Pmsg1(0, _("Could not create media record. ERR=%s\n"), db_strerror(db)); return 0; } if (!db_update_media_record(bjcr, db, mr)) { Pmsg1(0, _("Could not update media record. ERR=%s\n"), db_strerror(db)); return 0; } if (verbose) { Pmsg1(000, _("Created Media record for Volume: %s\n"), mr->VolumeName); } return 1; } /* * Called at end of media to update it */ static bool update_media_record(BDB *db, MEDIA_DBR *mr) { if (!update_db && !update_vol_info) { return true; } /* Set last written to now, so that user has a month * (because of retention period set to 1month), to inspect recovered volume */ mr->LastWritten = (utime_t)time(NULL); if (!db_update_media_record(bjcr, db, mr)) { Pmsg1(0, _("Could not update media record. ERR=%s\n"), db_strerror(db)); return false;; } if (verbose) { Pmsg1(000, _("Updated Media record at end of Volume: %s\n"), mr->VolumeName); } return true; } static int create_pool_record(BDB *db, POOL_DBR *pr) { pr->NumVols++; pr->UseCatalog = 1; pr->VolRetention = 31 * 3600 * 24; /* 1 month */ if (!update_db) { return 1; } if (!db_create_pool_record(bjcr, db, pr)) { Pmsg1(0, _("Could not create pool record. ERR=%s\n"), db_strerror(db)); return 0; } if (verbose) { Pmsg1(000, _("Created Pool record for Pool: %s\n"), pr->Name); } return 1; } /* * Called from SOS to create a client for the current Job */ static int create_client_record(BDB *db, CLIENT_DBR *cr) { /* * Note, update_db can temporarily be set false while * updating the database, so we must ensure that ClientId is non-zero. 
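 * When update_db is off we therefore fall back to db_get_client_record() so that an existing ClientId can still be filled in.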
*/ if (!update_db) { cr->ClientId = 0; if (!db_get_client_record(bjcr, db, cr)) { Pmsg1(0, _("Could not get Client record. ERR=%s\n"), db_strerror(db)); return 0; } return 1; } if (!db_create_client_record(bjcr, db, cr)) { Pmsg1(0, _("Could not create Client record. ERR=%s\n"), db_strerror(db)); return 0; } if (verbose) { Pmsg1(000, _("Created Client record for Client: %s\n"), cr->Name); } return 1; } static int create_fileset_record(BDB *db, FILESET_DBR *fsr) { if (!update_db) { return 1; } fsr->FileSetId = 0; if (fsr->MD5[0] == 0) { fsr->MD5[0] = ' '; /* Equivalent to nothing */ fsr->MD5[1] = 0; } if (db_get_fileset_record(bjcr, db, fsr)) { if (verbose) { Pmsg1(000, _("Fileset \"%s\" already exists.\n"), fsr->FileSet); } } else { if (!db_create_fileset_record(bjcr, db, fsr)) { Pmsg2(0, _("Could not create FileSet record \"%s\". ERR=%s\n"), fsr->FileSet, db_strerror(db)); return 0; } if (verbose) { Pmsg1(000, _("Created FileSet record \"%s\"\n"), fsr->FileSet); } } return 1; } /* * Simulate the two calls on the database to create * the Job record and to update it when the Job actually * begins running. */ static JCR *create_job_record(BDB *db, JOB_DBR *jr, SESSION_LABEL *label, DEV_RECORD *rec) { JCR *mjcr; struct date_time dt; struct tm tm; jr->JobId = label->JobId; jr->JobType = label->JobType; jr->JobLevel = label->JobLevel; jr->JobStatus = JS_Created; bstrncpy(jr->Name, label->JobName, sizeof(jr->Name)); bstrncpy(jr->Job, label->Job, sizeof(jr->Job)); if (label->VerNum >= 11) { jr->SchedTime = btime_to_unix(label->write_btime); } else { dt.julian_day_number = label->write_date; dt.julian_day_fraction = label->write_time; tm_decode(&dt, &tm); jr->SchedTime = mktime(&tm); } jr->StartTime = jr->SchedTime; jr->JobTDate = (utime_t)jr->SchedTime; jr->VolSessionId = rec->VolSessionId; jr->VolSessionTime = rec->VolSessionTime; /* Now create a JCR as if starting the Job */ mjcr = create_jcr(jr, rec, label->JobId); /* Create another db connection per-job so that each job * will be able to perform batch queries in its own context */ mjcr->db = db_init_database(NULL, db_driver, db_name, db_user, db_password, db_host, db_port, NULL, db_ssl_mode, db_ssl_key, db_ssl_cert, db_ssl_ca, db_ssl_capath, db_ssl_cipher, true, false); if (!mjcr->db || !db_open_database(NULL, mjcr->db)) { //TODO add some err handling, bscan will crash at calling db stuff against this jcr otherwise Pmsg2(000, _("Could not open Catalog \"%s\", database \"%s\".\n"), db_driver, db_name); if (mjcr->db) { Jmsg(NULL, M_FATAL, 0, _("%s"), db_strerror(mjcr->db)); Pmsg1(000, "%s", db_strerror(mjcr->db)); db_close_database(NULL, mjcr->db); } Jmsg(NULL, M_ERROR_TERM, 0, _("Could not open Catalog \"%s\", database \"%s\".\n"), db_driver, db_name); } // Bscan needs to be aware if files were previously purged if (jr->PurgedFiles) { mjcr->bscan_files_purged = true; } if (!update_db) { return mjcr; } /* This creates the bare essentials */ if (!db_create_job_record(bjcr, db, jr)) { Pmsg1(0, _("Could not create JobId record. ERR=%s\n"), db_strerror(db)); return mjcr; } /* This adds the client, StartTime, JobTDate, ... */ if (!db_update_job_start_record(bjcr, db, jr)) { Pmsg1(0, _("Could not update job start record. ERR=%s\n"), db_strerror(db)); return mjcr; } Pmsg2(000, _("Created new JobId=%u record for original JobId=%u\n"), jr->JobId, label->JobId); mjcr->JobId = jr->JobId; /* set new JobId */ mjcr->bscan_created = true; return mjcr; } /* * Simulate the database call that updates the Job * at Job termination time. 
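 * The JobFiles, JobBytes and JobStatus used here come from the EOS (End of Session) label written when the original Job terminated.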
*/ static int update_job_record(BDB *db, JOB_DBR *jr, SESSION_LABEL *elabel, DEV_RECORD *rec) { struct date_time dt; struct tm tm; JCR *mjcr; mjcr = get_jcr_by_session(rec->VolSessionId, rec->VolSessionTime); if (!mjcr) { Pmsg2(000, _("Could not find SessId=%d SessTime=%d for EOS record.\n"), rec->VolSessionId, rec->VolSessionTime); return 0; } if (elabel->VerNum >= 11) { jr->EndTime = btime_to_unix(elabel->write_btime); } else { dt.julian_day_number = elabel->write_date; dt.julian_day_fraction = elabel->write_time; tm_decode(&dt, &tm); jr->EndTime = mktime(&tm); } lasttime = jr->EndTime; mjcr->end_time = jr->EndTime; jr->JobId = mjcr->JobId; /* The JobStatus can't be 0 */ if (elabel->JobStatus == 0) { Pmsg2(000, _("Could not find JobStatus for SessId=%d SessTime=%d in EOS record.\n"), rec->VolSessionId, rec->VolSessionTime); } mjcr->JobStatus = jr->JobStatus = elabel->JobStatus ? elabel->JobStatus : JS_ErrorTerminated; jr->JobFiles = elabel->JobFiles; if (jr->JobFiles > 0) { /* If we found files, force PurgedFiles */ jr->PurgedFiles = 0; } jr->JobBytes = elabel->JobBytes; jr->VolSessionId = rec->VolSessionId; jr->VolSessionTime = rec->VolSessionTime; jr->JobTDate = (utime_t)mjcr->start_time; jr->ClientId = mjcr->ClientId; if (!update_db) { free_jcr(mjcr); return 1; } if (!db_update_job_end_record(bjcr, db, jr)) { Pmsg2(0, _("Could not update JobId=%u record. ERR=%s\n"), jr->JobId, db_strerror(db)); free_jcr(mjcr); return 0; } if (verbose) { Pmsg3(000, _("Updated Job termination record for JobId=%u Level=%s TermStat=%c\n"), jr->JobId, job_level_to_str(mjcr->getJobLevel()), jr->JobStatus); } if (verbose > 1) { const char *term_msg; static char term_code[70]; char sdt[50], edt[50]; char ec1[30], ec2[30], ec3[30]; switch (mjcr->JobStatus) { case JS_Terminated: term_msg = _("Backup OK"); break; case JS_Warnings: term_msg = _("Backup OK -- with warnings"); break; case JS_FatalError: case JS_ErrorTerminated: term_msg = _("*** Backup Error ***"); break; case JS_Canceled: term_msg = _("Backup Canceled"); break; default: term_msg = term_code; sprintf(term_code, _("Job Termination code: %d"), mjcr->JobStatus); break; } bstrftime(sdt, sizeof(sdt), mjcr->start_time); bstrftime(edt, sizeof(edt), mjcr->end_time); Pmsg14(000, _("%s\n" "JobId: %d\n" "Job: %s\n" "FileSet: %s\n" "Backup Level: %s\n" "Client: %s\n" "Start time: %s\n" "End time: %s\n" "Files Written: %s\n" "Bytes Written: %s\n" "Volume Session Id: %d\n" "Volume Session Time: %d\n" "Last Volume Bytes: %s\n" "Termination: %s\n\n"), edt, mjcr->JobId, mjcr->Job, mjcr->fileset_name, job_level_to_str(mjcr->getJobLevel()), mjcr->client_name, sdt, edt, edit_uint64_with_commas(mjcr->JobFiles, ec1), edit_uint64_with_commas(mjcr->JobBytes, ec2), mjcr->VolSessionId, mjcr->VolSessionTime, edit_uint64_with_commas(mr.VolBytes, ec3), term_msg); } free_jcr(mjcr); return 1; } static int create_jobmedia_record(BDB *db, JCR *mjcr) { JOBMEDIA_DBR jmr; DCR *dcr = mjcr->read_dcr; dcr->EndAddr = dev->EndAddr; dcr->VolMediaId = dev->VolCatInfo.VolMediaId; bmemset(&jmr, 0, sizeof(jmr)); jmr.JobId = mjcr->JobId; jmr.MediaId = mr.MediaId; jmr.FirstIndex = dcr->VolFirstIndex; jmr.LastIndex = dcr->VolLastIndex; jmr.StartBlock = (uint32_t)dcr->StartAddr; jmr.StartFile = (uint32_t)(dcr->StartAddr >> 32); jmr.EndBlock = (uint32_t)dcr->EndAddr; jmr.EndFile = (uint32_t)(dcr->EndAddr >> 32); if (!update_db) { return 1; } if (!db_create_jobmedia_record(bjcr, db, &jmr)) { Pmsg1(0, _("Could not create JobMedia record. 
ERR=%s\n"), db_strerror(db)); return 0; } if (verbose) { Pmsg2(000, _("Created JobMedia record JobId %d, MediaId %d\n"), jmr.JobId, jmr.MediaId); } return 1; } /* * Simulate the database call that updates the MD5/SHA1 record */ static int update_digest_record(BDB *db, char *digest, DEV_RECORD *rec, int type) { JCR *mjcr; mjcr = get_jcr_by_session(rec->VolSessionId, rec->VolSessionTime); if (!mjcr) { if (mr.VolJobs > 0) { Pmsg2(000, _("Could not find SessId=%d SessTime=%d for MD5/SHA1 record.\n"), rec->VolSessionId, rec->VolSessionTime); } else { ignored_msgs++; } return 0; } if (!update_db || mjcr->FileId == 0) { free_jcr(mjcr); return 1; } if (!db_add_digest_to_file_record(bjcr, db, mjcr->FileId, digest, type)) { Pmsg1(0, _("Could not add MD5/SHA1 to File record. ERR=%s\n"), db_strerror(db)); free_jcr(mjcr); return 0; } if (verbose > 1) { Pmsg0(000, _("Updated MD5/SHA1 record\n")); } free_jcr(mjcr); return 1; } /* * Create a JCR as if we are really starting the job */ static JCR *create_jcr(JOB_DBR *jr, DEV_RECORD *rec, uint32_t JobId) { JCR *jobjcr; /* * Transfer as much as possible to the Job JCR. Most important is * the JobId and the ClientId. */ jobjcr = new_jcr(sizeof(JCR), bscan_free_jcr); jobjcr->setJobType(jr->JobType); jobjcr->setJobLevel(jr->JobLevel); jobjcr->JobStatus = jr->JobStatus; bstrncpy(jobjcr->Job, jr->Job, sizeof(jobjcr->Job)); jobjcr->JobId = JobId; /* this is JobId on tape */ jobjcr->sched_time = jr->SchedTime; jobjcr->start_time = jr->StartTime; jobjcr->VolSessionId = rec->VolSessionId; jobjcr->VolSessionTime = rec->VolSessionTime; jobjcr->ClientId = jr->ClientId; jobjcr->dcr = jobjcr->read_dcr = new_dcr(jobjcr, NULL, dev, SD_READ); return jobjcr; } bacula-15.0.3/src/stored/cloud_test.c0000644000175000017500000001620614771010173017267 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #include "bacula.h" #include "../stored/stored.h" extern bool parse_sd_config(CONFIG *config, const char *configfile, int exit_code); static CONFIG *config; #define CONFIG_FILE "bacula-sd.conf" char *configfile = NULL; bool detect_errors = false; int errors = 0; static void usage() { fprintf(stderr, _( PROG_COPYRIGHT "\n%sVersion: %s (%s)\n\n" "Usage: cloud_test [options] \n" " -b specify a bootstrap file\n" " -c specify a Storage configuration file\n" " -d set debug level to \n" " -dt print timestamp in debug output\n" " -v be verbose\n" " -V specify Volume names (separated by |)\n" " -? 
print this message\n\n"), 2000, "", VERSION, BDATE); exit(1); } static void get_session_record(JCR *jcr, DEVICE *dev, DEV_RECORD *rec, SESSION_LABEL *sessrec) { const char *rtype; memset(sessrec, 0, sizeof(SESSION_LABEL)); jcr->JobId = 0; switch (rec->FileIndex) { case PRE_LABEL: rtype = _("Fresh Volume Label"); break; case VOL_LABEL: rtype = _("Volume Label"); unser_volume_label(dev, rec); break; case SOS_LABEL: rtype = _("Begin Job Session"); unser_session_label(sessrec, rec); jcr->JobId = sessrec->JobId; break; case EOS_LABEL: rtype = _("End Job Session"); break; case 0: case EOM_LABEL: rtype = _("End of Medium"); break; case EOT_LABEL: rtype = _("End of Physical Medium"); break; case SOB_LABEL: rtype = _("Start of object"); break; case EOB_LABEL: rtype = _("End of object"); break; default: rtype = _("Unknown"); Dmsg1(10, "FI rtype=%d unknown\n", rec->FileIndex); break; } Dmsg5(10, "%s Record: VolSessionId=%d VolSessionTime=%d JobId=%d DataLen=%d\n", rtype, rec->VolSessionId, rec->VolSessionTime, rec->Stream, rec->data_len); if (verbose) { Pmsg5(-1, _("%s Record: VolSessionId=%d VolSessionTime=%d JobId=%d DataLen=%d\n"), rtype, rec->VolSessionId, rec->VolSessionTime, rec->Stream, rec->data_len); } } /* List just block information */ static void do_blocks(JCR *jcr, DCR *dcr) { DEV_BLOCK *block = dcr->block; DEVICE *dev = dcr->dev; char buf1[100], buf2[100]; DEV_RECORD *rec = new_record(); for ( ;; ) { if (!dcr->read_block_from_device(NO_BLOCK_NUMBER_CHECK)) { Dmsg1(100, "!read_block(): ERR=%s\n", dev->print_errmsg()); if (dev->at_eot()) { if (!mount_next_read_volume(dcr)) { Jmsg(jcr, M_INFO, 0, _("Got EOM at file %u on device %s, Volume \"%s\"\n"), dev->file, dev->print_name(), dcr->VolumeName); break; } /* Read and discard Volume label */ DEV_RECORD *record; SESSION_LABEL sessrec; record = new_record(); dcr->read_block_from_device(NO_BLOCK_NUMBER_CHECK); read_record_from_block(dcr, record); get_session_record(jcr, dev, record, &sessrec); free_record(record); Jmsg(jcr, M_INFO, 0, _("Mounted Volume \"%s\".\n"), dcr->VolumeName); } else if (dev->at_eof()) { Jmsg(jcr, M_INFO, 0, _("End of file %u on device %s, Volume \"%s\"\n"), dev->part, dev->print_name(), dcr->VolumeName); Dmsg0(20, "read_record got eof. 
try again\n"); continue; } else if (dev->is_short_block()) { Jmsg(jcr, M_INFO, 0, "%s", dev->print_errmsg()); continue; } else { /* I/O error */ errors++; display_tape_error_status(jcr, dev); break; } } read_record_from_block(dcr, rec); printf("Block: %d size=%d\n", block->BlockNumber, block->block_len); } free_record(rec); return; } int main (int argc, char *argv[]) { int ch; DEVICE *dev; cloud_dev *cdev; cloud_driver *driver; char *VolumeName=NULL; JCR *jcr=NULL; BSR *bsr = NULL; char *bsrName = NULL; BtoolsAskDirHandler askdir_handler; init_askdir_handler(&askdir_handler); setlocale(LC_ALL, ""); bindtextdomain("bacula", LOCALEDIR); textdomain("bacula"); init_stack_dump(); lmgr_init_thread(); working_directory = "/tmp"; my_name_is(argc, argv, "cloud_test"); init_msg(NULL, NULL); /* initialize message handler */ OSDependentInit(); while ((ch = getopt(argc, argv, "b:c:d:vV:?")) != -1) { switch (ch) { case 'c': /* specify config file */ if (configfile != NULL) { free(configfile); } configfile = bstrdup(optarg); break; case 'b': bsrName = optarg; break; case 'd': /* debug level */ if (*optarg == 't') { dbg_timestamp = true; } else { char *p; /* We probably find a tag list -d 10,sql,bvfs */ if ((p = strchr(optarg, ',')) != NULL) { *p = 0; } debug_level = atoi(optarg); if (debug_level <= 0) { debug_level = 1; } if (p) { debug_parse_tags(p+1, &debug_level_tags); } } break; case 'v': verbose++; break; case 'V': /* Volume name */ VolumeName = optarg; break; case '?': default: usage(); } /* end switch */ } /* end while */ argc -= optind; argv += optind; if (!argc) { Pmsg0(0, _("No archive name specified\n")); usage(); } if (configfile == NULL) { configfile = bstrdup(CONFIG_FILE); } config = New(CONFIG()); parse_sd_config(config, configfile, M_ERROR_TERM); setup_me(); load_sd_plugins(me->plugin_directory); if (bsrName) { bsr = parse_bsr(NULL, bsrName); } jcr = setup_jcr("cloud_test", argv[0], bsr, VolumeName, SD_READ); dev = jcr->dcr->dev; if (!dev || dev->dev_type != B_CLOUD_DEV) { Pmsg0(0, "Bad device\n"); exit(1); } do_blocks(jcr, jcr->dcr); /* Start low level tests */ cdev = (cloud_dev *)dev; driver = cdev->driver; /* TODO: Put here low level tests for all drivers */ if (!cdev->truncate_cache(jcr->dcr)) { Pmsg1(0, "Unable to truncate the cache ERR=%s\n", cdev->errmsg); } if (jcr) { release_device(jcr->dcr); free_jcr(jcr); dev->term(NULL); } return 0; } bacula-15.0.3/src/stored/Makefile.in0000644000175000017500000005214314771010173017023 0ustar bsbuildbsbuild# # Copyright (C) 2000-2023 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # @MCOMMON@ srcdir = . VPATH = . .PATH: . sd_group=@sd_group@ # one up basedir = .. # top dir topdir = ../.. 
# this dir relative to top dir thisdir = src/stored DEBUG=@DEBUG@ GETTEXT_LIBS = @LIBINTL@ CLOUD_DRIVERS=@CLOUD_DRIVERS@ CLOUD_INSTALL_TARGETS=@CLOUD_INSTALL_TARGETS@ S3_LIBS=@S3_LIBS@ S3_INC=@S3_INC@ DB_LIBS=@DB_LIBS@ ACSLS_LIBDIR=@ACSLS_LIBDIR@ ACSLS_OS_DEFINE=@ACSLS_OS_DEFINE@ first_rule: all dummy: EXTRA_STORED_SRCS=@EXTRA_STORED_SRCS@ EXTRA_STORED_OBJS = $(EXTRA_STORED_SRCS:.c=.o) EXTRA_LIBSD_SRCS=@EXTRA_LIBSD_SRCS@ EXTRA_LIBSD_OBJS = $(EXTRA_LIBSD_SRCS:.c=.o) EXTRA_BLS_SRCS=@EXTRA_BLS_SRCS@ EXTRA_BLS_OBJS = $(EXTRA_BLS_SRCS:.c=.o) # Bacula SD core objects needed by all executables SDCORE_OBJS = \ stored_conf.o global.o # bacula-sd SDOBJS = \ stored.o append.o authenticate.o store_mngr.o dircmd.o fd_cmds.o job.o \ hello.o status.o vbackup.o \ sdcollect.o $(SDCORE_OBJS) $(EXTRA_STORED_OBJS) JSONOBJS = bsdjson.o stored_conf.o # btape TAPEOBJS = btape.o $(SDCORE_OBJS) # bls BLSOBJS = bls.o $(SDCORE_OBJS) $(EXTRA_BLS_OBJS) # bextract BEXTOBJS = bextract.o $(SDCORE_OBJS) # bscan SCNOBJS = bscan.o $(SDCORE_OBJS) # bcopy COPYOBJS = bcopy.o $(SDCORE_OBJS) ALIGNED_SRCS = \ aligned_dev.c aligned_read.c aligned_write.c ALIGNED_OBJS = $(ALIGNED_SRCS:.c=.o) ALIGNED_LOBJS = $(ALIGNED_SRCS:.c=.lo) CLOUD_COMMON_SRCS = \ cloud_parts.c cloud_transfer_mgr.c CLOUD_S3_SRCS = \ $(CLOUD_COMMON_SRCS) s3_driver.c CLOUD_S3_OBJS = $(CLOUD_S3_SRCS:.c=.o) CLOUD_S3_LOBJS = $(CLOUD_S3_SRCS:.c=.lo) CLOUD_S3_GLACIER_SRCS = \ $(CLOUD_COMMON_SRCS) bee_s3_cloud_glacier.c CLOUD_S3_GLACIER_OBJS = $(CLOUD_S3_GLACIER_SRCS:.c=.o) CLOUD_S3_GLACIER_LOBJS = $(CLOUD_S3_GLACIER_SRCS:.c=.lo) CLOUD_GENERIC_SRCS = \ $(CLOUD_COMMON_SRCS) generic_driver.c CLOUD_GENERIC_OBJS = $(CLOUD_GENERIC_SRCS:.c=.o) CLOUD_GENERIC_LOBJS = $(CLOUD_GENERIC_SRCS:.c=.lo) CLOUD_ALL_SRCS = \ $(CLOUD_COMMON_SRCS) cloud_dev.c file_driver.c CLOUD_ALL_OBJS = $(CLOUD_ALL_SRCS:.c=.o) CLOUD_ALL_LOBJS = $(CLOUD_ALL_SRCS:.c=.lo) # cloud_test CLOUDTESTOBJS = cloud_test.o $(SDCORE_OBJS) # cloud_cli CLOUDCLIOBJS = bcloud.o $(SDCORE_OBJS) $(CLOUD_ALL_LOBJS) DEDUP_SRCS = \ dedup_dev.c dedupengine.c dedup_interface.c DEDUP_OBJS = $(DEDUP_SRCS:.c=.o) DEDUP_LOBJS = $(DEDUP_SRCS:.c=.lo) # ACSLS changer objects ACSLSAPILIB = $(ACSLS_LIBDIR)/lib/libapi.a ACSLSIPCLIB = $(ACSLS_LIBDIR)/lib/libipc.a ACSLSUTLLIB = $(ACSLS_LIBDIR)/lib/libutl.a ACSLSCLLIB = $(ACSLS_LIBDIR)/lib/libcl.a ACSLSCHGOBJS = acsls-changer.o $(ACSLSAPILIB) $(ACSLSUTLLIB) $(ACSLSIPCLIB) $(ACSLSCLLIB) $(ACSLSAPILIB) # libbacsd objects LIBBACSD_SRCS = \ acquire.c ansi_label.c askdir.c autochanger.c \ block.c block_util.c butil.c dev.c device.c ebcdic.c \ init_dev.c label.c lock.c match_bsr.c mount.c \ null_dev.c os.c parse_bsr.c read.c read_records.c \ record_read.c record_util.c record_write.c reserve.c \ scan.c sd_plugins.c spool.c tape_alert.c vol_mgr.c wait.c \ tape_worm.c fifo_dev.c file_dev.c tape_dev.c vtape_dev.c \ $(EXTRA_LIBSD_SRCS) LIBBACSD_OBJS = $(LIBBACSD_SRCS:.c=.o) LIBBACSD_LOBJS = $(LIBBACSD_SRCS:.c=.lo) LIBBACSD_LT_RELEASE = @LIBBAC_LT_RELEASE@ # these are the objects that are changed by the .configure process EXTRAOBJS = @OBJLIST@ CAP_LIBS = @CAP_LIBS@ ZLIBS=@ZLIBS@ LZO_LIBS= @LZO_LIBS@ LZO_INC= @LZO_INC@ ZSTD_LIBS = @ZSTD_LIBS@ ZSTD_INC = @ZSTD_INC@ TOKYOCABINET_LIBS = @TOKYOCABINET_LIBS@ TOKYOCABINET_INC = @TOKYOCABINET_INC@ SD_LIBS = -lbacsd -lbaccfg -lbac -lbacfind .SUFFIXES: .c .o .lo .PHONY: dedup1/libdedup1.la dedup2/libdedup2.la .DONTCARE: # inference rules .c.o: @echo "Compiling $<" $(NO_ECHO)$(DO_CHECK_DMSG) $(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) -I$(srcdir) 
-I$(basedir) $(DINCLUDE) $(CFLAGS) -E $< | $(CHECK_DMSG) $(NO_ECHO)$(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) $(TOKYOCABINET_INC) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) $< .c.lo: @echo "LT Compiling $<" $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) $(S3_INC) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) $< #------------------------------------------------------------------------- all: Makefile libbacsd.la drivers bacula-sd @STATIC_SD@ \ bls bextract bscan bcopy \ bsdjson btape @ACSLS_BUILD_TARGET@ @echo "===== Make of stored is good ====" @echo " " ../lib/unittests.o: ../lib/unittests.c (cd ../lib ; make unittests.o) bacula-sd: Makefile libbacsd.la $(SDOBJS) \ ../lib/libbaccfg$(DEFAULT_ARCHIVE_TYPE) \ ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) \ ../findlib/libbacfind$(DEFAULT_ARCHIVE_TYPE) @echo "Linking $@ ..." $(LIBTOOL_LINK) $(CXX) $(WLDFLAGS) $(LDFLAGS) -L. -L../lib -L../findlib \ -o $@ $(SDOBJS) $(ZLIBS) \ $(SD_LIBS) -lm $(DLIB) $(LIBS) $(WRAPLIBS) \ $(GETTEXT_LIBS) $(OPENSSL_LIBS) $(CAP_LIBS) libbacsd.a: $(LIBBACSD_OBJS) @echo "Making $@ ..." $(AR) rc $@ $(LIBBACSD_OBJS) $(RANLIB) $@ libbacsd.la: Makefile $(LIBBACSD_LOBJS) @echo "Making $@ ..." $(LIBTOOL_LINK) $(CXX) $(DEFS) $(DEBUG) $(LDFLAGS) -o $@ \ $(LIBBACSD_LOBJS) \ -export-dynamic -rpath $(libdir) -release $(LIBBACSD_LT_RELEASE) # # Loadable driver # drivers: bacula-sd-cloud-driver.la ${CLOUD_DRIVERS} s3-driver: bacula-sd-cloud-s3-driver.la aws-driver: bacula-sd-cloud-aws-driver.la generic-driver: bacula-sd-cloud-generic-driver.la oci-driver: bacula-sd-cloud-oci-driver.la gs-driver: bacula-sd-cloud-gs-driver.la swift-driver: bacula-sd-cloud-swift-driver.la bacula-sd-cloud-driver.la: Makefile $(CLOUD_ALL_LOBJS) $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -shared $(CLOUD_ALL_LOBJS) -o $@ -R $(libdir) -rpath $(libdir) -module -export-dynamic -release $(LIBBACSD_LT_RELEASE) bacula-sd-cloud-was-driver.la: Makefile $(CLOUD_GENERIC_LOBJS) $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -shared $(CLOUD_GENERIC_LOBJS) -o $@ -R $(libdir) -rpath $(libdir) -module -export-dynamic -release $(LIBBACSD_LT_RELEASE) bacula-sd-cloud-aws-driver.la: Makefile $(CLOUD_GENERIC_LOBJS) $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -shared $(CLOUD_GENERIC_LOBJS) -o $@ -R $(libdir) -rpath $(libdir) -module -export-dynamic -release $(LIBBACSD_LT_RELEASE) bacula-sd-cloud-s3-driver.la: Makefile $(CLOUD_S3_LOBJS) $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -shared $(CLOUD_S3_LOBJS) -o $@ $(S3_LIBS) -R $(libdir) -rpath $(libdir) -module -export-dynamic -release $(LIBBACSD_LT_RELEASE) bacula-sd-cloud-glacier-s3-driver.la: Makefile $(CLOUD_S3_GLACIER_LOBJS) $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -shared $(CLOUD_S3_GLACIER_LOBJS) -o $@ $(S3_LIBS) -R $(libdir) -rpath $(libdir) -module -export-dynamic -release $(LIBBACSD_LT_RELEASE) bacula-sd-cloud-generic-driver.la: Makefile $(CLOUD_GENERIC_LOBJS) $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -shared $(CLOUD_GENERIC_LOBJS) -o $@ -R $(libdir) -rpath $(libdir) -module -export-dynamic -release $(LIBBACSD_LT_RELEASE) bacula-sd-cloud-oci-driver.la: Makefile $(CLOUD_GENERIC_LOBJS) $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -shared $(CLOUD_GENERIC_LOBJS) -o $@ -R $(libdir) -rpath $(libdir) -module -export-dynamic -release $(LIBBACSD_LT_RELEASE) bacula-sd-cloud-gs-driver.la: Makefile $(CLOUD_GENERIC_LOBJS) $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -shared $(CLOUD_GENERIC_LOBJS) -o $@ -R $(libdir) -rpath $(libdir) -module -export-dynamic -release $(LIBBACSD_LT_RELEASE) bacula-sd-cloud-swift-driver.la: Makefile $(CLOUD_GENERIC_LOBJS) $(LIBTOOL_LINK) $(CXX) 
$(LDFLAGS) -shared $(CLOUD_GENERIC_LOBJS) -o $@ -R $(libdir) -rpath $(libdir) -module -export-dynamic -release $(LIBBACSD_LT_RELEASE) dedup1/libdedup1.la: cd dedup1 && make libdedup1.la dedup2/libdedup2.la: cd dedup2 && make libdedup2.la bacula-sd-dedup-driver.la: Makefile $(DEDUP_LOBJS) dedup1/libdedup1.la dedup2/libdedup2.la @echo "Making $@ ..." $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -shared $(DEDUP_LOBJS) dedup1/libdedup1.la dedup2/libdedup2.la -o $@ -rpath $(plugindir) -module -export-dynamic -release $(LIBBACSD_LT_RELEASE) \ $(TOKYOCABINET_LIBS) bacula-sd-aligned-driver.la: Makefile $(ALIGNED_LOBJS) $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -shared $(ALIGNED_LOBJS) -o $@ -rpath $(plugindir) \ -module -export-dynamic -release $(LIBBACSD_LT_RELEASE) bsdjson: Makefile $(JSONOBJS) ../lib/libbaccfg$(DEFAULT_ARCHIVE_TYPE) ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) @echo "Linking $@ ..." $(LIBTOOL_LINK) $(CXX) $(WLDFLAGS) $(LDFLAGS) -L../lib -o $@ $(JSONOBJS) $(ZLIBS) \ -lbaccfg -lbac -lm $(DLIB) $(LIBS) $(WRAPLIBS) \ $(GETTEXT_LIBS) $(OPENSSL_LIBS) $(CAP_LIBS) static-bacula-sd: Makefile $(SDOBJS) ../lib/libbaccfg$(DEFAULT_ARCHIVE_TYPE) ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) $(LIBTOOL_LINK) $(CXX) $(WLDFLAGS) $(LDFLAGS) -static -L../lib -o $@ $(SDOBJS) $(ZLIBS) \ $(SD_LIBS) -lm $(DLIB) $(LIBS) $(WRAPLIBS) \ $(GETTEXT_LIBS) $(OPENSSL_LIBS) $(CAP_LIBS) strip $@ btape.o: btape.c @echo "Compiling $<" $(NO_ECHO)$(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) \ -I$(basedir) $(DINCLUDE) $(CFLAGS) $< btape: Makefile $(TAPEOBJS) libbacsd.la drivers ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) ../lib/libbaccfg$(DEFAULT_ARCHIVE_TYPE) $(LIBTOOL_LINK) $(CXX) $(TTOOL_LDFLAGS) $(LDFLAGS) -L../lib -L../findlib -o $@ $(TAPEOBJS) \ $(SD_LIBS) $(DLIB) -lm $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) cloud_test.o: cloud_test.c @echo "Compiling $<" $(NO_ECHO)$(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) \ -I$(basedir) $(DINCLUDE) $(CFLAGS) $< cloud_test: Makefile cloud_test.o ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) $(BLSOBJS) libbacsd.la drivers $(LIBTOOL_LINK) $(CXX) $(TTOOL_LDFLAGS) $(LDFLAGS) -L../lib -L../findlib -o $@ $(CLOUDTESTOBJS) $(DLIB) \ $(SD_LIBS) -lm $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) bcloud.o: bcloud.c @echo "Compiling $<" $(NO_ECHO)$(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) \ -I$(basedir) $(DINCLUDE) $(S3_INC) $(CFLAGS) $< bcloud: Makefile bcloud.o ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) $(BLSOBJS) libbacsd.la drivers $(LIBTOOL_LINK) $(CXX) $(TTOOL_LDFLAGS) $(LDFLAGS) -L../lib -L../findlib -o $@ $(CLOUDCLIOBJS) $(DLIB) \ $(SD_LIBS) -lm $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) $(S3_LIBS) bls.o: bls.c @echo "Compiling $<" $(NO_ECHO)$(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) \ -I$(basedir) $(DINCLUDE) $(CFLAGS) $< bls: Makefile $(BLSOBJS) libbacsd.la drivers ../findlib/libbacfind$(DEFAULT_ARCHIVE_TYPE) ../lib/libbaccfg$(DEFAULT_ARCHIVE_TYPE) ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) @echo "Compiling $<" $(LIBTOOL_LINK) $(CXX) $(TTOOL_LDFLAGS) $(LDFLAGS) -L../lib -L../findlib -o $@ $(BLSOBJS) $(DLIB) \ $(SD_LIBS) -lm $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) bextract.o: bextract.c @echo "Compiling $<" $(NO_ECHO)$(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) \ -I$(basedir) $(DINCLUDE) $(CFLAGS) $(LZO_INC) $(ZSTD_INC) $< bextract: Makefile $(BEXTOBJS) libbacsd.la drivers ../findlib/libbacfind$(DEFAULT_ARCHIVE_TYPE) ../lib/libbaccfg$(DEFAULT_ARCHIVE_TYPE) ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) @echo "Compiling $<" $(LIBTOOL_LINK) $(CXX) $(TTOOL_LDFLAGS) $(LDFLAGS) -L../lib -L../findlib -o $@ $(BEXTOBJS) $(DLIB) 
$(ZLIBS) $(LZO_LIBS) $(ZSTD_LIBS) \ $(SD_LIBS) -lm $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) bscan.o: bscan.c @echo "Compiling $<" $(NO_ECHO)$(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) \ -I$(basedir) $(DINCLUDE) $(CFLAGS) $< bscan: Makefile $(SCNOBJS) libbacsd.la drivers ../findlib/libbacfind$(DEFAULT_ARCHIVE_TYPE) ../lib/libbaccfg$(DEFAULT_ARCHIVE_TYPE) \ ../cats/libbacsql$(DEFAULT_ARCHIVE_TYPE) ../cats/libbaccats$(DEFAULT_ARCHIVE_TYPE) $(LIBTOOL_LINK) $(CXX) $(TTOOL_LDFLAGS) $(LDFLAGS) -L../lib -L../cats -L../findlib -o $@ $(SCNOBJS) \ $(SD_LIBS) -lbacsql -lbaccats $(DB_LIBS) $(ZLIBS) -lbacfind -lbaccfg -lbac -lm $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) bcopy.o: bcopy.c @echo "Compiling $<" $(NO_ECHO)$(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) \ -I$(basedir) $(DINCLUDE) $(CFLAGS) $< bcopy: Makefile $(COPYOBJS) libbacsd.la drivers ../findlib/libbacfind$(DEFAULT_ARCHIVE_TYPE) ../lib/libbaccfg$(DEFAULT_ARCHIVE_TYPE) ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) $(LIBTOOL_LINK) $(CXX) $(TTOOL_LDFLAGS) $(LDFLAGS) -L../lib -L../findlib -o $@ $(COPYOBJS) \ $(SD_LIBS) -lm $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) cloud_parts_test: Makefile cloud_parts.c $(RMF) cloud_parts.o $(CXX) -DTEST_PROGRAM $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) cloud_parts.c $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L../lib -o $@ cloud_parts.o $(DLIB) -lbac -lm $(LIBS) $(OPENSSL_LIBS) rm -f cloud_parts.o $(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) cloud_parts.c generic_driver_test: Makefile generic_driver.c $(RMF) generic_driver.o $(CXX) -DTEST_PROGRAM $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) generic_driver.c $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L../lib -o $@ generic_driver.o $(DLIB) -lbac -lm cloud_transfer_mgr.lo cloud_parts.lo $(LIBS) $(OPENSSL_LIBS) rm -f generic_driver.o $(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) generic_driver.c acsls-changer.o: acsls-changer.c @echo "Compiling $<" $(CXX) $(ACSLS_OS_DEFINE) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) -I$(ACSLS_LIBDIR)/src/h $< # this is a dirty fix for linking problem of the ACSLS static libraries. ACSLS libs are not compatible # with libtool. # # libdir points to /opt/bacula/lib, but libtool decides to store libraries to # /opt/bacula/lib64 on some platforms. We can add the two directories in rpath # to workaround the problem. 
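# (hence the explicit -Wl,-rpath $(libdir) on the acsls-changer link line below)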
acsls-changer: Makefile $(ACSLSCHGOBJS) $(CXX) $(TTOOL_LDFLAGS) $(LDFLAGS) -L../lib/.libs $(ACSLSCHGOBJS) -L../lib -lbac $(LIBS) $(GETTEXT_LIBS) -Wl,-rpath $(libdir) -o $@ Makefile: $(srcdir)/Makefile.in $(topdir)/config.status cd $(topdir) \ && CONFIG_FILES=$(thisdir)/$@ CONFIG_HEADERS= $(SHELL) ./config.status libtool-install: all $(MKDIR) $(DESTDIR)$(libdir) $(RMF) $(DESTDIR)$(libdir)/libbacsd-*.so $(DESTDIR)$(libdir)/libbacsd.la $(LIBTOOL_INSTALL_FINISH) $(INSTALL_LIB) libbacsd.la $(DESTDIR)$(libdir) libtool-uninstall: $(LIBTOOL_UNINSTALL) $(RMF) $(DESTDIR)$(libdir)/libbacsd.la install-tune-dde: tune-dde $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) tune-dde $(DESTDIR)$(sbindir)/tune-dde install: all @LIBTOOL_INSTALL_TARGET@ $(CLOUD_INSTALL_TARGETS) $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) bacula-sd $(DESTDIR)$(sbindir)/bacula-sd $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) bsdjson $(DESTDIR)$(sbindir)/bsdjson $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) bls $(DESTDIR)$(sbindir)/bls $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) bextract $(DESTDIR)$(sbindir)/bextract $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) bcopy $(DESTDIR)$(sbindir)/bcopy $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) bscan $(DESTDIR)$(sbindir)/bscan $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) btape $(DESTDIR)$(sbindir)/btape @if test -f static-bacula-sd; then \ $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) static-bacula-sd $(DESTDIR)$(sbindir)/static-bacula-sd; \ fi @srcconf=bacula-sd.conf; \ if test -f ${DESTDIR}${sysconfdir}/$$srcconf; then \ destconf=$$srcconf.new; \ echo " ==> Found existing $$srcconf, installing new conf file as $$destconf"; \ else \ destconf=$$srcconf; \ fi; \ echo "${INSTALL_CONFIG} $$srcconf ${DESTDIR}${sysconfdir}/$$destconf"; \ ${INSTALL_CONFIG} $$srcconf ${DESTDIR}${sysconfdir}/$$destconf @if test "x${sd_group}" != "x" -a "x${DESTDIR}" = "x" ; then \ chgrp -f ${sd_group} ${DESTDIR}${sysconfdir}/$$destconf; \ fi install-acsls-changer: acsls-changer $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) acsls-changer $(DESTDIR)$(sbindir)/acsls-changer @srcconf=acsls-changer.conf; \ if test -f ${DESTDIR}${sysconfdir}/$$srcconf; then \ destconf=$$srcconf.new; \ echo " ==> Found existing $$srcconf, installing new conf file as $$destconf"; \ else \ destconf=$$srcconf; \ fi; \ echo "${INSTALL_CONFIG} $$srcconf ${DESTDIR}${sysconfdir}/$$destconf"; \ ${INSTALL_CONFIG} $$srcconf ${DESTDIR}${sysconfdir}/$$destconf @if test "x${sd_group}" != "x" -a "x${DESTDIR}" = "x" ; then \ chgrp -f ${sd_group} ${DESTDIR}${sysconfdir}/$$destconf; \ fi install-bcloud: bcloud $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) bcloud $(DESTDIR)$(sbindir)/bcloud install-cloud: bacula-sd-cloud-driver.la $(MKDIR) $(DESTDIR)$(plugindir) $(LIBTOOL_INSTALL) $(INSTALL_LIB) bacula-sd-cloud-driver$(DEFAULT_SHARED_OBJECT_TYPE) $(DESTDIR)$(plugindir) $(RMF) $(DESTDIR)$(plugindir)/bacula-sd-cloud-driver.la install-was-cloud: bacula-sd-cloud-was-driver.la $(MKDIR) $(DESTDIR)$(plugindir) $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) bacula-sd-cloud-was-driver$(DEFAULT_SHARED_OBJECT_TYPE) $(DESTDIR)$(plugindir) $(RMF) $(DESTDIR)$(plugindir)/bacula-sd-cloud-was-driver.la install-s3-cloud: install-cloud bacula-sd-cloud-s3-driver.la $(MKDIR) $(DESTDIR)$(plugindir) $(LIBTOOL_INSTALL) $(INSTALL_LIB) bacula-sd-cloud-s3-driver$(DEFAULT_SHARED_OBJECT_TYPE) $(DESTDIR)$(plugindir) $(RMF) $(DESTDIR)$(plugindir)/bacula-sd-cloud-s3-driver.la install-s3-glacier-cloud: install-cloud bacula-sd-cloud-glacier-s3-driver.la $(MKDIR) $(DESTDIR)$(plugindir) $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) 
bacula-sd-cloud-glacier-s3-driver$(DEFAULT_SHARED_OBJECT_TYPE) $(DESTDIR)$(plugindir) $(RMF) $(DESTDIR)$(plugindir)/bacula-sd-cloud-glacier-s3-driver.la install-aws-cloud: install-cloud bacula-sd-cloud-aws-driver.la $(MKDIR) $(DESTDIR)$(plugindir) $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) bacula-sd-cloud-aws-driver$(DEFAULT_SHARED_OBJECT_TYPE) $(DESTDIR)$(plugindir) $(RMF) $(DESTDIR)$(plugindir)/bacula-sd-cloud-aws-driver.la install-generic-cloud: install-cloud bacula-sd-cloud-generic-driver.la $(MKDIR) $(DESTDIR)$(plugindir) $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) bacula-sd-cloud-generic-driver$(DEFAULT_SHARED_OBJECT_TYPE) $(DESTDIR)$(plugindir) $(RMF) $(DESTDIR)$(plugindir)/bacula-sd-cloud-generic-driver.la install-gs-cloud: install-cloud bacula-sd-cloud-gs-driver.la $(MKDIR) $(DESTDIR)$(plugindir) $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) bacula-sd-cloud-gs-driver$(DEFAULT_SHARED_OBJECT_TYPE) $(DESTDIR)$(plugindir) $(RMF) $(DESTDIR)$(plugindir)/bacula-sd-cloud-gs-driver.la install-oci-cloud: install-cloud bacula-sd-cloud-oci-driver.la $(MKDIR) $(DESTDIR)$(plugindir) $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) bacula-sd-cloud-oci-driver$(DEFAULT_SHARED_OBJECT_TYPE) $(DESTDIR)$(plugindir) $(RMF) $(DESTDIR)$(plugindir)/bacula-sd-cloud-oci-driver.la install-swift-cloud: install-cloud bacula-sd-cloud-swift-driver.la $(MKDIR) $(DESTDIR)$(plugindir) $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) bacula-sd-cloud-swift-driver$(DEFAULT_SHARED_OBJECT_TYPE) $(DESTDIR)$(plugindir) $(RMF) $(DESTDIR)$(plugindir)/bacula-sd-cloud-swift-driver.la install-aligned: bacula-sd-aligned-driver.la $(MKDIR) $(DESTDIR)$(plugindir) $(LIBTOOL_INSTALL) $(INSTALL_LIB) bacula-sd-aligned-driver$(DEFAULT_SHARED_OBJECT_TYPE) $(DESTDIR)$(plugindir) $(RMF) $(DESTDIR)$(plugindir)/bacula-sd-aligned-driver.la install-dedup: bacula-sd-dedup-driver.la $(MKDIR) $(DESTDIR)$(plugindir) $(LIBTOOL_INSTALL) $(INSTALL_LIB) bacula-sd-dedup-driver$(DEFAULT_SHARED_OBJECT_TYPE) $(DESTDIR)$(plugindir) $(RMF) $(DESTDIR)$(plugindir)/bacula-sd-dedup-driver.la uninstall: (cd $(DESTDIR)$(sbindir); $(RMF) bacula-sd bsdjson) (cd $(DESTDIR)$(sbindir); $(RMF) bls) (cd $(DESTDIR)$(sbindir); $(RMF) bextract) (cd $(DESTDIR)$(sbindir); $(RMF) bcopy) (cd $(DESTDIR)$(sbindir); $(RMF) bscan) (cd $(DESTDIR)$(sbindir); $(RMF) btape) (cd $(DESTDIR)$(sbindir); $(RMF) acsls-changer) (cd $(DESTDIR)$(sysconfdir); $(RMF) bacula-sd.conf bacula-sd.conf.new) (cd $(DESTDIR)$(sysconfdir); $(RMF) acsls-changer.conf acsls-changer.conf.new) libtool-clean: @find . -name '*.lo' -print | xargs $(LIBTOOL_CLEAN) $(RMF) @$(RMF) -r .libs _libs @$(RMF) *.la #(cd dedup1 && make libtool-clean) #(cd dedup2 && make libtool-clean) clean: libtool-clean @$(RMF) bacula-sd stored bls bextract bpool btape shmfree core core.* a.out *.o *.bak *~ *.intpro *.extpro 1 2 3 @$(RMF) bscan bsdjson bcopy static-bacula-sd acsls-changer #(cd dedup1 && make clean) #(cd dedup2 && make clean) realclean: clean @$(RMF) tags bacula-sd.conf acsls-changer.conf distclean: realclean if test $(srcdir) = .; then $(MAKE) realclean; fi (cd $(srcdir); $(RMF) Makefile) #(cd dedup1 && make distclean) #(cd dedup2 && make distclean) devclean: realclean if test $(srcdir) = .; then $(MAKE) realclean; fi (cd $(srcdir); $(RMF) Makefile) # Semi-automatic generation of dependencies: # Use cc -M because X11 `makedepend' doesn't work on all systems # and it also includes system headers. # `semi'-automatic since dependencies are generated at distribution time. 
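# The generated dependencies are appended below the "DO NOT DELETE" marker at the end of this Makefile.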
depend: @$(MV) Makefile Makefile.bak @$(SED) "/^# DO NOT DELETE:/,$$ d" Makefile.bak > Makefile @$(ECHO) "# DO NOT DELETE: nice dependency list follows" >> Makefile @$(CXX) -S -M $(CPPFLAGS) $(XINC) $(S3_INC) -I$(srcdir) -I$(basedir) -I$(ACSLS_LIBDIR)/src/h *.c >> Makefile @set -- *.cpp ; if test -f "$1" ; then \ $(CXX) -S -M $(CPPFLAGS) $(XINC) $(S3_INC) -I$(srcdir) -I$(basedir) -std=c++11 *.cpp >> Makefile ; \ fi @if test -f Makefile ; then \ $(RMF) Makefile.bak; \ else \ $(MV) Makefile.bak Makefile; \ echo " ======= Something went wrong with make depend. ======="; \ fi #(cd dedup1 && make depend) #(cd dedup2 && make depend) # ----------------------------------------------------------------------- # DO NOT DELETE: nice dependency list follows bacula-15.0.3/src/stored/sd_plugins.h0000644000175000017500000001244514771010173017277 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Interface definition for Bacula Plugins * * Kern Sibbald, October 2007 * */ #ifndef __SD_PLUGINS_H #define __SD_PLUGINS_H #ifndef _BACULA_H #ifdef __cplusplus /* Workaround for SGI IRIX 6.5 */ #define _LANGUAGE_C_PLUS_PLUS 1 #endif #define _REENTRANT 1 #define _THREAD_SAFE 1 #define _POSIX_PTHREAD_SEMANTICS 1 #define _FILE_OFFSET_BITS 64 #define _LARGEFILE_SOURCE 1 #define _LARGE_FILES 1 #endif #include #ifndef __CONFIG_H #define __CONFIG_H #include "config.h" #endif #include "bc_types.h" #include "lib/plugins.h" #ifdef __cplusplus extern "C" { #endif /**************************************************************************** * * * Bacula definitions * * * ****************************************************************************/ /* Bacula Variable Ids */ typedef enum { bsdVarJob = 1, bsdVarLevel = 2, bsdVarType = 3, bsdVarJobId = 4, bsdVarClient = 5, bsdVarNumVols = 6, bsdVarPool = 7, bsdVarStorage = 8, bsdVarCatalog = 9, bsdVarMediaType = 10, bsdVarJobName = 11, bsdVarJobStatus = 12, bsdVarPriority = 13, bsdVarVolumeName = 14, bsdVarCatalogRes = 15, bsdVarJobErrors = 16, bsdVarJobFiles = 17, bsdVarSDJobFiles = 18, bsdVarSDErrors = 19, bsdVarFDJobStatus = 20, bsdVarSDJobStatus = 21 } bsdrVariable; typedef enum { #if 0 bsdVarDevTypes = 1 #endif } bsdrGlobalVariable; typedef enum { bsdwVarJobReport = 1, bsdwVarVolumeName = 2, bsdwVarPriority = 3, bsdwVarJobLevel = 4 } bsdwVariable; typedef enum { bsdEventJobStart = 1, bsdEventJobEnd = 2, bsdEventDeviceInit = 3, bsdEventDeviceOpen = 4, bsdEventDeviceTryOpen = 5, bsdEventDeviceClose = 6 } bsdEventType; /* ***BEEF*** */ typedef enum { bsdGlobalEventDeviceInit = 1 } bsdGlobalEventType; typedef struct s_bsdEvent { uint32_t eventType; } bsdEvent; typedef struct s_sdbaculaInfo { uint32_t size; uint32_t version; } bsdInfo; /* Bacula interface version and function pointers */ typedef struct s_sdbaculaFuncs { uint32_t size; uint32_t version; bRC (*registerBaculaEvents)(bpContext *ctx, ...); bRC (*getBaculaValue)(bpContext *ctx, bsdrVariable var, void *value); bRC (*setBaculaValue)(bpContext *ctx, bsdwVariable 
var, void *value); bRC (*JobMessage)(bpContext *ctx, const char *file, int line, int type, utime_t mtime, const char *fmt, ...); bRC (*DebugMessage)(bpContext *ctx, const char *file, int line, int level, const char *fmt, ...); void (*EditDeviceCodes)(DCR *dcr, POOLMEM **omsg, const char *imsg, const char *cmd); bRC (*getBaculaGlobal)(bsdrGlobalVariable var, void *value); /* ***BEEF*** */ } bsdFuncs; /* Bacula Subroutines */ void load_sd_plugins(const char *plugin_dir); void new_plugins(JCR *jcr); void free_plugins(JCR *jcr); int generate_plugin_event(JCR *jcr, bsdEventType event, void *value=NULL); /* ***BEEF*** */ int generate_global_plugin_event(bsdGlobalEventType event, void *value=NULL); /**************************************************************************** * * * Plugin definitions * * * ****************************************************************************/ typedef enum { psdVarName = 1, psdVarDescription = 2 } psdVariable; #define SD_PLUGIN_MAGIC "*BaculaSDPluginData*" #define SD_PLUGIN_INTERFACE_VERSION ( 13 ) typedef struct s_sdpluginInfo { uint32_t size; uint32_t version; const char *plugin_magic; const char *plugin_license; const char *plugin_author; const char *plugin_date; const char *plugin_version; const char *plugin_description; } psdInfo; /* * Functions that must be defined in every plugin */ typedef struct s_sdpluginFuncs { uint32_t size; uint32_t version; bRC (*newPlugin)(bpContext *ctx); bRC (*freePlugin)(bpContext *ctx); bRC (*getPluginValue)(bpContext *ctx, psdVariable var, void *value); bRC (*setPluginValue)(bpContext *ctx, psdVariable var, void *value); bRC (*handlePluginEvent)(bpContext *ctx, bsdEvent *event, void *value); bRC (*handleGlobalPluginEvent)(bsdEvent *event, void *value); } psdFuncs; #define sdplug_func(plugin) ((psdFuncs *)(plugin->pfuncs)) #define sdplug_info(plugin) ((psdInfo *)(plugin->pinfo)) #ifdef __cplusplus } #endif #endif /* __SD_PLUGINS_H */ bacula-15.0.3/src/stored/org_libsd_sir.c0000644000175000017500000000232614771010173017741 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #include "bacula.h" #include "stored.h" bool sir_init(DCR *dcr) { return false; } /* Return: SIR_OK nothing to do * SIR_BREAK break * SIR_CONTINUE continue */ int sir_init_loop(DCR *dcr, DEVICE **dev, DEV_BLOCK **block, bool record_cb(DCR *dcr, DEV_RECORD *rec), bool mount_cb(DCR *dcr)) { return SIR_OK; } bool DCR::need_to_reposition() { return false; } void DCR::set_interactive_reposition(BSR *) { } BSR *DCR::clear_interactive_reposition() { return NULL; } void DCR::set_session_interactive() { } bacula-15.0.3/src/stored/block_util.c0000644000175000017500000010460014771010173017245 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. 
You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * * block_util.c -- tape block utility functions * * Kern Sibbald, split from block.c March MMXII */ #include "bacula.h" #include "stored.h" static const int dbglvl = 160; #ifdef DEBUG_BLOCK_CHECKSUM static const bool debug_block_checksum = true; #else static const bool debug_block_checksum = false; #endif #ifdef NO_TAPE_WRITE_TEST static const bool no_tape_write_test = true; #else static const bool no_tape_write_test = false; #endif /* * Dump the block header, then walk through * the block printing out the record headers. */ void dump_block(DEVICE *dev, DEV_BLOCK *b, const char *msg, bool force) { ser_declare; char *p; char *bufp; char Id[BLKHDR_ID_LENGTH+1]; uint64_t CheckSum64, BlockCheckSum64; uint32_t CheckSumLo; uint32_t block_len, reclen; uint32_t BlockNumber; uint32_t VolSessionId, VolSessionTime, data_len; int32_t FileIndex; int32_t Stream; int bhl, rhl; char buf1[100], buf2[100]; if (!force && ((debug_level & ~DT_ALL) < 250)) { return; } if (b->adata) { Dmsg0(20, "Dump block: adata=1 cannot dump.\n"); return; } bufp = b->bufp; if (dev) { if (dev->can_read()) { bufp = b->buf + b->block_len; } } unser_begin(b->buf, BLKHDR1_LENGTH); unser_uint32(CheckSumLo); unser_uint32(block_len); unser_uint32(BlockNumber); unser_bytes(Id, BLKHDR_ID_LENGTH); ASSERT(unser_length(b->buf) == BLKHDR1_LENGTH); Id[BLKHDR_ID_LENGTH] = 0; if (block_len > 4000000 || block_len < BLKHDR_CS_LENGTH) { Dmsg3(20, "Will not dump blocksize too %s %lu msg: %s\n", (block_len < BLKHDR_CS_LENGTH)?"small":"big", block_len, msg); return; } if (Id[3] == '3') { // CheckSumLo is the header option field unser_uint32(VolSessionId); unser_uint32(VolSessionTime); unser_uint64(CheckSum64); bhl = BLKHDR3_LENGTH; rhl = RECHDR3_LENGTH; /* backup the CheckSum64, replace it with 0x0...0 and calculate the CheckSum64 */ uint64_t save; memcpy(&save, b->buf+BLKHDR_CS64_OFFSET, BLKHDR_CS64_LENGTH); memset(b->buf+BLKHDR_CS64_OFFSET, '\0', BLKHDR_CS64_LENGTH); BlockCheckSum64 = bXXH3_64bits((uint8_t *)b->buf+BLKHDR_CS_LENGTH, block_len-BLKHDR_CS_LENGTH); /* restore the checksum */ memcpy(b->buf+BLKHDR_CS64_OFFSET, &save, BLKHDR_CS64_LENGTH); } else if (Id[3] == '2') { unser_uint32(VolSessionId); unser_uint32(VolSessionTime); bhl = BLKHDR2_LENGTH; rhl = RECHDR2_LENGTH; CheckSum64 = CheckSumLo; BlockCheckSum64 = bcrc32((uint8_t *)b->buf+BLKHDR_CS_LENGTH, block_len-BLKHDR_CS_LENGTH); } else { VolSessionId = VolSessionTime = 0; bhl = BLKHDR1_LENGTH; rhl = RECHDR1_LENGTH; CheckSum64 = CheckSumLo; BlockCheckSum64 = bcrc32((uint8_t *)b->buf+BLKHDR_CS_LENGTH, block_len-BLKHDR_CS_LENGTH); } Pmsg7(000, _("Dump block %s %p: adata=%d size=%d BlkNum=%d\n" " Hdrcksum=%llx cksum=%llx\n"), msg, b, b->adata, block_len, BlockNumber, CheckSum64, BlockCheckSum64); p = b->buf + bhl; while (p < bufp) { unser_begin(p, WRITE_RECHDR_LENGTH); if (rhl == RECHDR1_LENGTH) { unser_uint32(VolSessionId); unser_uint32(VolSessionTime); } unser_int32(FileIndex); unser_int32(Stream); unser_uint32(data_len); if (Stream == STREAM_ADATA_BLOCK_HEADER) { reclen = 0; p += WRITE_ADATA_BLKHDR_LENGTH; } else if (Stream == STREAM_ADATA_RECORD_HEADER || Stream == -STREAM_ADATA_RECORD_HEADER) { 
unser_uint32(reclen); unser_int32(Stream); p += WRITE_ADATA_RECHDR_LENGTH; if (Stream & STREAM_BIT_OFFSETS) { p += OFFSET_FADDR_SIZE; } } else { reclen = 0; p += data_len + rhl; } Pmsg6(000, _(" Rec: VId=%u VT=%u FI=%s Strm=%s len=%d reclen=%d\n"), VolSessionId, VolSessionTime, FI_to_ascii(buf1, FileIndex), stream_to_ascii(buf2, Stream, FileIndex), data_len, reclen); } } /* * Create a new block structure. * We pass device so that the block can inherit the * min and max block sizes. */ void DEVICE::new_dcr_blocks(DCR *dcr) { dcr->block = dcr->ameta_block = new_block(dcr); } DEV_BLOCK *DEVICE::new_block(DCR *dcr, int size) { DEV_BLOCK *block = (DEV_BLOCK *)get_memory(sizeof(DEV_BLOCK)); int len; memset(block, 0, sizeof(DEV_BLOCK)); /* If the user has specified a max_block_size, use it as the default */ if (max_block_size == 0) { len = DEFAULT_BLOCK_SIZE; } else { len = max_block_size; } block->dev = this; /* special size */ if (size) { len = size; } block->buf_len = len; block->buf = get_memory(block->buf_len); block->buf_enc = get_memory(block->buf_len); block->rechdr_queue = get_memory(block->buf_len); block->rechdr_items = 0; Dmsg2(510, "Rechdr len=%d max_items=%d\n", sizeof_pool_memory(block->rechdr_queue), sizeof_pool_memory(block->rechdr_queue)/WRITE_ADATA_RECHDR_LENGTH); block->filemedia = New(alist(1, owned_by_alist)); block->blkh_options = 0; block->blkh_options |= do_checksum() ? BLKHOPT_CHKSUM : 0; empty_block(block); block->BlockVer = BLOCK_VER; /* default write version */ Dmsg3(150, "New block adata=%d len=%d block=%p\n", block->adata, len, block); return block; } /* * Duplicate an existing block (eblock) */ DEV_BLOCK *dup_block(DEV_BLOCK *eblock) { DEV_BLOCK *block = (DEV_BLOCK *)get_memory(sizeof(DEV_BLOCK)); int buf_len = sizeof_pool_memory(eblock->buf); int rechdr_len = sizeof_pool_memory(eblock->rechdr_queue); memcpy(block, eblock, sizeof(DEV_BLOCK)); block->buf = get_memory(buf_len); block->buf_enc = get_memory(buf_len); block->buf_out = (eblock->buf_out==eblock->buf)?block->buf:eblock->buf_enc; memcpy(block->buf, eblock->buf, buf_len); memcpy(block->buf_enc, eblock->buf_enc, buf_len); block->rechdr_queue = get_memory(rechdr_len); memcpy(block->rechdr_queue, eblock->rechdr_queue, rechdr_len); FILEMEDIA_ITEM *fm; block->filemedia = New(alist(1, owned_by_alist)); foreach_alist(fm, eblock->filemedia) { FILEMEDIA_ITEM *fm2 = (FILEMEDIA_ITEM *) malloc(sizeof(FILEMEDIA_ITEM)); memcpy(fm2, fm, sizeof(FILEMEDIA_ITEM)); block->filemedia->append(fm2); } /* bufp might point inside buf */ if (eblock->bufp && eblock->bufp >= eblock->buf && eblock->bufp < (eblock->buf + buf_len)) { block->bufp = (eblock->bufp - eblock->buf) + block->buf; } else { block->bufp = NULL; } return block; } /* * Flush block to disk */ bool DEVICE::flush_block(DCR *dcr) { if (!is_block_empty(dcr->block)) { Dmsg0(dbglvl, "=== wpath 53 flush_ameta\n"); Dmsg4(190, "Call flush_ameta_block BlockAddr=%lld nbytes=%d adata=%d block=%x\n", dcr->block->BlockAddr, dcr->block->binbuf, dcr->adata_block->adata, dcr->adata_block); dump_block(dcr->dev, dcr->block, "Flush_ameta_block"); if (dcr->jcr->is_canceled() || !dcr->write_block_to_device()) { Dmsg0(dbglvl, "=== wpath 54 flush_ameta\n"); Dmsg0(190, "Failed to write ameta block to device, return false.\n"); return false; } empty_block(dcr->block); } return true; } /* * Only the first block checksum error was reported. * If there are more, report it now. 
*/ void print_block_read_errors(JCR *jcr, DEV_BLOCK *block) { if (block->read_errors > 1) { Jmsg(jcr, M_ERROR, 0, _("%d block read errors not printed.\n"), block->read_errors); } } /* We had a problem on some solaris platforms with the CRC32 library, some * 8.4.x jobs uses a bad crc32 algorithm. We just try one then the * other to not create false problems */ uint32_t DCR::crc32(unsigned char *buf, int len, uint32_t expected_crc) { #if BEEF && defined(HAVE_SUN_OS) && defined(HAVE_LITTLE_ENDIAN) uint32_t crc = 0; if (crc32_type) { crc = bcrc32_bad(buf, len); } else { crc = bcrc32(buf, len); } if (expected_crc != crc) { crc32_type = !crc32_type; /* Next time, do it well right away */ if (crc32_type) { crc = bcrc32_bad(buf, len); } else { crc = bcrc32(buf, len); } } return crc; #else return bcrc32(buf, len); #endif } void DEVICE::free_dcr_blocks(DCR *dcr) { if (dcr->block == dcr->ameta_block) { dcr->ameta_block = NULL; /* do not free twice */ } free_block(dcr->block); dcr->block = NULL; free_block(dcr->ameta_block); dcr->ameta_block = NULL; } /* * Free block */ void free_block(DEV_BLOCK *block) { if (block) { Dmsg1(999, "free_block buffer=%p\n", block->buf); if (block->buf) { free_memory(block->buf); } if (block->buf_enc) { free_memory(block->buf_enc); } if (block->rechdr_queue) { free_memory(block->rechdr_queue); } delete block->filemedia; Dmsg1(999, "=== free_block block %p\n", block); free_memory((POOLMEM *)block); } } bool is_block_empty(DEV_BLOCK *block) { if (block->adata) { Dmsg1(200, "=== adata=1 binbuf=%d\n", block->binbuf); return block->binbuf <= 0; } else { Dmsg1(200, "=== adata=0 binbuf=%d\n", block->binbuf-WRITE_BLKHDR_LENGTH); return block->binbuf <= WRITE_BLKHDR_LENGTH; } } /* Empty the block -- for writing */ void empty_block(DEV_BLOCK *block) { if (block->adata) { block->binbuf = 0; } else { block->binbuf = WRITE_BLKHDR_LENGTH; } Dmsg3(250, "empty_block: adata=%d len=%d set binbuf=%d\n", block->adata, block->buf_len, block->binbuf); block->bufp = block->buf + block->binbuf; block->buf_out = block->buf; block->read_len = 0; block->write_failed = false; block->block_read = false; block->needs_write = false; block->FirstIndex = block->LastIndex = 0; block->RecNum = 0; block->BlockAddr = 0; block->filemedia->destroy(); block->extra_bytes = 0; block->first_block = false; // by default this block don't hold a volume label } /* * Create block header just before write. The space * in the buffer should have already been reserved by * init_block. 
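 *
 * For reference only, derived from the ser_* calls that follow, the
 * BB03 header is laid out as:
 *   uint32  header options (BLKHOPT_* bits; this slot held the CRC32
 *           in BB01/BB02)
 *   uint32  block length
 *   uint32  block number
 *   char[4] block ID "BB03"
 *   uint32  VolSessionId
 *   uint32  VolSessionTime
 *   uint64  XXH3-64 checksum, computed over the whole block with this
 *           field temporarily set to zero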
*/ uint64_t ser_block_header(DEV_BLOCK *block, bool do_checksum) { DEVICE *dev = block->dev; ser_declare; uint32_t block_len = block->binbuf; uint32_t hdr_option = 0x0; bool do_encrypt_vol = dev->device->volume_encryption != ET_NO && dev->crypto_device_ctx != NULL; bool do_encrypt_block = do_encrypt_vol && !block->first_block; hdr_option |= (do_checksum?BLKHOPT_CHKSUM:0) | (do_encrypt_vol?BLKHOPT_ENCRYPT_VOL:0) | (do_encrypt_block?BLKHOPT_ENCRYPT_BLOCK:0); block->CheckSum64 = 0; /* ***BEEF*** */ if (block->adata) { /* Checksum whole block */ if (do_checksum) { block->CheckSum64 = bcrc32((uint8_t *)block->buf, block_len); } } else { int chk_off = BLKHDR_CS64_OFFSET; int bhl = BLKHDR3_LENGTH; Dmsg1(160, "block_header: block_len=%d\n", block_len); ser_begin(block->buf, BLKHDR3_LENGTH); ser_uint32(hdr_option); /* Was the crc32, is now an "option" bit field in BB03 */ ser_uint32(block_len); ser_uint32(block->BlockNumber); ser_bytes(WRITE_BLKHDR_ID, BLKHDR_ID_LENGTH); // BB03 // BLOCK_VER >= 2 ser_uint32(block->VolSessionId); ser_uint32(block->VolSessionTime); // BLOCK_VER >= 3; // ASSERTD((do_checksum && (block->blkh_options&BLKHOPT_CHKSUM)) // || (!do_checksum && !(block->blkh_options&BLKHOPT_CHKSUM)), "checksum configuration mismatch"); ser_uint64(0x00); // the XXH3_64 checksum if (do_checksum) { /* Checksum whole block including the checksum with value 0x0 */ block->CheckSum64 = bXXH3_64bits((uint8_t *)block->buf, block_len); /* update the checksum in the block header */ ser_begin(block->buf+chk_off, BLKHDR_CS64_LENGTH); ser_uint64(block->CheckSum64); } Dmsg3(160, "ser_block_header: adata=%d checksum=0x%016llx enc=%d\n", block->adata, block->CheckSum64, do_encrypt_block); block->buf_out = block->buf; if (do_encrypt_block) { /* Does the encryption, checksum is encrypted too */ block_cipher_init_iv_header(block->dev->crypto_device_ctx, block->BlockNumber, block->VolSessionId, block->VolSessionTime); block_cipher_encrypt(block->dev->crypto_device_ctx, block_len-bhl, block->buf+bhl, block->buf_enc+bhl); memcpy(block->buf_enc, block->buf, bhl); block->buf_out = block->buf_enc; } } return block->CheckSum64; } /* * Unserialize the block header for reading block. * This includes setting all the buffer pointers correctly. 
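 *
 * Recognizes the BB01, BB02 and BB03 header IDs handled below; for
 * BB03 the first 32-bit word carries the BLKHOPT_* option bits
 * (checksum/encryption) rather than a CRC32, and the 64-bit XXH3
 * checksum is read from BLKHDR_CS64_OFFSET.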
* * Returns: false on failure (not a block) * true on success */ bool unser_block_header(DCR *dcr, DEVICE *dev, DEV_BLOCK *block) { ser_declare; char Id[BLKHDR_ID_LENGTH+1]; uint64_t BlockCheckSum64; uint32_t block_len; uint32_t block_end; uint32_t BlockNumber; uint32_t CheckSumLo; JCR *jcr = dcr->jcr; int bhl; int chk_off = BLKHDR_CS64_OFFSET; /* used only in BB03 */ if (block->adata) { /* Checksum the whole block */ if (block->block_len <= block->read_len && dev->do_checksum()) { BlockCheckSum64 = dcr->crc32((uint8_t *)block->buf, block->block_len, block->CheckSum64); if (BlockCheckSum64 != block->CheckSum64) { dev->dev_errno = EIO; Mmsg5(dev->errmsg, _("Volume data error at %lld!\n" "Adata block checksum mismatch in block=%u len=%d: calc=%llx blk=%llx\n"), block->BlockAddr, block->BlockNumber, block->block_len, BlockCheckSum64, block->CheckSum64); if (block->read_errors == 0 || verbose >= 2) { Jmsg(jcr, M_ERROR, 0, "%s", dev->errmsg); dump_block(dev, block, "with checksum error"); } block->read_errors++; if (!forge_on) { return false; } } } return true; } if (block->no_header) { return true; } unser_begin(block->buf, BLKHDR_LENGTH); unser_uint32(CheckSumLo); // can be blkh_options in BB03 unser_uint32(block_len); unser_uint32(BlockNumber); unser_bytes(Id, BLKHDR_ID_LENGTH); ASSERT(unser_length(block->buf) == BLKHDR1_LENGTH); Id[BLKHDR_ID_LENGTH] = 0; block->CheckSum64 = CheckSumLo; /* only for BB01 & BB02 */ block->blkh_options = BLKHOPT_NONE; /* only in BB03 */ if (Id[3] == '1' && strncmp(Id, BLKHDR1_ID, BLKHDR_ID_LENGTH) == 0) { bhl = BLKHDR1_LENGTH; block->BlockVer = 1; block->bufp = block->buf + bhl; //Dmsg3(100, "Block=%p buf=%p bufp=%p\n", block, block->buf, block->bufp); } else if (Id[3] == '2' && strncmp(Id, BLKHDR2_ID, BLKHDR_ID_LENGTH) == 0) { unser_uint32(block->VolSessionId); unser_uint32(block->VolSessionTime); bhl = BLKHDR2_LENGTH; block->BlockVer = 2; block->bufp = block->buf + bhl; //Dmsg5(100, "Read-blkhdr Block=%p adata=%d buf=%p bufp=%p off=%d\n", block, block->adata, // block->buf, block->bufp, block->bufp-block->buf); } else if (Id[3] == '3' && strncmp(Id, BLKHDR3_ID, BLKHDR_ID_LENGTH) == 0) { bhl = BLKHDR3_LENGTH; block->blkh_options = CheckSumLo; /* in BB03 blkh_options is stored at the checksum location */ unser_uint32(block->VolSessionId); unser_uint32(block->VolSessionTime); /* decrypt the block before to calculate the checksum */ if ((block->blkh_options & BLKHOPT_ENCRYPT_BLOCK) && block->dev->crypto_device_ctx) { block_cipher_init_iv_header(block->dev->crypto_device_ctx, BlockNumber, block->VolSessionId, block->VolSessionTime); block_cipher_decrypt(block->dev->crypto_device_ctx, block_len-bhl, block->buf+bhl, block->buf_enc); memcpy(block->buf+bhl, block->buf_enc, block_len-bhl); } unser_begin(block->buf+chk_off, BLKHDR_CS64_LENGTH); unser_uint64(block->CheckSum64); block->BlockVer = 3; block->bufp = block->buf + bhl; //Dmsg5(100, "Read-blkhdr Block=%p adata=%d buf=%p bufp=%p off=%d\n", block, block->adata, // block->buf, block->bufp, block->bufp-block->buf); } else { /* unknown block header ID */ char asciibuf[80]; /* dump raw data */ dev->dev_errno = EIO; Mmsg4(dev->errmsg, _("Volume data error at %u:%u! Wanted ID: \"%s\", got \"%s\". 
Buffer discarded.\n"), dev->get_hi_addr(block->BlockAddr), dev->get_low_addr(block->BlockAddr), BLKHDR3_ID, asciidump(Id, BLKHDR_ID_LENGTH, asciibuf, sizeof(asciibuf))); Dmsg1(50, "%s", dev->errmsg); if (block->read_errors == 0 || verbose >= 2) { Jmsg(jcr, M_FATAL, 0, "%s", dev->errmsg); } block->read_errors++; unser_uint32(block->VolSessionId); unser_uint32(block->VolSessionTime); return false; } /* Sanity check */ if (block_len > MAX_BLOCK_LENGTH) { dev->dev_errno = EIO; Mmsg3(dev->errmsg, _("Volume data error at %u:%u! Block length %u is insane (too large), probably due to a bad archive.\n"), dev->file, dev->block_num, block_len); if (block->read_errors == 0 || verbose >= 2) { Jmsg(jcr, M_ERROR, 0, "%s", dev->errmsg); } block->read_errors++; return false; } Dmsg1(390, "unser_block_header block_len=%d\n", block_len); /* Find end of block or end of buffer whichever is smaller */ if (block_len > block->read_len) { block_end = block->read_len; } else { block_end = block_len; } block->binbuf = block_end - bhl; Dmsg3(200, "set block=%p adata=%d binbuf=%d\n", block, block->adata, block->binbuf); block->block_len = block_len; block->BlockNumber = BlockNumber; Dmsg3(390, "Read binbuf = %d %d block_len=%d\n", block->binbuf, bhl, block_len); if (block_len <= block->read_len && dev->do_checksum()) { if (Id[3] == '3') { uint64_t save; /* make a copy of the xxh64 */ memcpy(&save, block->buf+chk_off, BLKHDR_CS64_LENGTH); memset(block->buf+chk_off, '\0', BLKHDR_CS64_LENGTH); BlockCheckSum64 = bXXH3_64bits((uint8_t *)block->buf, block_len); memcpy(block->buf+chk_off, &save, BLKHDR_CS64_LENGTH); } else { BlockCheckSum64 = dcr->crc32((uint8_t *)block->buf+BLKHDR_CS_LENGTH, block_len-BLKHDR_CS_LENGTH, block->CheckSum64); } if (BlockCheckSum64 != block->CheckSum64) { dev->dev_errno = EIO; Mmsg6(dev->errmsg, _("Volume data error at %u:%u!\n" "Block checksum mismatch in block=%u len=%d: calc=%llx blk=%llx\n"), dev->file, dev->block_num, (unsigned)BlockNumber, block_len, BlockCheckSum64, block->CheckSum64); if (block->read_errors == 0 || verbose >= 2) { Jmsg(jcr, M_ERROR, 0, "%s", dev->errmsg); dump_block(dev, block, "with checksum error"); } block->read_errors++; if (!forge_on) { return false; } } } return true; } /* * Calculate how many bytes to write and then clear to end * of block. */ uint32_t get_len_and_clear_block(DEV_BLOCK *block, DEVICE *dev, uint32_t &pad) { uint32_t wlen; /* * Clear to the end of the buffer if it is not full, * and on tape devices, apply min and fixed blocking. 
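 *
 * The round-up below is the usual ((n + sz - 1) / sz) * sz integer
 * idiom. Purely as a worked illustration (values are hypothetical):
 * with a 1024-byte TAPE_BSIZE and 1500 bytes in the block, wlen
 * becomes 2048 and the trailing 548 bytes are zero-filled and
 * returned in 'pad'.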
*/ wlen = block->binbuf; if (wlen != block->buf_len) { Dmsg2(250, "binbuf=%d buf_len=%d\n", block->binbuf, block->buf_len); /* Adjust write size to min/max for tapes and aligned only */ if (dev->is_tape() || block->adata) { /* check for fixed block size */ if (dev->min_block_size == dev->max_block_size) { wlen = block->buf_len; /* fixed block size already rounded */ /* Check for min block size */ } else if (wlen < dev->min_block_size) { wlen = ((dev->min_block_size + TAPE_BSIZE - 1) / TAPE_BSIZE) * TAPE_BSIZE; /* Ensure size is rounded */ } else { wlen = ((wlen + TAPE_BSIZE - 1) / TAPE_BSIZE) * TAPE_BSIZE; } } if (block->adata && dev->padding_size > 0) { /* Write to next aligned boundary */ wlen = ((wlen + dev->padding_size - 1) / dev->padding_size) * dev->padding_size; } ASSERT(wlen <= block->buf_len); /* Clear from end of data to end of block */ if (wlen-block->binbuf > 0) { memset(block->bufp, 0, wlen-block->binbuf); /* clear garbage */ } pad = wlen - block->binbuf; /* padding or zeros written */ Dmsg5(150, "Zero end blk: adata=%d cleared=%d buf_len=%d wlen=%d binbuf=%d\n", block->adata, pad, block->buf_len, wlen, block->binbuf); } else { pad = 0; } return wlen; /* bytes to write */ } /* * Determine if user defined volume size has been * reached, and if so, return true, otherwise * return false. */ bool is_user_volume_size_reached(DCR *dcr, bool quiet) { bool hit_max1, hit_max2; uint64_t size, max_size; DEVICE *dev = dcr->ameta_dev; char ed1[50]; bool rtn = false; Enter(dbglvl); if (dev->is_aligned()) { /* Note, we reserve space for one ameta and one adata block */ size = dev->VolCatInfo.VolCatBytes + dcr->ameta_block->buf_len + dcr->adata_block->buf_len; } else { size = dev->VolCatInfo.VolCatBytes + dcr->ameta_block->binbuf; } /* Limit maximum Volume size to value specified by user */ hit_max1 = (dev->max_volume_size > 0) && (size >= dev->max_volume_size); hit_max2 = (dev->VolCatInfo.VolCatMaxBytes > 0) && (size >= dev->VolCatInfo.VolCatMaxBytes); if (hit_max1) { max_size = dev->max_volume_size; } else { max_size = dev->VolCatInfo.VolCatMaxBytes; } if (hit_max1 || hit_max2) { if (!quiet) { Jmsg(dcr->jcr, M_INFO, 0, _("User defined maximum volume size %s will be exceeded on device %s.\n" " Marking Volume \"%s\" as Full.\n"), edit_uint64_with_commas(max_size, ed1), dev->print_name(), dev->getVolCatName()); } Dmsg4(100, "Maximum volume size %s exceeded Vol=%s device=%s.\n" "Marking Volume \"%s\" as Full.\n", edit_uint64_with_commas(max_size, ed1), dev->getVolCatName(), dev->print_name(), dev->getVolCatName()); rtn = true; } else if (is_pool_size_reached(dcr, quiet)) { rtn = true; } Dmsg1(dbglvl, "Return from is_user_volume_size_reached=%d\n", rtn); Leave(dbglvl); return rtn; } void reread_last_block(DCR *dcr) { #define CHECK_LAST_BLOCK #ifdef CHECK_LAST_BLOCK bool ok = true; DEVICE *dev = dcr->dev; JCR *jcr = dcr->jcr; DEV_BLOCK *ameta_block = dcr->ameta_block; DEV_BLOCK *adata_block = dcr->adata_block; DEV_BLOCK *block = dcr->block; /* * If the device is a tape and it supports backspace record, * we backspace over one or two eof marks depending on * how many we just wrote, then over the last record, * then re-read it and verify that the block number is * correct. */ if (dev->is_tape() && dev->has_cap(CAP_BSR)) { /* Now back up over what we wrote and read the last block */ if (!dev->bsf(1)) { berrno be; ok = false; Jmsg(jcr, M_ERROR, 0, _("Backspace file at EOT failed. 
ERR=%s\n"), be.bstrerror(dev->dev_errno)); } if (ok && dev->has_cap(CAP_TWOEOF) && !dev->bsf(1)) { berrno be; ok = false; Jmsg(jcr, M_ERROR, 0, _("Backspace file at EOT failed. ERR=%s\n"), be.bstrerror(dev->dev_errno)); } /* Backspace over record */ if (ok && !dev->bsr(1)) { berrno be; ok = false; Jmsg(jcr, M_ERROR, 0, _("Backspace record at EOT failed. ERR=%s\n"), be.bstrerror(dev->dev_errno)); /* * On FreeBSD systems, if the user got here, it is likely that his/her * tape drive is "frozen". The correct thing to do is a * rewind(), but if we do that, higher levels in cleaning up, will * most likely write the EOS record over the beginning of the * tape. The rewind *is* done later in mount.c when another * tape is requested. Note, the clrerror() call in bsr() * calls ioctl(MTCERRSTAT), which *should* fix the problem. */ } if (ok) { dev->new_dcr_blocks(dcr); /* Note, this can destroy dev->errmsg */ if (!dcr->read_block_from_dev(NO_BLOCK_NUMBER_CHECK)) { Jmsg(jcr, M_ERROR, 0, _("Re-read last block at EOT failed. ERR=%s"), dev->errmsg); } else { /* * If we wrote block and the block numbers don't agree * we have a possible problem. */ if (dcr->block->BlockNumber != dev->LastBlock) { if (dev->LastBlock > (dcr->block->BlockNumber + 1)) { Jmsg(jcr, M_FATAL, 0, _( "Re-read of last block: block numbers differ by more than one.\n" "Probable tape misconfiguration and data loss. Read block=%u Want block=%u.\n"), dcr->block->BlockNumber, dev->LastBlock); } else { Jmsg(jcr, M_ERROR, 0, _( "Re-read of last block OK, but block numbers differ. Read block=%u Want block=%u.\n"), dcr->block->BlockNumber, dev->LastBlock); } } else { Jmsg(jcr, M_INFO, 0, _("Re-read of last block succeeded.\n")); } } dev->free_dcr_blocks(dcr); dcr->ameta_block = ameta_block; dcr->block = block; dcr->adata_block = adata_block; } } #endif } /* * If this routine is called, we do our bookkeeping and * then assure that the volume will not be written any * more. */ bool terminate_writing_volume(DCR *dcr) { DEVICE *dev = dcr->dev; bool ok = true; bool was_adata = false; Enter(dbglvl); if (dev->is_ateot()) { Leave(dbglvl); return ok; /* already been here return now */ } /* Work with ameta device */ if (dev->adata) { dev->set_ateot(); /* no more writing this Volume */ dcr->adata_block->write_failed = true; dcr->set_ameta(); dev = dcr->ameta_dev; was_adata = true; } /* Create a JobMedia record to indicated end of medium */ dev->VolCatInfo.VolCatFiles = dev->get_file(); dev->VolCatInfo.VolLastPartBytes = dev->part_size; dev->VolCatInfo.VolCatParts = dev->part; if (!dir_create_jobmedia_record(dcr)) { Dmsg0(50, "Error from create JobMedia\n"); dev->dev_errno = EIO; Mmsg2(dev->errmsg, _("Could not create JobMedia record for Volume=\"%s\" Job=%s\n"), dev->getVolCatName(), dcr->jcr->Job); Jmsg(dcr->jcr, M_FATAL, 0, "%s", dev->errmsg); ok = false; } flush_jobmedia_queue(dcr->jcr); bstrncpy(dev->LoadedVolName, dev->VolCatInfo.VolCatName, sizeof(dev->LoadedVolName)); dcr->block->write_failed = true; if (dev->can_append() && !dev->weof(dcr, 1)) { /* end the tape */ dev->VolCatInfo.VolCatErrors++; Jmsg(dcr->jcr, M_ERROR, 0, _("Error writing final EOF to tape. 
Volume %s may not be readable.\n" "%s"), dev->VolCatInfo.VolCatName, dev->errmsg); ok = false; Dmsg0(50, "Error writing final EOF to volume.\n"); } if (ok) { ok = dev->end_of_volume(dcr); } Dmsg3(100, "Set VolCatStatus Full adata=%d size=%lld vol=%s\n", dev->adata, dev->VolCatInfo.VolCatBytes, dev->VolCatInfo.VolCatName); /* If still in append mode mark volume Full */ if (bstrcmp(dev->VolCatInfo.VolCatStatus, "Append")) { dev->setVolCatStatus("Full"); } char buf[128], buf2[128]; if (dev->device->set_vol_immutable || dev->device->set_vol_read_only) { uint32_t when = MAX(dev->device->min_volume_protection_time, dev->VolCatInfo.VolRetention); btime_t now = time(NULL); if (dev->set_atime(-1, dev->getVolCatName(), now + when, &dev->errmsg) < 0) { Jmsg(dcr->jcr, M_WARNING, 0, _(" Failed to set the atime retention on volume %s on device %s. %s.\n"), dev->getVolCatName(), dev->print_name(), dev->errmsg); } bstrftime(buf2, sizeof(buf2), now+when); strip_trailing_junk(edit_utime(when, buf, sizeof(buf))); dev->VolCatInfo.UseProtect = 1; } if (dev->device->set_vol_read_only) { /* Set volume as immutable/read only */ if (dev->set_readonly(dev->m_fd, dev->getVolCatName(), &dev->errmsg) < 0) { /* We may proceed with that but warn the user */ Jmsg(dcr->jcr, M_WARNING, 0, _("Failed to set the volume %s on device %s in read-only. %s.\n"), dev->getVolCatName(), dev->print_name(), dev->errmsg); } else { Jmsg(dcr->jcr, M_INFO, 0, _("Marking Volume \"%s\" as read-only. Retention set to %s (%s).\n"), dev->getVolCatName(), buf2, buf); dev->VolCatInfo.Protected = 1; events_send_msg(dcr->jcr, "SJ0003", EVENTS_TYPE_VOLUME, me->hdr.name, (intptr_t)dcr->jcr, "Mark Volume \"%s\" as read-only. Retention set to %s (%s).", dev->getVolCatName(), buf2, buf); } } if (dev->device->set_vol_immutable) { /* Set volume as immutable */ if (!dev->set_immutable(dev->getVolCatName(), &dev->errmsg)) { /* We may proceed with that but warn the user */ Jmsg(dcr->jcr, M_WARNING, 0, _("Failed to set the volume %s on device %s as immutable, %s.\n"), dev->getVolCatName(), dev->print_name(), dev->errmsg); } else { Jmsg(dcr->jcr, M_INFO, 0, _("Marking Volume \"%s\" as immutable. Retention set to %s (%s).\n"), dev->getVolCatName(), buf2, buf); events_send_msg(dcr->jcr, "SJ0003", EVENTS_TYPE_VOLUME, me->hdr.name, (intptr_t)dcr->jcr, "Mark Volume \"%s\" as immutable. 
Retention set to %s (%s).", dev->getVolCatName(), buf2, buf);; dev->VolCatInfo.Protected = 1; } } if (!dir_update_volume_info(dcr, false, true)) { Mmsg(dev->errmsg, _("Error sending Volume info to Director.\n")); ok = false; Dmsg0(50, "Error updating volume info.\n"); } Dmsg2(150, "dir_update_volume_info vol=%s to terminate writing -- %s\n", dev->getVolCatName(), ok?"OK":"ERROR"); dev->notify_newvol_in_attached_dcrs(NULL); /* Set new file/block parameters for current dcr */ set_new_file_parameters(dcr); if (ok && dev->has_cap(CAP_TWOEOF) && dev->can_append() && !dev->weof(dcr, 1)) { /* end the tape */ dev->VolCatInfo.VolCatErrors++; /* This may not be fatal since we already wrote an EOF */ if (dev->errmsg[0]) { Jmsg(dcr->jcr, M_ERROR, 0, "%s", dev->errmsg); } Dmsg0(50, "Writing second EOF failed.\n"); } dev->set_ateot(); /* no more writing this tape */ Dmsg2(150, "Leave terminate_writing_volume=%s -- %s\n", dev->getVolCatName(), ok?"OK":"ERROR"); if (was_adata) { dcr->set_adata(); } Leave(dbglvl); return ok; } /* * If a new volume has been mounted since our last write * Create a JobMedia record for the previous volume written, * and set new parameters to write this volume * The same applies for if we are in a new file. */ bool check_for_newvol_or_newfile(DCR *dcr) { JCR *jcr = dcr->jcr; if (dcr->NewVol || dcr->NewFile) { if (job_canceled(jcr)) { Dmsg0(100, "Canceled\n"); return false; } /* If we wrote on Volume create a last jobmedia record for this job */ if (!dcr->VolFirstIndex) { Dmsg7(100, "Skip JobMedia Vol=%s wrote=%d MediaId=%lld FI=%lu LI=%lu StartAddr=%lld EndAddr=%lld\n", dcr->VolumeName, dcr->WroteVol, dcr->VolMediaId, dcr->VolFirstIndex, dcr->VolLastIndex, dcr->StartAddr, dcr->EndAddr); } if (dcr->VolFirstIndex && !dir_create_jobmedia_record(dcr)) { dcr->dev->dev_errno = EIO; Jmsg2(jcr, M_FATAL, 0, _("Could not create JobMedia record for Volume=\"%s\" Job=%s\n"), dcr->getVolCatName(), jcr->Job); set_new_volume_parameters(dcr); Dmsg0(100, "cannot create media record\n"); return false; } if (dcr->NewVol) { Dmsg0(250, "Process NewVol\n"); flush_jobmedia_queue(jcr); /* Note, setting a new volume also handles any pending new file */ set_new_volume_parameters(dcr); } else { set_new_file_parameters(dcr); } } return true; } /* * Do bookkeeping when a new file is created on a Volume. This is * also done for disk files to generate the jobmedia records for * quick seeking. 
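 *
 * In outline (see the body below): create a JobMedia record so restore
 * can seek, push the updated file/part counters to the Director, and on
 * any failure call terminate_writing_volume() and fail with EIO.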
*/ bool do_new_file_bookkeeping(DCR *dcr) { DEVICE *dev = dcr->dev; JCR *jcr = dcr->jcr; /* Create a JobMedia record so restore can seek */ if (!dir_create_jobmedia_record(dcr)) { Dmsg0(40, "Error from create_job_media.\n"); dev->dev_errno = EIO; Jmsg2(jcr, M_FATAL, 0, _("Could not create JobMedia record for Volume=\"%s\" Job=%s\n"), dcr->getVolCatName(), jcr->Job); Dmsg0(40, "Call terminate_writing_volume\n"); terminate_writing_volume(dcr); dev->dev_errno = EIO; return false; } dev->VolCatInfo.VolCatFiles = dev->get_file(); dev->VolCatInfo.VolLastPartBytes = dev->part_size; dev->VolCatInfo.VolCatParts = dev->part; if (!dir_update_volume_info(dcr, false, false)) { Dmsg0(50, "Error from update_vol_info.\n"); Dmsg0(40, "Call terminate_writing_volume\n"); terminate_writing_volume(dcr); dev->dev_errno = EIO; return false; } Dmsg0(100, "dir_update_volume_info max file size -- OK\n"); dev->notify_newfile_in_attached_dcrs(); /* Set new file/block parameters for current dcr */ set_new_file_parameters(dcr); return true; } bacula-15.0.3/src/stored/record_util.c0000644000175000017500000002470514771010173017440 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * * record-util.c -- Utilities for record handling * * Kern Sibbald, October MMXII * */ #include "bacula.h" #include "stored.h" /* * Convert a FileIndex into a printable * ASCII string. Not reentrant. * If the FileIndex is negative, it flags the * record as a Label, otherwise it is simply * the FileIndex of the current file. */ const char *FI_to_ascii(char *buf, int fi) { if (fi >= 0) { sprintf(buf, "%d", fi); return buf; } switch (fi) { case PRE_LABEL: return "PRE_LABEL"; case VOL_LABEL: return "VOL_LABEL"; case EOM_LABEL: return "EOM_LABEL"; case SOS_LABEL: return "SOS_LABEL"; case EOS_LABEL: return "EOS_LABEL"; case EOT_LABEL: return "EOT_LABEL"; break; case SOB_LABEL: return "SOB_LABEL"; break; case EOB_LABEL: return "EOB_LABEL"; break; default: sprintf(buf, _("unknown: %d"), fi); return buf; } } /* * Convert a Stream ID into a printable * ASCII string. Not reentrant. * A negative stream number represents * stream data that is continued from a * record in the previous block. * If the FileIndex is negative, we are * dealing with a Label, hence the * stream is the JobId. 
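 *
 * Continuation items (negative stream values) are printed with a
 * "cont" prefix, e.g. "contDATA" for a continued STREAM_FILE_DATA
 * record, as in the first switch below.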
*/ const char *stream_to_ascii(char *buf, int stream, int fi) { if (fi < 0) { sprintf(buf, "%d", stream); return buf; } if (stream < 0) { stream = -stream; stream &= STREAMMASK_TYPE; /* Stream was negative => all are continuation items */ switch (stream) { case STREAM_UNIX_ATTRIBUTE_UPDATE: return _("contUATTRU"); case STREAM_UNIX_ATTRIBUTES: return "contUATTR"; case STREAM_FILE_DATA: return "contDATA"; case STREAM_WIN32_DATA: return "contWIN32-DATA"; case STREAM_WIN32_GZIP_DATA: return "contWIN32-GZIP"; case STREAM_WIN32_COMPRESSED_DATA: return "contWIN32-COMPRESSED"; case STREAM_MD5_DIGEST: return "contMD5"; case STREAM_SHA1_DIGEST: return "contSHA1"; case STREAM_GZIP_DATA: return "contGZIP"; case STREAM_COMPRESSED_DATA: return "contCOMPRESSED"; case STREAM_UNIX_ATTRIBUTES_EX: return "contUNIX-ATTR-EX"; case STREAM_RESTORE_OBJECT: return "contRESTORE-OBJECT"; case STREAM_PLUGIN_OBJECT: return "contPLUGIN-OBJECT"; case STREAM_PLUGIN_META_BLOB: return "convPLUGIN-METADATA-BINARY"; case STREAM_PLUGIN_META_CATALOG: return "convPLUGIN-METADATA-CATALOG"; case STREAM_SPARSE_DATA: return "contSPARSE-DATA"; case STREAM_SPARSE_GZIP_DATA: return "contSPARSE-GZIP"; case STREAM_SPARSE_COMPRESSED_DATA: return "contSPARSE-COMPRESSED"; case STREAM_PROGRAM_NAMES: return "contPROG-NAMES"; case STREAM_PROGRAM_DATA: return "contPROG-DATA"; case STREAM_MACOS_FORK_DATA: return "contMACOS-RSRC"; case STREAM_HFSPLUS_ATTRIBUTES: return "contHFSPLUS-ATTR"; case STREAM_SHA256_DIGEST: return "contSHA256"; case STREAM_SHA512_DIGEST: return "contSHA512"; case STREAM_SIGNED_DIGEST: return "contSIGNED-DIGEST"; case STREAM_ENCRYPTED_SESSION_DATA: return "contENCRYPTED-SESSION-DATA"; case STREAM_ENCRYPTED_FILE_DATA: return "contENCRYPTED-FILE"; case STREAM_ENCRYPTED_FILE_GZIP_DATA: return "contENCRYPTED-GZIP"; case STREAM_ENCRYPTED_FILE_COMPRESSED_DATA: return "contENCRYPTED-COMPRESSED"; case STREAM_ENCRYPTED_WIN32_DATA: return "contENCRYPTED-WIN32-DATA"; case STREAM_ENCRYPTED_WIN32_GZIP_DATA: return "contENCRYPTED-WIN32-GZIP"; case STREAM_ENCRYPTED_WIN32_COMPRESSED_DATA: return "contENCRYPTED-WIN32-COMPRESSED"; case STREAM_ENCRYPTED_MACOS_FORK_DATA: return "contENCRYPTED-MACOS-RSRC"; case STREAM_PLUGIN_NAME: return "contPLUGIN-NAME"; case STREAM_ADATA_BLOCK_HEADER: return "contADATA-BLOCK-HEADER"; case STREAM_ADATA_RECORD_HEADER: return "contADATA-RECORD-HEADER"; case STREAM_FILEEVENT: return _("FileEvent"); case STREAM_XXHASH64_DIGEST: return "contXXHASH64"; case STREAM_XXH3_64_DIGEST: return "contXXH3_63"; case STREAM_XXH3_128_DIGEST: return "contXXH3_128"; default: sprintf(buf, "%d", -stream); return buf; } } switch (stream & STREAMMASK_TYPE) { case STREAM_UNIX_ATTRIBUTES: return "UATTR"; case STREAM_FILE_DATA: return "DATA"; case STREAM_WIN32_DATA: return "WIN32-DATA"; case STREAM_WIN32_GZIP_DATA: return "WIN32-GZIP"; case STREAM_WIN32_COMPRESSED_DATA: return "WIN32-COMPRESSED"; case STREAM_MD5_DIGEST: return "MD5"; case STREAM_SHA1_DIGEST: return "SHA1"; case STREAM_GZIP_DATA: return "GZIP"; case STREAM_COMPRESSED_DATA: return "COMPRESSED"; case STREAM_UNIX_ATTRIBUTES_EX: return "UNIX-ATTR-EX"; case STREAM_RESTORE_OBJECT: return "RESTORE-OBJECT"; case STREAM_PLUGIN_OBJECT: return "PLUGIN-OBJECT"; case STREAM_PLUGIN_META_BLOB: return "PLUGIN-METADATA-BINARY"; case STREAM_PLUGIN_META_CATALOG: return "PLUGIN-METADATA-CATALOG"; case STREAM_SPARSE_DATA: return "SPARSE-DATA"; case STREAM_SPARSE_GZIP_DATA: return "SPARSE-GZIP"; case STREAM_SPARSE_COMPRESSED_DATA: return "SPARSE-COMPRESSED"; case STREAM_PROGRAM_NAMES: 
return "PROG-NAMES"; case STREAM_PROGRAM_DATA: return "PROG-DATA"; case STREAM_PLUGIN_NAME: return "PLUGIN-NAME"; case STREAM_MACOS_FORK_DATA: return "MACOS-RSRC"; case STREAM_HFSPLUS_ATTRIBUTES: return "HFSPLUS-ATTR"; case STREAM_SHA256_DIGEST: return "SHA256"; case STREAM_SHA512_DIGEST: return "SHA512"; case STREAM_SIGNED_DIGEST: return "SIGNED-DIGEST"; case STREAM_ENCRYPTED_SESSION_DATA: return "ENCRYPTED-SESSION-DATA"; case STREAM_ENCRYPTED_FILE_DATA: return "ENCRYPTED-FILE"; case STREAM_ENCRYPTED_FILE_GZIP_DATA: return "ENCRYPTED-GZIP"; case STREAM_ENCRYPTED_FILE_COMPRESSED_DATA: return "ENCRYPTED-COMPRESSED"; case STREAM_ENCRYPTED_WIN32_DATA: return "ENCRYPTED-WIN32-DATA"; case STREAM_ENCRYPTED_WIN32_GZIP_DATA: return "ENCRYPTED-WIN32-GZIP"; case STREAM_ENCRYPTED_WIN32_COMPRESSED_DATA: return "ENCRYPTED-WIN32-COMPRESSED"; case STREAM_ENCRYPTED_MACOS_FORK_DATA: return "ENCRYPTED-MACOS-RSRC"; case STREAM_ADATA_BLOCK_HEADER: return "ADATA-BLOCK-HEADER"; case STREAM_ADATA_RECORD_HEADER: return "ADATA-RECORD-HEADER"; case STREAM_FILEEVENT: return _("FileEvent"); case STREAM_XXHASH64_DIGEST: return "XXHASH64"; case STREAM_XXH3_64_DIGEST: return "XXH3_63"; case STREAM_XXH3_128_DIGEST: return "XXH3_128"; default: sprintf(buf, "%d", stream); return buf; } } const char *stream_to_ascii_ex(char *buf, int stream, int fi) { if (fi < 0) { return stream_to_ascii(buf, stream, fi); } int ustream = (stream>=0)?stream:-stream; const char *p = stream_to_ascii(buf, stream, fi); if (ustream & (STREAM_BIT_DEDUPLICATION_DATA|STREAM_BIT_NO_DEDUPLICATION)) { if (p!=buf) { strcpy(buf, p); } strcat(buf, "-"); if (ustream & STREAM_BIT_DEDUPLICATION_DATA) { strcat(buf, "D"); } if (ustream & STREAM_BIT_NO_DEDUPLICATION) { strcat(buf, "d"); } return buf; } else { return p; } } /* * Return a new record entity */ DEV_RECORD *new_record(void) { DEV_RECORD *rec; rec = (DEV_RECORD *)get_memory(sizeof(DEV_RECORD)); memset(rec, 0, sizeof(DEV_RECORD)); rec->data = get_pool_memory(PM_MESSAGE); rec->wstate = st_none; rec->rstate = st_none; return rec; } void empty_record(DEV_RECORD *rec) { rec->RecNum = 0; rec->StartAddr = rec->Addr = 0; rec->VolSessionId = rec->VolSessionTime = 0; rec->FileIndex = rec->Stream = 0; rec->data_len = rec->remainder = 0; rec->state_bits &= ~(REC_PARTIAL_RECORD|REC_ADATA_EMPTY|REC_BLOCK_EMPTY|REC_NO_MATCH|REC_CONTINUATION); rec->FileOffset = 0; rec->wstate = st_none; rec->rstate = st_none; rec->VolumeName = NULL; } /* * Free the record entity * */ void free_record(DEV_RECORD *rec) { Dmsg0(950, "Enter free_record.\n"); if (rec->data) { free_pool_memory(rec->data); } Dmsg0(950, "Data buf is freed.\n"); free_pool_memory((POOLMEM *)rec); Dmsg0(950, "Leave free_record.\n"); } void dump_record(DEV_RECORD *rec) { char buf[32]; Dmsg11(100|DT_VOLUME, "Dump record %s 0x%p:\n\tStart=%lld addr=%lld #%d\n" "\tVolSess: %ld:%ld\n" "\tFileIndex: %ld\n" "\tStream: 0x%lx\n\tLen: %ld\n\tData: %s\n", rec, NPRT(rec->VolumeName), rec->StartAddr, rec->Addr, rec->RecNum, rec->VolSessionId, rec->VolSessionTime, rec->FileIndex, rec->Stream, rec->data_len, asciidump(rec->data, rec->data_len, buf, sizeof(buf))); } /* * Test if we can write whole record to the block * * Returns: false on failure * true on success (all bytes can be written) */ bool can_write_record_to_block(DEV_BLOCK *block, DEV_RECORD *rec) { uint32_t remlen; remlen = block->buf_len - block->binbuf; if (rec->remainder == 0) { if (remlen >= WRITE_RECHDR_LENGTH) { remlen -= WRITE_RECHDR_LENGTH; rec->remainder = rec->data_len; } else { return false; } } else { 
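      /* rec->remainder is already non-zero here, i.e. this record is a
       * continuation of one split across blocks; such records are
       * apparently never considered writable in one piece by this check. */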
return false; } if (rec->remainder > 0 && remlen < rec->remainder) { return false; } return true; } uint64_t get_record_address(DEV_RECORD *rec) { return rec->Addr; } uint64_t get_record_start_address(DEV_RECORD *rec) { return rec->StartAddr; } bacula-15.0.3/src/stored/bcopy.c0000644000175000017500000002363014771010173016235 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * * Program to copy a Bacula from one volume to another. * * Kern E. Sibbald, October 2002 * */ #include "bacula.h" #include "stored.h" extern bool parse_sd_config(CONFIG *config, const char *configfile, int exit_code); /* Forward referenced functions */ static void get_session_record(DEVICE *dev, DEV_RECORD *rec, SESSION_LABEL *sessrec); static bool record_cb(DCR *dcr, DEV_RECORD *rec); /* Global variables */ static DEVICE *in_dev = NULL; static DEVICE *out_dev = NULL; static JCR *in_jcr; /* input jcr */ static JCR *out_jcr; /* output jcr */ static BSR *bsr = NULL; static const char *wd = "/tmp"; static bool list_records = false; static uint32_t records = 0; static uint32_t jobs = 0; static DEV_BLOCK *out_block; static SESSION_LABEL sessrec; static CONFIG *config; #define CONFIG_FILE "bacula-sd.conf" char *configfile = NULL; static void usage() { fprintf(stderr, _( PROG_COPYRIGHT "\n%sVersion: %s (%s)\n\n" "Usage: bcopy [-d debug_level] \n" " -b bootstrap specify a bootstrap file\n" " -c specify a Storage configuration file\n" " -d set debug level to \n" " -dt print timestamp in debug output\n" " -i specify input Volume names (separated by |)\n" " -o specify output Volume names (separated by |)\n" " -p proceed inspite of errors\n" " -v verbose\n" " -w specify working directory (default /tmp)\n" " -? 
print this message\n\n"), 2002, BDEMO, VERSION, BDATE); exit(1); } int main (int argc, char *argv[]) { int ch; char *iVolumeName = NULL; char *oVolumeName = NULL; bool ignore_label_errors = false; bool ok; BtoolsAskDirHandler askdir_handler; init_askdir_handler(&askdir_handler); setlocale(LC_ALL, ""); bindtextdomain("bacula", LOCALEDIR); textdomain("bacula"); init_stack_dump(); my_name_is(argc, argv, "bcopy"); lmgr_init_thread(); init_msg(NULL, NULL); while ((ch = getopt(argc, argv, "b:c:d:i:o:pvw:?")) != -1) { switch (ch) { case 'b': bsr = parse_bsr(NULL, optarg); break; case 'c': /* specify config file */ if (configfile != NULL) { free(configfile); } configfile = bstrdup(optarg); break; case 'd': /* debug level */ if (*optarg == 't') { dbg_timestamp = true; } else { debug_level = atoi(optarg); if (debug_level <= 0) { debug_level = 1; } } break; case 'i': /* input Volume name */ iVolumeName = optarg; break; case 'o': /* output Volume name */ oVolumeName = optarg; break; case 'p': ignore_label_errors = true; forge_on = true; break; case 'v': verbose++; break; case 'w': wd = optarg; break; case '?': default: usage(); } } argc -= optind; argv += optind; if (argc != 2) { Pmsg0(0, _("Wrong number of arguments: \n")); usage(); } OSDependentInit(); working_directory = wd; if (configfile == NULL) { configfile = bstrdup(CONFIG_FILE); } config = New(CONFIG()); parse_sd_config(config, configfile, M_ERROR_TERM); setup_me(); load_sd_plugins(me->plugin_directory); /* Setup and acquire input device for reading */ Dmsg0(100, "About to setup input jcr\n"); in_jcr = setup_jcr("bcopy", argv[0], bsr, iVolumeName, SD_READ, true/*read dedup data*/); /* read device */ if (!in_jcr) { exit(1); } in_jcr->ignore_label_errors = ignore_label_errors; in_dev = in_jcr->dcr->dev; if (!in_dev) { exit(1); } /* Setup output device for writing */ Dmsg0(100, "About to setup output jcr\n"); out_jcr = setup_jcr("bcopy", argv[1], bsr, oVolumeName, SD_APPEND); /* no acquire */ if (!out_jcr) { exit(1); } out_dev = out_jcr->dcr->dev; if (!out_dev) { exit(1); } Dmsg0(100, "About to acquire device for writing\n"); /* For we must now acquire the device for writing */ out_dev->rLock(false); if (!out_dev->open_device(out_jcr->dcr, OPEN_READ_WRITE)) { Emsg1(M_FATAL, 0, _("dev open failed: %s\n"), out_dev->errmsg); out_dev->Unlock(); exit(1); } out_dev->Unlock(); if (!acquire_device_for_append(out_jcr->dcr)) { free_jcr(in_jcr); exit(1); } out_block = out_jcr->dcr->block; ok = read_records(in_jcr->dcr, record_cb, mount_next_read_volume); if (ok || out_dev->can_write()) { if (!out_jcr->dcr->write_final_block_to_device()) { Pmsg0(000, _("Write of last block failed.\n")); } } Pmsg2(000, _("%u Jobs copied. %u records copied.\n"), jobs, records); free_jcr(in_jcr); free_jcr(out_jcr); in_dev->term(NULL); out_dev->term(NULL); return 0; } /* * read_records() calls back here for each record it gets */ static bool record_cb(DCR *in_dcr, DEV_RECORD *rec) { if (list_records) { Pmsg5(000, _("Record: SessId=%u SessTim=%u FileIndex=%d Stream=%d len=%u\n"), rec->VolSessionId, rec->VolSessionTime, rec->FileIndex, rec->Stream, rec->data_len); } /* * Check for Start or End of Session Record * */ if (rec->FileIndex < 0) { get_session_record(in_dcr->dev, rec, &sessrec); if (verbose > 1) { dump_label_record(in_dcr->dev, rec, 1/*verbose*/, false/*check err*/); } switch (rec->FileIndex) { case PRE_LABEL: Pmsg0(000, _("Volume is prelabeled. 
This volume cannot be copied.\n")); return false; case VOL_LABEL: Pmsg0(000, _("Volume label not copied.\n")); return true; case SOS_LABEL: if (bsr && rec->match_stat < 1) { /* Skipping record, because does not match BSR filter */ if (verbose) { Pmsg0(-1, _("Copy skipped. Record does not match BSR filter.\n")); } } else { jobs++; } break; case EOS_LABEL: if (bsr && rec->match_stat < 1) { /* Skipping record, because does not match BSR filter */ return true; } while (!write_record_to_block(out_jcr->dcr, rec)) { Dmsg2(150, "!write_record_to_block data_len=%d rem=%d\n", rec->data_len, rec->remainder); if (!out_jcr->dcr->write_block_to_device()) { Dmsg2(90, "Got write_block_to_dev error on device %s: ERR=%s\n", out_dev->print_name(), out_dev->bstrerror()); Jmsg(out_jcr, M_FATAL, 0, _("Cannot fixup device error. %s\n"), out_dev->bstrerror()); return false; } } if (!out_jcr->dcr->write_block_to_device()) { Dmsg2(90, "Got write_block_to_dev error on device %s: ERR=%s\n", out_dev->print_name(), out_dev->bstrerror()); Jmsg(out_jcr, M_FATAL, 0, _("Cannot fixup device error. %s\n"), out_dev->bstrerror()); return false; } return true; case EOM_LABEL: Pmsg0(000, _("EOM label not copied.\n")); return true; case EOT_LABEL: /* end of all tapes */ Pmsg0(000, _("EOT label not copied.\n")); return true; default: return true; } } /* Write record */ if (bsr && rec->match_stat < 1) { /* Skipping record, because does not match BSR filter */ return true; } records++; while (!write_record_to_block(out_jcr->dcr, rec)) { Dmsg2(150, "!write_record_to_block data_len=%d rem=%d\n", rec->data_len, rec->remainder); if (!out_jcr->dcr->write_block_to_device()) { Dmsg2(90, "Got write_block_to_dev error on device %s: ERR=%s\n", out_dev->print_name(), out_dev->bstrerror()); Jmsg(out_jcr, M_FATAL, 0, _("Cannot fixup device error. %s\n"), out_dev->bstrerror()); return false; } } return true; } static void get_session_record(DEVICE *dev, DEV_RECORD *rec, SESSION_LABEL *sessrec) { const char *rtype; memset(sessrec, 0, sizeof(SESSION_LABEL)); switch (rec->FileIndex) { case PRE_LABEL: rtype = _("Fresh Volume Label"); break; case VOL_LABEL: rtype = _("Volume Label"); unser_volume_label(dev, rec); break; case SOS_LABEL: rtype = _("Begin Job Session"); unser_session_label(sessrec, rec); break; case EOS_LABEL: rtype = _("End Job Session"); unser_session_label(sessrec, rec); break; case 0: case EOM_LABEL: rtype = _("End of Medium"); break; default: rtype = _("Unknown"); break; } Dmsg5(10, "%s Record: VolSessionId=%d VolSessionTime=%d JobId=%d DataLen=%d\n", rtype, rec->VolSessionId, rec->VolSessionTime, rec->Stream, rec->data_len); if (verbose) { Pmsg5(-1, _("%s Record: VolSessionId=%d VolSessionTime=%d JobId=%d DataLen=%d\n"), rtype, rec->VolSessionId, rec->VolSessionTime, rec->Stream, rec->data_len); } } bacula-15.0.3/src/stored/init_dev.c0000644000175000017500000004265614771010173016733 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Initialize a single device. 
* * This code was split from dev.c in January 2016 * * written by, Kern Sibbald, MM */ #include "bacula.h" #include "stored.h" #include /* Define possible extensions */ #if defined(HAVE_WIN32) #define DRV_EXT ".dll" #elif defined(HAVE_DARWIN_OS) #define DRV_EXT ".dylib" #else #define DRV_EXT ".so" #endif #ifndef RTLD_NOW #define RTLD_NOW 2 #endif /* Forward referenced functions */ extern "C" { typedef DEVICE *(*newDriver_t)(JCR *jcr, DEVRES *device); } static DEVICE *load_driver(JCR *jcr, DEVRES *device); /* * Driver item for driver table */ struct driver_item { const char *name; void *handle; newDriver_t newDriver; bool builtin; bool loaded; }; /* * Driver table. Must be in same order as the B_xxx_DEV type * name handle, builtin loaded */ static driver_item driver_tab[] = { /* name handle, newDriver builtin loaded */ {"file", NULL, NULL, true, true}, {"tape", NULL, NULL, true, true}, {"none", NULL, NULL, true, true}, /* deprecated was DVD */ {"fifo", NULL, NULL, true, true}, {"vtape", NULL, NULL, true, true}, {"ftp", NULL, NULL, true, true}, {"vtl", NULL, NULL, true, true}, {"none", NULL, NULL, true, true}, /* B_ADATA_DEV */ {"aligned", NULL, NULL, false, false}, {"none", NULL, NULL, true, true}, /* deprecated was old dedup */ {"null", NULL, NULL, true, true}, {"none", NULL, NULL, true, true}, /* deprecated B_VALIGNED_DEV */ {"none", NULL, NULL, true, true}, /* deprecated B_VDEDUP_DEV */ {"cloud", NULL, NULL, false, false}, {"dedup", NULL, NULL, false, false}, {NULL, NULL, NULL, false, false} }; static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; static pthread_mutex_t mutex_dev_init = PTHREAD_MUTEX_INITIALIZER; /* The alist should be created with not_owned_by_alist argument */ void sd_list_loaded_drivers(alist *list) { for(int i=0 ; driver_tab[i].name != NULL; i++) { if (driver_tab[i].loaded && !driver_tab[i].builtin) { list->append((void*)driver_tab[i].name); } } } /* * Allocate and initialize the DEVICE structure * Note, if dev is non-NULL, it is already allocated, * thus we neither allocate it nor free it. This allows * the caller to put the packet in shared memory. * * clone = true allows to create a clone of an existing device for special * purpose, like the vacuum * Note, for a tape, the device->device_name is the device name * (e.g. /dev/nst0), and for a file, the device name * is the directory in which the file will be placed. * */ DEVICE *init_dev(JCR *jcr, DEVRES *device, bool adata, bstatcollect *statcollector, bool clone) { struct stat statp; DEVICE *dev = NULL; uint32_t n_drivers; bool cloning = clone; /* && device->init_state=='R' */ /* Try to avoid concurrent initialization of the same device. 
* The initial initialization is sequential then no worry here, but if * it failed, any later use of the device trigger a new initialization attempt * that could be concurrent with each other */ if (!cloning) { P(mutex_dev_init); if (device->init_state) { /* It looks like the device is already initialized or being initialized */ V(mutex_dev_init); /* The initialization failed, return NULL, maybe device->dev != NULL now * but let the caller check for that and call init_dev() again if needed */ return NULL; } device->init_state = 'B'; // busy initializing V(mutex_dev_init); } generate_global_plugin_event(bsdGlobalEventDeviceInit, device); Dmsg1(150, "init_dev dev_type=%d\n", device->dev_type); /* If no device type specified, try to guess */ if (!device->dev_type) { /* Check that device is available */ if (stat(device->device_name, &statp) < 0) { berrno be; Jmsg3(jcr, M_ERROR, 0, _("[SE0017] Unable to stat device %s at %s: ERR=%s\n"), device->hdr.name, device->device_name, be.bstrerror()); goto try_again_later; } if (S_ISDIR(statp.st_mode)) { device->dev_type = B_FILE_DEV; } else if (S_ISCHR(statp.st_mode)) { device->dev_type = B_TAPE_DEV; } else if (S_ISFIFO(statp.st_mode)) { device->dev_type = B_FIFO_DEV; #ifdef USE_VTAPE /* must set DeviceType = Vtape * in normal mode, autodetection is disabled */ } else if (S_ISREG(statp.st_mode)) { device->dev_type = B_VTAPE_DEV; #endif } else if (!(device->cap_bits & CAP_REQMOUNT)) { Jmsg2(jcr, M_ERROR, 0, _("[SE0017] %s is an unknown device type. Must be tape or directory." " st_mode=%x\n"), device->device_name, statp.st_mode); goto try_again_later; } if (strcmp(device->device_name, "/dev/null") == 0) { device->dev_type = B_NULL_DEV; } } /* Count drivers */ for (n_drivers=0; driver_tab[n_drivers].name; n_drivers++) { }; Dmsg1(100, "Num drivers=%d\n", n_drivers); /* If invalid dev_type get out */ if (device->dev_type < 0 || device->dev_type > n_drivers) { Jmsg2(jcr, M_FATAL, 0, _("[SF0017] Invalid device type=%d name=\"%s\"\n"), device->dev_type, device->hdr.name); goto never_try_again; } Dmsg5(100, "loadable=%d type=%d loaded=%d name=%s handle=%p\n", !driver_tab[device->dev_type-1].builtin, device->dev_type, driver_tab[device->dev_type-1].loaded, driver_tab[device->dev_type-1].name, driver_tab[device->dev_type-1].handle); if (driver_tab[device->dev_type-1].builtin) { /* Built-in driver */ switch (device->dev_type) { #ifdef HAVE_WIN32 case B_TAPE_DEV: // dev = New(win_tape_dev); break; case B_ADATA_DEV: case B_ALIGNED_DEV: case B_FILE_DEV: dev = New(win_file_dev); break; case B_NULL_DEV: dev = New(win_file_dev); break; #else case B_VTAPE_DEV: dev = New(vtape); break; case B_TAPE_DEV: dev = New(tape_dev); break; case B_FILE_DEV: dev = New(file_dev); break; case B_NULL_DEV: dev = New(null_dev); break; case B_FIFO_DEV: dev = New(fifo_dev); break; #endif default: Jmsg2(jcr, M_FATAL, 0, _("[SF0017] Unknown device type=%d device=\"%s\"\n"), device->dev_type, device->hdr.name); goto never_try_again; } } else { /* Loadable driver */ dev = load_driver(jcr, device); } if (!dev) { goto try_again_later; } Dmsg1(100, "init_dev allocated: %p\n", dev); dev->adata = adata; /* Keep the device ID in the DEVICE struct to identify the hardware */ if (dev->is_file() && stat(dev->archive_name(), &statp) == 0) { dev->devno = statp.st_dev; } dev->device_generic_init(jcr, device); /* Do device specific initialization */ if (dev->device_specific_init(jcr, device)) { goto bailout; } dev->register_metrics(statcollector); if (!cloning) { P(mutex_dev_init); device->init_state = 'R'; 
// The device is ready V(mutex_dev_init); } return dev; bailout: if (dev != NULL) { dev->term(NULL); dev = NULL; } never_try_again: // This should never happens, no need to make a special case, just report // the problem and let it try again anyway try_again_later: if (!cloning) { P(mutex_dev_init); device->init_state = 0; // The initialization failed, try again later V(mutex_dev_init); } return NULL; } /* * Do all the generic initialization here. Same for all devices. */ void DEVICE::device_generic_init(JCR *jcr, DEVRES *device) { struct stat statp; DEVICE *dev = this; DCR *dcr = NULL; int errstat; uint32_t max_bs; dev->clear_slot(); /* unknown */ /* Copy user supplied device parameters from Resource */ dev->dev_name = get_memory(strlen(device->device_name)+1); pm_strcpy(dev->dev_name, device->device_name); dev->prt_name = get_memory(strlen(device->device_name) + strlen(device->hdr.name) + 20); /* We edit "Resource-name" (physical-name) */ Mmsg(dev->prt_name, "\"%s\" (%s)", device->hdr.name, device->device_name); Dmsg1(400, "Allocate dev=%s\n", dev->print_name()); dev->capabilities = device->cap_bits; dev->min_free_space = device->min_free_space; dev->min_block_size = device->min_block_size; dev->max_block_size = device->max_block_size; dev->max_volume_size = device->max_volume_size; dev->max_file_size = device->max_file_size; dev->padding_size = device->padding_size; dev->file_alignment = device->file_alignment; dev->max_concurrent_jobs = device->max_concurrent_jobs; dev->volume_capacity = device->volume_capacity; dev->max_rewind_wait = device->max_rewind_wait; dev->max_open_wait = device->max_open_wait; dev->vol_poll_interval = device->vol_poll_interval; dev->max_spool_size = device->max_spool_size; dev->drive_index = device->drive_index; dev->enabled = device->enabled; dev->autoselect = device->autoselect; dev->read_only = device->read_only; dev->dev_type = device->dev_type; dev->device = device; dev->crypto_device_ctx = NULL; if (dev->is_tape()) { /* No parts on tapes */ dev->max_part_size = 0; dev->max_vol_parts_num = 0; } else { dev->max_part_size = device->max_part_size; dev->max_vol_parts_num = device->max_vol_parts_num; } #ifndef DEVELOPER /* Sanity check */ if (dev->vol_poll_interval && dev->vol_poll_interval < 60) { dev->vol_poll_interval = 60; } #endif if (!device->dev) { /* The first time we create a DEVICE from the DEVRES, we keep a pointer * to the DEVICE accessible from the DEVRES. 
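 * Later init_dev() calls for the same resource (for example when cloning)
 * leave device->dev pointing at that first DEVICE.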
*/ device->dev = dev; } /* If the device requires mount : * - Check that the mount point is available * - Check that (un)mount commands are defined */ if (dev->is_file() && dev->requires_mount()) { if (!device->mount_point || stat(device->mount_point, &statp) < 0) { berrno be; dev->dev_errno = errno; Jmsg2(jcr, M_ERROR_TERM, 0, _("[SA0017] Unable to stat mount point %s: ERR=%s\n"), device->mount_point, be.bstrerror()); } if (!device->mount_command || !device->unmount_command) { Jmsg0(jcr, M_ERROR_TERM, 0, _("[SA0017] Mount and unmount commands must defined for a device which requires mount.\n")); } } /* Sanity check */ if (dev->max_block_size == 0) { max_bs = DEFAULT_BLOCK_SIZE; } else { max_bs = dev->max_block_size; } if (dev->min_block_size > max_bs) { Jmsg(jcr, M_ERROR_TERM, 0, _("[SA0017] Min block size > max on device %s\n"), dev->print_name()); } if (dev->max_block_size > MAX_BLOCK_LENGTH) { Jmsg3(jcr, M_ERROR, 0, _("[SA0017] Block size %u on device %s is too large, using default %u\n"), dev->max_block_size, dev->print_name(), DEFAULT_BLOCK_SIZE); dev->max_block_size = DEFAULT_BLOCK_SIZE; } if (dev->max_block_size % TAPE_BSIZE != 0) { Jmsg3(jcr, M_WARNING, 0, _("[SW0017] Max block size %u not multiple of device %s block size=%d.\n"), dev->max_block_size, dev->print_name(), TAPE_BSIZE); } if (dev->max_volume_size != 0 && dev->max_volume_size < (dev->max_block_size << 4)) { Jmsg(jcr, M_ERROR_TERM, 0, _("[SA0017] Max Vol Size < 8 * Max Block Size for device %s\n"), dev->print_name()); } dev->errmsg = get_pool_memory(PM_EMSG); *dev->errmsg = 0; if ((errstat = dev->init_mutex()) != 0) { berrno be; dev->dev_errno = errstat; Mmsg1(dev->errmsg, _("[SA0020] Unable to init mutex: ERR=%s\n"), be.bstrerror(errstat)); Jmsg0(jcr, M_ERROR_TERM, 0, dev->errmsg); } if ((errstat = pthread_cond_init(&dev->wait, NULL)) != 0) { berrno be; dev->dev_errno = errstat; Mmsg1(dev->errmsg, _("[SA0020] Unable to init cond variable: ERR=%s\n"), be.bstrerror(errstat)); Jmsg0(jcr, M_ERROR_TERM, 0, dev->errmsg); } if ((errstat = pthread_cond_init(&dev->wait_next_vol, NULL)) != 0) { berrno be; dev->dev_errno = errstat; Mmsg1(dev->errmsg, _("[SA0020] Unable to init cond variable: ERR=%s\n"), be.bstrerror(errstat)); Jmsg0(jcr, M_ERROR_TERM, 0, dev->errmsg); } if ((errstat = pthread_mutex_init(&dev->spool_mutex, NULL)) != 0) { berrno be; dev->dev_errno = errstat; Mmsg1(dev->errmsg, _("[SA0020] Unable to init spool mutex: ERR=%s\n"), be.bstrerror(errstat)); Jmsg0(jcr, M_ERROR_TERM, 0, dev->errmsg); } if ((errstat = dev->init_acquire_mutex()) != 0) { berrno be; dev->dev_errno = errstat; Mmsg1(dev->errmsg, _("[SA0020] Unable to init acquire mutex: ERR=%s\n"), be.bstrerror(errstat)); Jmsg0(jcr, M_ERROR_TERM, 0, dev->errmsg); } if ((errstat = dev->init_freespace_mutex()) != 0) { berrno be; dev->dev_errno = errstat; Mmsg1(dev->errmsg, _("[SA0020] Unable to init freespace mutex: ERR=%s\n"), be.bstrerror(errstat)); Jmsg0(jcr, M_ERROR_TERM, 0, dev->errmsg); } if ((errstat = dev->init_read_acquire_mutex()) != 0) { berrno be; dev->dev_errno = errstat; Mmsg1(dev->errmsg, _("[SA0020] Unable to init read acquire mutex: ERR=%s\n"), be.bstrerror(errstat)); Jmsg0(jcr, M_ERROR_TERM, 0, dev->errmsg); } if ((errstat = dev->init_volcat_mutex()) != 0) { berrno be; dev->dev_errno = errstat; Mmsg1(dev->errmsg, _("[SA0020] Unable to init volcat mutex: ERR=%s\n"), be.bstrerror(errstat)); Jmsg0(jcr, M_ERROR_TERM, 0, dev->errmsg); } if ((errstat = dev->init_dcrs_mutex()) != 0) { berrno be; dev->dev_errno = errstat; Mmsg1(dev->errmsg, _("[SA0020] 
Unable to init dcrs mutex: ERR=%s\n"), be.bstrerror(errstat)); Jmsg0(jcr, M_ERROR_TERM, 0, dev->errmsg); } dev->set_mutex_priorities(); dev->clear_opened(); dev->attached_dcrs = New(dlist(dcr, &dcr->dev_link)); Dmsg2(100, "init_dev: tape=%d dev_name=%s\n", dev->is_tape(), dev->dev_name); dev->initiated = true; } static DEVICE *load_driver(JCR *jcr, DEVRES *device) { POOL_MEM fname(PM_FNAME); DEVICE *dev; driver_item *drv; const char *slash; void *pHandle; int len; newDriver_t newDriver; P(mutex); if (!me->plugin_directory) { Jmsg2(jcr, M_FATAL, 0, _("[SF0017] Plugin directory not defined. Cannot load SD %s driver for device %s.\n"), driver_tab[device->dev_type - 1], device->hdr.name); V(mutex); return NULL; } len = strlen(me->plugin_directory); if (len == 0) { Jmsg0(jcr, M_FATAL, 0, _("[SF0017] Plugin directory not defined. Cannot load drivers.\n")); V(mutex); return NULL; } if (IsPathSeparator(me->plugin_directory[len - 1])) { slash = ""; } else { slash = "/"; } Dmsg5(100, "loadable=%d type=%d loaded=%d name=%s handle=%p\n", !driver_tab[device->dev_type-1].builtin, device->dev_type, driver_tab[device->dev_type-1].loaded, driver_tab[device->dev_type-1].name, driver_tab[device->dev_type-1].handle); drv = &driver_tab[device->dev_type - 1]; Mmsg(fname, "%s%sbacula-sd-%s-driver%s%s", me->plugin_directory, slash, drv->name, "-" VERSION, DRV_EXT); if (!drv->loaded) { Dmsg1(10, "Open SD driver at %s\n", fname.c_str()); pHandle = dlopen(fname.c_str(), RTLD_NOW); if (pHandle) { Dmsg2(100, "Driver=%s handle=%p\n", drv->name, pHandle); /* Get global entry point */ Dmsg1(10, "Lookup \"BaculaSDdriver\" in driver=%s\n", drv->name); newDriver = (newDriver_t)dlsym(pHandle, "BaculaSDdriver"); Dmsg2(10, "Driver=%s entry point=%p\n", drv->name, newDriver); if (!newDriver) { const char *error = dlerror(); Jmsg(NULL, M_ERROR, 0, _("[SE0018] Lookup of symbol \"BaculaSDdriver\" in driver %s for device %s failed: ERR=%s\n"), device->hdr.name, fname.c_str(), NPRT(error)); Dmsg2(10, "Lookup of symbol \"BaculaSDdriver\" driver=%s failed: ERR=%s\n", fname.c_str(), NPRT(error)); dlclose(pHandle); V(mutex); return NULL; } drv->handle = pHandle; drv->loaded = true; drv->newDriver = newDriver; } else { /* dlopen failed */ const char *error = dlerror(); Jmsg3(jcr, M_FATAL, 0, _("[SF0018] dlopen of SD driver=%s at %s failed: ERR=%s\n"), drv->name, fname.c_str(), NPRT(error)); Dmsg2(0, "dlopen plugin %s failed: ERR=%s\n", fname.c_str(), NPRT(error)); V(mutex); return NULL; } } else { Dmsg1(10, "SD driver=%s is already loaded.\n", drv->name); } /* Call driver initialization */ dev = drv->newDriver(jcr, device); V(mutex); return dev; } bacula-15.0.3/src/stored/lock.c0000644000175000017500000003563214771010173016056 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * Collection of Bacula Storage daemon locking software * * Kern Sibbald, June 2007 * */ #include "bacula.h" /* pull in global headers */ #include "stored.h" /* pull in Storage Deamon headers */ #ifdef SD_DEBUG_LOCK const int dbglvl = DT_LOCK|30; #else const int dbglvl = DT_LOCK|50; #endif /* * * The Storage daemon has three locking concepts that must be * understood: * * 1. dblock blocking the device, which means that the device * is "marked" in use. When setting and removing the block, the device is locked, but after dblock is called the device is unlocked. * 2. Lock() simple mutex that locks the device structure. A Lock * can be acquired while a device is blocked if it is not * locked. * 3. rLock(locked) "recursive" Lock, when means that a Lock (mutex) * will be acquired on the device if it is not blocked * by some other thread. If the device was blocked by * the current thread, it will acquire the lock. * If some other thread has set a block on the device, * this call will wait until the device is unblocked. * Can be called with locked true, which means the * Lock is already set * * A lock is normally set when modifying the device structure. * A rLock is normally acquired when you want to block the device * i.e. it will wait until the device is not blocked. * A block is normally set during long operations like writing to * the device. * If you are writing the device, you will normally block and * lock it. * A lock cannot be violated. No other thread can touch the * device while a lock is set. * When a block is set, every thread accept the thread that set * the block will block if rLock is called. * A device can be blocked for multiple reasons, labeling, writing, * acquiring (opening) the device, waiting for the operator, unmounted, * ... * Under certain conditions the block that is set on a device can be * stolen and the device can be used by another thread. For example, * a device is blocked because it is waiting for the operator to * mount a tape. The operator can then unmount the device, and label * a tape, re-mount it, give back the block, and the job will continue. * * * Functions: * * DEVICE::Lock() does P(m_mutex) (in dev.h) * DEVICE::Unlock() does V(m_mutex) * * DEVICE::rLock(locked) allows locking the device when this thread * already has the device blocked. * if (!locked) * Lock() * if blocked and not same thread that locked * pthread_cond_wait * leaves device locked * * DEVICE::rUnlock() unlocks but does not unblock * same as Unlock(); * * DEVICE::dblock(why) does * rLock(); (recursive device lock) * block_device(this, why) * rUnlock() * * DEVICE::dunblock does * Lock() * unblock_device() * Unlock() * * block_device() does (must be locked and not blocked at entry) * set blocked status * set our pid * * unblock_device() does (must be blocked at entry) * (locked on entry) * (locked on exit) * set unblocked status * clear pid * if waiting threads * pthread_cond_broadcast * * obtain_device_block() does (must be locked and blocked at entry) * save status * set new blocked status * set new pid * * give_back_device_block() does (must be blocked and locked) * reset blocked status * save previous blocked * reset pid * if waiting threads * pthread_cond_broadcast * */ void DEVICE::dblock(int why) { rLock(false); /* need recursive lock to block */ block_device(this, why); rUnlock(); } void DEVICE::dunblock(bool locked) { if (!locked) { Lock(); } unblock_device(this); Unlock(); } /* * Debug DEVICE locks N.B. 
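 *
 * Illustrative usage sketch only (not taken from the original sources; a
 * minimal, hypothetical pattern based on the rules described above):
 *
 *   dev->dblock(BST_WRITING_LABEL);   // mark device in use, then unlock it
 *   dev->Lock();                      // short mutex hold on the structure
 *   ...modify DEVICE fields, write the label...
 *   dev->Unlock();
 *   dev->dunblock(false);             // wake threads waiting in rLock()
 *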
* */ #ifdef DEV_DEBUG_LOCK void DEVICE::dbg_Lock(const char *file, int line) { Dmsg4(sd_dbglvl, "Lock %s from %s:%d precnt=%d\n", device->hdr.name, file, line, m_count); bthread_mutex_lock_p(&m_mutex, file, line); m_pid = pthread_self(); m_count++; } void DEVICE::dbg_Unlock(const char *file, int line) { m_count--; clear_thread_id(m_pid); Dmsg4(sd_dbglvl, "Unlock %s from %s:%d postcnt=%d\n", device->hdr.name, file, line, m_count); bthread_mutex_unlock_p(&m_mutex, file, line); } void DEVICE::dbg_rUnlock(const char *file, int line) { Dmsg2(sd_dbglvl, "rUnlock from %s:%d\n", file, line); dbg_Unlock(file, line); } #else /* * DEVICE locks N.B. * */ void DEVICE::rUnlock() { Unlock(); } void DEVICE::Lock() { P(m_mutex); } void DEVICE::Unlock() { V(m_mutex); } #endif /* DEV_DEBUG_LOCK */ /* * This is a recursive lock that checks if the device is blocked. * * When blocked is set, all threads EXCEPT thread with id no_wait_id * must wait. The no_wait_id thread is out obtaining a new volume * and preparing the label. */ #ifdef DEV_DEBUG_LOCK void DEVICE::dbg_rLock(const char *file, int line, bool locked) { Dmsg3(sd_dbglvl, "Enter rLock blked=%s from %s:%d\n", print_blocked(), file, line); if (!locked) { /* lockmgr version of P(m_mutex) */ Dmsg4(sd_dbglvl, "Lock %s in rLock %s from %s:%d\n", device->hdr.name, print_blocked(), file, line); bthread_mutex_lock_p(&m_mutex, file, line); m_count++; } if (blocked() && !pthread_equal(no_wait_id, pthread_self())) { num_waiting++; /* indicate that I am waiting */ while (blocked()) { int stat; Dmsg5(sd_dbglvl, "Blocked by %d %s in rLock blked=%s no_wait=%p me=%p\n", blocked_by, device->hdr.name, print_blocked(), no_wait_id, bthread_get_thread_id()); if ((stat = bthread_cond_wait_p(&this->wait, &m_mutex, file, line)) != 0) { berrno be; this->dbg_Unlock(file, line); Emsg1(M_ABORT, 0, _("pthread_cond_wait failure. ERR=%s\n"), be.bstrerror(stat)); } } num_waiting--; /* no longer waiting */ } } #else /* DEV_DEBUG_LOCK */ void DEVICE::rLock(bool locked) { if (!locked) { Lock(); m_count++; } if (blocked() && !pthread_equal(no_wait_id, pthread_self())) { num_waiting++; /* indicate that I am waiting */ while (blocked()) { int stat; Dmsg5(sd_dbglvl, "Blocked by %d rLock %s blked=%s no_wait=%p me=%p\n", blocked_by, device->hdr.name, print_blocked(), no_wait_id, bthread_get_thread_id()); if ((stat = pthread_cond_wait(&this->wait, &m_mutex)) != 0) { berrno be; this->Unlock(); Emsg1(M_ABORT, 0, _("pthread_cond_wait failure. 
ERR=%s\n"), be.bstrerror(stat)); } } num_waiting--; /* no longer waiting */ } } #endif /* DEV_DEBUG_LOCK */ #ifdef SD_DEBUG_LOCK void DEVICE::dbg_Lock_acquire(const char *file, int line) { Dmsg2(sd_dbglvl, "Lock_acquire from %s:%d\n", file, line); bthread_mutex_lock_p(&acquire_mutex, file, line); } void DEVICE::dbg_Unlock_acquire(const char *file, int line) { Dmsg2(sd_dbglvl, "Unlock_acquire from %s:%d\n", file, line); bthread_mutex_unlock_p(&acquire_mutex, file, line); } void DEVICE::dbg_Lock_read_acquire(const char *file, int line) { Dmsg2(sd_dbglvl, "Lock_read_acquire from %s:%d\n", file, line); bthread_mutex_lock_p(&read_acquire_mutex, file, line); } void DEVICE::dbg_Unlock_read_acquire(const char *file, int line) { Dmsg2(sd_dbglvl, "Unlock_read_acquire from %s:%d\n", file, line); bthread_mutex_unlock_p(&read_acquire_mutex, file, line); } void DEVICE::dbg_Lock_VolCatInfo(const char *file, int line) { bthread_mutex_lock_p(&volcat_mutex, file, line); } void DEVICE::dbg_Unlock_VolCatInfo(const char *file, int line) { bthread_mutex_unlock_p(&volcat_mutex, file, line); } #else void DEVICE::Lock_acquire() { P(acquire_mutex); } void DEVICE::Unlock_acquire() { V(acquire_mutex); } void DEVICE::Lock_read_acquire() { P(read_acquire_mutex); } void DEVICE::Unlock_read_acquire() { V(read_acquire_mutex); } void DEVICE::Lock_VolCatInfo() { P(volcat_mutex); } void DEVICE::Unlock_VolCatInfo() { V(volcat_mutex); } #endif /* Main device access control */ int DEVICE::init_mutex() { return pthread_mutex_init(&m_mutex, NULL); } /* Mutex around the freespace command */ int DEVICE::init_freespace_mutex() { return pthread_mutex_init(&freespace_mutex, NULL); } /* Write device acquire mutex */ int DEVICE::init_acquire_mutex() { return pthread_mutex_init(&acquire_mutex, NULL); } /* Read device acquire mutex */ int DEVICE::init_read_acquire_mutex() { return pthread_mutex_init(&read_acquire_mutex, NULL); } /* VolCatInfo mutex */ int DEVICE::init_volcat_mutex() { return pthread_mutex_init(&volcat_mutex, NULL); } /* dcrs mutex */ int DEVICE::init_dcrs_mutex() { return pthread_mutex_init(&dcrs_mutex, NULL); } /* Set order in which device locks must be acquired */ void DEVICE::set_mutex_priorities() { /* Ensure that we respect this order in P/V operations */ bthread_mutex_set_priority(&m_mutex, PRIO_SD_DEV_ACCESS); bthread_mutex_set_priority(&spool_mutex, PRIO_SD_DEV_SPOOL); bthread_mutex_set_priority(&acquire_mutex, PRIO_SD_DEV_ACQUIRE); } int DEVICE::next_vol_timedwait(const struct timespec *timeout) { return pthread_cond_timedwait(&wait_next_vol, &m_mutex, timeout); } /* * Block all other threads from using the device * Device must already be locked. After this call, * the device is blocked to any thread calling dev->rLock(), * but the device is not locked (i.e. no P on device). Also, * the current thread can do slip through the dev->rLock() * calls without blocking. */ void _block_device(const char *file, int line, DEVICE *dev, int state) { ASSERT2(dev->blocked() == BST_NOT_BLOCKED, "Block request of device already blocked"); dev->set_blocked(state); /* make other threads wait */ dev->no_wait_id = pthread_self(); /* allow us to continue */ dev->blocked_by = get_jobid_from_tsd(); Dmsg4(sd_dbglvl, "Blocked %s %s from %s:%d\n", dev->device->hdr.name, dev->print_blocked(), file, line); } /* * Unblock the device, and wake up anyone who went to sleep. 
* Enter: device locked * Exit: device locked */ void _unblock_device(const char *file, int line, DEVICE *dev) { Dmsg4(sd_dbglvl, "Unblocked %s %s from %s:%d\n", dev->device->hdr.name, dev->print_blocked(), file, line); ASSERT2(dev->blocked(), "Unblock request of device not blocked"); dev->set_blocked(BST_NOT_BLOCKED); dev->blocked_by = 0; clear_thread_id(dev->no_wait_id); if (dev->num_waiting > 0) { pthread_cond_broadcast(&dev->wait); /* wake them up */ } } static pthread_mutex_t block_mutex = PTHREAD_MUTEX_INITIALIZER; /* * Enter and leave with device locked * * Note: actually this routine: * returns true if it can either set or steal the device block * returns false if it cannot block the device */ bool DEVICE::_obtain_device_block(const char *file, int line, bsteal_lock_t *hold, int retry, int state) { int ret; int r = retry; if (!can_obtain_block() && !pthread_equal(no_wait_id, pthread_self())) { num_waiting++; /* indicate that I am waiting */ while ((retry == 0 || r-- > 0) && !can_obtain_block()) { if ((ret = bthread_cond_wait_p(&wait, &m_mutex, file, line)) != 0) { berrno be; Emsg1(M_ABORT, 0, _("pthread_cond_wait failure. ERR=%s\n"), be.bstrerror(ret)); } } num_waiting--; /* no longer waiting */ } P(block_mutex); Dmsg4(sd_dbglvl, "Steal lock %s old=%s from %s:%d\n", device->hdr.name, print_blocked(), file, line); if (!can_obtain_block() && !pthread_equal(no_wait_id, pthread_self())) { V(block_mutex); return false; } hold->dev_blocked = blocked(); hold->dev_prev_blocked = dev_prev_blocked; hold->no_wait_id = no_wait_id; hold->blocked_by = blocked_by; set_blocked(state); Dmsg1(sd_dbglvl, "steal block. new=%s\n", print_blocked()); no_wait_id = pthread_self(); blocked_by = get_jobid_from_tsd(); V(block_mutex); return true; } /* * Enter with device blocked and locked by us * Exit with device locked, and blocked by previous owner */ void _give_back_device_block(const char *file, int line, DEVICE *dev, bsteal_lock_t *hold) { Dmsg4(sd_dbglvl, "Return lock %s old=%s from %s:%d\n", dev->device->hdr.name, dev->print_blocked(), file, line); P(block_mutex); dev->set_blocked(hold->dev_blocked); dev->dev_prev_blocked = hold->dev_prev_blocked; dev->no_wait_id = hold->no_wait_id; dev->blocked_by = hold->blocked_by; Dmsg1(sd_dbglvl, "return lock. new=%s\n", dev->print_blocked()); if (dev->num_waiting > 0) { pthread_cond_broadcast(&dev->wait); /* wake them up */ } V(block_mutex); } const char *DEVICE::print_blocked() const { switch (m_blocked) { case BST_NOT_BLOCKED: return "BST_NOT_BLOCKED"; case BST_UNMOUNTED: return "BST_UNMOUNTED"; case BST_WAITING_FOR_SYSOP: return "BST_WAITING_FOR_SYSOP"; case BST_DOING_ACQUIRE: return "BST_DOING_ACQUIRE"; case BST_WRITING_LABEL: return "BST_WRITING_LABEL"; case BST_UNMOUNTED_WAITING_FOR_SYSOP: return "BST_UNMOUNTED_WAITING_FOR_SYSOP"; case BST_MOUNT: return "BST_MOUNT"; case BST_DESPOOLING: return "BST_DESPOOLING"; case BST_RELEASING: return "BST_RELEASING"; default: return _("unknown blocked code"); } } /* * Check if the device is blocked or not */ bool DEVICE::is_device_unmounted() { bool stat; int blk = blocked(); stat = (blk == BST_UNMOUNTED) || (blk == BST_UNMOUNTED_WAITING_FOR_SYSOP); return stat; } bacula-15.0.3/src/stored/hello.c0000644000175000017500000003763214771010173016233 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. 
You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Hello routines for Storage daemon. * * This file contains all the code relating to reading and writing of * all Hello commands between the daemons. * * Written by Kern Sibbald, June 2014 * */ #include "bacula.h" #include "stored.h" extern STORES *me; /* our Global resource */ const int dbglvl = 50; static char hello_sd[] = "Hello Bacula SD: Start Job %s %d %d tlspsk=%d\n"; static char Sorry[] = "3999 No go\n"; static char OK_hello[] = "3000 OK Hello %d\n"; /* We store caps in a special structure to update JCR * only after the auth phase */ class caps_fd { public: int32_t fd_dedup; int32_t fd_rehydration; caps_fd() : fd_dedup(0), fd_rehydration(0) {}; bool scan(const char *msg) { return sscanf(msg, "fdcaps: dedup=%ld rehydration=%ld", &fd_dedup, &fd_rehydration) == 2; } /* Set variable that we got in JCR */ void set(JCR *jcr) { jcr->fd_dedup = fd_dedup; jcr->fd_rehydration = fd_rehydration; }; }; static bool recv_fdcaps(JCR *jcr, BSOCK *cl, caps_fd *caps); static bool send_sdcaps(JCR *jcr, BSOCK *cl); static bool recv_sdcaps(JCR *jcr, BSOCK *cl); static bool send_fdcaps(JCR *jcr, BSOCK *cl); /********************************************************************* * * Validate hello from the Director. * * Returns: true if Hello is good. * false if Hello is bad. */ bool validate_dir_hello(JCR* jcr) { POOLMEM *dirname; DIRRES *director = NULL; int dir_version = 0; BSOCK *dir = jcr->dir_bsock; if (dir->msglen < 25 || dir->msglen > 500) { Dmsg3(dbglvl, "Bad Hello command from Director at %s:%s. Len=%d.\n", dir->who(), dir->host(), dir->msglen); Qmsg3(jcr, M_SECURITY, 0, _("Bad Hello command from Director at %s:%s. Len=%d.\n"), dir->who(), dir->host(), dir->msglen); sleep(5); return false; } dirname = get_pool_memory(PM_MESSAGE); dirname = check_pool_memory_size(dirname, dir->msglen); dir->tlspsk_remote = 0; if (scan_string(dir->msg, "Hello SD: Bacula Director %127s calling %d tlspsk=%d", dirname, &dir_version, &dir->tlspsk_remote) != 3 && scan_string(dir->msg, "Hello SD: Bacula Director %127s calling %d", dirname, &dir_version) != 2 && scan_string(dir->msg, "Hello SD: Bacula Director %127s calling", dirname) != 1) { dir->msg[100] = 0; Dmsg3(dbglvl, "Bad Hello command from Director at %s:%s. Msg=%s\n", dir->who(), dir->host(), dir->msg); Qmsg3(jcr, M_SECURITY, 0, _("Bad Hello command from Director at %s:%s. 
Msg=%s\n"), dir->who(), dir->host(), dir->msg); free_pool_memory(dirname); sleep(5); return false; } if (dir_version >= 1 && me->comm_compression) { dir->set_compress(); } else { dir->clear_compress(); Dmsg0(050, "**** No SD compression to Dir\n"); } director = NULL; unbash_spaces(dirname); foreach_res(director, R_DIRECTOR) { if (strcasecmp(director->hdr.name, dirname) == 0) { break; } } if (!director) { Dmsg3(dbglvl, "Connection from unknown Director %s at %s:%s rejected.\n", dirname, dir->who(), dir->host()); Qmsg3(jcr, M_SECURITY, 0, _("Connection from unknown Director %s at %s:%s rejected.\n" "Please see " MANUAL_AUTH_URL " for help.\n"), dirname, dir->who(), dir->host()); events_send_msg(jcr, "SS0001", EVENTS_TYPE_SECURITY, dir->host(), (intptr_t)jcr, "Authentication failed from %s", dir->host()); free_pool_memory(dirname); sleep(5); return false; } jcr->director = director; free_pool_memory(dirname); return true; } /* * After receiving a connection (in dircmd.c) if it is * from the File daemon, this routine is called. */ void handle_client_connection(BSOCK *fd) { JCR *jcr; int fd_version = 0; int sd_version = 0; char job_name[500]; caps_fd fdcaps; /* * Do a sanity check on the message received */ if (fd->msglen < 25 || fd->msglen > (int)sizeof(job_name)) { Pmsg1(000, "msg); Qmsg3(NULL, M_SECURITY, 0, _("Invalid connection from %s:%s. Len=%d\n"), fd->who(), fd->host(), fd->msglen); bmicrosleep(5, 0); /* make user wait 5 seconds */ fd->destroy(); return; } Dmsg1(dbglvl, "authenticate: %s", fd->msg); /* * See if this is a File daemon connection. If so * call FD handler. */ fd->tlspsk_remote = 0; if (scan_string(fd->msg, "Hello Bacula SD: Start Job %127s %d %d tlspsk=%d", job_name, &fd_version, &sd_version, &fd->tlspsk_remote) != 4 && scan_string(fd->msg, "Hello Bacula SD: Start Job %127s %d %d", job_name, &fd_version, &sd_version) != 3 && scan_string(fd->msg, "Hello Bacula SD: Start Job %127s %d tlspsk=%d", job_name, &fd_version, &fd->tlspsk_remote) != 3 && scan_string(fd->msg, "Hello Bacula SD: Start Job %127s %d", job_name, &fd_version) != 2 && scan_string(fd->msg, "Hello FD: Bacula Storage calling Start Job %127s %d", job_name, &sd_version) != 2 && scan_string(fd->msg, "Hello Start Job %127s", job_name) != 1) { Qmsg3(NULL, M_SECURITY, 0, _("Invalid Hello from %s:%s. Len=%d\n"), fd->who(), fd->host(), fd->msglen); sleep(5); fd->destroy(); return; } if (!(jcr=get_jcr_by_full_name(job_name))) { Qmsg1(NULL, M_SECURITY, 0, _("Client connect failed: Job name not found: %s\n"), job_name); Dmsg1(3, "**** Job \"%s\" not found.\n", job_name); sleep(5); fd->destroy(); return; } /* After this point, we can use bail_out */ Dmsg1(100, "Found Client Job %s\n", job_name); if (jcr->authenticated) { Jmsg4(jcr, M_SECURITY, 0, _("A Client %s:%s tried to authenticate for Job %s, " "but the Job is already authenticated with \"%s\".\n"), fd->who(), fd->host(), jcr->Job, jcr->file_bsock?jcr->file_bsock->who():"N/A"); Dmsg2(050, "Hey!!!! 
JobId %u Job %s already authenticated.\n", (uint32_t)jcr->JobId, jcr->Job); goto bail_out; } fd->set_jcr(jcr); Dmsg2(050, "fd_version=%d sd_version=%d\n", fd_version, sd_version); /* Turn on compression for newer FDs, except for community version */ if ((fd_version >= 9 || sd_version >= 1) && fd_version != 213 && me->comm_compression) { fd->set_compress(); /* set compression allowed */ } else { fd->clear_compress(); Dmsg0(050, "*** No SD compression to FD\n"); } /* The community version 213 doesn't send caps, the 215 version might do it */ if (fd_version >= 12 && fd_version != 213 && fd_version != 214) { /* Send and get capabilities */ if (!send_sdcaps(jcr, fd)) { goto bail_out; } if (!recv_fdcaps(jcr, fd, &fdcaps)) { goto bail_out; } } /* * Authenticate the Client (FD or SD) */ jcr->lock_auth(); /* Ensure that only one thread is dealing with auth */ if (jcr->authenticated) { Jmsg3(jcr, M_SECURITY, 0, _("A Client %s:%s tried to authenticate for Job %s, " "but the job is already authenticated.\n"), fd->who(), fd->host(), jcr->Job); } else if (!authenticate_filed(jcr, fd, fd_version)) { Dmsg1(50, "Authentication failed Job %s\n", jcr->Job); /* Job not yet started, we can cancel */ Jmsg(jcr, M_SECURITY, 0, _("Unable to authenticate File daemon\n")); } else { Dmsg2(050, "OK Authentication jid=%u Job %s\n", (uint32_t)jcr->JobId, jcr->Job); jcr->file_bsock = fd; jcr->FDVersion = fd_version; jcr->SDVersion = sd_version; fdcaps.set(jcr); jcr->authenticated = true; if (sd_version > 0) { jcr->sd_client = true; } } jcr->unlock_auth(); if (!jcr->authenticated) { jcr->setJobStatus(JS_ErrorTerminated); } Dmsg4(050, "=== Auth %s, unblock Job %s jid=%d sd_ver=%d\n", jcr->authenticated?"OK":"KO", job_name, jcr->JobId, sd_version); bail_out: /* file_bsock might be NULL or a previous BSOCK */ if (jcr->file_bsock != fd) { fd->destroy(); } pthread_cond_signal(&jcr->job_start_wait); /* wake waiting job */ free_jcr(jcr); if (!jcr->authenticated) { sleep(5); } return; } bool is_client_connection(BSOCK *bs) { return scan_string(bs->msg, "Hello Bacula SD: Start Job ") == 0 || scan_string(bs->msg, "Hello FD: Bacula Storage calling Start Job ") == 0 || scan_string(bs->msg, "Hello Start Job ") == 0; } /* * If sd_calls_client, we must read the client's response to * the hello we previously sent. */ bool read_client_hello(JCR *jcr) { int i; int stat; int fd_version = 0; int sd_version = 0; BSOCK *cl = jcr->file_bsock; char job_name[500]; caps_fd fdcaps; /* We connected to Client, so finish work */ if (!cl) { Jmsg0(jcr, M_FATAL, 0, _("Client socket not open. Could not connect to Client.\n")); Dmsg0(050, "Client socket not open. Could not connect to Client.\n"); return false; } /* Get response to Hello command sent earlier */ Dmsg0(050, "Read Hello command from Client\n"); for (i=0; i<60; i++) { stat = cl->recv(); if (stat <= 0) { bmicrosleep(1, 0); } else { break; } } if (stat <= 0) { berrno be; Jmsg1(jcr, M_FATAL, 0, _("Recv request to Client failed. ERR=%s\n"), be.bstrerror()); Dmsg1(050, _("Recv request to Client failed. 
ERR=%s\n"), be.bstrerror()); return false; } Dmsg1(dbglvl, "authenticate: %s", cl->msg); cl->tlspsk_remote = 0; if (scan_string(cl->msg, "Hello Bacula SD: Start Job %127s %d %d tlspsk=%d", job_name, &fd_version, &sd_version, &cl->tlspsk_remote) != 4 && scan_string(cl->msg, "Hello Bacula SD: Start Job %127s %d tlspsk=%d", job_name, &fd_version, &cl->tlspsk_remote) != 3 && scan_string(cl->msg, "Hello Bacula SD: Start Job %127s %d %d", job_name, &fd_version, &sd_version) != 3 && scan_string(cl->msg, "Hello Bacula SD: Start Job %127s %d", job_name, &fd_version) != 2) { Jmsg1(jcr, M_FATAL, 0, _("Bad Hello from Client: %s.\n"), cl->msg); Dmsg1(050, _("Bad Hello from Client: %s.\n"), cl->msg); return false; } unbash_spaces(job_name); jcr->FDVersion = fd_version; jcr->SDVersion = sd_version; Dmsg1(050, "FDVersion=%d\n", fd_version); /* Turn on compression for newer FDs, except for Community version */ if (jcr->FDVersion >= 9 && jcr->FDVersion != 213 && me->comm_compression) { cl->set_compress(); /* set compression allowed */ } else { cl->clear_compress(); Dmsg0(050, "*** No SD compression to FD\n"); } /* The community version 213 doesn't have caps */ if (fd_version >= 12 && fd_version != 213 && fd_version != 214) { /* Send and get capabilities */ if (!send_sdcaps(jcr, cl)) { return false; } if (!recv_fdcaps(jcr, cl, &fdcaps)) { return false; } fdcaps.set(jcr); } return true; } /* * Send Hello OK to DIR or FD */ bool send_hello_ok(BSOCK *bs) { return bs->fsend(OK_hello, SD_VERSION); } bool send_sorry(BSOCK *bs) { return bs->fsend(Sorry); } /* * We are acting as a client, so send Hello to the SD. */ bool send_hello_sd(JCR *jcr, char *Job, int tlspsk) { bool rtn; BSOCK *sd = jcr->store_bsock; bash_spaces(Job); rtn = sd->fsend(hello_sd, Job, FD_VERSION, SD_VERSION, tlspsk); unbash_spaces(Job); Dmsg1(100, "Send to SD: %s\n", sd->msg); if (!rtn) { return false; } /* Receive and send capabilities */ if (!recv_sdcaps(jcr, sd)) { return false; } if (!send_fdcaps(jcr, sd)) { return false; } return true; } /* * We are SD so send Hello to client * Note: later the Client will send us a Hello. */ bool send_hello_client(JCR *jcr, char *Job) { bool rtn; BSOCK *cl = jcr->file_bsock; bash_spaces(Job); // This is a "dummy" hello, just to connect to the waiting FD job // No need of a TLS-PSK field here, the FD will send the "real" TLS-PSK field rtn = cl->fsend("Hello FD: Bacula Storage calling Start Job %s %d\n", Job, SD_VERSION); unbash_spaces(Job); if (!rtn) { return false; } return rtn; } /* * Capabilities Exchange * * Immediately after the Hello is sent/received by the SD, the SD sends * its capabilities to the client (FD), and the client responds by * sending its capabilities. */ static bool send_sdcaps(JCR *jcr, BSOCK *cl) { int stat; int32_t dedup = 0; int32_t hash = DEDUP_DEFAULT_HASH_ID; uint32_t block_size = DEDUP_IDEAL_BLOCK_SIZE; uint32_t min_block_size = DEDUP_MIN_BLOCK_SIZE; uint32_t max_block_size = DEDUP_MAX_BLOCK_SIZE; /* Set dedup, if SD is dedup enabled and device is dedup type */ if (jcr->dcr) { dedup = jcr->dcr->dev->dev_type==B_DEDUP_DEV; } Dmsg5(200, ">Send sdcaps: dedup=%ld hash=%ld dedup_block=%lu min_dedup_block=%lu max_dedup_block=%lu\n", dedup, hash, block_size, min_block_size, max_block_size); stat = cl->fsend("sdcaps: dedup=%ld hash=%ld dedup_block=%lu min_dedup_block=%lu max_dedup_block=%lu\n", dedup, hash, block_size, min_block_size, max_block_size); if (!stat) { berrno be; Jmsg1(jcr, M_FATAL, 0, _("Send caps to Client failed. 
ERR=%s\n"), be.bstrerror()); Dmsg1(050, _("Send caps to Client failed. ERR=%s\n"), be.bstrerror()); return false; } return true; } static bool recv_fdcaps(JCR *jcr, BSOCK *cl, caps_fd *caps) { int stat; stat = cl->recv(); if (stat <= 0) { berrno be; Jmsg1(jcr, M_FATAL, 0, _("Recv caps from Client failed. ERR=%s\n"), be.bstrerror()); Dmsg1(050, _("Recv caps from Client failed. ERR=%s\n"), be.bstrerror()); return false; } if (!caps->scan(cl->msg)) { Jmsg1(jcr, M_FATAL, 0, _("Recv bad caps from Client: %s.\n"), cl->msg); Dmsg1(050, _("Recv bad caps from Client %s\n"), cl->msg); return false; } return true; } /* ======================== */ /* * Note: for the following two subroutines, during copy/migration * jobs, we are acting as a File daemon. */ static bool send_fdcaps(JCR *jcr, BSOCK *sd) { int dedup = 0; if (jcr->dcr->device->dev_type == B_DEDUP_DEV) { dedup = 1; } return sd->fsend("fdcaps: dedup=%d rehydration=%d\n", dedup, dedup); } /* Result not used */ static bool recv_sdcaps(JCR *jcr, BSOCK *sd) { int stat; int32_t dedup = 0; int32_t hash = 0; int32_t block_size = 0; int32_t min_block_size = 0; int32_t max_block_size = 0; stat = sd->recv(); if (stat <= 0) { berrno be; Jmsg1(jcr, M_FATAL, 0, _("Recv caps from SD failed. ERR=%s\n"), be.bstrerror()); Dmsg1(050, _("Recv caps from SD failed. ERR=%s\n"), be.bstrerror()); return false; } if (sscanf(sd->msg, "sdcaps: dedup=%ld hash=%ld dedup_block=%ld min_dedup_block=%ld max_dedup_block=%ld", &dedup, &hash, &block_size, &min_block_size, &max_block_size) != 5) { Jmsg1(jcr, M_FATAL, 0, _("Bad caps from SD: %s.\n"), sd->msg); Dmsg1(050, _("Bad caps from SD: %s\n"), sd->msg); return false; } Dmsg5(200, "sdcaps: dedup=%ld hash=%ld dedup_block=%ld min_dedup_block=%ld max_dedup_block=%ld\n", dedup, hash, block_size, min_block_size, max_block_size); return true; } bacula-15.0.3/src/stored/vbackup.c0000644000175000017500000002404514771010173016555 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * SD -- vbackup.c -- responsible for doing virtual backup jobs. * * Kern Sibbald, January MMVI * */ #include "bacula.h" #include "stored.h" /* Import functions */ extern char Job_end[]; /* Forward referenced subroutines */ static bool record_cb(DCR *dcr, DEV_RECORD *rec); /* * Read Data and send to File Daemon * Returns: false on failure * true on success */ bool do_vbackup(JCR *jcr) { bool ok = true; BSOCK *dir = jcr->dir_bsock; const char *Type; char ec1[50]; DEVICE *dev; switch(jcr->getJobType()) { case JT_MIGRATE: Type = "Migration"; break; case JT_ARCHIVE: Type = "Archive"; break; case JT_COPY: Type = "Copy"; break; case JT_BACKUP: Type = "Virtual Backup"; break; default: Type = "Unknown"; break; } /* TODO: Remove when the new match_all is well tested */ jcr->use_new_match_all = use_new_match_all; Dmsg1(20, "Start read data. 
newbsr=%d\n", jcr->use_new_match_all); if (!jcr->read_dcr) { Jmsg(jcr, M_FATAL, 0, _("Read device not initialized or defined for this job.\n")); goto bail_out; } if (!jcr->dcr) { Jmsg(jcr, M_FATAL, 0, _("Write device not initialized or defined for this job.\n")); goto bail_out; } Dmsg2(100, "read_dcr=%p write_dcr=%p\n", jcr->read_dcr, jcr->dcr); if (jcr->NumReadVolumes == 0) { Jmsg(jcr, M_FATAL, 0, _("No Volume names found for %s.\n"), Type); goto bail_out; } Dmsg3(200, "Found %d volumes names for %s. First=%s\n", jcr->NumReadVolumes, jcr->VolList->VolumeName, Type); ASSERT(jcr->read_dcr != jcr->dcr); ASSERT(jcr->read_dcr->dev != jcr->dcr->dev); /* Ready devices for reading and writing */ if (!acquire_device_for_read(jcr->read_dcr) || !acquire_device_for_append(jcr->dcr)) { jcr->setJobStatus(JS_ErrorTerminated); goto bail_out; } jcr->dcr->dev->start_of_job(jcr->dcr); Dmsg2(200, "===== After acquire pos %u:%u\n", jcr->dcr->dev->file, jcr->dcr->dev->block_num); jcr->sendJobStatus(JS_Running); begin_data_spool(jcr->dcr); begin_attribute_spool(jcr); jcr->dcr->VolFirstIndex = jcr->dcr->VolLastIndex = 0; jcr->run_time = time(NULL); set_start_vol_position(jcr->dcr); jcr->JobFiles = 0; jcr->dcr->set_ameta(); jcr->read_dcr->set_ameta(); ok = read_records(jcr->read_dcr, record_cb, mount_next_read_volume); goto ok_out; bail_out: ok = false; ok_out: if (jcr->dedup) { /* was create in record_cb() to do rehydration */ delete jcr->dedup; jcr->dedup = NULL; } if (jcr->dcr) { jcr->dcr->set_ameta(); dev = jcr->dcr->dev; Dmsg1(100, "ok=%d\n", ok); if (ok || dev->can_write()) { if (!dev->flush_before_eos(jcr->dcr)) { Jmsg2(jcr, M_FATAL, 0, _("Fatal append error on device %s: ERR=%s\n"), dev->print_name(), dev->bstrerror()); Dmsg0(100, _("Set ok=FALSE after write_block_to_device.\n")); ok = false; } /* Flush out final ameta partial block of this session */ if (!jcr->dcr->write_final_block_to_device()) { Jmsg2(jcr, M_FATAL, 0, _("Fatal append error on device %s: ERR=%s\n"), dev->print_name(), dev->bstrerror()); Dmsg0(100, _("Set ok=FALSE after write_final_block_to_device.\n")); ok = false; } Dmsg2(200, "Flush block to device pos %u:%u\n", dev->file, dev->block_num); } flush_jobmedia_queue(jcr); if (!ok) { discard_data_spool(jcr->dcr); } else { /* Note: if commit is OK, the device will remain blocked */ commit_data_spool(jcr->dcr); } /* * Don't use time_t for job_elapsed as time_t can be 32 or 64 bits, * and the subsequent Jmsg() editing will break */ int32_t job_elapsed = time(NULL) - jcr->run_time; if (job_elapsed <= 0) { job_elapsed = 1; } Jmsg(jcr, M_INFO, 0, _("Elapsed time=%02d:%02d:%02d, Transfer rate=%s Bytes/second\n"), job_elapsed / 3600, job_elapsed % 3600 / 60, job_elapsed % 60, edit_uint64_with_suffix(jcr->JobBytes / job_elapsed, ec1)); /* Release the device -- and send final Vol info to DIR */ release_device(jcr->dcr); if (!ok || job_canceled(jcr)) { discard_attribute_spool(jcr); } else { commit_attribute_spool(jcr); } } if (jcr->read_dcr) { if (!release_device(jcr->read_dcr)) { ok = false; } } jcr->sendJobStatus(); /* update director */ Dmsg0(30, "Done reading.\n"); jcr->end_time = time(NULL); dequeue_messages(jcr); /* send any queued messages */ if (ok) { jcr->setJobStatus(JS_Terminated); } generate_daemon_event(jcr, "JobEnd"); generate_plugin_event(jcr, bsdEventJobEnd); dir->fsend(Job_end, jcr->Job, jcr->JobStatus, jcr->JobFiles, edit_uint64(jcr->JobBytes, ec1), jcr->JobErrors, jcr->StatusErrMsg); Dmsg6(100, Job_end, jcr->Job, jcr->JobStatus, jcr->JobFiles, ec1, jcr->JobErrors, 
jcr->StatusErrMsg); dequeue_daemon_messages(jcr); dir->signal(BNET_EOD); /* send EOD to Director daemon */ free_plugins(jcr); /* release instantiated plugins */ return ok; } /* * Called here for each record from read_records() * Returns: true if OK * false if error */ static bool record_cb(DCR *dcr, DEV_RECORD *rec) { JCR *jcr = dcr->jcr; DEVICE *dev = jcr->dcr->dev; char buf1[100], buf2[100]; bool restoredatap = false; POOLMEM *orgdata = NULL; uint32_t orgdata_len = 0; bool ret = false; /* If label and not for us, discard it */ if (rec->FileIndex < 0 && rec->match_stat <= 0) { ret = true; goto bail_out; } /* We want to write SOS_LABEL and EOS_LABEL discard all others */ switch (rec->FileIndex) { case PRE_LABEL: case VOL_LABEL: case EOT_LABEL: case EOM_LABEL: ret = true; /* don't write vol labels */ goto bail_out; } /* * For normal migration jobs, FileIndex values are sequential because * we are dealing with one job. However, for Vbackup (consolidation), * we will be getting records from multiple jobs and writing them back * out, so we need to ensure that the output FileIndex is sequential. * We do so by detecting a FileIndex change and incrementing the * JobFiles, which we then use as the output FileIndex. */ if (rec->FileIndex >= 0) { /* If something changed, increment FileIndex */ if (rec->VolSessionId != rec->last_VolSessionId || rec->VolSessionTime != rec->last_VolSessionTime || rec->FileIndex != rec->last_FileIndex) { jcr->JobFiles++; rec->last_VolSessionId = rec->VolSessionId; rec->last_VolSessionTime = rec->VolSessionTime; rec->last_FileIndex = rec->FileIndex; } rec->FileIndex = jcr->JobFiles; /* set sequential output FileIndex */ } /* TODO: If user really wants to do rehydrate the data, we should propose * this option. */ /* Do rehydration if the write device doesn't support deduplication */ if (jcr->dcr->device->dev_type != B_DEDUP_DEV && rec->Stream & STREAM_BIT_DEDUPLICATION_DATA) { if (!jcr->read_dcr->dev->setup_dedup_rehydration_interface(jcr->read_dcr)) { Jmsg0(jcr, M_FATAL, 0, _("Cannot do rehydration, device is not dedup aware\n")); goto bail_out; } bool despite_of_error = false; /* don't try to cheat for now */ int size; int err = jcr->dedup->record_rehydration(dcr, rec, jcr->dedup->get_msgbuf(), jcr->errmsg, despite_of_error, &size); if (err < 0) { /* cannot read data from DSE */ Jmsg(jcr, M_FATAL, 0, "%s", jcr->errmsg); goto bail_out; } /* Keep old pointers and restore them when at the end, * after this point, we need to use goto bail_out */ orgdata = rec->data; orgdata_len = rec->data_len; restoredatap = true; rec->data = jcr->dedup->get_msgbuf(); rec->data_len = size; } /* * Modify record SessionId and SessionTime to correspond to * output. */ rec->VolSessionId = jcr->VolSessionId; rec->VolSessionTime = jcr->VolSessionTime; Dmsg5(200, "before write JobId=%d FI=%s SessId=%d Strm=%s len=%d\n", jcr->JobId, FI_to_ascii(buf1, rec->FileIndex), rec->VolSessionId, stream_to_ascii(buf2, rec->Stream, rec->FileIndex), rec->data_len); rec->state_bits |= is_dedup_ref(rec, true) ? 
REC_NO_SPLIT : 0; if (!jcr->dcr->write_record(rec)) { Jmsg2(jcr, M_FATAL, 0, _("Fatal append error on device %s: ERR=%s\n"), dev->print_name(), dev->bstrerror()); goto bail_out; } /* Restore packet */ rec->VolSessionId = rec->last_VolSessionId; rec->VolSessionTime = rec->last_VolSessionTime; if (rec->FileIndex < 0) { ret = true; /* don't send LABELs to Dir */ goto bail_out; } jcr->JobBytes += rec->data_len; /* increment bytes this job */ Dmsg5(500, "wrote_record JobId=%d FI=%s SessId=%d Strm=%s len=%d\n", jcr->JobId, FI_to_ascii(buf1, rec->FileIndex), rec->VolSessionId, stream_to_ascii(buf2, rec->Stream, rec->FileIndex), rec->data_len); send_attrs_to_dir(jcr, rec); ret = true; bail_out: if (restoredatap) { rec->data = orgdata; rec->data_len = orgdata_len; } return ret; } bacula-15.0.3/src/stored/scan.c0000644000175000017500000001057114771010173016045 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * * scan.c scan a directory (on a removable file) for a valid * Volume name. If found, open the file for append. * * Kern Sibbald, MMVI * */ #include "bacula.h" #include "stored.h" int breaddir(DIR *dirp, POOLMEM *&d_name); /* Forward referenced functions */ static bool is_volume_name_legal(char *name); bool DEVICE::scan_dir_for_volume(DCR *dcr) { DIR* dp; int name_max; char *mount_point; VOLUME_CAT_INFO dcrVolCatInfo, devVolCatInfo; char VolumeName[MAX_NAME_LENGTH]; struct stat statp; bool found = false; POOL_MEM fname(PM_FNAME); POOL_MEM dname(PM_FNAME); bool need_slash = false; int len; dcrVolCatInfo = dcr->VolCatInfo; /* structure assignment */ devVolCatInfo = VolCatInfo; /* structure assignment */ bstrncpy(VolumeName, dcr->VolumeName, sizeof(VolumeName)); name_max = pathconf(".", _PC_NAME_MAX); if (name_max < 1024) { name_max = 1024; } if (device->mount_point) { mount_point = device->mount_point; } else { mount_point = device->device_name; } if (!(dp = opendir(mount_point))) { berrno be; dev_errno = errno; Dmsg3(29, "scan_dir_for_vol: failed to open dir %s (dev=%s), ERR=%s\n", mount_point, print_name(), be.bstrerror()); goto get_out; } len = strlen(mount_point); if (len > 0) { need_slash = !IsPathSeparator(mount_point[len - 1]); } for ( ;; ) { if (breaddir(dp, dname.addr()) != 0) { dev_errno = EIO; Dmsg2(129, "scan_dir_for_vol: failed to find suitable file in dir %s (dev=%s)\n", mount_point, print_name()); break; } if (strcmp(dname.c_str(), ".") == 0 || strcmp(dname.c_str(), "..") == 0) { continue; } if (!is_volume_name_legal(dname.c_str())) { continue; } pm_strcpy(fname, mount_point); if (need_slash) { pm_strcat(fname, "/"); } pm_strcat(fname, dname); if (lstat(fname.c_str(), &statp) != 0 || !S_ISREG(statp.st_mode)) { continue; /* ignore directories & special files */ } /* * OK, we got a different volume mounted. First save the * requested Volume info (dcr) structure, then query if * this volume is really OK. If not, put back the desired * volume name, mark it not in changer and continue. 
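 * Here "query" means the dir_get_volume_info(dcr, ..., GET_VOL_INFO_FOR_WRITE)
 * call just below; if the Director rejects the volume we keep scanning, and
 * the saved names are restored at get_out when nothing suitable is found.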
*/ bstrncpy(dcr->VolumeName, dname.c_str(), sizeof(dcr->VolumeName)); /* Check if this is a valid Volume in the pool */ if (!dir_get_volume_info(dcr, dcr->VolumeName, GET_VOL_INFO_FOR_WRITE)) { continue; } /* This was not the volume we expected, but it is OK with * the Director, so use it. */ VolCatInfo = dcr->VolCatInfo; /* structure assignment */ found = true; break; /* got a Volume */ } closedir(dp); get_out: if (!found) { /* Restore VolumeName we really wanted */ bstrncpy(dcr->VolumeName, VolumeName, sizeof(dcr->VolumeName)); dcr->VolCatInfo = dcrVolCatInfo; /* structure assignment */ VolCatInfo = devVolCatInfo; /* structure assignment */ } Dsm_check(100); return found; } /* * Check if the Volume name has legal characters * If ua is non-NULL send the message */ static bool is_volume_name_legal(char *name) { int len; const char *p; const char *accept = ":.-_"; /* Restrict the characters permitted in the Volume name */ for (p=name; *p; p++) { if (B_ISALPHA(*p) || B_ISDIGIT(*p) || strchr(accept, (int)(*p))) { continue; } return false; } len = strlen(name); if (len >= MAX_NAME_LENGTH) { return false; } if (len == 0) { return false; } return true; } bacula-15.0.3/src/stored/generic_driver.c0000644000175000017500000013657414771010173020124 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * Routines for writing Cloud drivers * * Written by Norbert Bizet, June MMXVIII * */ #include "generic_driver.h" #include "cloud_transfer_mgr.h" #ifndef HAVE_OPENSSL #include "lib/sha2.h" #endif const int64_t dbglvl = DT_CLOUD|50; const size_t block_size = 4096; const int TIMEOUT = 0; const int RETRY_DOWNLOAD = 0x0D; const int MIN_RESTORE_CONTAINER_NAME_SIZE = 3; const int MAX_RESTORE_CONTAINER_NAME_SIZE = 63; #ifdef __cplusplus extern "C" { #endif cloud_driver *BaculaCloudDriver() { return New(generic_driver); } #ifdef __cplusplus } #endif /* taken from plugins */ static ssize_t full_write(FILE *fp, char *ptr, int32_t nbytes, cancel_callback *cancel_cb=NULL) { int fd = fileno(fp); int32_t nleft, nwritten; nleft = nbytes; bool cancel = false; if (cancel_cb && cancel_cb->fct) { cancel = cancel_cb->fct(cancel_cb->arg); } while (nleft > 0 && !cancel) { do { errno = 0; nwritten = fwrite(ptr, 1, nleft, fp); } while (nwritten == -1 && errno == EINTR); /* * If connection is non-blocking, we will get EAGAIN */ if (nwritten == -1 && errno == EAGAIN) { fd_set fdset; struct timeval tv; FD_ZERO(&fdset); FD_SET((unsigned)fd, &fdset); tv.tv_sec = 1; tv.tv_usec = 0; select(fd + 1, NULL, &fdset, NULL, &tv); continue; } if (nwritten <= 0) { return nwritten; /* error */ } nleft -= nwritten; ptr += nwritten; if (cancel_cb && cancel_cb->fct) { cancel = cancel_cb->fct(cancel_cb->arg); } } return nbytes - nleft; } generic_driver::generic_driver() { host_env = get_pool_memory(PM_FNAME); bucket_env = get_pool_memory(PM_NAME); access_key_env = get_pool_memory(PM_NAME); secret_key_env = get_pool_memory(PM_NAME); region_env = get_pool_memory(PM_NAME); protocol_env = get_pool_memory(PM_NAME); uri_style_env = get_pool_memory(PM_NAME); blob_endpoint_env = get_pool_memory(PM_NAME); file_endpoint_env = get_pool_memory(PM_NAME); queue_endpoint_env = get_pool_memory(PM_NAME); table_endpoint_env = get_pool_memory(PM_NAME); endpoint_suffix_env = get_pool_memory(PM_NAME); max_concurrent_uploads_env = get_pool_memory(PM_NAME); max_concurrent_downloads_env = get_pool_memory(PM_NAME); upload_limit_env = get_pool_memory(PM_NAME); download_limit_env = get_pool_memory(PM_NAME); transfer_priority_env = get_pool_memory(PM_NAME); transfer_retention_env = get_pool_memory(PM_NAME); unset_lctime_env = get_pool_memory(PM_NAME); debug_env = get_pool_memory(PM_NAME); working_path_env = get_pool_memory(PM_NAME); home_path_env = get_pool_memory(PM_NAME); objects_default_tier_env = get_pool_memory(PM_NAME); } generic_driver::~generic_driver() { free_pool_memory(host_env); free_pool_memory(bucket_env); free_pool_memory(access_key_env); free_pool_memory(secret_key_env); free_pool_memory(region_env); free_pool_memory(protocol_env); free_pool_memory(uri_style_env); free_pool_memory(blob_endpoint_env); free_pool_memory(file_endpoint_env); free_pool_memory(queue_endpoint_env); free_pool_memory(table_endpoint_env); free_pool_memory(endpoint_suffix_env); free_pool_memory(max_concurrent_uploads_env); free_pool_memory(max_concurrent_downloads_env); free_pool_memory(upload_limit_env); free_pool_memory(download_limit_env); free_pool_memory(transfer_priority_env); free_pool_memory(transfer_retention_env); free_pool_memory(unset_lctime_env); free_pool_memory(debug_env); free_pool_memory(working_path_env); free_pool_memory(home_path_env); free_pool_memory(objects_default_tier_env); } bool generic_driver::init(CLOUD *cloud, POOLMEM *&err) { /* File I/O buffer */ buf_len = DEFAULT_BLOCK_SIZE; if (cloud) { host_name = cloud->host_name; bucket_name = 
cloud->bucket_name; access_key = cloud->access_key; secret_key = cloud->secret_key; region = cloud->region; protocol = cloud->protocol; uri_style = cloud->uri_style; blob_endpoint = cloud->blob_endpoint; file_endpoint = cloud->file_endpoint; queue_endpoint = cloud->queue_endpoint; table_endpoint = cloud->table_endpoint; endpoint_suffix = cloud->endpoint_suffix; max_concurrent_uploads = cloud->max_concurrent_uploads; max_concurrent_downloads = cloud->max_concurrent_downloads; upload_limit.set_bwlimit(cloud->upload_limit); download_limit.set_bwlimit(cloud->download_limit); objects_default_tier = cloud->objects_default_tier; driver_command = cloud->driver_command; pm_strcpy(host_env, "CLOUD_HOST="); pm_strcat(host_env, NPRTB(host_name)); envs[0] = host_env; pm_strcpy(bucket_env, "CLOUD_BUCKET="); pm_strcat(bucket_env, NPRTB(bucket_name)); envs[1] = bucket_env; pm_strcpy(access_key_env, "CLOUD_ACCESS_KEY="); pm_strcat(access_key_env, NPRTB(access_key)); envs[2] = access_key_env; pm_strcpy(secret_key_env, "CLOUD_SECRET_KEY="); pm_strcat(secret_key_env, NPRTB(secret_key)); envs[3] = secret_key_env; pm_strcpy(region_env, "CLOUD_REGION="); pm_strcat(region_env, NPRTB(region)); envs[4] = region_env; pm_strcpy(protocol_env, "CLOUD_PROTOCOL="); pm_strcat(protocol_env, (protocol==0)?"https":"http"); envs[5] = protocol_env; pm_strcpy(uri_style_env, "CLOUD_URI_TYPE="); pm_strcat(uri_style_env, (uri_style==0)?"virtual":"path"); envs[6] = uri_style_env; pm_strcpy(blob_endpoint_env, "CLOUD_BLOB_ENDPOINT="); pm_strcat(blob_endpoint_env, NPRTB(blob_endpoint)); envs[7] = blob_endpoint_env; pm_strcpy(file_endpoint_env, "CLOUD_FILE_ENDPOINT="); pm_strcat(file_endpoint_env, NPRTB(file_endpoint)); envs[8] = file_endpoint_env; pm_strcpy(queue_endpoint_env, "CLOUD_QUEUE_ENDPOINT="); pm_strcat(queue_endpoint_env, NPRTB(queue_endpoint)); envs[9] = queue_endpoint_env; pm_strcpy(table_endpoint_env, "CLOUD_TABLE_ENDPOINT="); pm_strcat(table_endpoint_env, NPRTB(table_endpoint)); envs[10] = table_endpoint_env; pm_strcpy(endpoint_suffix_env, "CLOUD_ENDPOINT_SUFFIX="); pm_strcat(endpoint_suffix_env, NPRTB(endpoint_suffix)); envs[11] = endpoint_suffix_env; char b[33]; sprintf(b, "%d", max_concurrent_uploads); pm_strcpy(max_concurrent_uploads_env, "CLOUD_MAX_CONCURRENT_UPLOADS="); pm_strcat(max_concurrent_uploads_env, b); envs[12] = max_concurrent_uploads_env; sprintf(b, "%d", max_concurrent_downloads); pm_strcpy(max_concurrent_downloads_env, "CLOUD_MAX_CONCURRENT_DOWNLOADS="); pm_strcat(max_concurrent_downloads_env, b); envs[13] = max_concurrent_downloads_env; sprintf(b, "%lld", (long long) cloud->upload_limit); pm_strcpy(upload_limit_env, "CLOUD_UPLOAD_LIMIT="); pm_strcat(upload_limit_env, b); envs[14] = upload_limit_env; sprintf(b, "%lld", (long long) cloud->download_limit); pm_strcpy(download_limit_env, "CLOUD_DOWNLOAD_LIMIT="); pm_strcat(download_limit_env, b); envs[15] = download_limit_env; sprintf(b, "%d", cloud->transfer_priority); pm_strcpy(transfer_priority_env, "CLOUD_TRANSFER_PRIORITY="); pm_strcat(transfer_priority_env, b); envs[16] = transfer_priority_env; uint32_t transfer_retention_days = cloud->transfer_retention / (3600 * 24); /* at least 1 day retention period */ if (transfer_retention_days <= 0) { transfer_retention_days = 1; } sprintf(b, "%d", transfer_retention_days); pm_strcpy(transfer_retention_env, "CLOUD_TRANSFER_RETENTION_DAYS="); pm_strcat(transfer_retention_env, b); envs[17] = transfer_retention_env; pm_strcpy(debug_env, "CLOUD_DEBUG="); if (chk_dbglvl(dbglvl)) pm_strcat(debug_env, "TRUE"); 
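   /* The envs[] vector assembled here is handed to open_bpipe() on every
    * driver invocation; the CLOUD_DEBUG slot (envs[18]) is rebuilt again in
    * call_fct() so that a debug-level change made at runtime is seen by the
    * next script call. */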
envs[18] = debug_env; pm_strcpy(working_path_env, "CLOUD_WORKING_PATH="); pm_strcat(working_path_env, working_directory); envs[19] = working_path_env; pm_strcpy(home_path_env, "HOME="); /* child default home location */ pm_strcat(home_path_env, working_directory); envs[20] = home_path_env; sprintf(b, "%d", objects_default_tier); pm_strcpy(objects_default_tier_env, "CLOUD_OBJECTS_DEFAULT_TIER="); /* child default home location */ pm_strcat(objects_default_tier_env, b); envs[21] = objects_default_tier_env; if (driver_command && strstr(driver_command, "was_cloud_driver") != NULL) { pm_strcpy(unset_lctime_env, "LC_TIME="); envs[22] = unset_lctime_env; } else { envs[22] = NULL; } envs[23] = NULL; return true; } return false; } /* helper to handler close_bpipe error code */ /* be.code(stat) extracts the actual error code returned by the driver script */ /* the pipe-specific errors are concated to the err string */ int handle_error(int stat, POOLMEM *&err) { berrno be; if (be.code(stat) != 0) { err = strip_trailing_junk(err); pm_strcat(err, " "); pm_strcat(err, be.bstrerror(stat)); } return be.code(stat); } /* Main contact point with the script. The script is always called with the following parameters in this exact order : fct_name, volume_name, cache_name, partname ("part.x"). Depending on fct_name, some parameters can be NULL ("*None*"). call_fct Control I/Os with the script and calls read_cb and write_cb in return. read_cb : is called when data from the script is available for read. First argument is the data buffer ready for read. Second argument is the size of the data. The last argument is the arg parameter that's been passed at cb creation. A typical read_cb function will parse the data buffer for some information/error message or copy the data buffer to file. write_cb : is called when the script is ready to write data. First argument is the data buffer to fill with data. Second argument is the max size of the data. The last argument is the arg parameter that's been passed at cb creation. The write_cb function must return the actual written size in the data buffer within the write_cb function, 0 if the file is to be closed (eof) or -1 in case of error (in this case, data buffer contains the error text). A typical write_cb function will fill the data buffer with data and return the actual data size written. The return value is the script return value. 0 is success. But err must be tested separately for error message. 
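   As a minimal illustration of the expected callback shape (not an actual
   driver callback, just a sketch of the signature described above), a
   read_cb that copies the script output straight to a file could be
   written as:

      size_t my_read_cb(char *buf, size_t sz, void *arg)
      {
         FILE *out = (FILE *)arg;          // arg supplied at callback creation
         return fwrite(buf, 1, sz, out);   // returning anything != sz is treated as an error
      }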
*/ int generic_driver::call_fct(const char* fct_name, const char* volume_name, int part_number, read_callback *read_cb, write_callback *write_cb, cancel_callback *cancel_cb, POOLMEM *&err, const char* cache_path_name) { POOL_MEM cmd(PM_FNAME); if (cache_path_name) { Mmsg(cmd, "%s %s %s part.%d %s", driver_command, fct_name, NPRT(volume_name), part_number, cache_path_name); } else { Mmsg(cmd, "%s %s %s part.%d", driver_command, fct_name, NPRT(volume_name), part_number); } return call_fct(cmd.addr(), read_cb, write_cb, cancel_cb, err); } int generic_driver::call_fct(const char* fct_name, const char* volume_name, const char* part_name, read_callback *read_cb, write_callback *write_cb, cancel_callback *cancel_cb, POOLMEM *&err) { POOL_MEM cmd(PM_FNAME); Mmsg(cmd, "%s %s %s %s", driver_command, fct_name, NPRT(volume_name), NPRT(part_name)); return call_fct(cmd.addr(), read_cb, write_cb, cancel_cb, err); } int generic_driver::call_fct(char* cmd, read_callback *read_cb, write_callback *write_cb, cancel_callback *cancel_cb, POOLMEM *&err) { BPIPE *bpipe; POOL_MEM res(PM_BSOCK); bool cancel; /* recheck debug level */ pm_strcpy(debug_env, "CLOUD_DEBUG="); if (chk_dbglvl(dbglvl)) pm_strcat(debug_env, "TRUE"); envs[18] = debug_env; if (chk_dbglvl(dbglvl)) { Dmsg1(dbglvl, "cmd=%s\n", cmd); for (int i = 0; envs[i] ; i++) { if (strncmp(envs[i], "CLOUD_SECRET_KEY=", 17) == 0) { Dmsg0(dbglvl, " CLOUD_SECRET_KEY=xxx\n"); } else { Dmsg1(dbglvl, " %s\n", envs[i]); } } } bpipe = open_bpipe(cmd, TIMEOUT, "rwe",envs); if (!bpipe) { Mmsg(err, _("%s. error in open_bpipe.\n"), cmd); return false; } cancel = (cancel_cb && cancel_cb->fct && cancel_cb->fct(cancel_cb->arg)); while(!cancel) { int rfd = bpipe->rfd ? fileno(bpipe->rfd):-1; int wfd = bpipe->wfd ? fileno(bpipe->wfd):-1; int efd = bpipe->efd ? fileno(bpipe->efd):-1; fd_set rfds; FD_ZERO(&rfds); if (rfd >= 0) FD_SET((unsigned)rfd, &rfds); if (efd >= 0) FD_SET((unsigned)efd, &rfds); fd_set wfds; FD_ZERO(&wfds); if (write_cb && wfd >= 0) FD_SET((unsigned)wfd, &wfds); struct timeval tv; tv.tv_sec = 1; tv.tv_usec = 0; int maxfd = rfd>wfd?(efd>rfd?efd:rfd):(efd>wfd?efd:wfd); int ret = select(maxfd+1, &rfds, &wfds, NULL, &tv); if (ret == 0) { /* timeout */ if (FD_ISSET(efd, &rfds)) { size_t size = read(efd, res.addr(), res.size()-1); if (size > 0) { res.addr()[size] = '\0'; Mmsg(err, _("%s. %s\n"), cmd, res.c_str()); } } } else if (ret < 0) { /* error */ berrno be; Mmsg(err, _("%s. error in select. %s\n"), cmd, be.bstrerror(errno)); return handle_error(close_bpipe(bpipe), err); } else if (FD_ISSET(rfd, &rfds)) { /* read 1 line */ ssize_t size = read(rfd, res.addr(), res.size()-1); if (size == -1) { /* error */ berrno be; Mmsg(err, _("%s. fgets error. %s\n"), cmd, be.bstrerror(ferror(bpipe->rfd))); return handle_error(close_bpipe(bpipe), err); } else if (size == 0) { return handle_error(close_bpipe(bpipe), err); } else { res.addr()[size] = '\0'; /* Outside from the data stream, but it might be displayed in a command */ size_t unsigned_size = size; if (download_limit.use_bwlimit()) download_limit.control_bwlimit(size); if (read_cb && read_cb->fct){ /* process the current line*/ size_t processed = read_cb->fct(res.addr(), size, read_cb->arg); if (processed != unsigned_size) { if (errno!=0) { berrno be; Mmsg(err, _("%s. read_cb error. %s\n"), cmd, be.bstrerror(errno)); } else { Mmsg(err, _("%s. read_cb error. 
%s\n"), cmd, res.c_str()); } return handle_error(close_bpipe(bpipe), err); } } } } else if (FD_ISSET(efd, &rfds)) { size_t size = read(efd, res.addr(), res.size()-1); if (size > 0) { res.addr()[size] = '\0'; Mmsg(err, _("%s. %s\n"), cmd, res.c_str()); return handle_error(close_bpipe(bpipe), err); } } else if (FD_ISSET(wfd, &wfds)) { int f=0; size_t size = write_cb->fct(res.addr(), res.size()-1, write_cb->arg, f); if (upload_limit.use_bwlimit()) upload_limit.control_bwlimit(size); if (f & WRITE_CB_EOF) { full_write(bpipe->wfd, res.addr(), size, cancel_cb); /* end of file */ if (close_wpipe(bpipe)==0) { return 1; } } else if (f & WRITE_CB_ERR) { /* error */ Mmsg(err, _("%s. fgets error. %s"), cmd, res.c_str()); return handle_error(close_bpipe(bpipe), err); } else { size_t processed = full_write(bpipe->wfd, res.addr(), size, cancel_cb); if (processed != size) { if (errno!=0) { berrno be; Mmsg(err, _("%s. write to pipe error. %s\n"), cmd, be.bstrerror(errno)); } else { Mmsg(err, _("%s. write to pipe error. %s\n"), cmd, res.c_str()); } return handle_error(close_bpipe(bpipe), err); } } } // update cancel status cancel = (cancel_cb && cancel_cb->fct && cancel_cb->fct(cancel_cb->arg)); } if (cancel) { Mmsg(err, _("%s. cancelled by job.\n"), cmd); handle_error(close_bpipe(bpipe), err); return -1; } return 0; } bool generic_driver::term(POOLMEM *&err) { return true; } bool generic_driver::start_of_job(POOLMEM *&err) { if (err) { Mmsg(err, _("Using driver %s Bucket=%s"), driver_command, bucket_name); } return true; } bool generic_driver::end_of_job(POOLMEM *&err) { return true; } size_t copy_cache_part_to_cloud_read_cb(char *res, size_t sz, void* arg) { transfer *xfer = (transfer*)arg; bool wrong_string = false; if (xfer) { char *size=strstr(res, "size:"), *mtime=strstr(res, "mtime:"); if (size && mtime) { xfer->m_res_mtime = str_to_uint64(&(mtime[6])); xfer->m_res_size = str_to_uint64(&(size[5])); } else { wrong_string = true; } return wrong_string ? 
0:sz; } return 0; } struct copy_cache_part_to_cloud_write_cb_arg { FILE *file; SHA512_CTX* sha512; transfer *xfer; }; size_t copy_cache_part_to_cloud_write_cb(char *res, size_t block_size, void* arg, int& flags) { flags = 0; ssize_t ret = 0; copy_cache_part_to_cloud_write_cb_arg *_arg = (copy_cache_part_to_cloud_write_cb_arg*)arg; if (_arg) { FILE *file = _arg->file; if (file) { ret = read(fileno(file), res, block_size); if (ret == -1){ /* error */ berrno be; Mmsg(res, "write_read_cb ERR %s", be.bstrerror(ferror(file))); flags|= WRITE_CB_ERR; ret = strlen(res); } else if (ret == 0) { /* end of file */ flags|= WRITE_CB_EOF; } else { /* res is filed with ret new bytes */ if (_arg->sha512) { SHA512_Update(_arg->sha512, res, ret); } if (_arg->xfer) { _arg->xfer->increment_processed_size(ret); } } } } return ret; } bool copy_cache_part_to_cloud_cancel_cb(void* arg) { transfer *xfer = (transfer*)arg; if (xfer) return xfer->is_canceled(); return false; } bool generic_driver::copy_cache_part_to_cloud(transfer *xfer) { bool use_cache_file_path = (strstr(driver_command, "was_cloud_driver") != NULL); SHA512_CTX sha512; SHA512_Init(&sha512); read_callback rcb; rcb.fct = ©_cache_part_to_cloud_read_cb; rcb.arg = (void*)xfer; cancel_callback ccb; ccb.fct = ©_cache_part_to_cloud_cancel_cb; ccb.arg = (void*)xfer; /* when not passing the cache file path, stream it to the driver stdind */ if (!use_cache_file_path) { /* copy to const_cast */ char *fname = bstrdup(xfer->m_cache_fname); FILE *file=bfopen(fname, "rb"); if (!file) { berrno be; Mmsg2(xfer->m_message, "Could not open output file %s. ERR=%s\n", fname, be.bstrerror()); free(fname); return false; } write_callback wcb; wcb.fct = ©_cache_part_to_cloud_write_cb; copy_cache_part_to_cloud_write_cb_arg arg; arg.file = file; if (getenv("GENERATE_CLOUD_HASH")) { arg.sha512 = &sha512; } else { arg.sha512 = NULL; } arg.xfer = xfer; wcb.arg = (void*)&arg; uint32_t retry = max_upload_retries; int ret=-1; /* so we pass the first while */ while ((ret!=0) && (retry>0)) { /* when the driver decide to retry, it must reset the processed size */ xfer->reset_processed_size(); SHA512_Init(&sha512); if (retry < max_upload_retries) { if (xfer->m_message) { Dmsg3(dbglvl, "%s retry #%d err=%d\n", xfer->m_message, (max_upload_retries-retry), ret); } else { Dmsg3(dbglvl, "generic_driver::copy_cache_part_to_cloud part.%d retry #%d err=%d\n", xfer->m_part, (max_upload_retries-retry), ret); } /* And we clean the cloud part, in case the cloud target is a mess */ call_fct("delete", xfer->m_volume_name, xfer->m_part, NULL, NULL, &ccb, xfer->m_message, NULL); } ret=call_fct("upload",xfer->m_volume_name, xfer->m_part, &rcb, &wcb, &ccb, xfer->m_message, NULL); if (ret!=0) { xfer->inc_retry(); } --retry; } if (fclose(file) != 0) { berrno be; Mmsg2(xfer->m_message, "Could not close output file %s. 
ERR=%s\n", fname, be.bstrerror()); free(fname); return false; } if (getenv("GENERATE_CLOUD_HASH")) { SHA512_Final(xfer->m_hash64, &sha512); } else { bmemzero(xfer->m_hash64, 64); } free(fname); return (ret==0); } else { uint32_t retry = max_upload_retries; int ret=-1; while ((ret!=0) && (retry>0)) { /* when the driver decide to retry, it must reset the processed size */ xfer->reset_processed_size(); SHA512_Init(&sha512); if (retry < max_upload_retries) { if (xfer->m_message) { Dmsg3(dbglvl, "%s retry #%d err=%d\n", xfer->m_message, (max_upload_retries-retry), ret); } else { Dmsg3(dbglvl, "generic_driver::copy_cache_part_to_cloud part.%d retry #%d err=%d\n", xfer->m_part, (max_upload_retries-retry), ret); } /* And we clean the cloud part, in case the cloud target is a mess */ call_fct("delete", xfer->m_volume_name, xfer->m_part, NULL, NULL, &ccb, xfer->m_message, NULL); } ret=call_fct("upload",xfer->m_volume_name, xfer->m_part, &rcb, NULL, &ccb, xfer->m_message, xfer->m_cache_fname); if (ret!=0) { xfer->inc_retry(); } --retry; } if (getenv("GENERATE_CLOUD_HASH") && (ret==0)) { /* compute SHA512 */ char *fname = bstrdup(xfer->m_cache_fname); FILE *file=bfopen(fname, "rb"); if (!file) { berrno be; Mmsg2(xfer->m_message, "Could not open output file %s. ERR=%s\n", fname, be.bstrerror()); free(fname); return false; } POOLMEM *res = get_memory(block_size+1); ssize_t size = block_size; while( size == block_size) { size = fread(res, 1, block_size, file); SHA512_Update(&sha512, res, size); } free_pool_memory(res); if (fclose(file) != 0) { berrno be; Mmsg2(xfer->m_message, "Could not close output file %s. ERR=%s\n", fname, be.bstrerror()); free(fname); return false; } free(fname); SHA512_Final(xfer->m_hash64, &sha512); } else { bmemzero(xfer->m_hash64, 64); } return (ret==0); } return false; } struct move_cloud_part_read_cb_arg { POOLMEM *msg; }; size_t move_cloud_part_read_cb(char *res, size_t size, void* arg) { move_cloud_part_read_cb_arg*_arg = (move_cloud_part_read_cb_arg*)arg; if (_arg) { Mmsg(_arg->msg, "%s", res); } return size; } bool generic_driver::move_cloud_part(const char *VolumeName, uint32_t apart , const char *to, cancel_callback *cancel_cb, POOLMEM *&err, int& exists) { /* retrieve the output message into err */ read_callback rcb; rcb.fct = &move_cloud_part_read_cb; move_cloud_part_read_cb_arg arg; arg.msg = err; rcb.arg = (void*)&arg; int ret = call_fct("move", VolumeName, (int)apart, &rcb, NULL, cancel_cb, err, to); /* 0 = OK (either because the part has been moved or because it doesn't exists) */ if (ret == 0) { /* copied part is return by the read callback */ exists = strlen(err); return true; } return false; } struct copy_cloud_part_to_cache_read_cb_arg { FILE *file; SHA512_CTX* sha512; transfer *xfer; }; size_t copy_cloud_part_to_cache_read_cb(char *res, size_t size, void* arg) { copy_cloud_part_to_cache_read_cb_arg *_arg = (copy_cloud_part_to_cache_read_cb_arg*)arg; if (_arg) { if (_arg->file) { if (_arg->sha512) { SHA512_Update(_arg->sha512, res, size); } if (_arg->xfer) { _arg->xfer->increment_processed_size(size); } return full_write(_arg->file, res, size); } } return 0; } bool copy_cloud_part_to_cache_cancel_cb(void* arg) { transfer *xfer = (transfer*)arg; if (xfer) return xfer->is_canceled(); return false; } int generic_driver::copy_cloud_part_to_cache(transfer *xfer) { SHA512_CTX sha512; SHA512_Init(&sha512); cancel_callback ccb; ccb.fct = ©_cloud_part_to_cache_cancel_cb; ccb.arg = (void*)xfer; if (xfer) { /* Azure restores on a different bucket, so the restore bucket is 
the one to be checked */ /* swap bucket to restore or swap it back to the original */ if (strlen(xfer->m_restore_bucket) != 0) { pm_strcpy(bucket_env, "CLOUD_BUCKET="); pm_strcat(bucket_env, NPRTB(xfer->m_restore_bucket)); envs[1] = bucket_env; } else { pm_strcpy(bucket_env, "CLOUD_BUCKET="); pm_strcat(bucket_env, NPRTB(bucket_name)); envs[1] = bucket_env; } } bool use_cache_file_path = (strstr(driver_command, "was_cloud_driver") != NULL); if (!use_cache_file_path) { /* Create the cache file */ /* copy to const_cast */ char *fname = bstrdup(xfer->m_cache_fname); /* create the folder */ char *p = fname; while (IsPathSeparator(*p)) { p++; } while ((p = first_path_separator(p))) { char save_p; save_p = *p; *p = 0; mkdir(fname, 0740); *p = save_p; while (IsPathSeparator(*p)) { p++; } } FILE *file=bfopen(fname, "wb"); if (!file) { berrno be; Mmsg2(xfer->m_message, "Could not open output file %s. ERR=%s\n", fname, be.bstrerror()); free(fname); return CLOUD_DRIVER_COPY_PART_TO_CACHE_ERROR; } read_callback rcb; rcb.fct = ©_cloud_part_to_cache_read_cb; copy_cloud_part_to_cache_read_cb_arg arg; arg.file = file; if (getenv("GENERATE_CLOUD_HASH")) { arg.sha512 = &sha512; } else { arg.sha512 = NULL; } arg.xfer = xfer; rcb.arg = (void*)&arg; int ret=call_fct("download",xfer->m_volume_name, xfer->m_part, &rcb, NULL, &ccb, xfer->m_message, NULL); if (fclose(file) != 0) { berrno be; Mmsg2(xfer->m_message, "Could not close output file %s. ERR=%s\n", fname, be.bstrerror()); free(fname); return CLOUD_DRIVER_COPY_PART_TO_CACHE_ERROR; } if (getenv("GENERATE_CLOUD_HASH")) { SHA512_Final(xfer->m_hash64, &sha512); } else { bmemzero(xfer->m_hash64, 64); } free(fname); if (ret==RETRY_DOWNLOAD) { /* reset xfer->m_message (MT9511) Normally we dont do it in order to report as many errors as possible but retry is detected based on the return code (0x0D). This code might be interpreted as an error thru bpipe ("Unknown error during program execvp" in berror.c), so to keep the log clean, we reset the error message in this very specific case. */ if (xfer && xfer->m_message) { Dmsg3(dbglvl, "generic_driver::RETRY_DOWNLOAD purgin xfer message '%s' %s\\part%d.\n", xfer->m_message, xfer->m_volume_name, xfer->m_part); *(xfer->m_message)='\0'; } return CLOUD_DRIVER_COPY_PART_TO_CACHE_RETRY; } return (ret==0) ? CLOUD_DRIVER_COPY_PART_TO_CACHE_OK:CLOUD_DRIVER_COPY_PART_TO_CACHE_ERROR; } else { /* Azure is assumed here. Make sure to adjust logic if other provider use direct write to cache*/ int ret=call_fct("download",xfer->m_volume_name, xfer->m_part, NULL, NULL, &ccb, xfer->m_message, xfer->m_cache_fname); if (getenv("GENERATE_CLOUD_HASH") && (ret==0)) { /* compute SHA512 */ char *fname = bstrdup(xfer->m_cache_fname); FILE *file=bfopen(fname, "rb"); if (!file) { berrno be; Mmsg2(xfer->m_message, "Could not open output file %s. ERR=%s\n", fname, be.bstrerror()); free(fname); return CLOUD_DRIVER_COPY_PART_TO_CACHE_ERROR; } POOLMEM *res = get_memory(block_size+1); ssize_t size = block_size; while( size == block_size) { size = fread(res, 1, block_size, file); SHA512_Update(&sha512, res, size); } free_pool_memory(res); if (fclose(file) != 0) { berrno be; Mmsg2(xfer->m_message, "Could not close output file %s. 
ERR=%s\n", fname, be.bstrerror()); free(fname); return CLOUD_DRIVER_COPY_PART_TO_CACHE_ERROR; } free(fname); SHA512_Final(xfer->m_hash64, &sha512); } else { bmemzero(xfer->m_hash64, 64); } if (ret==RETRY_DOWNLOAD) { /* the python driver will write the restoration bucket name into the cache file */ /* retrieve the restore bucket name */ char *fname = bstrdup(xfer->m_cache_fname); FILE *file=bfopen(fname, "rb"); if (!file) { berrno be; Mmsg2(xfer->m_message, "Could not open output file %s. ERR=%s\n", fname, be.bstrerror()); free(fname); return CLOUD_DRIVER_COPY_PART_TO_CACHE_ERROR; } fseek(file, 0L, SEEK_END); ssize_t size = ftell(file); fseek(file, 0L, SEEK_SET); /*Azure Container names must be from 3 through 63 characters long.*/ if (size < MIN_RESTORE_CONTAINER_NAME_SIZE || size > MAX_RESTORE_CONTAINER_NAME_SIZE) { /* if we're out-of-bound, something is wrong: let's abort */ Mmsg1(xfer->m_message, "restore bucket name length %d is out of range. Aborting.\n", size); free(fname); return CLOUD_DRIVER_COPY_PART_TO_CACHE_ERROR; } else { char restore_bucket_name[MAX_RESTORE_CONTAINER_NAME_SIZE+1]; ssize_t ret = fread(restore_bucket_name, 1, size, file); if (ret != size) { berrno be; Mmsg2(xfer->m_message, "Could not close output file %s. ERR=%s\n", fname, be.bstrerror()); free(fname); return CLOUD_DRIVER_COPY_PART_TO_CACHE_ERROR; } restore_bucket_name[size] = '\0'; if (fclose(file) != 0) { berrno be; Mmsg2(xfer->m_message, "Could not close output file %s. ERR=%s\n", fname, be.bstrerror()); free(fname); return CLOUD_DRIVER_COPY_PART_TO_CACHE_ERROR; } free(fname); xfer->set_restore_bucket(restore_bucket_name); return CLOUD_DRIVER_COPY_PART_TO_CACHE_RETRY; } } return (ret==0) ? CLOUD_DRIVER_COPY_PART_TO_CACHE_OK:CLOUD_DRIVER_COPY_PART_TO_CACHE_ERROR; } return CLOUD_DRIVER_COPY_PART_TO_CACHE_ERROR; } bool generic_driver::restore_cloud_object(transfer *xfer, const char *cloud_fname) { (void) (xfer); (void) (cloud_fname); /* not implemented */ return false; } struct is_waiting_on_server_read_cb_arg { POOLMEM *msg; }; size_t is_waiting_on_server_read_cb(char *res, size_t size, void* arg) { is_waiting_on_server_read_cb_arg*_arg = (is_waiting_on_server_read_cb_arg*)arg; if (_arg) { Mmsg(_arg->msg, "%s", res); } return size; } bool generic_driver::is_waiting_on_server(transfer *xfer) { Dmsg2(dbglvl, "generic_driver::is_waiting_on_server for %spart%d.\n", xfer->m_volume_name, xfer->m_part); /* only Amazon and Azure support restoration */ if (strstr(driver_command, "aws_cloud_driver") != NULL || strstr(driver_command, "was_cloud_driver") != NULL) { Dmsg0(dbglvl, "call_fct wait_on_restore\n"); if (xfer) { /* Azure restores on a different bucket, so the restore bucket is the one to be checked */ /* swap bucket to restore or swap it back to the original */ if (strlen(xfer->m_restore_bucket) != 0) { pm_strcpy(bucket_env, "CLOUD_BUCKET="); pm_strcat(bucket_env, NPRTB(xfer->m_restore_bucket)); envs[1] = bucket_env; } else { pm_strcpy(bucket_env, "CLOUD_BUCKET="); pm_strcat(bucket_env, NPRTB(bucket_name)); envs[1] = bucket_env; } } POOLMEM *output = get_memory(block_size+1); read_callback rcb; rcb.fct = &is_waiting_on_server_read_cb; is_waiting_on_server_read_cb_arg arg; arg.msg = output; rcb.arg = (void*)&arg; int ret = call_fct("wait_on_restore",xfer->m_volume_name, xfer->m_part, &rcb, NULL, NULL, xfer->m_message, NULL); /* We report xfer->m_message here once */ Dmsg3(dbglvl, "wait_on_restore returns %d. 
output=%s error=%s\n", ret, output, xfer->m_message); bool in_progress = (strcmp(output, "WOR-INPROGRESS") == 0); free_pool_memory(output); return (in_progress); } return false; } /* no truncate_cloud_volume_read_cb */ bool generic_driver::truncate_cloud_volume(const char *volume_name, ilist *trunc_parts, cancel_callback *cancel_cb, POOLMEM *&err) { if (!volume_name) { Mmsg(err, _("truncate_cloud_volume. No Volume name provided.\n")); return false; } if (!trunc_parts) { Mmsg(err, _("truncate_cloud_volume. No trunc_parts list provided.\n")); return false; } int rtn=0; int i; for (i=1; (i <= (int)trunc_parts->last_index()); i++) { if (!trunc_parts->get(i)) { continue; } rtn |= call_fct("delete", volume_name, i, NULL, NULL, cancel_cb, err); if ((cancel_cb && cancel_cb->fct && cancel_cb->fct(cancel_cb->arg))) { Mmsg(err, _("truncate_cloud_volume. cancelled by job.\n")); return false; } } return (rtn==0); } struct clean_cloud_volume_read_cb_arg { POOLMEM **remain; alist *parts; cleanup_cb_type *cb; cleanup_ctx_type *ctx; }; size_t clean_cloud_volume_read_cb(char *res, size_t sz, void* arg) { alist *parts = NULL; bool wrong_string = false; size_t left = sz; POOLMEM **remain = NULL; clean_cloud_volume_read_cb_arg *_arg = (clean_cloud_volume_read_cb_arg*)arg; if (_arg) { parts = _arg->parts; remain = _arg->remain; } if (parts) { char * pch = strtok (res,"\n"); /* we enter the cb again and remaining string has not been processed */ if (remain && strlen(*remain) != 0) { pm_strcat(remain, pch); char *name=strstr(*remain, "part"); char *time=strstr(*remain, ",mtime:"); if (name && time) { *time='\0'; parts->append(bstrdup(name)); } else { wrong_string = true; } **remain = 0; left -= strlen(pch)+1; pch = strtok (NULL,"\n"); } while (!wrong_string && pch && (left > 0)) { char *name=strstr(pch, "part"); char *time=strstr(pch, ",mtime:"); if (name && time) { *time='\0'; parts->append(bstrdup(name)); } else { pm_strcpy(remain, pch); } left -= strlen(pch)+1; pch = strtok (NULL, "\n"); } return wrong_string ? 0:sz; } return 0; } bool generic_driver::clean_cloud_volume(const char *VolumeName, cleanup_cb_type *cb, cleanup_ctx_type *ctx, cancel_callback *cancel_cb, POOLMEM *&err) { if (!VolumeName) { Mmsg(err, _("clean_cloud_volume. No Volume name provided.\n")); return false; } alist parts; clean_cloud_volume_read_cb_arg arg; arg.parts = &parts; POOLMEM *p= get_memory(block_size); *p = 0; arg.remain = &p; arg.cb = cb; arg.ctx = ctx; read_callback pcb; pcb.fct = &clean_cloud_volume_read_cb; pcb.arg = (void*)&arg; /* list everything in the volume VolumeName */ int ret = call_fct("ls", VolumeName, "", &pcb, NULL, cancel_cb, err); free_pool_memory(*arg.remain); /* 1 is the generic return code for path not found, except for was driver that always return 0 */ if (ret == 1) { err = strip_trailing_junk(err); pm_strcat(err, " Cloud volume "); pm_strcat(err, VolumeName); pm_strcat(err, " not found.\n"); /* the volume could not be found in the cloud: it's possible to list a Volume on cloud that doesn't exist. Accept it as an OK behavior */ return true; } int rtn=0; char *part = NULL; foreach_alist(part, &parts) { int r = call_fct("delete", VolumeName, part, NULL, NULL, cancel_cb, err); if (r == 0) { Dmsg2(dbglvl, "clean_cloud_volume for %s: Unlink file %s.\n", VolumeName, part); } else { Dmsg4(dbglvl, "clean_cloud_volume delete %s/%s returns %d. err=%s\n", VolumeName, part, r, err); } rtn |= r; if ((cancel_cb && cancel_cb->fct && cancel_cb->fct(cancel_cb->arg))) { Mmsg(err, _("clean_cloud_volume. 
cancelled by job.\n")); return false; } } return ((ret|rtn) == 0); } struct get_cloud_volume_parts_list_read_cb_arg { POOLMEM **remain; ilist *parts; }; size_t get_cloud_volume_parts_list_read_cb(char *res, size_t sz, void* arg) { ilist *parts = NULL; bool wrong_string = false; size_t left = sz; POOLMEM **remain = NULL; get_cloud_volume_parts_list_read_cb_arg *_arg = (get_cloud_volume_parts_list_read_cb_arg*)arg; if (_arg) { parts = _arg->parts; remain = _arg->remain; } if (parts) { char * pch = strtok (res,"\n"); /* we enter the cb again and remaining string has not been processed */ if (remain && *remain && strlen(*remain) != 0) { pm_strcat(remain, pch); char *ext=strstr(*remain, "part."), *size=strstr(*remain, "size:"), *mtime=strstr(*remain, "mtime:"); if (ext && size && mtime) { cloud_part *part = (cloud_part*) malloc(sizeof(cloud_part)); part->index = str_to_uint64(&(ext[5])); part->mtime = str_to_uint64(&(mtime[6])); part->size = str_to_uint64((&(size[5]))); /* ***FIXME*** : wbn to retrieve SHA512 from cloud */ bmemzero(part->hash64, 64); parts->put(part->index, part); } else { wrong_string = true; } **remain = 0; left -= strlen(pch)+1; pch = strtok (NULL,"\n"); } while (!wrong_string && pch && (left > 0)) { char *ext=strstr(pch, "part."), *size=strstr(pch, "size:"), *mtime=strstr(pch, "mtime:"); if (ext && size && mtime) { cloud_part *part = (cloud_part*) malloc(sizeof(cloud_part)); part->index = str_to_uint64(&(ext[5])); part->mtime = str_to_uint64(&(mtime[6])); part->size = str_to_uint64((&(size[5]))); /* ***FIXME*** : wbn to retrieve SHA512 from cloud */ bmemzero(part->hash64, 64); parts->put(part->index, part); } else { pm_strcpy(remain, pch); } left -= strlen(pch)+1; pch = strtok (NULL, "\n"); } return wrong_string ? 0:sz; } return 0; } bool generic_driver::get_cloud_volume_parts_list(const char* volume_name, ilist *parts, cancel_callback *cancel_cb, POOLMEM *&err) { if (!volume_name) { Mmsg(err, _("get_cloud_volume_parts_list. No Volume name provided.\n")); return false; } if (!parts) { Mmsg(err, _("get_cloud_volume_parts_list. No parts list provided.\n")); return false; } get_cloud_volume_parts_list_read_cb_arg arg; arg.parts = parts; POOLMEM *p= get_memory(block_size); *p = 0; arg.remain = &p; read_callback pcb; pcb.fct = &get_cloud_volume_parts_list_read_cb; pcb.arg = (void*)&arg; int ret = call_fct("ls", volume_name, "part.", &pcb, NULL, cancel_cb, err); free_pool_memory(*arg.remain); /* 1 is the generic return code for path not found, except for was driver that always return 0 */ if (ret == 1) { err = strip_trailing_junk(err); pm_strcat(err, " Cloud volume "); pm_strcat(err, volume_name); pm_strcat(err, " not found.\n"); /* the volume could not be found in the cloud: it's possible to list a Volume on cloud that doesn't exist. 
Accept it as an OK behavior */ return true; } return (ret == 0); } struct get_cloud_volume_list_read_cb_arg { POOLMEM **remain; alist *volumes; }; void get_cloud_volumes_list_read_cb_append_to_volumes(char* c, alist* volumes) { if (volumes) { c = strip_trailing_slashes(c); char * vol = strrchr(c, '/'); if (vol) { vol++; volumes->append(bstrdup(vol)); } else { volumes->append(bstrdup(c)); } } } size_t get_cloud_volumes_list_read_cb(char* res, size_t size, void *arg) { alist *volumes = NULL; POOLMEM **remain = NULL; bool is_complete(res[size-1]=='\n'); get_cloud_volume_list_read_cb_arg *_arg = (get_cloud_volume_list_read_cb_arg*)arg; if (_arg) { volumes = _arg->volumes; remain = _arg->remain; } if (volumes) { /* do the actual process */ char * pch = strtok (res,"\n"); if (remain && *remain && strlen(*remain) != 0) { pm_strcat(remain, pch); get_cloud_volumes_list_read_cb_append_to_volumes(*remain, volumes); pch = strtok (NULL, "\n"); *remain = 0; } while (pch != NULL) { pm_strcpy(*remain, pch); pch = strtok (NULL, "\n"); if (pch || is_complete) { get_cloud_volumes_list_read_cb_append_to_volumes(*remain, volumes); } } return size; } return 0; } bool generic_driver::get_cloud_volumes_list(alist *volumes, cancel_callback *cancel_cb, POOLMEM *&err) { if (!volumes) { Mmsg(err, _("get_cloud_volumes_list. No volumes list provided.\n")); return false; } read_callback pcb; pcb.fct = &get_cloud_volumes_list_read_cb; get_cloud_volume_list_read_cb_arg arg; arg.volumes = volumes; POOLMEM *p= get_memory(block_size); *p = 0; arg.remain = &p; pcb.arg = (void*)&arg; int ret = call_fct("vol_ls", NULL, 0, &pcb, NULL, cancel_cb, err); free_pool_memory(*arg.remain); return (ret == 0); } //================================================= #ifdef TEST_PROGRAM int main (int argc, char *argv[]) { pthread_attr_t attr; void * start_heap = sbrk(0); (void)start_heap; setlocale(LC_ALL, ""); bindtextdomain("bacula", LOCALEDIR); textdomain("bacula"); init_stack_dump(); my_name_is(argc, argv, "generic_driver_test"); init_msg(NULL, NULL); daemon_start_time = time(NULL); set_thread_concurrency(150); lmgr_init_thread(); /* initialize the lockmanager stack */ pthread_attr_init(&attr); { /* init a stub cloud */ // CLOUD cloud = { // RES(),_("host_name"),_("norbtestbucket"),_("access_key"),_("secret_key"), // NULL, NULL, NULL, NULL, NULL, NULL, // 0, 0, 0, 0, 0, 0, 0, 0, 0, _("generic_cloud_driver")}; /*!! Change script location !!*/ // CLOUD cloud = { // RES(),_("host_name"),_("norbtestbucket"),_("access_key"),_("secret_key"), // NULL, NULL, NULL, NULL, NULL, NULL, // 0, 0, 0, 0, 0, 0, 0, 0, 0, _("google_cloud_driver")}; /*!! Change script location !!*/ CLOUD cloud = { RES(),_("host_name"),_("norbtestbucket"),_("access_key"),_("secret_key"), NULL, NULL, NULL, NULL, NULL, NULL, 0, 0, 0, 0, 0, 0, 0, 0, 0, _("oracle_cloud_driver")}; /*!! 
Change script location !!*/ /* init a driver with stub cloud */ generic_driver dev; POOLMEM *err=get_pool_memory(PM_MESSAGE); memset(err, 0, sizeof_pool_memory(err)); POOLMEM *tmp=get_pool_memory(PM_FNAME); memset(tmp, 0, sizeof_pool_memory(tmp)); printf("init\n"); bool res = dev.init(&cloud, err); if (err[0]) printf(err); ASSERT(res); /* retrieve all volumes for this cloud*/ alist volumes; cancel_callback ccb; printf("get_cloud_volumes_list\n"); res = dev.get_cloud_volumes_list(&volumes, &ccb, err); if (err[0]) printf(err); ASSERT(res); /* list all parts for each volume */ char *vol; foreach_alist(vol, &volumes) { ilist parts; printf("get_cloud_volume_parts_list for volume:%s\n", vol); res = dev.get_cloud_volume_parts_list(vol, &parts, &ccb, err); if (err[0]) printf(err); ASSERT(res); /* for each found part, transfer it to /tmp cache */ alist cache_parts; int max_size = parts.last_index(); for(int index=0; index<=max_size; index++ ) { if (cloud_part *p = (cloud_part *)parts.get(index)) { /* create a stub transfer */ Mmsg(tmp, "/tmp/%s/part.%d", vol, p->index); transfer xfer(p->size, NULL, tmp, vol, p->index, NULL, NULL, NULL); printf("copy_cloud_part_to_cache to %s\n", tmp); res = dev.copy_cloud_part_to_cache(&xfer); if (xfer.m_message[0]) printf(xfer.m_message); ASSERT(res); struct stat mstatp; res = (lstat(xfer.m_cache_fname, &mstatp) == 0); ASSERT(res); ASSERT((uint64_t)mstatp.st_size == p->size); /* save for later*/ cache_parts.append(bstrdup(tmp)); } } /* purge all the parts in the current volume */ printf("truncate_cloud_volume %s\n", vol); res = dev.truncate_cloud_volume(vol, &parts, &ccb, err); if (err[0]) printf(err); ASSERT(res); /* now push all the cache files back into the cloud */ char *c; foreach_alist(c, &cache_parts) { struct stat mstatp; res = (lstat(c, &mstatp) == 0); ASSERT(res); const char *ext=strstr(c, "part."); transfer xfer(mstatp.st_size, NULL, c, vol, atoi(&(ext[5])), NULL, NULL, NULL); printf("copy_cache_part_to_cloud %s\n", c); res = dev.copy_cache_part_to_cloud(&xfer); if (xfer.m_message[0]) printf(xfer.m_message); ASSERT(res); } } free_pool_memory(err); free_pool_memory(tmp); } printf("SUCCESS"); return 0; } #endif /* TEST_PROGRAM */ bacula-15.0.3/src/stored/store_mngr.c0000644000175000017500000001147514771010173017304 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Storage daemon's part of Storage Manager - responsible for retrieving needed * information in response to Director's query. */ #include "bacula.h" #include "stored.h" #include "store_mngr.h" #include "lib/store_mngr_cmds.h" static const int dbglvl = 200; /* Helper Compare method for the FreeSpace policy. * It's purpose is to have list with unique devices. 
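 * Returning 0 only when the devno fields match makes dlist::binary_insert()
 * treat entries that live on the same device as duplicates, so each physical
 * device is counted once when the free space is summed.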
*/ static int query_size_cmp(void *item1, void *item2) { struct query_size *qs1 = (struct query_size *) item1; struct query_size *qs2 = (struct query_size *) item2; /* We only want to avoid duplicates, order does not matter at all */ if (qs1->devno == qs2->devno) { return 0; } return 1; } static bool build_query_size(DEVICE *dev, struct query_size **qs) { uint64_t ts, fs = 0; struct stat statp; struct query_size *temp_qs; if (!dev) { return false; } /* Alloc helper struct, set correct size for this dev and add it to list * so that later we can sum size of all unique devices */ temp_qs = (struct query_size *) malloc(sizeof(struct query_size)); if (!temp_qs) { Dmsg1(dbglvl, "Failed to alloc query_size for dev: %s\n", dev->archive_name()); return false; } else { /* Make sure that freespace is valid and up to date */ if (!dev->update_freespace()) { Dmsg1(dbglvl, "update_free space failed to for dev: %s\n", dev->archive_name()); return false; } dev->get_freespace(&fs, &ts); temp_qs->size = fs; /* devno can be empty when e.g. volume is not open, need to call stat() in that case */ if (dev->devno == 0 && (stat(dev->archive_name(), &statp) == 0)) { temp_qs->devno = statp.st_dev; } else { temp_qs->devno = dev->devno; } } *qs = temp_qs; return true; } static bool query_freespace(JCR *jcr, const char *dev_name) { BSOCK *dir = jcr->dir_bsock; DEVRES *device; AUTOCHANGER *changer; uint64_t free_sum = 0; struct query_size *qs; alist dev_list(10, not_owned_by_alist); dlist size_list; bool found = false; /* Device sent can be either single device or an autochanger */ foreach_res(changer, R_AUTOCHANGER) { if (strcmp(changer->hdr.name, dev_name) == 0) { /* For now we sum free size of all autochanger's unique devices and report it as a single value * back to the DIR in response to the FreeSpace policy querying. 
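 * Duplicates are filtered out below with query_size_cmp(), so a multi-drive
 * changer whose drives share one filesystem is not counted twice.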
* We may want to change it at some point if we see that some other calculations/reporting make more sense */ foreach_alist(device, changer->device) { if (build_query_size(device->dev, &qs)) { dev_list.append(qs); } } found = true; /* No need to go through the device resource list now */ break; } } if (!found) { /* Device sent to query is a single device instead of an autochanger */ foreach_res(device, R_DEVICE) { if (strcmp(device->hdr.name, dev_name) == 0) { if (build_query_size(device->dev, &qs)) { dev_list.append(qs); } found = true; break; } } } /* Add free size for each unique device */ foreach_alist(qs, &dev_list) { struct query_size *tmp = (struct query_size *) size_list.binary_insert(qs, query_size_cmp); if (tmp == qs) { /* Unique item was added to the list, add it's size */ free_sum+=tmp->size; } else { /* Since this item won't be used, we need to free it manually */ free(qs); } } dir->fsend(OK_store_size, free_sum); return found; } /* Handler for DIR's 'store_mngr query' command */ bool query_store(JCR *jcr) { BSOCK *dir = jcr->dir_bsock; POOL_MEM dev_name; bool ok = false; if (strncmp(dir->msg, store_query, strlen(store_query)) != 0) { return false; } dir->fsend(OK_store_query); /* Return parameters asked by the DIR */ if (dir->recv() >= 0) { if (sscanf(dir->msg, store_query_freespace, dev_name.c_str()) == 1) { unbash_spaces(dev_name.c_str()); ok = query_freespace(jcr, dev_name.c_str()); } else { dir->fsend(store_query_unsupp_policy); } } return ok; } bacula-15.0.3/src/stored/dedup_dev.h0000644000175000017500000000333114771010173017061 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Inspired by vtape.h */ #ifndef DEDUP_DEV_H #define DEDUP_DEV_H /* enum loadable dedup drivers */ enum { D_LEGACY_DRIVER = 1, D_DEDUP2_DRIVER = 2 }; class dedup_dev : public file_dev { public: DedupEngine *dedupengine; dedup_dev(JCR *jcr, DEVRES *device): dedupengine(NULL) { }; virtual ~dedup_dev() {}; int device_specific_init(JCR*, DEVRES*); bool truncate(DCR *dcr); bool open_device(DCR *dcr, int omode); const char *print_type(); virtual void dbg_print(FILE *fp); void updateVolCatExtraBytes(uint64_t); boffset_t get_adata_size(DCR *dcr); bool setup_dedup_rehydration_interface(DCR *dcr); void free_dedup_rehydration_interface(DCR *dcr); GetMsg *get_msg_queue(JCR *jcr, BSOCK *sock, int32_t bufsize); void dedup_get_status(STATUS_PKT *sp, int options); const char *dedup_get_dedupengine_name() { return device->dedup->hdr.name; }; void term(DCR *dcr); virtual bool dedup_cmd(JCR *jcr); virtual void *dedup_get_dedupengine() { return (void*)dedupengine; }; virtual bool is_fs_nearly_full(uint64_t threshold); }; #endif /* DEDUP_DEV_H */ bacula-15.0.3/src/stored/spool.c0000644000175000017500000006254614771010173016266 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald All rights reserved. 
The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Spooling code * * Kern Sibbald, March 2004 * */ #include "bacula.h" #include "stored.h" /* Forward referenced subroutines */ static void make_unique_data_spool_filename(DCR *dcr, POOLMEM **name); static bool open_data_spool_file(DCR *dcr); static bool close_data_spool_file(DCR *dcr); static bool despool_data(DCR *dcr, bool commit); static int read_block_from_spool_file(DCR *dcr); static bool open_attr_spool_file(JCR *jcr, BSOCK *bs); static bool close_attr_spool_file(JCR *jcr, BSOCK *bs); static ssize_t write_spool_header(DCR *dcr, ssize_t *expected); static ssize_t write_spool_data(DCR *dcr, ssize_t *expected); static ssize_t write_spool_filemedia(DCR *dcr, ssize_t *expected); static bool write_spool_block(DCR *dcr); struct spool_stats_t { uint32_t data_jobs; /* current jobs spooling data */ uint32_t attr_jobs; uint32_t total_data_jobs; /* total jobs to have spooled data */ uint32_t total_attr_jobs; int64_t max_data_size; /* max data size */ int64_t max_attr_size; int64_t data_size; /* current data size (all jobs running) */ int64_t attr_size; }; static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; spool_stats_t spool_stats; /* * Header for data spool record */ struct spool_hdr { int32_t FirstIndex; /* FirstIndex for buffer */ int32_t LastIndex; /* LastIndex for buffer */ uint32_t len; /* length of next buffer */ uint32_t nbFileMedia; /* Number of File Media after the block */ }; enum { RB_EOT = 1, RB_ERROR, RB_OK }; void list_spool_stats(void sendit(const char *msg, int len, void *sarg), void *arg) { char ed1[30], ed2[30]; POOL_MEM msg(PM_MESSAGE); int len; len = Mmsg(msg, _("Spooling statistics:\n")); if (spool_stats.data_jobs || spool_stats.max_data_size) { len = Mmsg(msg, _("Data spooling: %u active jobs, %s bytes; %u total jobs, %s max bytes/job.\n"), spool_stats.data_jobs, edit_uint64_with_commas(spool_stats.data_size, ed1), spool_stats.total_data_jobs, edit_uint64_with_commas(spool_stats.max_data_size, ed2)); sendit(msg.c_str(), len, arg); } if (spool_stats.attr_jobs || spool_stats.max_attr_size) { len = Mmsg(msg, _("Attr spooling: %u active jobs, %s bytes; %u total jobs, %s max bytes.\n"), spool_stats.attr_jobs, edit_uint64_with_commas(spool_stats.attr_size, ed1), spool_stats.total_attr_jobs, edit_uint64_with_commas(spool_stats.max_attr_size, ed2)); sendit(msg.c_str(), len, arg); } } bool begin_data_spool(DCR *dcr) { bool stat = true; if (dcr->dev->is_aligned() || dcr->dev->is_dedup()) { dcr->jcr->spool_data = false; } if (dcr->jcr->spool_data) { Dmsg0(100, "Turning on data spooling\n"); dcr->spool_data = true; stat = open_data_spool_file(dcr); if (stat) { dcr->spooling = true; Jmsg(dcr->jcr, M_INFO, 0, _("Spooling data ...\n")); P(mutex); spool_stats.data_jobs++; V(mutex); } } return stat; } bool discard_data_spool(DCR *dcr) { if (dcr->spooling) { Dmsg0(100, "Data spooling discarded\n"); return close_data_spool_file(dcr); } return true; } bool commit_data_spool(DCR *dcr) { bool stat; if (dcr->spooling) { Dmsg0(100, "Committing spooled data\n"); stat = 
despool_data(dcr, true /*commit*/); if (!stat) { Dmsg1(100, _("Bad return from despool WroteVol=%d\n"), dcr->WroteVol); close_data_spool_file(dcr); return false; } return close_data_spool_file(dcr); } return true; } static void make_unique_data_spool_filename(DCR *dcr, POOLMEM **name) { const char *dir; if (dcr->dev->device->spool_directory) { dir = dcr->dev->device->spool_directory; } else { dir = working_directory; } Mmsg(name, "%s/%s.data.%u.%s.%s.spool", dir, my_name, dcr->jcr->JobId, dcr->jcr->Job, dcr->device->hdr.name); } static bool open_data_spool_file(DCR *dcr) { POOLMEM *name = get_pool_memory(PM_MESSAGE); int spool_fd; make_unique_data_spool_filename(dcr, &name); if ((spool_fd = open(name, O_CREAT|O_TRUNC|O_RDWR|O_BINARY|O_CLOEXEC, 0640)) >= 0) { dcr->spool_fd = spool_fd; dcr->jcr->spool_attributes = true; } else { berrno be; Jmsg(dcr->jcr, M_FATAL, 0, _("Open data spool file %s failed: ERR=%s\n"), name, be.bstrerror()); free_pool_memory(name); return false; } Dmsg1(100, "Created spool file: %s\n", name); free_pool_memory(name); return true; } static const char *spool_name = "*spool*"; /* * NB! This routine locks the device, but if committing will * not unlock it. If not committing, it will be unlocked. */ static bool despool_data(DCR *dcr, bool commit) { DEVICE *rdev; DCR *rdcr; bool ok = true; DEV_BLOCK *block; JCR *jcr = dcr->jcr; int stat; char ec1[50]; Dmsg0(100, "Despooling data\n"); if (jcr->dcr->job_spool_size == 0) { Jmsg(jcr, M_WARNING, 0, _("Despooling zero bytes. Your disk is probably FULL!\n")); } /* * Commit means that the job is done, so we commit, otherwise, we * are despooling because of user spool size max or some error * (e.g. filesystem full). */ if (commit) { Jmsg(jcr, M_INFO, 0, _("Committing spooled data to Volume \"%s\". Despooling %s bytes ...\n"), jcr->dcr->VolumeName, edit_uint64_with_commas(jcr->dcr->job_spool_size, ec1)); jcr->setJobStatus(JS_DataCommitting); } else { Jmsg(jcr, M_INFO, 0, _("Writing spooled data to Volume. Despooling %s bytes ...\n"), edit_uint64_with_commas(jcr->dcr->job_spool_size, ec1)); jcr->setJobStatus(JS_DataDespooling); } jcr->sendJobStatus(JS_DataDespooling); dcr->despool_wait = true; dcr->spooling = false; /* * We work with device blocked, but not locked so that * other threads -- e.g. reservations can lock the device * structure. */ dcr->dblock(BST_DESPOOLING); dcr->despool_wait = false; dcr->despooling = true; /* * This is really quite kludgy and should be fixed some time. * We create a dev structure to read from the spool file * in rdev and rdcr. 
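 * The read-side rdcr shares the writer's spool_fd and swaps block buffers
 * with it, so the loop below simply re-reads each spooled block and pushes
 * it to the real device with write_block_to_device().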
*/ rdev = New(file_dev); rdev->dev_name = get_memory(strlen(spool_name)+1); bstrncpy(rdev->dev_name, spool_name, strlen(spool_name)+1); rdev->errmsg = get_pool_memory(PM_EMSG); *rdev->errmsg = 0; rdev->max_block_size = dcr->dev->max_block_size; rdev->min_block_size = dcr->dev->min_block_size; rdev->device = dcr->dev->device; rdcr = new_dcr(jcr, NULL, rdev, SD_READ); rdcr->spool_fd = dcr->spool_fd; block = dcr->block; /* save block */ dcr->block = rdcr->block; /* make read and write block the same */ Dmsg1(800, "read/write block size = %d\n", block->buf_len); lseek(rdcr->spool_fd, 0, SEEK_SET); /* rewind */ #if defined(HAVE_POSIX_FADVISE) && defined(POSIX_FADV_WILLNEED) posix_fadvise(rdcr->spool_fd, 0, 0, POSIX_FADV_WILLNEED); #endif /* Add run time, to get current wait time */ int32_t despool_start = time(NULL) - jcr->run_time; set_new_file_parameters(dcr); for ( ; ok; ) { stat = read_block_from_spool_file(rdcr); if (stat == RB_EOT) { break; } else if (stat == RB_ERROR) { ok = false; break; } ok = dcr->write_block_to_device(); if (jcr->is_canceled()) { ok = false; break; } if (!ok) { Jmsg2(jcr, M_FATAL, 0, _("Fatal append error on device %s: ERR=%s\n"), dcr->dev->print_name(), dcr->dev->bstrerror()); Pmsg2(000, "Fatal append error on device %s: ERR=%s\n", dcr->dev->print_name(), dcr->dev->bstrerror()); /* Force in case Incomplete set */ jcr->forceJobStatus(JS_FatalError); } Dmsg3(800, "Write block ok=%d FI=%d LI=%d\n", ok, block->FirstIndex, block->LastIndex); } if (!dir_create_jobmedia_record(dcr)) { Jmsg2(jcr, M_FATAL, 0, _("Could not create JobMedia record for Volume=\"%s\" Job=%s\n"), dcr->getVolCatName(), jcr->Job); jcr->forceJobStatus(JS_FatalError); /* override any Incomplete */ } flush_jobmedia_queue(jcr); /* Set new file/block parameters for current dcr */ set_new_file_parameters(dcr); /* * Subtracting run_time give us elapsed time - wait_time since * we started despooling. Note, don't use time_t as it is 32 or 64 * bits depending on the OS and doesn't edit with %d */ int32_t despool_elapsed = time(NULL) - despool_start - jcr->run_time; if (despool_elapsed <= 0) { despool_elapsed = 1; } Jmsg(jcr, M_INFO, 0, _("Despooling elapsed time = %02d:%02d:%02d, Transfer rate = %s Bytes/second\n"), despool_elapsed / 3600, despool_elapsed % 3600 / 60, despool_elapsed % 60, edit_uint64_with_suffix(jcr->dcr->job_spool_size / despool_elapsed, ec1)); dcr->block = block; /* reset block */ #if defined(HAVE_POSIX_FADVISE) && defined(POSIX_FADV_DONTNEED) posix_fadvise(rdcr->spool_fd, 0, 0, POSIX_FADV_DONTNEED); #endif lseek(rdcr->spool_fd, 0, SEEK_SET); /* rewind */ if (ftruncate(rdcr->spool_fd, 0) != 0) { berrno be; Jmsg(jcr, M_ERROR, 0, _("Ftruncate spool file failed: ERR=%s\n"), be.bstrerror()); /* Note, try continuing despite ftruncate problem */ } P(mutex); if (spool_stats.data_size < dcr->job_spool_size) { spool_stats.data_size = 0; } else { spool_stats.data_size -= dcr->job_spool_size; } V(mutex); P(dcr->dev->spool_mutex); dcr->dev->spool_size -= dcr->job_spool_size; dcr->job_spool_size = 0; /* zap size in input dcr */ V(dcr->dev->spool_mutex); free_memory(rdev->dev_name); free_pool_memory(rdev->errmsg); /* Be careful to NULL the jcr and free rdev after free_dcr() */ rdcr->jcr = NULL; rdcr->set_dev(NULL); free_dcr(rdcr); free(rdev); dcr->spooling = true; /* turn on spooling again */ dcr->despooling = false; /* * Note, if committing we leave the device blocked. 
It will be removed in * release_device(); */ if (!commit) { dcr->dev->dunblock(); } jcr->sendJobStatus(JS_Running); return ok; } /* * Read a block from the spool file * * Returns RB_OK on success * RB_EOT when file done * RB_ERROR on error */ static int read_block_from_spool_file(DCR *dcr) { uint32_t rlen; ssize_t stat; spool_hdr hdr; DEV_BLOCK *block = dcr->block; JCR *jcr = dcr->jcr; rlen = sizeof(hdr); stat = read(dcr->spool_fd, (char *)&hdr, (size_t)rlen); if (stat == 0) { Dmsg0(100, "EOT on spool read.\n"); return RB_EOT; } else if (stat != (ssize_t)rlen) { if (stat == -1) { berrno be; Jmsg(dcr->jcr, M_FATAL, 0, _("Spool header read error. ERR=%s\n"), be.bstrerror()); } else { Pmsg2(000, _("Spool read error. Wanted %u bytes, got %d\n"), rlen, stat); Jmsg2(jcr, M_FATAL, 0, _("Spool header read error. Wanted %u bytes, got %d\n"), rlen, stat); } jcr->forceJobStatus(JS_FatalError); /* override any Incomplete */ return RB_ERROR; } rlen = hdr.len; if (rlen > block->buf_len) { Pmsg2(000, _("Spool block too big. Max %u bytes, got %u\n"), block->buf_len, rlen); Jmsg2(jcr, M_FATAL, 0, _("Spool block too big. Max %u bytes, got %u\n"), block->buf_len, rlen); jcr->forceJobStatus(JS_FatalError); /* override any Incomplete */ return RB_ERROR; } stat = read(dcr->spool_fd, (char *)block->buf, (size_t)rlen); if (stat != (ssize_t)rlen) { Pmsg2(000, _("Spool data read error. Wanted %u bytes, got %d\n"), rlen, stat); Jmsg2(dcr->jcr, M_FATAL, 0, _("Spool data read error. Wanted %u bytes, got %d\n"), rlen, stat); jcr->forceJobStatus(JS_FatalError); /* override any Incomplete */ return RB_ERROR; } /* Setup write pointers */ block->binbuf = rlen; block->bufp = block->buf + block->binbuf; block->FirstIndex = hdr.FirstIndex; block->LastIndex = hdr.LastIndex; block->VolSessionId = dcr->jcr->VolSessionId; block->VolSessionTime = dcr->jcr->VolSessionTime; /* Now, check if we have FileMedia records associated */ for (uint32_t i = 0; i < hdr.nbFileMedia ; i++) { FILEMEDIA_ITEM *fm = (FILEMEDIA_ITEM *) malloc(sizeof(FILEMEDIA_ITEM)); stat = read(dcr->spool_fd, fm, sizeof(FILEMEDIA_ITEM)); if (stat != sizeof(FILEMEDIA_ITEM)) { Pmsg2(000, _("Spool data read error. Wanted %u bytes, got %d\n"), sizeof(FILEMEDIA_ITEM), stat); Jmsg2(dcr->jcr, M_FATAL, 0, _("Spool data read error. Wanted %u bytes, got %d\n"), sizeof(FILEMEDIA_ITEM), stat); jcr->forceJobStatus(JS_FatalError); /* override any Incomplete */ free(fm); return RB_ERROR; } block->filemedia->append(fm); } Dmsg2(800, "Read block FI=%d LI=%d\n", block->FirstIndex, block->LastIndex); return RB_OK; } /* * Write a block to the spool file * * Returns: true on success or EOT * false on hard error */ bool write_block_to_spool_file(DCR *dcr) { uint32_t wlen, hlen; /* length to write */ bool despool = false; DEV_BLOCK *block = dcr->block; if (job_canceled(dcr->jcr)) { return false; } ASSERT(block->binbuf == ((uint32_t) (block->bufp - block->buf))); if (block->binbuf <= WRITE_BLKHDR_LENGTH) { /* Does block have data in it? 
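      A block holding only the header (binbuf <= WRITE_BLKHDR_LENGTH) carries
      no payload, so it is skipped here instead of being spooled.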
*/ return true; } hlen = sizeof(spool_hdr); wlen = block->binbuf; P(dcr->dev->spool_mutex); dcr->job_spool_size += hlen + wlen; dcr->dev->spool_size += hlen + wlen; if ((dcr->max_job_spool_size > 0 && dcr->job_spool_size >= dcr->max_job_spool_size) || (dcr->dev->max_spool_size > 0 && dcr->dev->spool_size >= dcr->dev->max_spool_size)) { despool = true; } V(dcr->dev->spool_mutex); P(mutex); spool_stats.data_size += hlen + wlen; if (spool_stats.data_size > spool_stats.max_data_size) { spool_stats.max_data_size = spool_stats.data_size; } V(mutex); if (despool) { char ec1[30], ec2[30]; if (dcr->max_job_spool_size > 0) { Jmsg(dcr->jcr, M_INFO, 0, _("User specified Job spool size reached: " "JobSpoolSize=%s MaxJobSpoolSize=%s\n"), edit_uint64_with_commas(dcr->job_spool_size, ec1), edit_uint64_with_commas(dcr->max_job_spool_size, ec2)); } else { Jmsg(dcr->jcr, M_INFO, 0, _("User specified Device spool size reached: " "DevSpoolSize=%s MaxDevSpoolSize=%s\n"), edit_uint64_with_commas(dcr->dev->spool_size, ec1), edit_uint64_with_commas(dcr->dev->max_spool_size, ec2)); } if (!despool_data(dcr, false)) { Pmsg0(000, _("Bad return from despool in write_block.\n")); return false; } /* Despooling cleared these variables so reset them */ P(dcr->dev->spool_mutex); dcr->job_spool_size += hlen + wlen; dcr->dev->spool_size += hlen + wlen; V(dcr->dev->spool_mutex); Jmsg(dcr->jcr, M_INFO, 0, _("Spooling data again ...\n")); } if (!write_spool_block(dcr)) { return false; } Dmsg2(800, "Wrote block FI=%d LI=%d\n", block->FirstIndex, block->LastIndex); empty_block(block); return true; } static bool rewind_spoolfile(DCR *dcr, ssize_t size, ssize_t expected) { JCR *jcr = dcr->jcr; if (size == 0) { return true; /* nothing to do */ } Jmsg(jcr, M_ERROR, 0, _("Error writing header to spool file." " Disk probably full. Attempting recovery. Wanted to write=%d got=%d\n"), (int)expected, (int)size); #if defined(HAVE_WIN32) boffset_t pos = _lseeki64(dcr->spool_fd, (__int64)0, SEEK_CUR); #else boffset_t pos = lseek(dcr->spool_fd, 0, SEEK_CUR); #endif if (ftruncate(dcr->spool_fd, pos - size) != 0) { berrno be; Jmsg(dcr->jcr, M_ERROR, 0, _("Ftruncate spool file failed: ERR=%s\n"), be.bstrerror()); /* Note, try continuing despite ftruncate problem */ } if (!despool_data(dcr, false)) { Jmsg(jcr, M_FATAL, 0, _("Fatal despooling error.")); jcr->forceJobStatus(JS_FatalError); /* override any Incomplete */ return false; } return true; } static bool write_spool_block(DCR *dcr) { ssize_t size = 0, ret; ssize_t expected = 0; for (int retry=0; retry <= 1; retry++) { /* Rewind if needed */ if (size > 0 && !rewind_spoolfile(dcr, size, expected)) { return false; } /* Try to write the header */ ret = write_spool_header(dcr, &expected); if (ret == -1) { /* I/O error, it's fatal */ goto bail_out; } else { size += ret; /* Keep the size written for a future rewind */ } if (ret != expected) { /* We don't have the size expected, rewind, despool and retry */ continue; } ret = write_spool_data(dcr, &expected); if (ret == -1) { /* I/O Error, it's fatal */ goto bail_out; } else { size += ret; /* Keep the size written for a furture rewind */ } if (ret != expected) { /* We don't have the size expected, rewind, despool and retry */ continue; } ret = write_spool_filemedia(dcr, &expected); if (ret == -1) { goto bail_out; } else { size += ret; } if (ret != expected) { continue; } return true; } bail_out: berrno be; Jmsg(dcr->jcr, M_FATAL, 0, _("Error writing block to spool file. 
ERR=%s\n"), be.bstrerror()); dcr->jcr->forceJobStatus(JS_FatalError); /* override any Incomplete */ return false; } static ssize_t write_spool_header(DCR *dcr, ssize_t *expected) { spool_hdr hdr; DEV_BLOCK *block = dcr->block; hdr.FirstIndex = block->FirstIndex; hdr.LastIndex = block->LastIndex; hdr.len = block->binbuf; hdr.nbFileMedia = block->filemedia->size(); *expected = sizeof(hdr); /* Write header */ return write(dcr->spool_fd, (char*)&hdr, sizeof(hdr)); } static ssize_t write_spool_filemedia(DCR *dcr, ssize_t *expected) { FILEMEDIA_ITEM *hdr; ssize_t stat, sum=0; DEV_BLOCK *block = dcr->block; *expected = sizeof(FILEMEDIA_ITEM) * block->filemedia->size(); foreach_alist(hdr, block->filemedia) { /* Write header */ stat = write(dcr->spool_fd, (char*)hdr, sizeof(*hdr)); if (stat == -1) { return stat; } sum += stat; if (stat != (ssize_t)sizeof(*hdr)) { return sum; } } return sum; } static ssize_t write_spool_data(DCR *dcr, ssize_t *expected) { DEV_BLOCK *block = dcr->block; *expected = block->binbuf; return write(dcr->spool_fd, block->buf, (size_t)block->binbuf); } static bool close_data_spool_file(DCR *dcr) { POOLMEM *name = get_pool_memory(PM_MESSAGE); P(mutex); spool_stats.data_jobs--; spool_stats.total_data_jobs++; if (spool_stats.data_size < dcr->job_spool_size) { spool_stats.data_size = 0; } else { spool_stats.data_size -= dcr->job_spool_size; } V(mutex); P(dcr->dev->spool_mutex); dcr->job_spool_size = 0; V(dcr->dev->spool_mutex); make_unique_data_spool_filename(dcr, &name); close(dcr->spool_fd); dcr->spool_fd = -1; dcr->spooling = false; unlink(name); Dmsg1(100, "Deleted spool file: %s\n", name); free_pool_memory(name); return true; } bool are_attributes_spooled(JCR *jcr) { return jcr->spool_attributes && jcr->dir_bsock->m_spool_fd; } /* * Create spool file for attributes. * This is done by "attaching" to the bsock, and when * it is called, the output is written to a file. * The actual spooling is turned on and off in * append.c only during writing of the attributes. */ bool begin_attribute_spool(JCR *jcr) { if (!jcr->no_attributes && jcr->spool_attributes) { return open_attr_spool_file(jcr, jcr->dir_bsock); } return true; } static void update_attr_spool_size(ssize_t size) { P(mutex); if (size > 0) { if ((spool_stats.attr_size - size) > 0) { spool_stats.attr_size -= size; } else { spool_stats.attr_size = 0; } } V(mutex); } static void make_unique_spool_filename(JCR *jcr, POOLMEM **name, int fd) { Mmsg(name, "%s/%s.attr.%s.%d.spool", working_directory, my_name, jcr->Job, fd); } /* * Tell Director where to find the attributes spool file * Note, if we are not on the same machine, the Director will * return an error, and the higher level routine will transmit * the data record by record -- using bsock->despool(). 
*/ static bool blast_attr_spool_file(JCR *jcr, boffset_t size) { /* send full spool file name */ POOLMEM *name = get_pool_memory(PM_MESSAGE); make_unique_spool_filename(jcr, &name, jcr->dir_bsock->m_fd); bash_spaces(name); jcr->dir_bsock->fsend("BlastAttr JobId=%d File=%s\n", jcr->JobId, name); free_pool_memory(name); if (jcr->dir_bsock->recv() <= 0) { Jmsg(jcr, M_FATAL, 0, _("Network error on BlastAttributes.\n")); jcr->forceJobStatus(JS_FatalError); /* override any Incomplete */ return false; } if (!bstrcmp(jcr->dir_bsock->msg, "1000 OK BlastAttr\n")) { return false; } return true; } bool commit_attribute_spool(JCR *jcr) { boffset_t size, data_end; char ec1[30]; char tbuf[100]; BSOCK *dir; Dmsg1(100, "Commit attributes at %s\n", bstrftimes(tbuf, sizeof(tbuf), (utime_t)time(NULL))); if (are_attributes_spooled(jcr)) { dir = jcr->dir_bsock; if (fseeko(dir->m_spool_fd, 0, SEEK_END) != 0) { berrno be; Jmsg(jcr, M_FATAL, 0, _("Fseek on attributes file failed: ERR=%s\n"), be.bstrerror()); jcr->forceJobStatus(JS_FatalError); /* override any Incomplete */ goto bail_out; } size = ftello(dir->m_spool_fd); /* For Incomplete Job truncate spool file to last valid data_end if necssary */ if (jcr->is_JobStatus(JS_Incomplete)) { data_end = dir->get_last_data_end(); if (size > data_end) { if (ftruncate(fileno(dir->m_spool_fd), data_end) != 0) { berrno be; Jmsg(jcr, M_FATAL, 0, _("Truncate on attributes file failed: ERR=%s\n"), be.bstrerror()); jcr->forceJobStatus(JS_FatalError); /* override any Incomplete */ goto bail_out; } Dmsg2(100, "=== Attrib spool truncated from %lld to %lld\n", size, data_end); size = data_end; } } if (size < 0) { berrno be; Jmsg(jcr, M_FATAL, 0, _("Fseek on attributes file failed: ERR=%s\n"), be.bstrerror()); jcr->forceJobStatus(JS_FatalError); /* override any Incomplete */ goto bail_out; } P(mutex); if (spool_stats.attr_size + size > spool_stats.max_attr_size) { spool_stats.max_attr_size = spool_stats.attr_size + size; } spool_stats.attr_size += size; V(mutex); jcr->sendJobStatus(JS_AttrDespooling); Jmsg(jcr, M_INFO, 0, _("Sending spooled attrs to the Director. Despooling %s bytes ...\n"), edit_uint64_with_commas(size, ec1)); if (!blast_attr_spool_file(jcr, size)) { /* Can't read spool file from director side, * send content over network. 
*/ dir->despool(update_attr_spool_size, size); } return close_attr_spool_file(jcr, dir); } return true; bail_out: close_attr_spool_file(jcr, dir); return false; } static bool open_attr_spool_file(JCR *jcr, BSOCK *bs) { POOLMEM *name = get_pool_memory(PM_MESSAGE); make_unique_spool_filename(jcr, &name, bs->m_fd); bs->m_spool_fd = bfopen(name, "w+b"); if (!bs->m_spool_fd) { berrno be; Jmsg(jcr, M_FATAL, 0, _("fopen attr spool file %s failed: ERR=%s\n"), name, be.bstrerror()); jcr->forceJobStatus(JS_FatalError); /* override any Incomplete */ free_pool_memory(name); return false; } P(mutex); spool_stats.attr_jobs++; V(mutex); free_pool_memory(name); return true; } static bool close_attr_spool_file(JCR *jcr, BSOCK *bs) { POOLMEM *name; char tbuf[100]; Dmsg1(100, "Close attr spool file at %s\n", bstrftimes(tbuf, sizeof(tbuf), (utime_t)time(NULL))); if (!bs->m_spool_fd) { return true; } name = get_pool_memory(PM_MESSAGE); P(mutex); spool_stats.attr_jobs--; spool_stats.total_attr_jobs++; V(mutex); make_unique_spool_filename(jcr, &name, bs->m_fd); fclose(bs->m_spool_fd); unlink(name); free_pool_memory(name); bs->m_spool_fd = NULL; bs->clear_spooling(); return true; } bool discard_attribute_spool(JCR *jcr) { if (are_attributes_spooled(jcr)) { return close_attr_spool_file(jcr, jcr->dir_bsock); } return true; } bacula-15.0.3/src/stored/aligned_dev.h0000644000175000017500000001220714771010173017365 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * Inspired by vtape.h */ #ifndef _ALIGNED_DEV_H_ #define _ALIGNED_DEV_H_ class aligned_dev : public file_dev { protected: void get_volume_fpath(const char *vol_name, POOLMEM **buf); public: aligned_dev(); ~aligned_dev(); aligned_dev *paired_dev; /* Paired meta data or aligned data device */ boffset_t get_adata_size(DCR *dcr); boffset_t align_adata_addr(DCR *dcr, boffset_t addr); boffset_t get_adata_addr(DCR *dcr); void set_adata_addr(DCR *dcr); void clear_adata_addr(); /* DEVICE virtual functions that we redefine */ void set_file_size(uint64_t val); uint64_t update_file_size(uint64_t add); void setVolCatName(const char *name); void setVolCatStatus(const char *status); void free_dcr_blocks(DCR *dcr); void new_dcr_blocks(DCR *dcr); void updateVolCatBytes(uint64_t); void updateVolCatBlocks(uint32_t); void updateVolCatWrites(uint32_t); void updateVolCatReads(uint32_t); void updateVolCatReadBytes(uint64_t); void updateVolCatPadding(uint64_t); bool setVolCatAdataBytes(uint64_t bytes); void updateVolCatHoleBytes(uint64_t bytes); void device_specific_open(DCR *dcr); void set_volcatinfo_from_dcr(DCR *dcr); bool allow_maxbytes_concurrency(DCR *dcr); bool flush_before_eos(DCR *dcr); void set_nospace(); void set_append(); void set_read(); void clear_nospace(); void clear_append(); void clear_read(); int device_specific_init(JCR *jcr, DEVRES *device); int device_specific_close(DCR *dcr); int d_close(int fd); int d_open(const char *pathname, int flags); int d_ioctl(int fd, ioctl_req_t request, char *mt_com); ssize_t d_read(int fd, void *buffer, size_t count); ssize_t d_write(int, const void *buffer, size_t count); boffset_t lseek(DCR *dcr, off_t offset, int whence); bool rewind(DCR *dcr); bool reposition(DCR *dcr, uint64_t raddr); bool open_device(DCR *dcr, int omode); bool truncate(DCR *dcr); bool close(DCR *dcr); void term(DCR *dcr); bool eod(DCR *dcr); bool update_pos(DCR *dcr); bool mount_file(int mount, int dotimeout); bool is_indexed() { return !adata; }; int read_dev_volume_label(DCR *dcr); const char *print_type(); DEVICE *get_dev(DCR *dcr); uint32_t get_hi_addr(); uint32_t get_low_addr(); uint64_t get_full_addr(); uint64_t get_full_addr(boffset_t addr); bool do_size_checks(DCR *dcr, DEV_BLOCK *block); bool write_volume_label_to_block(DCR *dcr); bool write_volume_label_to_dev(DCR *dcr, const char *VolName, const char *PoolName, bool relabel, bool no_prelabel); bool write_adata_label(DCR *dcr, DEV_RECORD *rec); void write_adata(DCR *dcr, DEV_RECORD *rec); void write_cont_adata(DCR *dcr, DEV_RECORD *rec); int write_adata_rechdr(DCR *dcr, DEV_RECORD *rec); bool read_adata_record_header(DCR *dcr, DEV_BLOCK *block, DEV_RECORD *rec, bool *firstcall); void read_adata_block_header(DCR *dcr); int read_adata(DCR *dcr, DEV_RECORD *rec); bool have_adata_header(DCR *dcr, DEV_RECORD *rec, int32_t FileIndex, int32_t Stream, uint32_t VolSessionId); void select_data_stream(DCR *dcr, DEV_RECORD *rec); bool flush_block(DCR *dcr); bool do_pre_write_checks(DCR *dcr, DEV_RECORD *rec); bool get_os_device_freespace(); bool is_fs_nearly_full(uint64_t threshold); int rehydrate_record(DCR *dcr, DEV_RECORD *rec); int set_writable(int fd, const char *vol_name, POOLMEM **error); int set_readonly(int fd, const char *vol_name, POOLMEM **error); int set_atime(int fd, const char *vol_name, btime_t val, POOLMEM **error); /* * Locking and blocking calls */ #ifdef DEV_DEBUG_LOCK void dbg_Lock(const char *, int); void dbg_Unlock(const char *, int); void dbg_rLock(const char *, int, bool locked=false); void dbg_rUnlock(const 
char *, int); #else void Lock(); void Unlock(); void rLock(bool locked=false); void rUnlock(); #endif #ifdef SD_DEBUG_LOCK void dbg_Lock_acquire(const char *, int); void dbg_Unlock_acquire(const char *, int); void dbg_Lock_read_acquire(const char *, int); void dbg_Unlock_read_acquire(const char *, int); void dbg_Lock_VolCatInfo(const char *, int); void dbg_Unlock_VolCatInfo(const char *, int); #else void Lock_acquire(); void Unlock_acquire(); void Lock_read_acquire(); void Unlock_read_acquire(); void Lock_VolCatInfo(); void Unlock_VolCatInfo(); #endif void dblock(int why); /* in lock.c */ void dunblock(bool locked=false); }; #endif /* _ALIGNED_DEV_H_ */ bacula-15.0.3/src/stored/cloud_dev.c0000644000175000017500000026603714771010173017077 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Generic routines for creating Cloud compatibile Volumes. * NOTE!!! This cloud device is not compatible with * any disk-changer script for changing Volumes. * It does however work with Bacula Virtual autochangers. * * Written by Kern Sibbald, May MMXVI * */ #include "bacula.h" #include "stored.h" #include #include "cloud_dev.h" #include "s3_driver.h" #include "file_driver.h" #include "generic_driver.h" #include "cloud_parts.h" #include "math.h" static const int64_t dbglvl = DT_CLOUD|50; #define ASYNC_TRANSFER 1 #define NUM_DOWNLOAD_WORKERS 3 #define NUM_UPLOAD_WORKERS 3 /* Debug only: Enable to introduce random transfer delays*/ /* #define RANDOM_WAIT_ENABLE*/ #define RANDOM_WAIT_MIN 2 /* minimum delay time*/ #define RANDOM_WAIT_MAX 12 /* maxinum delay time*/ /* retry timeouts on transfer manager (arbitrary) start 1 minute, max 5 minutes*/ #define WAIT_TIMEOUT_INC_INSEC 60 #define MAX_WAIT_TIMEOUT_INC_INSEC 300 #define XFER_TMP_NAME "xfer" #include #if defined(HAVE_WIN32) #define lseek _lseeki64 #endif /* standard dcr cancel callback function */ bool DCR_cancel_cb(void* arg) { DCR *dcr = (DCR*)arg; if (dcr && dcr->jcr) { return dcr->jcr->is_canceled(); } return false; } #ifdef __cplusplus extern "C" { #endif DEVICE *BaculaSDdriver(JCR *jcr, DEVRES *device) { DEVICE *dev; if (!device->cloud) { Jmsg0(jcr, M_FATAL, 0, _("A Cloud resource is required for the Cloud driver, but is missing.\n")); return NULL; } dev = New(cloud_dev(jcr, device)); return dev; } #ifdef __cplusplus } #endif /******************* Driver loading *******************/ /* Define possible extensions */ #if defined(HAVE_WIN32) #define DRV_EXT ".dll" #elif defined(HAVE_DARWIN_OS) #define DRV_EXT ".dylib" #else #define DRV_EXT ".so" #endif #ifndef RTLD_NOW #define RTLD_NOW 2 #endif /* Forward referenced functions */ extern "C" { typedef cloud_driver *(*newCloudDriver_t)(void); } static cloud_driver *load_driver(JCR *jcr, uint cloud_driver_type); /* * Driver item for driver table */ struct cloud_driver_item { const char *name; void *handle; newCloudDriver_t newDriver; bool builtin; bool loaded; }; /* * Driver table. 
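 *
 * Each entry below is resolved on demand by load_driver() further down,
 * which dlopen()s a shared object named after the entry and looks up its
 * single factory symbol.  A reduced standalone sketch of that lookup is
 * given here; the ".so" extension is assumed (the real code picks the
 * per-platform extension), error reporting is trimmed to stderr, and the
 * handle is not cached back into the table.
 */
#include <dlfcn.h>
#include <stdio.h>

/* Entry point every cloud driver shared object is expected to export. */
typedef void *(*new_cloud_driver_sketch_t)(void);

static void *load_one_driver_sketch(const char *plugin_dir, const char *name,
                                    const char *version)
{
   char path[4096];
   snprintf(path, sizeof(path), "%s/bacula-sd-cloud-%s-driver-%s.so",
            plugin_dir, name, version);

   void *handle = dlopen(path, RTLD_NOW);
   if (!handle) {
      fprintf(stderr, "dlopen %s failed: %s\n", path, dlerror());
      return NULL;
   }
   new_cloud_driver_sketch_t factory =
      (new_cloud_driver_sketch_t)dlsym(handle, "BaculaCloudDriver");
   if (!factory) {
      fprintf(stderr, "symbol BaculaCloudDriver missing in %s: %s\n",
              path, dlerror());
      dlclose(handle);
      return NULL;
   }
   return factory();        /* opaque driver object */
}

/*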
Must be in same order as the B_xxx_DEV type * name handle, builtin loaded */ static cloud_driver_item driver_tab[] = { /* name handle, newDriver builtin loaded */ {"s3", NULL, NULL, false, false}, {"file", NULL, NULL, false, false}, {"was", NULL, NULL, false, false}, {"gs", NULL, NULL, false, false}, {"oci", NULL, NULL, false, false}, {"generic", NULL, NULL, false, false}, {"swift", NULL, NULL, false, false}, {"aws", NULL, NULL, false, false}, {NULL, NULL, NULL, false, false} }; static const char* cloud_driver_type_name[] = { "Unknown", "S3", "File", "Azure", "Google", "Oracle", "Generic", "Swift", "Amazon", NULL }; //static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; /******************* Driver loading *******************/ transfer_manager cloud_dev::download_mgr(transfer_manager(NUM_DOWNLOAD_WORKERS)); transfer_manager cloud_dev::upload_mgr(transfer_manager(NUM_UPLOAD_WORKERS)); /* Imported functions */ const char *mode_to_str(int mode); int breaddir(DIR *dirp, POOLMEM *&dname); /* Forward referenced functions */ bool makedir(JCR *jcr, char *path, mode_t mode); /* Const and Static definitions */ /* Address manipulations: * * The current idea is internally use part and offset-in-part * However for sending back JobMedia, we need to use * get_full_addr() which puts the file in the top 20 bits. */ static boffset_t get_offset(boffset_t ls_offset) { boffset_t pos = ls_offset & off_mask; return pos; } static boffset_t make_addr(uint32_t my_part, boffset_t my_offset) { return (boffset_t)(((uint64_t)my_part)<m_volume_name) && (upart == t->m_part)) { return t; } } return NULL; } /* * This upload_engine is called by workq in a worker thread. * it receives the transfer packet to be processed * it returns the state the manager should transition to after once processed. * TRANS_STATE_DONE: processing was OK * TRANS_STATE_ERROR: error during processing (no retry) * TRANS_STATE_QUEUED: processing should be retry */ transfer_state upload_engine(transfer *tpkt) { #ifdef RANDOM_WAIT_ENABLE srand(time(NULL)); /* wait between 2 and 12 seconds */ int s_time = RANDOM_WAIT_MIN + rand() % (RANDOM_WAIT_MAX-RANDOM_WAIT_MIN); bmicrosleep(s_time, 0); #endif if (tpkt && tpkt->m_driver) { /* call the driver method async */ Dmsg4(dbglvl, "Upload start %s-%d JobId : %d driver :%p\n", tpkt->m_volume_name, tpkt->m_part, tpkt->m_job_id, tpkt->m_driver); cancel_callback cancel_cb; cancel_cb.fct = DCR_cancel_cb; cancel_cb.arg = tpkt->m_dcr; /* don't version part.1 */ if (tpkt->m_part != 1) { /* from btime.c */ time_t td = time(NULL); struct tm tm; (void)localtime_r(&td, &tm); POOL_MEM strtime; strftime(strtime.c_str(), strtime.size(), "part%Y%m%d%H%M%S", &tm); /* form the target part name */ POOL_MEM target_part_name; Mmsg(target_part_name, "%s.%d", strtime.c_str(), tpkt->m_part); /* exists is passed as argument */ int exists=0; POOLMEM *msg = get_pool_memory(PM_FNAME); msg[0] = 0; /* try to move the part to the target part */ if (tpkt->m_driver->move_cloud_part(tpkt->m_volume_name, tpkt->m_part, target_part_name.c_str(), &cancel_cb, msg, exists) ) { if (exists == 0) { /* sucessful + !exists : the source path has not been found -> OK ignore */ } else { /* sucessful + exists : the source part has been moved to target part name */ //Jmsg(tpkt->m_dcr->jcr, M_INFO, 0, _("%s/part.%d was present on the cloud and has been versioned to %s\n"), Dmsg3(dbglvl, _("%s/part.%d was present on the cloud and has been versioned to %s\n"), tpkt->m_volume_name, tpkt->m_part, msg); } } else { Dmsg4(dbglvl, "Move error!! 
JobId=%d part=%d Vol=%s cache=%s\n", tpkt->m_job_id, tpkt->m_part, tpkt->m_volume_name, tpkt->m_cache_fname); POOL_MEM dmsg(PM_MESSAGE); tpkt->append_status(dmsg); Dmsg1(dbglvl, "%s\n",dmsg.c_str()); } free_pool_memory(msg); } /* upload the cache part to the cloud */ if (!tpkt->m_driver->copy_cache_part_to_cloud(tpkt)) { /* Error message already sent by Qmsg() */ Dmsg4(dbglvl, "Upload error!! JobId=%d part=%d Vol=%s cache=%s\n", tpkt->m_job_id, tpkt->m_part, tpkt->m_volume_name, tpkt->m_cache_fname); POOL_MEM dmsg(PM_MESSAGE); tpkt->append_status(dmsg); Dmsg1(dbglvl, "%s\n",dmsg.c_str()); return TRANS_STATE_ERROR; } Dmsg2(dbglvl, "Upload end JobId : %d driver :%p\n", tpkt->m_job_id, tpkt->m_driver); if (tpkt->m_do_cache_truncate && tpkt->m_part!=1) { bool allow_truncate = false; uint64_t cloud_size = 0; if (tpkt->m_state == TRANS_STATE_PROCESSED && tpkt->m_res_size != 0 && tpkt->m_res_mtime != 0) { /* so far so good for truncation */ /* double check if the cache size matches the transfer size */ struct stat statbuf; if (lstat(tpkt->m_cache_fname, &statbuf) == -1) { berrno be; Dmsg2(dbglvl, "Failed to stat cache file %s. ERR=%s\n", tpkt->m_cache_fname, be.bstrerror()); allow_truncate = false; } else { /* if sizes do match, we can truncate */ cloud_size = (uint64_t)statbuf.st_size; allow_truncate = (cloud_size == tpkt->m_res_size); } } if (allow_truncate) { if (unlink(tpkt->m_cache_fname) != 0) { berrno be; Dmsg2(dbglvl, "Truncate cache option after upload. Unable to truncate %s. ERR=%s\n", tpkt->m_cache_fname, be.bstrerror()); } else { Dmsg1(dbglvl, "Truncate cache option after upload. %s OK\n", tpkt->m_cache_fname); } } else { Dmsg4(dbglvl, "Truncate cache option after upload skipped. %s state=%d cache size=%lld cloud size =%lld\n", tpkt->m_cache_fname, tpkt->m_state, tpkt->m_res_size, cloud_size); } } } return TRANS_STATE_DONE; } /* Forward declaration*/ transfer_state download_engine(transfer *); /* This wait_engine is called by workq in a worker thread * as an alternative to download_engine when download has been postponned. * it receives the transfer packet to be processed * it returns the state the manager should transition to once processed. * TRANS_STATE_DONE: processing was OK * TRANS_STATE_ERROR: error during processing (no retry) * TRANS_STATE_QUEUED: processing should be retry */ transfer_state wait_engine(transfer *tpkt) { if (tpkt) { btime_t now = time(NULL); if (now < tpkt->m_wait_timeout) { sleep(10); return TRANS_STATE_QUEUED; } if (tpkt->m_driver && tpkt->m_driver->is_waiting_on_server(tpkt)) { Dmsg3(dbglvl, "JobId=%d %s/part.%d waiting...\n", tpkt->m_job_id, tpkt->m_volume_name, tpkt->m_part); lock_guard lg(tpkt->m_mutex); /* increase the timeout increment up to MAX_WAIT_TIMEOUT_INC_INSEC value */ if (tpkt->m_wait_timeout_inc_insec < MAX_WAIT_TIMEOUT_INC_INSEC) { tpkt->m_wait_timeout_inc_insec += WAIT_TIMEOUT_INC_INSEC; if (tpkt->m_wait_timeout_inc_insec > MAX_WAIT_TIMEOUT_INC_INSEC) { tpkt->m_wait_timeout_inc_insec = MAX_WAIT_TIMEOUT_INC_INSEC; } } tpkt->m_wait_timeout = time(NULL)+tpkt->m_wait_timeout_inc_insec; return TRANS_STATE_QUEUED; } else { Dmsg3(dbglvl, "JobId=%d %s/part.%d is ready!\n", tpkt->m_job_id, tpkt->m_volume_name, tpkt->m_part); lock_guard lg(tpkt->m_mutex); tpkt->m_wait_timeout_inc_insec = 0; tpkt->m_funct = download_engine; return TRANS_STATE_QUEUED; } } return TRANS_STATE_QUEUED; } /* * This download_engine is called by workq in a worker thread. 
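 *
 * Before a postponed packet reaches it, the packet cycles through
 * wait_engine() above, which re-queues itself with a stepped, capped
 * delay.  A minimal standalone sketch of just that timing rule (the 10s
 * polling sleep and the hand-back to download_engine are left out):
 */
#include <time.h>

/* Same constants as above: grow by 1 minute per retry, never beyond 5. */
enum { WAIT_INC_SKETCH = 60, WAIT_INC_MAX_SKETCH = 300 };

static time_t next_retry_deadline_sketch(int *inc_insec)
{
   if (*inc_insec < WAIT_INC_MAX_SKETCH) {
      *inc_insec += WAIT_INC_SKETCH;
      if (*inc_insec > WAIT_INC_MAX_SKETCH) {
         *inc_insec = WAIT_INC_MAX_SKETCH;
      }
   }
   return time(NULL) + *inc_insec;    /* retry no earlier than this */
}

/*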
* it receives the transfer packet to be processed * it returns the state the manager should transition to after process * TRANS_STATE_DONE: processing was OK * TRANS_STATE_ERROR: error during processing (no retry) * TRANS_STATE_QUEUED: processing should be retry */ transfer_state download_engine(transfer *tpkt) { #ifdef RANDOM_WAIT_ENABLE srand(time(NULL)); /* wait between 2 and 12 seconds */ int s_time = RANDOM_WAIT_MIN + rand() % (RANDOM_WAIT_MAX-RANDOM_WAIT_MIN); bmicrosleep(s_time, 0); #endif if (tpkt && tpkt->m_driver) { /* call the driver method async */ Dmsg4(dbglvl, "JobId=%d %s/part.%d download started to %s.\n", tpkt->m_job_id, tpkt->m_volume_name, tpkt->m_part, tpkt->m_cache_fname); Dmsg4(dbglvl, "%s/part.%d download started. job : %d driver :%p\n", tpkt->m_volume_name, tpkt->m_part, tpkt->m_job_id, tpkt->m_driver); int ret = tpkt->m_driver->copy_cloud_part_to_cache(tpkt); switch (ret) { case cloud_driver::CLOUD_DRIVER_COPY_PART_TO_CACHE_OK: { POOLMEM *cache_fname = get_pool_memory(PM_FNAME); pm_strcpy(cache_fname, tpkt->m_cache_fname); char *p = strstr(cache_fname, XFER_TMP_NAME); char partnumber[50]; bsnprintf(partnumber, sizeof(partnumber), "part.%d", tpkt->m_part); strcpy(p,partnumber); if (rename(tpkt->m_cache_fname, cache_fname) != 0) { Dmsg5(dbglvl, "JobId=%d %s/part.%d download. part copy from %s to %s error!!\n", tpkt->m_job_id, tpkt->m_volume_name, tpkt->m_part, tpkt->m_cache_fname, cache_fname); free_pool_memory(cache_fname); return TRANS_STATE_ERROR; } free_pool_memory(cache_fname); return TRANS_STATE_DONE; } case cloud_driver::CLOUD_DRIVER_COPY_PART_TO_CACHE_ERROR: { Dmsg4(dbglvl, "JobId=%d %s/part.%d download to cache=%s error!!\n", tpkt->m_job_id, tpkt->m_volume_name, tpkt->m_part, tpkt->m_cache_fname); POOL_MEM dmsg(PM_MESSAGE); tpkt->append_status(dmsg); Dmsg1(dbglvl, "%s\n",dmsg.c_str()); /* download failed -> remove the temp xfer file */ if (unlink(tpkt->m_cache_fname) != 0) { berrno be; Dmsg2(dbglvl, "Unable to delete %s. ERR=%s\n", tpkt->m_cache_fname, be.bstrerror()); } else { Dmsg1(dbglvl, "Unlink file %s\n", tpkt->m_cache_fname); } return TRANS_STATE_ERROR; } case cloud_driver::CLOUD_DRIVER_COPY_PART_TO_CACHE_RETRY: { lock_guard lg(tpkt->m_mutex); Dmsg4(dbglvl, "JobId=%d %s/part.%d download to cache=%s retry... \n", tpkt->m_job_id, tpkt->m_volume_name, tpkt->m_part, tpkt->m_cache_fname); tpkt->m_wait_timeout_inc_insec = WAIT_TIMEOUT_INC_INSEC; tpkt->m_wait_timeout = time(NULL)+ tpkt->m_wait_timeout_inc_insec; tpkt->m_funct = wait_engine; return TRANS_STATE_QUEUED; } default: break; } } return TRANS_STATE_DONE; } /* * Upload the given part to the cloud */ bool cloud_dev::upload_part_to_cloud(DCR *dcr, const char *VolumeName, uint32_t upart, bool do_truncate) { /* for a "regular" backup job, don't proceed with the upload, but pretend everything went OK. 
*/ /* Otherwise, pass thru and proceed with the upload, even if UPLOAD_NO is set */ bool internal_job = dcr->jcr->is_internal_job() || dcr->jcr->is_JobType(JT_ADMIN); if ((upload_opt == UPLOAD_NO) && !internal_job) { return true; } bool ret=false; if (upart == 0 || get_list_transfer(dcr->uploads, VolumeName, upart)) { return ret; } uint64_t file_size=0; POOLMEM *cache_fname = get_pool_memory(PM_FNAME); make_cache_filename(cache_fname, VolumeName, upart); /* part is valid and no upload for the same part is scheduled */ if (!upload_mgr.find(VolumeName,upart)) { Enter(dbglvl); /* statistics require the size to transfer */ struct stat statbuf; if (lstat(cache_fname, &statbuf) < 0) { berrno be; Mmsg2(errmsg, "Failed to find cache part file %s. ERR=%s\n", cache_fname, be.bstrerror()); Dmsg1(dbglvl, "%s", errmsg); free_pool_memory(cache_fname); Leave(dbglvl); return false; } file_size = statbuf.st_size; /* Nothing to do with this empty part */ if (file_size == 0) { free_pool_memory(cache_fname); Leave(dbglvl); return true; /* consider the transfer OK */ } ret=true; } Dmsg1(dbglvl, "upload_part_to_cloud: %s\n", cache_fname); /* get_xfer either returns a new transfer or a similar one if it already exists. * in this case, the transfer is shared and ref_count is incremented. The caller should only care to release() * the transfer eventually. The transfer_mgr is in charge of deleting the transfer when no one shares it anymore*/ transfer *item = upload_mgr.get_xfer(file_size, upload_engine, cache_fname,/* cache_fname is duplicated in the transfer constructor*/ VolumeName, /* VolumeName is duplicated in the transfer constructor*/ name(), /* device name() is duplicated in the transfer constructor*/ upart, driver, dcr->jcr->JobId, dcr, cloud_prox); dcr->uploads->append(item); /* transfer are queued manually, so the caller has control on when the transfer is scheduled * this should come handy for upload_opt */ item->set_do_cache_truncate(do_truncate); if ( (upload_opt == UPLOAD_EACHPART) || ((upload_opt == UPLOAD_NO) && internal_job) ) { /* in each part upload option, queue right away */ item->queue(); } free_pool_memory(cache_fname); if (ret) { /* Update the Media information */ if (upart >= VolCatInfo.VolCatParts) { VolCatInfo.VolCatParts = upart; VolCatInfo.VolLastPartBytes = file_size; } /* We do not call dir_update_volume_info() because the part is not yet * uploaded, but we may call it to update VolCatParts or VolLastPartBytes. */ } Leave(dbglvl); return ret; } /* Small helper to get */ static int64_t part_get_size(ilist *cachep, int index) { int64_t ret=0; if (index <= cachep->last_index()) { cloud_part *p = (cloud_part*) cachep->get(index); if (p) { ret = p->size; } } return ret; } /* * Download the part_idx part to the cloud. 
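 *
 * Like upload_part_to_cloud() above, it gets its packet from the transfer
 * manager with get_xfer(), which returns a shared, reference-counted
 * object when an identical (Volume, part) request is already in flight;
 * the caller only has to release() it and the manager deletes the packet
 * once nobody shares it.  A standalone sketch of that contract follows
 * (an STL map is used for brevity, no locking is shown, names are
 * illustrative):
 */
#include <map>
#include <string>
#include <utility>

/* Minimal stand-in for a transfer that can be shared between callers. */
struct xfer_sketch {
   int ref_count;
   int part;
   std::string volume;
};

static std::map<std::pair<std::string,int>, xfer_sketch*> xfer_registry_sketch;

/* Return the existing packet for (volume, part) or create one; either way
 * the caller owns one reference and must call release_xfer_sketch(). */
static xfer_sketch *get_xfer_sketch(const std::string &vol, int part)
{
   xfer_sketch *&slot = xfer_registry_sketch[std::make_pair(vol, part)];
   if (!slot) {
      slot = new xfer_sketch();
      slot->ref_count = 0;
      slot->part = part;
      slot->volume = vol;
   }
   slot->ref_count++;
   return slot;
}

/* Drop one reference; the registry deletes the packet when unused. */
static void release_xfer_sketch(xfer_sketch *x)
{
   if (--x->ref_count == 0) {
      xfer_registry_sketch.erase(std::make_pair(x->volume, x->part));
      delete x;
   }
}

/*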
The result is store in the DCR context * The caller should use free_transfer() */ transfer *cloud_dev::download_part_to_cache(DCR *dcr, const char *VolumeName, uint32_t dpart) { if (dpart == 0) { return NULL; } /* if item's already in the dcr list, it's already in the download_mgr, we don't need any duplication*/ transfer *item = get_list_transfer(dcr->downloads, VolumeName, dpart); if (!item) { POOLMEM *cache_fname = get_pool_memory(PM_FNAME); pm_strcpy(cache_fname, dev_name); /* create a uniq xfer file name with XFER_TMP_NAME and the pid */ POOL_MEM xferbuf; Mmsg(xferbuf, "%s_%d_%d", XFER_TMP_NAME, (int)getpid(), (int)dcr->jcr->JobId); cloud_driver::add_vol_and_part(cache_fname, VolumeName, xferbuf.c_str(), dpart); /* use the cloud proxy to retrieve the transfer size */ uint64_t cloud_size = cloud_prox->get_size(VolumeName, dpart); /* check if the part is already in the cache and if it's bigger or equal to the cloud conterpart*/ ilist cachep; if (!get_cache_volume_parts_list(dcr, getVolCatName(), &cachep)) { free_pool_memory(cache_fname); return NULL; } if (cachep.get(dpart)) { uint64_t cache_size = part_get_size(&cachep, dpart); Dmsg3(dbglvl, "download_part_to_cache: %s. cache_size=%d cloud_size=%d\n", cache_fname, cache_size, cloud_size); if (cache_size >= cloud_size) { /* We could/should use mtime */ /* cache is "better" than cloud, no need to download */ Dmsg2(dbglvl, "part %ld is up-to-date in the cache %lld\n", (int32_t)dpart, cache_size); free_pool_memory(cache_fname); return NULL; } } /* Unlikely, but still possible : the xfer cache file already exists */ struct stat statbuf; if (lstat(cache_fname, &statbuf) == 0) { Dmsg1(dbglvl, "download_part_to_cache: %s already exists: remove it.", cache_fname); if (unlink(cache_fname) < 0) { berrno be; Dmsg2(dbglvl, "download_part_to_cache: failed to remove file %s. ERR: %s\n",cache_fname, be.bstrerror()); } else { Dmsg1(dbglvl, "=== unlinked: %s\n", cache_fname); } } /* get_xfer either returns a new transfer or a similar one if it already exists. * in this case, the transfer is shared and ref_count is incremented. The caller should only care to release() * the transfer eventually. The transfer_mgr is in charge of deleting the transfer when no one shares it anymore*/ item = download_mgr.get_xfer(cloud_size, download_engine, cache_fname,/* cache_fname is duplicated in the transfer constructor*/ VolumeName, /* VolumeName is duplicated in the transfer constructor*/ name(), /* device name() is duplicated in the transfer constructor*/ dpart, driver, dcr->jcr->JobId, dcr, NULL); // no proxy on download to cache dcr->downloads->append(item); /* transfer are queued manually, so the caller has control on when the transfer is scheduled */ item->queue(); free_pool_memory(cache_fname); } return item; } /* * Note, we might want to make a new download_first_part_to_read() * where it waits for the first part, then this routine * can simply start the other downloads that will be needed, and * we can wait for them in each new open(). 
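 *
 * download_parts_to_read() below already follows that shape: it queues a
 * download for every part missing from the cache but blocks only until
 * part 1 (the label) is on disk; the rest keep transferring in the
 * background.  A reduced standalone sketch, with callbacks standing in
 * for the transfer manager:
 */
#include <stddef.h>

typedef void *(*start_download_sketch_t)(int part);     /* returns a handle */
typedef int   (*wait_download_sketch_t)(void *handle);  /* 1 on success */

/* cache_size[] is indexed by part number; a zero entry means "not cached". */
static int prefetch_parts_sketch(const long long *cache_size, const int *parts,
                                 int nparts, start_download_sketch_t start,
                                 wait_download_sketch_t wait)
{
   void *part1 = NULL;
   for (int i = 0; i < nparts; i++) {
      int p = parts[i];
      if (cache_size[p] != 0) {
         continue;                 /* already in the cache, nothing to do */
      }
      void *h = start(p);          /* queue the download, do not wait */
      if (p == 1) {
         part1 = h;                /* the only transfer we block on here */
      }
   }
   return part1 ? wait(part1) : 1;
}

/*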
*/ bool cloud_dev::download_parts_to_read(DCR *dcr, alist* parts) { intptr_t part; transfer *part_1=NULL, *item; ilist cachep; int64_t size; /* Find and download any missing parts for read */ if (!driver) { return false; } if (!get_cache_volume_parts_list(dcr, getVolCatName(), &cachep)) { return false; } foreach_alist(part, parts) { /* TODO: get_cache_sizes is called before; should be an argument */ size = part_get_size(&cachep, part); if (size == 0) { item = download_part_to_cache(dcr, getVolCatName(), (int32_t)part); if (part == 1) { part_1 = item; /* Keep it, we continue only if the part1 is downloaded */ } } else { Dmsg2(dbglvl, "part %ld is already in the cache %lld\n", (int32_t)part, size); } } /* wait for the part.1 */ if (part_1) { wait_end_of_transfer(dcr, part_1); } return true; } uint32_t cloud_dev::get_part(boffset_t ls_offset) { return (uint32_t)(ls_offset>>off_bits); } DEVICE *cloud_dev::get_dev(DCR */*dcr*/) { return this; } uint32_t cloud_dev::get_hi_addr() { return (uint32_t)(file_addr >> 32); } uint32_t cloud_dev::get_low_addr() { return (uint32_t)(file_addr); } uint64_t cloud_dev::get_full_addr() { uint64_t pos; pos = make_addr(part, get_offset(file_addr)); return pos; } uint64_t cloud_dev::get_full_addr(boffset_t addr) { uint64_t pos; pos = make_addr(part, get_offset(addr)); return pos; } #if 0 static transfer* find_transfer(DCR *dcr, const char *VolumeName, uint32_t part) { transfer *item; foreach_alist(item, dcr->transfers) { if (part == item->m_part && strcmp(item->m_volume_name, VolumeName) == 0) { return item; } } return NULL; } #endif /* * Make a list of cache sizes and count num_cache_parts * * TODO: It seems that we are interested only by one part * when we call this function. So, it can be enhanced or removed */ bool cloud_dev::get_cache_sizes(DCR *dcr, const char *VolumeName) { DIR* dp = NULL; struct dirent *entry = NULL; struct stat statbuf; int name_max; POOLMEM *vol_dir = get_pool_memory(PM_NAME); POOLMEM *fname = get_pool_memory(PM_NAME); uint32_t cpart; bool ok = false; POOL_MEM dname(PM_FNAME); int status = 0; /* * **FIXME**** do this only once for each Volume. Currently, * it is done for each part that is opened. * NB : this should be substituted with get_cache_volume_parts_list */ Enter(dbglvl); max_cache_size = MAX(part + 1, 100); /* We need to list at least part elements, but it can be more */ if (cache_sizes) { free(cache_sizes); } cache_sizes = (uint64_t *)malloc(max_cache_size * sizeof(uint64_t)); memset(cache_sizes, 0, max_cache_size * sizeof(uint64_t)); num_cache_parts = 0; max_cache_part = 0; name_max = pathconf(".", _PC_NAME_MAX); if (name_max < 1024) { name_max = 1024; } make_cache_volume_name(vol_dir, VolumeName); if (!(dp = opendir(vol_dir))) { berrno be; Mmsg2(errmsg, "Cannot opendir to get cache sizes. Volume=%s does not exist. ERR=%s\n", vol_dir, be.bstrerror()); Dmsg1(dbglvl, "%s", errmsg); goto get_out; } entry = (struct dirent *)malloc(sizeof(struct dirent) + name_max + 1000); for ( ;; ) { if (dcr->jcr->is_canceled()) { goto get_out; } errno = 0; status = breaddir(dp, dname.addr()); if (status == -1) { break; } else if (status > 0) { Mmsg1(errmsg, "breaddir failed: ERR=%s", status); Dmsg1(dbglvl, "%s\n", errmsg); goto get_out; } /* Always ignore . and .. 
*/ if (strcmp(".", dname.c_str()) == 0 || strcmp("..", dname.c_str()) == 0) { continue; } /* Look only for part files */ if (strncmp("part.", dname.c_str(), 5) != 0) { continue; } /* Get size of part */ Mmsg(fname, "%s/%s", vol_dir, dname.c_str()); if (lstat(fname, &statbuf) == -1) { /* The part is no longer here, might be a truncate in an other thread, just * do like if the file wasn't here */ continue; } cpart = (int)str_to_int64((char *)&(dname.c_str()[5])); Dmsg3(dbglvl+100, "part=%d file=%s fname=%s\n", cpart, dname.c_str(), dname.c_str()); if (cpart > max_cache_part) { max_cache_part = cpart; } if (cpart >= max_cache_size) { int new_max_cache_size = cpart + 100; cache_sizes = (uint64_t *)realloc(cache_sizes, new_max_cache_size * sizeof(uint64_t)); for (int i=max_cache_size; i<(int)new_max_cache_size; i++) { cache_sizes[i] = 0; } max_cache_size = new_max_cache_size; } num_cache_parts++; cache_sizes[cpart] = (uint64_t)statbuf.st_size; Dmsg2(dbglvl+100, "found part=%d size=%llu\n", cpart, cache_sizes[cpart]); } if (chk_dbglvl(dbglvl)) { Pmsg1(0, "Cache objects Vol=%s:\n", VolumeName); for (int i=1; i <= (int)max_cache_part; i++) { Pmsg2(0, " part num=%d size=%llu\n", i, cache_sizes[i]); } Pmsg2(0, "End cache obj list: nparts=%d max_cache_part=%d\n", num_cache_parts, max_cache_part); } ok = true; get_out: if (dp) { closedir(dp); } if (entry) { free(entry); } free_pool_memory(vol_dir); free_pool_memory(fname); return ok; } void cloud_dev::make_cache_filename(POOLMEM *&filename, const char *VolumeName, uint32_t upart) { Enter(dbglvl); pm_strcpy(filename, dev_name); cloud_driver::add_vol_and_part(filename, VolumeName, "part", upart); } void cloud_dev::make_cache_volume_name(POOLMEM *&volname, const char *VolumeName) { Enter(dbglvl); POOL_MEM archive_name(PM_FNAME); pm_strcpy(archive_name, dev_name); if (!IsPathSeparator(archive_name.c_str()[strlen(archive_name.c_str())-1])) { pm_strcat(archive_name, "/"); } pm_strcat(archive_name, VolumeName); pm_strcpy(volname, archive_name.c_str()); } /* * DEVICE virtual functions that we redefine. */ cloud_dev::~cloud_dev() { Enter(dbglvl); cloud_prox->release(); if (cache_sizes) { free(cache_sizes); cache_sizes = NULL; } if (driver) { driver->term(errmsg); delete driver; driver = NULL; } for(int i=0 ; driver_tab[i].name != NULL; i++) { if (driver_tab[i].handle) { dlclose(driver_tab[i].handle); } } if (m_fd != -1) { if (d_close(m_fd) < 0) { berrno be; Dmsg1(dbglvl, "Unable to close device. ERR=%s\n", be.bstrerror()); } m_fd = -1; } } static cloud_driver *load_driver(JCR *jcr, uint driver_type) { POOL_MEM fname(PM_FNAME); cloud_driver *dev; cloud_driver_item drv; const char *slash; void *pHandle; newCloudDriver_t newDriver; if (!me->plugin_directory || strlen(me->plugin_directory) == 0) { Jmsg1(jcr, M_FATAL, 0, _("Plugin directory not defined. 
Cannot load cloud driver %d.\n"), driver_type); return NULL; } if (IsPathSeparator(me->plugin_directory[strlen(me->plugin_directory) - 1])) { slash = ""; } else { slash = "/"; } drv = driver_tab[driver_type - 1]; Mmsg(fname, "%s%sbacula-sd-cloud-%s-driver%s%s", me->plugin_directory, slash, drv.name, "-" VERSION, DRV_EXT); if (!drv.loaded) { Dmsg1(10, "Open SD driver at %s\n", fname.c_str()); pHandle = dlopen(fname.c_str(), RTLD_NOW); if (pHandle) { Dmsg2(100, "Driver=%s handle=%p\n", drv.name, pHandle); /* Get global entry point */ Dmsg1(10, "Lookup \"BaculaCloudDriver\" in driver=%s\n", drv.name); newDriver = (newCloudDriver_t)dlsym(pHandle, "BaculaCloudDriver"); Dmsg2(10, "Driver=%s entry point=%p\n", drv.name, newDriver); if (!newDriver) { const char *error = dlerror(); Jmsg(NULL, M_ERROR, 0, _("Lookup of symbol \"BaculaCloudDriver\" in driver %d for device %s failed: ERR=%s\n"), driver_type, fname.c_str(), NPRT(error)); Dmsg2(10, "Lookup of symbol \"BaculaCloudDriver\" driver=%s failed: ERR=%s\n", fname.c_str(), NPRT(error)); dlclose(pHandle); return NULL; } drv.handle = pHandle; drv.loaded = true; drv.newDriver = newDriver; } else { /* dlopen failed */ const char *error = dlerror(); Jmsg3(jcr, M_FATAL, 0, _("dlopen of SD driver=%s at %s failed: ERR=%s\n"), drv.name, fname.c_str(), NPRT(error)); Dmsg2(0, "dlopen plugin %s failed: ERR=%s\n", fname.c_str(), NPRT(error)); return NULL; } } else { Dmsg1(10, "SD driver=%s is already loaded.\n", drv.name); } /* Call driver initialization */ dev = drv.newDriver(); return dev; } cloud_dev::cloud_dev(JCR *jcr, DEVRES *device) { Enter(dbglvl); m_fd = -1; *full_type = 0; /* Initialize Cloud driver */ if (!driver) { switch (device->cloud->driver_type) { case C_S3_DRIVER: driver = load_driver(jcr, C_S3_DRIVER); break; case C_AWS_DRIVER: if (!device->cloud->driver_command) { POOL_MEM tmp(PM_FNAME); Mmsg(tmp, "%s/aws_cloud_driver", me->plugin_directory); device->cloud->driver_command = bstrdup(tmp.c_str()); } struct stat mstatp; if (lstat(device->cloud->driver_command, &mstatp) == 0) { driver = load_driver(jcr, C_AWS_DRIVER); } break; case C_WAS_DRIVER: { if (!device->cloud->driver_command) { POOL_MEM tmp(PM_FNAME); Mmsg(tmp, "%s/was_cloud_driver", me->plugin_directory); device->cloud->driver_command = bstrdup(tmp.c_str()); } struct stat mstatp; if (lstat(device->cloud->driver_command, &mstatp) == 0) { driver = load_driver(jcr, C_WAS_DRIVER); } break; } case C_GOOGLE_DRIVER: { if (!device->cloud->driver_command) { POOL_MEM tmp(PM_FNAME); Mmsg(tmp, "%s/google_cloud_driver", me->plugin_directory); device->cloud->driver_command = bstrdup(tmp.c_str()); } struct stat mstatp; if (lstat(device->cloud->driver_command, &mstatp) == 0) { driver = load_driver(jcr, C_GOOGLE_DRIVER); } break; } case C_ORACLE_DRIVER: { if (!device->cloud->driver_command) { POOL_MEM tmp(PM_FNAME); Mmsg(tmp, "%s/oracle_cloud_driver", me->plugin_directory); device->cloud->driver_command = bstrdup(tmp.c_str()); } struct stat mstatp; if (lstat(device->cloud->driver_command, &mstatp) == 0) { driver = load_driver(jcr, C_ORACLE_DRIVER); } break; } case C_GEN_DRIVER: { if (!device->cloud->driver_command) { POOL_MEM tmp(PM_FNAME); Mmsg(tmp, "%s/generic_cloud_driver", me->plugin_directory); device->cloud->driver_command = bstrdup(tmp.c_str()); } struct stat mstatp; if (lstat(device->cloud->driver_command, &mstatp) == 0) { driver = load_driver(jcr, C_GEN_DRIVER); } break; } case C_SWIFT_DRIVER: { if (!device->cloud->driver_command) { POOL_MEM tmp(PM_FNAME); Mmsg(tmp, "%s/swift_cloud_driver", 
me->plugin_directory); device->cloud->driver_command = bstrdup(tmp.c_str()); } struct stat mstatp; if (lstat(device->cloud->driver_command, &mstatp) == 0) { driver = load_driver(jcr, C_SWIFT_DRIVER); } break; } case C_FILE_DRIVER: driver = New(file_driver); break; default: break; } current_driver_type = 0; if (!driver) { /* We are outside from a job */ Qmsg2(jcr, M_FATAL, 0, _("Could not open Cloud driver type=%d for Device=%s.\n"), device->cloud->driver_type, device->hdr.name); /* We create a dummy driver that will fail all jobs automatically. We do not abort * the storage daemon because we can have other device type like tape, dedup or file * that are perfectly working. TODO: See if we abort the SD startup here. */ driver = New(dummy_driver); } else { current_driver_type = device->cloud->driver_type; } /* Make local copy in device */ if (device->cloud->upload_limit) { driver->upload_limit.set_bwlimit(device->cloud->upload_limit); } if (device->cloud->download_limit) { driver->download_limit.set_bwlimit(device->cloud->download_limit); } trunc_opt = device->cloud->trunc_opt; upload_opt = device->cloud->upload_opt; Dmsg2(dbglvl, "Trunc_opt=%d upload_opt=%d\n", trunc_opt, upload_opt); if (device->cloud->max_concurrent_uploads) { upload_mgr.m_wq.max_workers = device->cloud->max_concurrent_uploads; } if (device->cloud->max_concurrent_downloads) { download_mgr.m_wq.max_workers = device->cloud->max_concurrent_downloads; } POOL_MEM err; /* device->errmsg is not yet ready */ /* Initialize the driver */ if (!driver->init(device->cloud, err.addr())) { Qmsg1(jcr, M_FATAL, 0, "Cloud driver initialization error %s\n", err.c_str()); Tmsg1(0, "Cloud driver initialization error %s\n", err.c_str()); } bsnprintf(full_type, sizeof(full_type), "Cloud (%s Plugin)", print_driver_type()); } /* the cloud proxy owns its cloud_parts, so we can 'set and forget' them */ cloud_prox = cloud_proxy::get_instance(); } int cloud_dev::device_specific_init(JCR *jcr, DEVRES *device) { // Don't forget to do capabilities |= CAP_LSEEK in device_specific_init() return file_dev::device_specific_init(jcr, device); } /* * DEVICE virtuals that we redefine. */ static const char *seek_where(int whence) { switch (whence) { case SEEK_SET: return "SEEK_SET"; case SEEK_CUR: return "SEEK_CUR"; case SEEK_END: return "SEEK_END"; default: return "UNKNOWN"; } } /* * Note, we can enter with a full address containing a part number * and an offset or with an offset. If the part number is zero * at entry, we use the current part. * * This routine always returns a full address (part, offset). 
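 *
 * A standalone sketch of the packing done by make_addr()/get_part()/
 * get_offset() near the top of this file.  The real off_bits/off_mask
 * constants are defined elsewhere; the 20-bit part / 44-bit offset split
 * below is only assumed from the "top 20 bits" remark above.
 */
#include <stdint.h>

enum { ADDR_OFF_BITS_SKETCH = 44 };                       /* assumed split */
#define ADDR_OFF_MASK_SKETCH ((((uint64_t)1) << ADDR_OFF_BITS_SKETCH) - 1)

static uint64_t make_addr_sketch(uint32_t part, uint64_t offset)
{
   return ((uint64_t)part << ADDR_OFF_BITS_SKETCH) | (offset & ADDR_OFF_MASK_SKETCH);
}

static uint32_t addr_part_sketch(uint64_t addr)           /* top bits */
{
   return (uint32_t)(addr >> ADDR_OFF_BITS_SKETCH);
}

static uint64_t addr_offset_sketch(uint64_t addr)         /* low bits */
{
   return addr & ADDR_OFF_MASK_SKETCH;
}

/*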
* */ boffset_t cloud_dev::lseek(DCR *dcr, boffset_t ls_offset, int whence) { boffset_t pos; uint32_t new_part; boffset_t new_offset; char ed1[50]; if (!dcr) { /* can be NULL when called from rewind(NULL) */ return -1; } /* Convert input ls_offset into part and off */ if (ls_offset < 0) { return -1; } new_part = get_part(ls_offset); new_offset = get_offset(ls_offset); if (new_part == 0) { new_part = part; if (new_part == 0) { new_part = 1; } } Dmsg6(dbglvl+10, "lseek(%d, %s, %s) part=%d nparts=%d off=%lld\n", m_fd, print_addr(ed1, sizeof(ed1), ls_offset), seek_where(whence), part, num_cache_parts, new_offset); if (whence != SEEK_CUR && new_part != part) { Dmsg2(dbglvl, "new_part=%d part=%d call close_part()\n", new_part, part); close_part(dcr); part = new_part; Dmsg0(dbglvl, "now open_device()\n"); if (!open_device(dcr, openmode)) { return -1; } ASSERT2(part==new_part, "Big problem part!=new_partn"); } switch (whence) { case SEEK_SET: /* We are staying in the current part, just seek */ pos = ::lseek(m_fd, new_offset, SEEK_SET); if (pos < 0) { berrno be; dev_errno = errno; Mmsg2(errmsg, _("lseek error on %s. ERR=%s.\n"), print_name(), be.bstrerror()); Dmsg1(000, "Seek error. ERR=%s\n", errmsg); return pos; } Dmsg4(dbglvl+10, "lseek_set part=%d pos=%s fd=%d offset=%lld\n", part, print_addr(ed1, sizeof(ed1), pos), m_fd, new_offset); return get_full_addr(pos); case SEEK_CUR: pos = ::lseek(m_fd, 0, SEEK_CUR); if (pos < 0) { berrno be; dev_errno = errno; Mmsg2(errmsg, _("lseek error on %s. ERR=%s.\n"), print_name(), be.bstrerror()); Dmsg1(000, "Seek error. ERR=%s\n", errmsg); return pos; } Dmsg4(dbglvl+10, "lseek %s fd=%d offset=%lld whence=%s\n", print_addr(ed1, sizeof(ed1)), m_fd, new_offset, seek_where(whence)); return get_full_addr(pos); case SEEK_END: /* * Bacula does not use offsets for SEEK_END * Also, Bacula uses seek_end only when it wants to * append to the volume. */ pos = ::lseek(m_fd, new_offset, SEEK_END); if (pos < 0) { berrno be; dev_errno = errno; Mmsg2(errmsg, _("lseek error on %s. ERR=%s.\n"), print_name(), be.bstrerror()); Dmsg1(000, "Seek error. ERR=%s\n", errmsg); return pos; } Dmsg4(dbglvl+10, "lseek_end part=%d pos=%lld fd=%d offset=%lld\n", part, pos, m_fd, new_offset); return get_full_addr(pos); default: Dmsg0(dbglvl, "Seek call error.\n"); errno = EINVAL; return -1; } } /* use this to track file usage */ bool cloud_dev::update_pos(DCR *dcr) { Enter(dbglvl); return file_dev::update_pos(dcr); } bool cloud_dev::rewind(DCR *dcr) { Enter(dbglvl); Dmsg3(dbglvl, "rewind res=%d fd=%d %s\n", num_reserved(), m_fd, print_name()); state &= ~(ST_EOT|ST_EOF|ST_WEOT); /* remove EOF/EOT flags */ block_num = file = 0; file_size = 0; if (m_fd < 0) { Mmsg1(errmsg, _("Rewind failed: device %s is not open.\n"), print_name()); return false; } if (part != 1) { close_part(dcr); part = 1; if (!open_device(dcr, openmode)) { return false; } } if (lseek(dcr, (boffset_t)0, SEEK_SET) < 0) { berrno be; dev_errno = errno; Mmsg2(errmsg, _("lseek to 0 error on %s. ERR=%s.\n"), print_name(), be.bstrerror()); return false; } file_addr = 0; return true; } bool cloud_dev::reposition(DCR *dcr, uint64_t raddr) { Enter(dbglvl); char ed1[50]; Dmsg2(dbglvl, "part=%d num_cache_parts=%d\n", part, num_cache_parts); if (!is_open()) { dev_errno = EBADF; Mmsg0(errmsg, _("Bad call to reposition. Device not open\n")); Qmsg0(dcr->jcr, M_FATAL, 0, errmsg); return false; } if (lseek(dcr, (boffset_t)raddr, SEEK_SET) == (boffset_t)-1) { berrno be; dev_errno = errno; Mmsg2(errmsg, _("lseek error on %s. 
ERR=%s.\n"), print_name(), be.bstrerror()); return false; } file_addr = raddr; Dmsg1(dbglvl, "=== reposition lseeked to %s\n", print_addr(ed1, sizeof(ed1))); return true; } #define INTPTR(a) (void*)(intptr_t)(a) /* Small cloud scanner for the BSR list, we check if all parts are in the cache area. */ class BSRPartScanner { private: DCR *dcr; cloud_dev *dev; uint32_t lastPart; /* Last part used, mark the part for download only one time */ alist *parts; public: BSRPartScanner(DCR *adcr, cloud_dev *adev) { dcr = adcr; dev = adev; lastPart = 0; parts = New(alist(100, not_owned_by_alist)); }; ~BSRPartScanner() { delete parts; }; /* accessor for parts list*/ alist *get_parts_list() { return parts; }; /* We check if the current Parts in the voladdr list are needed * The BSR structure should progress forward in the volume. */ void get_parts(BSR_VOLUME *volume, BSR_VOLADDR *voladdr) { while (voladdr) { for (uint32_t part = dev->get_part(voladdr->saddr); part <=dev->get_part(voladdr->eaddr); ++part) { if (lastPart != part) { lastPart = part; parts->append(INTPTR(part)); } } voladdr = voladdr->next; } }; /* Get Volume/Parts that must be downloaded For each MediaType, we must find * the right device and check if it's a Cloud device. If we have a cloud device, * then we need to check all VolAddress to get the Part list that is associated. * * It's likely that we will always query the same MediaType and the same * Volume. */ void get_all_parts(BSR *bsr, const char *cur_volume) { bool done=false; BSR_VOLUME *volume; parts->destroy(); /* Always download the part.1 */ parts->append(INTPTR(1)); while (bsr) { volume = bsr->volume; if (strcmp(volume->VolumeName, cur_volume) == 0) { get_parts(volume, bsr->voladdr); done = true; } else if (done) { /* TODO: We can stop when it's no longer the right volume */ break; } bsr = bsr->next; } intptr_t part; if (chk_dbglvl(dbglvl)) { Dmsg1(0, "Display list of parts to download for volume %s:\n", cur_volume); foreach_alist(part, parts) { Dmsg2(0, " Must download part %s/part.%lld\n", cur_volume, (int64_t)part); } } }; }; /* Wait for the download of a particular part */ bool cloud_dev::wait_one_transfer(DCR *dcr, char *VolName, uint32_t part) { dcr->jcr->setJobStatus(JS_CloudDownload); transfer *item = download_part_to_cache(dcr, VolName, part); if (item) { bool ok = wait_end_of_transfer(dcr, item); ok &= (item->m_state == TRANS_STATE_DONE); dcr->jcr->setJobStatus(JS_Running); if (!ok) { Qmsg3(dcr->jcr, M_ERROR, 0, _("Unable to download Volume=\"%s\"%s. %s\n"), VolName, (part == 1) ? " label" : "", item->m_message ? 
item->m_message:""); } return ok; } else { /* no item to download -> up-to-date */ return true; } return false; } bool cloud_dev::open_device(DCR *dcr, int omode) { POOL_MEM archive_name(PM_FNAME); POOL_MEM part_name(PM_FNAME); struct stat sp; Enter(dbglvl); /* Call base class to define generic variables */ if (DEVICE::open_device(dcr, omode)) { Dmsg2(dbglvl, "fd=%d device %s already open\n", m_fd, print_name()); Leave(dbglvl); return true; } omode = openmode; /* At this point, the device is closed, so we open it */ /* reset the cloud parts proxy for the current volume */ /* this can modify errmsg, so it's useful to test errmsg before overwritting it */ probe_cloud_proxy(dcr, getVolCatName()); /* Now Initialize the Cache part */ pm_strcpy(archive_name, dev_name); if (!IsPathSeparator(archive_name.c_str()[strlen(archive_name.c_str())-1])) { pm_strcat(archive_name, "/"); } pm_strcat(archive_name, getVolCatName()); /* If create make directory with Volume name */ if (part <= 0 && omode == CREATE_READ_WRITE) { Dmsg1(dbglvl, "=== makedir=%s\n", archive_name.c_str()); if (!makedir(dcr->jcr, archive_name.c_str(), 0740)) { berrno be; if (errmsg[0] == 0) { Mmsg2(errmsg, _("Could not make dir %s. %s"), archive_name.c_str(), be.bstrerror()); } Dmsg2(dbglvl, _("Could not make dir %s. %s"), archive_name.c_str(), be.bstrerror()); Leave(dbglvl); return false; } } if (part <= 0) { part = 1; /* always start from 1 */ } Dmsg2(dbglvl, "part=%d num_cache_parts=%d\n", part, num_cache_parts); /* * If we are doing a restore, get the necessary parts */ if (dcr->is_reading()) { BSRPartScanner scanner(dcr, this); scanner.get_all_parts(dcr->jcr->bsr, getVolCatName()); download_parts_to_read(dcr, scanner.get_parts_list()); } get_cache_sizes(dcr, getVolCatName()); /* refresh with what may have downloaded */ /* We need to make sure the current part is loaded */ uint64_t cld_size = cloud_prox->get_size(getVolCatName(), 1); if (cache_sizes[1] == 0 && cld_size != 0) { if (!wait_one_transfer(dcr, getVolCatName(), 1)) { if (errmsg[0] == 0) { Mmsg1(errmsg, _("wait for part.1 transfert failed for volume %s"), getVolCatName()); } Dmsg1(dbglvl, _("wait for part.1 transfert failed for volume %s"), getVolCatName()); return false; } } /* TODO: Merge this part of the code with the previous section */ cld_size = cloud_prox->get_size(getVolCatName(), part); if (dcr->is_reading() && part > 1 && (part > max_cache_part || cache_sizes[part] == 0)) /* Out of the existing parts or size == 0 */ { if (!wait_one_transfer(dcr, getVolCatName(), part)) { if (errmsg[0] == 0) { Mmsg2(errmsg, _("wait for part.%d transfert failed for volume %s"), part, getVolCatName()); } Dmsg2(dbglvl, _("wait for part.%d transfert failed for volume %s"), part, getVolCatName()); return false; } } Mmsg(part_name, "/part.%d", part); pm_strcat(archive_name, part_name.c_str()); set_mode(omode); /* If creating file, give 0640 permissions */ Dmsg3(DT_CLOUD|10, "open mode=%s open(%s, 0x%x, 0640)\n", mode_to_str(omode), archive_name.c_str(), mode); /* Use system open() */ /* NB: Shall we really reset the error message here? 
*/ errmsg[0] = 0; if ((m_fd = ::open(archive_name.c_str(), mode|O_CLOEXEC, 0640)) < 0) { berrno be; dev_errno = errno; if (part == 1 && omode != CREATE_READ_WRITE) { part = 0; /* Open failed, reset part number */ Mmsg3(errmsg, _("Could not open(%s,%s,0640): ERR=%s\n"), archive_name.c_str(), mode_to_str(omode), be.bstrerror()); Dmsg1(dbglvl, "open failed: %s", errmsg); } } if (m_fd >= 0 && !get_cache_sizes(dcr, getVolCatName())) { /* get_cache_sizes should populate errMsg */ ASSERTD(errmsg[0] != 0, "get_cache_sizes reported error state without errmsg"); return false; } /* TODO: Make sure max_cache_part and max_cloud_part are up to date */ uint32_t max_cloud_part = cloud_prox->last_index(getVolCatName()); if (can_read() && m_fd < 0 && part > MAX(max_cache_part, max_cloud_part)) { Dmsg4(dbglvl, "set_eot: part=%d num_cache_parts=%d max_cache_part=%d max_cloud_part=%d\n", part, num_cache_parts, max_cache_part, max_cloud_part); set_eot(); } if (m_fd >= 0) { if (omode == CREATE_READ_WRITE || omode == OPEN_READ_WRITE) { set_append(); } dev_errno = 0; file = 0; file_addr = 0; if (part > num_cache_parts) { num_cache_parts = part; if (part > max_cache_part) { max_cache_part = part; } } /* Refresh the device id and the current file size */ if (fstat(m_fd, &sp) == 0) { devno = sp.st_dev; part_size = sp.st_size; } else { /* Should never happen */ berrno be; Mmsg1(errmsg, _("Could not use fstat on file descriptor. ERR=%s\n") ,be.bstrerror()); d_close(m_fd); m_fd = -1; } } else if (dcr->jcr) { pm_strcpy(dcr->jcr->errmsg, errmsg); } state |= preserve; /* reset any important state info */ Dmsg3(dbglvl, "fd=%d part=%d num_cache_parts=%d\n", m_fd, part, num_cache_parts); Leave(dbglvl); /* No errmsg and m_fd < 0 -> let's report unknown error anyway */ if (m_fd < 0) { if (errmsg[0] == 0) { Mmsg(errmsg, _("unknown error")); } } return m_fd >= 0; } bool cloud_dev::close(DCR *dcr) { Enter(dbglvl); bool ok = true; Dmsg6(dbglvl, "close_dev vol=%s part=%d fd=%d dev=%p adata=%d dev=%s\n", VolHdr.VolumeName, part, m_fd, this, adata, print_name()); if (!is_open()) { //Dmsg2(1000, "Device %s already closed vol=%s\n", print_name(),VolHdr.VolumeName); Leave(dbglvl); return true; /* already closed */ } if (d_close(m_fd) != 0) { berrno be; dev_errno = errno; Mmsg2(errmsg, _("Error closing device %s. ERR=%s.\n"), print_name(), be.bstrerror()); ok = false; } unmount(1); /* do unmount if required */ /* Ensure the last written part is uploaded */ if ((part > 0) && dcr->is_writing()) { if (!upload_part_to_cloud(dcr, VolHdr.VolumeName, part, (trunc_opt == TRUNC_AFTER_UPLOAD))) { if (errmsg[0]) { Qmsg(dcr->jcr, M_ERROR, 0, "%s", errmsg); } } } /* * Clean up device packet so it can be re-opened * */ state &= ~(ST_LABEL|ST_READ|ST_APPEND|ST_EOT|ST_WEOT|ST_EOF| ST_NOSPACE|ST_MOUNTED|ST_MEDIA|ST_SHORT); label_type = B_BACULA_LABEL; clear_opened(); file = block_num = 0; part = 0; EndAddr = get_full_addr(); file_addr = 0; EndFile = EndBlock = 0; openmode = 0; clear_volhdr(); memset(&VolCatInfo, 0, sizeof(VolCatInfo)); if (tid) { stop_thread_timer(tid); tid = 0; } Leave(dbglvl); return ok; } /* When constructed, jcr_killable_lock captures the jcr current killable state and set it to false. 
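 *
 * A generic form of this guard, together with the way it is used around
 * slow cloud calls (compare probe_cloud_proxy() and truncate() below);
 * names are illustrative:
 */
/* Remember a flag, force it to a value for the scope, restore it on exit. */
class scoped_flag_sketch {
   bool *m_flag;
   bool  m_saved;
public:
   scoped_flag_sketch(bool *flag, bool value) : m_flag(flag), m_saved(*flag) {
      *m_flag = value;
   }
   ~scoped_flag_sketch() {
      *m_flag = m_saved;
   }
};

static void list_volume_sketch(bool *job_killable)
{
   scoped_flag_sketch guard(job_killable, false); /* job cannot be killed here */
   /* ... long-running cloud listing ... */
}                                                 /* previous state restored */

/*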
* The original state is re-applied at destruction */ class jcr_not_killable { JCR *m_jcr; bool m_killable; public: jcr_not_killable(JCR* jcr) : m_jcr(jcr), m_killable(jcr->is_killable()) { if (m_killable) { m_jcr->set_killable(false); } } ~jcr_not_killable() { /* reset killable state */ m_jcr->set_killable(m_killable); } }; /* update the cloud_proxy at VolName key. Only if necessary or if force-d */ bool cloud_dev::probe_cloud_proxy(DCR *dcr,const char *VolName, bool force) { /* check if the current volume is present in the proxy by probing the label (part.1)*/ if (!cloud_prox->volume_lookup(VolName)|| force) { /* Make sure the Job thread will not be killed in this function */ jcr_not_killable jkl(dcr->jcr); ilist cloud_parts(100, false); /* !! dont own the parts here */ /* first, retrieve the volume content within cloud_parts list*/ cancel_callback cancel_cb; cancel_cb.fct = DCR_cancel_cb; cancel_cb.arg = dcr; if (!driver->get_cloud_volume_parts_list(VolName, &cloud_parts, &cancel_cb, errmsg)) { Dmsg2(dbglvl, "Cannot get cloud sizes for Volume=%s Err=%s\n", VolName, errmsg); return false; } /* then, add the content of cloud_parts in the proxy table */ if (!cloud_prox->reset(VolName, &cloud_parts)) { Dmsg1(dbglvl, "could not reset cloud proxy for Volume=%s\n", VolName); return false; } } return true; } /* * Truncate cache parts that are also in the cloud * NOTE! We do not delete part.1 so that after this * truncate cache command (really a sort of purge), * the user can still do a restore. */ int cloud_dev::truncate_cache(DCR *dcr, const char *VolName, int64_t *size, POOLMEM *&msg) { int i, nbpart=0; Enter(dbglvl); ilist cache_parts; /* init the dev error message */ errmsg[0] = 0; msg[0] = 0; POOLMEM *vol_dir = get_pool_memory(PM_NAME); POOLMEM *fname = get_pool_memory(PM_NAME); /* cache truncation is expected to be infrequent and not performance intensive. * However deleting parts from the cache that would not be present on the cloud results * in data lost, so it's high risk, altough it should not occurs in normal use, to have * a de-sync cloud_proxy. For extra safety we do FORCE the cloud_proxy probing * to cover out-of-band manual deletions b.e. 
*/ if (!probe_cloud_proxy(dcr, VolName, true)) { if (errmsg[0] == 0) { Mmsg1(errmsg, "Truncate cache cannot get cache volume parts list for Volume=%s\n", VolName); } Mmsg1(msg, "Truncate cache cannot get cache volume parts list for Volume=%s\n", VolName); Dmsg1(dbglvl, "%s\n", errmsg); nbpart = -1; goto bail_out; } if (!get_cache_volume_parts_list(dcr, VolName, &cache_parts)) { if (errmsg[0] == 0) { Mmsg1(errmsg, "Truncate cache cannot get cache volume parts list for Volume=%s\n", VolName); } Mmsg1(msg, "Truncate cache cannot get cache volume parts list for Volume=%s\n", VolName); Dmsg1(dbglvl, "%s\n", errmsg); nbpart = -1; goto bail_out; } make_cache_volume_name(vol_dir, VolName); /* * Remove every cache part that is also in the cloud */ for (i=2; i <= (int)cache_parts.last_index(); i++) { int64_t cache_size = part_get_size(&cache_parts, i); int64_t cloud_size = cloud_prox->get_size(VolName, i); /* remove cache parts that are empty or cache parts with matching cloud_part size*/ if (cache_size != 0 && cache_size != cloud_size) { Dmsg3(dbglvl, "Skip truncate for part=%d size mismatch scloud=%lld scache=%lld\n", i, cloud_size, cache_size); Mmsg(msg, "Some part(s) couldn't be truncated from the cache because they are inconsistent with the cloud."); continue; } /* Look in the transfer list if we have a download/upload for the current volume */ if (download_mgr.find(VolName, i)) { Dmsg1(dbglvl, "Skip truncate for part=%d because it's transfering\n", i); Mmsg(msg, "Some part(s) couldn't be truncated from the cache because they are still transferring."); continue; } Mmsg(fname, "%s/part.%d", vol_dir, i); if (unlink(fname) < 0) { berrno be; Mmsg2(errmsg, "Truncate cache failed to remove file %s. ERR: %s\n", fname, be.bstrerror()); Dmsg1(dbglvl, "%s\n", errmsg); } else { *size = *size + cache_size; nbpart++; Dmsg1(dbglvl, "=== unlinked: part=%s\n", fname); } } bail_out: free_pool_memory(vol_dir); free_pool_memory(fname); Leave(dbglvl); return nbpart; } /* callback called by the driver to test file for deletion */ /* return true if file should be deleted, false otherwise */ bool test_cleanup_file(const char* file, cleanup_ctx_type* ctx) { if (ctx) { int64_t tag=0; int32_t index=0; bool ret = (ctx->pattern_num == scan_string(file, ctx->pattern, &tag, &index)); return ret; } return false; } /* * Truncate both cache and cloud */ bool cloud_dev::truncate(DCR *dcr) { DIR* dp = NULL; struct dirent *entry = NULL; int name_max; POOLMEM *vol_dir = get_pool_memory(PM_NAME); POOLMEM *fname = get_pool_memory(PM_NAME); bool ok = false; POOL_MEM dname(PM_FNAME); int status = 0; ilist * iuploads=New(ilist(100,true)); /* owns the parts */ ilist *truncate_list=NULL; FILE *fp; errmsg[0] = 0; Enter(dbglvl); /* Make sure the Job thread will not be killed in this function */ jcr_not_killable jkl(dcr->jcr); if (cache_sizes) { free(cache_sizes); cache_sizes = NULL; } num_cache_parts = 0; max_cache_part = 0; part = 0; if (m_fd) { ::close(m_fd); /* No need to check the return code, we truncate */ m_fd = -1; } name_max = pathconf(".", _PC_NAME_MAX); if (name_max < 1024) { name_max = 1024; } make_cache_volume_name(vol_dir, getVolCatName()); Dmsg1(dbglvl, "===== truncate: %s\n", vol_dir); if (!(dp = opendir(vol_dir))) { berrno be; Mmsg2(errmsg, "Cannot opendir to get cache sizes. Volume %s does not exist. 
ERR=%s\n", vol_dir, be.bstrerror()); Dmsg1(dbglvl, "%s\n", errmsg); goto get_out; } entry = (struct dirent *)malloc(sizeof(struct dirent) + name_max + 1000); for ( ;; ) { errno = 0; status = breaddir(dp, dname.addr()); if (status == -1) { break; } else if (status > 0) { Mmsg1(errmsg, "breaddir failed: status=%d", status); Dmsg1(dbglvl, "%s\n", errmsg); goto get_out; } /* Always ignore . and .. */ if (strcmp(".", dname.c_str()) == 0 || strcmp("..", dname.c_str()) == 0) { continue; } /* Look only for part files */ if (strncmp("part.", dname.c_str(), 5) != 0) { continue; } Mmsg(fname, "%s/%s", vol_dir, dname.c_str()); if (unlink(fname) < 0) { berrno be; Mmsg2(errmsg, "Failed to remove file %s ERR: %s\n", fname, be.bstrerror()); Dmsg1(dbglvl, "%s\n", errmsg); goto get_out; } else { Dmsg1(dbglvl, "=== unlinked: part=%s\n", fname); } } /* All parts have been unlinked. Recreate an empty part.1 * FIX MT3450:Fatal error: Failed to re-open device after truncate on Cloud device */ Dmsg1(dbglvl, "Recreate empty part.1 for volume: %s\n", vol_dir); Mmsg(fname, "%s/part.1", vol_dir); fp = bfopen(fname, "a"); if (!fp || fclose(fp) != 0) { berrno be; Mmsg2(errmsg, "Failed to create empty file %s ERR: %s\n", fname, be.bstrerror()); } if (!dir_get_volume_info(dcr, getVolCatName(), GET_VOL_INFO_FOR_READ)) { /* It may happen for label operation */ Dmsg2(100, "dir_get_vol_info failed for vol=%s: %s\n", getVolCatName(), dcr->jcr->errmsg); goto get_out; } /* Update the Catalog information */ dcr->VolCatInfo.VolCatParts = 0; dcr->VolCatInfo.VolLastPartBytes = 0; dcr->VolCatInfo.VolCatCloudParts = 0; /* reset VolCatBytes to zero */ /* so dir_update_volume_info() will trigger update */ dcr->VolCatInfo.VolCatBytes = 0; /* wrap the uploads in a parts ilist */ transfer *tpkt; foreach_alist(tpkt, dcr->uploads) { /* convert xfer into part when VolName match*/ if (strcmp(tpkt->m_volume_name,getVolCatName())!=0) { continue; } cloud_part *part = (cloud_part*) malloc(sizeof(cloud_part)); part->index = tpkt->m_part; part->mtime = tpkt->m_res_mtime; part->size = tpkt->m_res_size; memcpy(part->hash64, tpkt->m_hash64, 64); iuploads->put(part->index, part); } /* returns the list of items to truncate : cloud parts-uploads*/ cancel_callback cancel_cb; cancel_cb.fct = DCR_cancel_cb; cancel_cb.arg = dcr; truncate_list = cloud_prox->exclude(getVolCatName(), iuploads); if (truncate_list && !driver->truncate_cloud_volume(getVolCatName(), truncate_list, &cancel_cb, errmsg)) { Dmsg1(dbglvl, "%s", errmsg); Qmsg(dcr->jcr, M_ERROR, 0, "%s", errmsg); goto get_out; } else { Dmsg1(dbglvl, "%s", errmsg); } /* cleanup temporary copy files. 
*/ cleanup_ctx_type ctx; Mmsg(fname, "%s/part%%lld.%%d", getVolCatName()); ctx.pattern = fname; ctx.pattern_num = 2; if (!driver->clean_cloud_volume(getVolCatName(), &test_cleanup_file, &ctx, &cancel_cb, errmsg)) { Dmsg1(dbglvl, "%s", errmsg); Qmsg(dcr->jcr, M_ERROR, 0, "%s", errmsg); goto get_out; } else { Dmsg1(dbglvl, "%s", errmsg); } /* force proxy refresh (volume should be empty so it should be fast) */ /* another approach would be to reuse truncate_list to remove items */ if (!probe_cloud_proxy(dcr, getVolCatName(), true)) { goto get_out; } /* check content of the list : only index should be available */ for(uint32_t index=1; index<=cloud_prox->last_index(getVolCatName()); index++ ) { if (cloud_prox->get(getVolCatName(), index)) { Dmsg2(0, "truncate_cloud_volume proxy for volume %s got part.%d should be empty\n", getVolCatName(), index); Qmsg(dcr->jcr, M_WARNING, 0, "truncate_cloud_volume: %s/part.%d is still present\n", getVolCatName(), index); } } ok = true; get_out: if (dp) { closedir(dp); } if (entry) { free(entry); } free_pool_memory(vol_dir); free_pool_memory(fname); delete iuploads; delete truncate_list; Leave(dbglvl); return ok; } int cloud_dev::read_dev_volume_label(DCR *dcr) { int stat; Enter(dbglvl); Dmsg2(dbglvl, "part=%d num_cache_parts=%d\n", part, num_cache_parts); if (!is_open()) { part = 0; } stat = file_dev::read_dev_volume_label(dcr); Dmsg2(dbglvl, "part=%d num_cache_parts=%d\n", part, num_cache_parts); return stat; } const char *cloud_dev::print_type() { return "Cloud"; } const char *cloud_dev::print_driver_type() { return cloud_driver_type_name[current_driver_type]; } const char *cloud_dev::print_full_type() { return full_type; } /* * makedir() is a lightly modified copy of the same function * in findlib/mkpath.c * */ bool makedir(JCR *jcr, char *path, mode_t mode) { struct stat statp; if (mkdir(path, mode) != 0) { berrno be; if (lstat(path, &statp) != 0) { Qmsg2(jcr, M_ERROR, 0, _("Cannot create directory %s: ERR=%s\n"), path, be.bstrerror()); return false; } else if (!S_ISDIR(statp.st_mode)) { Qmsg1(jcr, M_ERROR, 0, _("%s exists but is not a directory.\n"), path); return false; } return true; /* directory exists */ } return true; } /* * This call closes the device, but it is used for part handling * where we close one part and then open the next part. The * difference between close_part() and close() is that close_part() * saves the state information of the device (e.g. the Volume label, * the Volume Catalog record, ... This permits opening and closing * the Volume parts multiple times without losing track of what the * main Volume parameters are. */ bool cloud_dev::close_part(DCR *dcr) { bool ok = true; Enter(dbglvl); Dmsg5(dbglvl, "close_part vol=%s fd=%d dev=%p adata=%d dev=%s\n", VolHdr.VolumeName, m_fd, this, adata, print_name()); if (!is_open()) { //Dmsg2(1000, "Device %s already closed vol=%s\n", print_name(),VolHdr.VolumeName); Leave(dbglvl); return true; /* already closed */ } if (d_close(m_fd) != 0) { berrno be; dev_errno = errno; Mmsg2(errmsg, _("Error closing device %s. ERR=%s.\n"), print_name(), be.bstrerror()); ok = false; } m_fd = -1; part = 0; file_addr = 0; Leave(dbglvl); return ok; } bool cloud_dev::open_next_part(DCR *dcr) { Enter(dbglvl); int save_part; char ed1[50]; Dmsg4(dbglvl, "open next: part=%d part_size=%d, can_append()=%s, openmode=%d\n", part, part_size, can_append() ? 
"true":"false", openmode); /* When appending, do not open a new part if the current is empty */ if (can_append() && (part_size == 0)) { Dmsg2(dbglvl, "open next: part=%d num_cache_parts=%d exit OK no new part needed.\n", part, num_cache_parts); Leave(dbglvl); return true; } /* TODO: Get the the last max_part */ uint32_t max_cloud_part = cloud_prox->last_index(getVolCatName()); Dmsg2(dbglvl, "open next: part=%d max_cloud_part=%d\n", part, max_cloud_part); if (!can_append() && part >= MAX(max_cache_part, max_cloud_part)) { Dmsg3(dbglvl, "EOT: part=%d num_cache_parts=%d max_cloud_part=%d\n", part, num_cache_parts, max_cloud_part); Mmsg2(errmsg, "part=%d no more parts to read. addr=%s\n", part, print_addr(ed1, sizeof(ed1), EndAddr)); Dmsg1(dbglvl, "%s", errmsg); part = 0; Leave(dbglvl); return false; } save_part = part; if (!close_part(dcr)) { /* close current part */ POOL_MEM tmp; Leave(dbglvl); Mmsg(tmp, " close_part failed: part=%d num_cache_parts=%d\n", part, num_cache_parts); pm_strcat(errmsg, tmp); Dmsg1(dbglvl, "%s", errmsg); return false; } if (openmode == CREATE_READ_WRITE) { VolCatInfo.VolCatParts = num_cache_parts; if (!dir_update_volume_info(dcr, false, false, true)) { Dmsg0(dbglvl, "Error from update_vol_info.\n"); dev_errno = EIO; return false; } part_size = 0; } /* Restore part number */ part = save_part; if (dcr->is_reading()) { wait_one_transfer(dcr, getVolCatName(), part); } /* Write part to cloud */ Dmsg2(dbglvl, "=== part=%d num_cache_parts=%d\n", part, num_cache_parts); if (dcr->is_writing()) { if (!upload_part_to_cloud(dcr, getVolCatName(), part, (trunc_opt == TRUNC_AFTER_UPLOAD))) { if (errmsg[0]) { Qmsg(dcr->jcr, M_ERROR, 0, "%s", errmsg); } } } /* Try to open next part */ part++; Dmsg2(dbglvl, "=== inc part: part=%d num_cache_parts=%d\n", part, num_cache_parts); if (can_append()) { Dmsg0(dbglvl, "Set openmode to CREATE_READ_WRITE\n"); openmode = CREATE_READ_WRITE; } if (open_device(dcr, openmode)) { if (openmode == CREATE_READ_WRITE) { set_append(); clear_eof(); clear_eot(); file_addr = 0; file_addr = get_full_addr(); if (lseek(dcr, file_addr, SEEK_SET) < 0) { berrno be; dev_errno = errno; Mmsg2(errmsg, _("lseek to 0 error on %s. ERR=%s.\n"), print_name(), be.bstrerror()); Leave(dbglvl); return false; } } } else { /* open failed */ /* TODO: Make sure max_cache_part and max_cloud_part are up to date */ if (part > MAX(max_cache_part, max_cloud_part)) { Dmsg4(dbglvl, "set_eot: part=%d num_cache_parts=%d max_cache_part=%d max_cloud_part=%d\n", part, num_cache_parts, max_cache_part, max_cloud_part); set_eof(); set_eot(); } Leave(dbglvl); Mmsg2(errmsg, "EOT: part=%d num_cache_parts=%d\n", part, num_cache_parts); Dmsg1(dbglvl, "%s", errmsg); return false; } set_labeled(); /* all parts are labeled */ Dmsg3(dbglvl, "opened next: append=%d part=%d num_cache_parts=%d\n", can_append(), part, num_cache_parts); Leave(dbglvl); return true; } /* Print the object address */ char *cloud_dev::print_addr(char *buf, int32_t buf_len) { uint64_t full_addr = get_full_addr(); buf[0] = 0; bsnprintf(buf, buf_len, "%lu:%llu", get_part(full_addr), get_offset(full_addr)); return buf; } char *cloud_dev::print_addr(char *buf, int32_t buf_len, boffset_t addr) { buf[0] = 0; bsnprintf(buf, buf_len, "%lu:%llu", get_part(addr), get_offset(addr)); return buf; } /* * Check if the current position on the volume corresponds to * what is in the catalog. 
* */ bool cloud_dev::is_eod_valid(DCR *dcr) { JCR *jcr = dcr->jcr; ilist cache_parts; bool do_update = false; bool ok = true; POOL_MEM err, tmp; /* We need up to date information for Cloud and Cache */ uint32_t max_cloud_part = cloud_prox->last_index(dcr->VolumeName); uint64_t last_cloud_size = cloud_prox->get_size(dcr->VolumeName, max_cloud_part); get_cache_volume_parts_list(dcr, dcr->VolumeName, &cache_parts); uint32_t max_cache_part = cache_parts.last_index(); uint64_t last_cache_size = part_get_size(&cache_parts, max_cache_part); /* When we open a new part, the actual size is 0, so we are not very interested */ if (last_cache_size == 0 && max_cache_part > 0) { max_cache_part--; last_cache_size = part_get_size(&cache_parts, max_cache_part); } uint32_t last_p = MAX(max_cloud_part, max_cache_part); uint64_t last_s = MAX(last_cache_size, last_cloud_size); Dmsg5(dbglvl, "vol=%s cache part=%ld size=%lld, cloud part=%ld size=%lld\n", dcr->VolumeName, max_cache_part, last_cache_size, max_cloud_part, last_cloud_size); /* If we have the same Part number in the cloud and in the cache. We check * the size of the two parts. The cache part may be truncated (size=0). */ if (max_cloud_part == max_cache_part) { if (last_cache_size > 0 && last_cloud_size != last_cache_size) { ok = false; /* Big consistency problem, which one do we take? Biggest one? */ Mmsg(tmp, "For the last Part=%ld the Cache and Cloud sizes are not the same! Cache=%lld Cloud=%lld.\n", max_cloud_part, last_cloud_size, last_cache_size); pm_strcat(err, tmp.c_str()); } } /* The catalog should have the right LastPart */ if (VolCatInfo.VolCatParts != last_p) { Mmsg(tmp, "The number of parts do not match! Volume=%ld Catalog=%ld.\n", last_p, VolCatInfo.VolCatParts); VolCatInfo.VolCatParts = last_p; VolCatInfo.VolLastPartBytes = last_s; VolCatInfo.VolCatBytes = last_s; pm_strcat(err, tmp.c_str()); do_update = true; /* The catalog should have the right LastPartBytes */ } else if (VolCatInfo.VolLastPartBytes != last_s) { Mmsg(tmp, "Sizes of last part number=%ld do not match! Volume=%lld Catalog=%lld.\n", last_p, VolCatInfo.VolLastPartBytes, last_s); VolCatInfo.VolLastPartBytes = last_s; VolCatInfo.VolCatBytes = last_s; pm_strcat(err, tmp.c_str()); do_update = true; } /* We also check that the last part uploaded in the cloud is correct */ if (VolCatInfo.VolCatCloudParts != max_cloud_part) { Mmsg(tmp, "Number of Cloud Parts do not match! 
Volume=%ld Catalog=%ld.\n", max_cloud_part, VolCatInfo.VolCatCloudParts); /* FIXME: The VolCatCloudParts is not used in the code and the value is not correct * VolCatInfo.VolCatCloudParts = max_cloud_part; */ pm_strcat(err, tmp.c_str()); do_update = true; } if (ok) { if (do_update) { Jmsg2(jcr, M_INFO, 0, _("Correcting catalog for Volume \"%s\":\n%s\n"), dcr->VolumeName, err.c_str()); if (!dir_update_volume_info(dcr, false, true)) { Jmsg(jcr, M_WARNING, 0, _("Error updating Catalog\n")); dcr->mark_volume_in_error(); return false; } } } else { Mmsg2(jcr->errmsg, _("Bacula cannot write on disk Volume \"%s\" because: %s"), dcr->VolumeName, err.c_str()); Jmsg(jcr, M_ERROR, 0, jcr->errmsg); Dmsg0(100, jcr->errmsg); dcr->mark_volume_in_error(); return false; } return true; } /* * We are called here when Bacula wants to append to a Volume */ bool cloud_dev::eod(DCR *dcr) { bool ok; uint32_t max_part = 1; Enter(dbglvl); uint32_t max_cloud_part = cloud_prox->last_index(getVolCatName()); Dmsg5(dbglvl, "=== eod: part=%d num_cache_parts=%d max_cache_part=%d max_cloud_part=%d vol_parts=%d\n", part, num_cache_parts, max_cache_part, max_cloud_part, VolCatInfo.VolCatParts); /* First find maximum part */ if (max_part < max_cache_part) { max_part = max_cache_part; } if (max_part < max_cloud_part) { max_part = max_cloud_part; } if (max_part < VolCatInfo.VolCatParts) { max_part = VolCatInfo.VolCatParts; } if (max_part < VolCatInfo.VolCatCloudParts) { max_part = VolCatInfo.VolCatCloudParts; } if (part < max_part) { if (!close_part(dcr)) { /* close current part */ Leave(dbglvl); Dmsg2(dbglvl, "close_part failed: part=%d num_cache_parts=%d\n", part, num_cache_parts); return false; } /* Try to open next part */ part = max_part; /* Create new part */ part_size = 0; part++; openmode = CREATE_READ_WRITE; Dmsg2(dbglvl, "=== eod: set part=%d num_cache_parts=%d\n", part, num_cache_parts); if (!open_device(dcr, openmode)) { Leave(dbglvl); Dmsg2(dbglvl, "Fail open_device: part=%d num_cache_parts=%d\n", part, num_cache_parts); return false; } if (part > 1) { /* When going to EOD with Cloud, we close the current part (that can * be part.1 with the label), and we open the last part file * available. close_part()/open_device() is clearing the labeled * flag, and we absolutely need this flag in some cases, for example * after an incomplete job. Like in open_next_part(), we assume that * if we are beyond the part.1, the volume is labeled. */ set_labeled(); } } ok = file_dev::eod(dcr); return ok; } bool cloud_dev::write_volume_label(DCR *dcr, const char *VolName, const char *PoolName, bool relabel, bool no_prelabel) { bool ok = DEVICE::write_volume_label(dcr, VolName, PoolName, relabel, no_prelabel); if (!ok) { Dmsg0(dbglvl, "write_volume_label failed.\n"); return false; } if (part != 1) { Dmsg1(000, "Big problem!!! part=%d, but should be 1\n", part); return false; } set_append(); return true; } bool cloud_dev::rewrite_volume_label(DCR *dcr, bool recycle) { Enter(100); bool ok = DEVICE::rewrite_volume_label(dcr, recycle); /* * Normally, at this point, the label has been written to disk * but remains in the first part of the block, and will be * "rewritten" when the full block is written. * However, in the case of a cloud device the label has * already been written to a part, so we must now clear * the block of the label data. 
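 * (Presumably, leaving the label data in the block would cause the label to be
 * written a second time when the block is later flushed to the next part.)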
*/ empty_block(dcr->block); if (!ok || !open_next_part(dcr)) { ok = false; } Leave(100); return ok; } bool cloud_dev::do_size_checks(DCR *dcr, DEV_BLOCK *block) { if (!DEVICE::do_size_checks(dcr, block)) { return false; } /* * Do Cloud specific size checks */ /* Limit maximum part size to value specified by user */ if (max_part_size > 0 && ((part_size + block->binbuf) >= max_part_size)) { if (part < num_cache_parts) { Qmsg3(dcr->jcr, M_FATAL, 0, _("Error while writing, current part number" " is less than the total number of parts (%d/%d, device=%s)\n"), part, num_cache_parts, print_name()); dev_errno = EIO; return false; } /* switch volume when max_vol_parts_num is set and reached. Do it before next part is openned to avoid small orphean part at the end of volume */ if ((max_vol_parts_num) > 0 && (part >= max_vol_parts_num)) { Dmsg2(dbglvl, "max_vol_parts_num = %d exceeded by partidx= %d. Calling terminate_writing_volume\n", max_vol_parts_num, part); terminate_writing_volume(dcr); dev_errno = ENOSPC; return false; } if (!open_next_part(dcr)) { return false; } } // static, so it's not calculated everytime static uint64_t hard_max_part_size = ((uint64_t)1 << off_bits) -1; static uint32_t hard_max_part_number = ((uint32_t)1 << part_bits) -1; if (part_size >= hard_max_part_size) { Qmsg3(dcr->jcr, M_FATAL, 0, _("Error while writing, current part size" " is greater than the maximum part size (%d>%d, device=%s)\n"), part_size, hard_max_part_size, print_name()); dev_errno = EIO; return false; } if (part >= hard_max_part_number) { Qmsg3(dcr->jcr, M_FATAL, 0, _("Error while writing, current part number" " is greater than the maximum part number (%d>%d, device=%s)\n"), part, hard_max_part_number, print_name()); dev_errno = EIO; return false; } return true; } bool cloud_dev::start_of_job(DCR *dcr) { bool ret=false; if (driver) { ret = driver->start_of_job(errmsg); } else { Mmsg(errmsg, "Cloud driver not properly loaded"); /* We should always have a dummy driver */ } Jmsg(dcr->jcr, ret? M_INFO : M_FATAL, 0, "%s\n", errmsg); return ret; } /* Two jobs can try to update the catalog information for a given cloud * volume. It might be avoided by converting the vol_info_mutex to a recursive * lock */ static pthread_mutex_t update_mutex = PTHREAD_MUTEX_INITIALIZER; static void update_volume_record(DCR *dcr, transfer *ppkt) { lock_guard lg(update_mutex); /* automatically released at exit */ bool do_update=false; /* * At this point ppkt should have the last part for the * previous volume, so update the Media record. */ if (!dir_get_volume_info(dcr, ppkt->m_volume_name, GET_VOL_INFO_FOR_READ)) { /* It may happen for label operation */ Dmsg2((ppkt->m_part == 1 ? 100 : 0) , "dir_get_vol_info failed for vol=%s: %s\n", ppkt->m_volume_name, dcr->jcr->errmsg); return; } /* Between the GET and the UPDATE, and other job can call the same * function and put more up to date information. 
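 * (Illustrative interleaving with hypothetical jobs A and B: A reads
 * VolCatParts=10; B reads 10, uploads part 11 and updates the catalog to 11;
 * if A then wrote back its older view, B's update would be lost.)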
So we are protected * by the update_mutex */ /* Update the Media information */ if ((ppkt->m_part > dcr->VolCatInfo.VolCatParts) || (ppkt->m_part == dcr->VolCatInfo.VolCatParts && dcr->VolCatInfo.VolLastPartBytes != ppkt->m_stat_size)) { do_update=true; dcr->VolCatInfo.VolCatParts = ppkt->m_part; dcr->VolCatInfo.VolLastPartBytes = ppkt->m_stat_size; } /* We update the CloudParts in the catalog only if the current transfer is correct */ if (ppkt->m_state == TRANS_STATE_DONE && ppkt->m_part > dcr->VolCatInfo.VolCatCloudParts && ppkt->m_stat_size > 0) { do_update = true; dcr->VolCatInfo.VolCatCloudParts = ppkt->m_part; } if (do_update) { dir_update_volume_info(dcr, false, true, true/*use_dcr*/); } } bool cloud_dev::end_of_job(DCR *dcr, uint32_t truncate) { Enter(dbglvl); transfer *tpkt; /* current packet */ transfer *ppkt=NULL; /* previous packet */ const char *prefix = ""; /* before waiting on transfers, we might have to lauch the uploads */ if (upload_opt == UPLOAD_AT_ENDOFJOB) { foreach_alist(tpkt, dcr->uploads) { tpkt->queue(); } } /* * We wait for each of our uploads to complete * Note: we also want to update the cloud parts and cache parts for * each part uploaded. The deletes list contains transfer packet for * each part that was upload in the order of the parts as they were * created. Also, there may be multiple Volumes that were uploaded, * so for each volume, we search until the end of the list or a * different Volume is found in order to find the maximum part * number that was uploaded. Then we read the Media record for * that Volume, update it, and write it back to the catalog. */ POOL_MEM msg(PM_MESSAGE); if (!dcr->downloads->empty()) { if (!dcr->jcr->is_internal_job()) { Jmsg(dcr->jcr, M_INFO, 0, _("Cloud Download transfers:\n")); } else { prefix = "3000 Cloud Download: "; } Dmsg1(dbglvl, "%s", msg.c_str()); foreach_alist(tpkt, dcr->downloads) { /* Do we really need to wait on downloads : if we didn't * wait for them until now, we basically didn't use them. And we * surelly won't anymore. If the job is canceled we can cancel our * own downloads (do not touch downloads shared with other jobs). */ wait_end_of_transfer(dcr, tpkt); POOL_MEM dmsg(PM_MESSAGE); tpkt->append_status(dmsg); Jmsg(dcr->jcr, M_INFO, 0, "%s%s", prefix, dmsg.c_str()); download_mgr.release(tpkt); } } dcr->downloads->destroy(); if (!dcr->uploads->empty()) { int oldstatus = dcr->jcr->JobStatus; dcr->jcr->sendJobStatus(JS_CloudUpload); if (!dcr->jcr->is_internal_job()) { Jmsg(dcr->jcr, M_INFO, 0, _("Cloud Upload transfers:\n")); } else { prefix = "3000 Cloud Upload: "; } foreach_alist(tpkt, dcr->uploads) { wait_end_of_transfer(dcr, tpkt); POOL_MEM umsg(PM_MESSAGE); tpkt->append_status(umsg); Jmsg(dcr->jcr, (tpkt->m_state == TRANS_STATE_ERROR) ? M_ERROR : M_INFO, 0, "%s%s", prefix, umsg.c_str()); Dmsg1(dbglvl, "%s", umsg.c_str()); bool do_truncate = (truncate==TRUNC_AT_ENDOFJOB) || (truncate==TRUNC_CONF_DEFAULT && trunc_opt==TRUNC_AT_ENDOFJOB); if (do_truncate && tpkt->m_part!=1) { bool allow_truncate = false; uint64_t cloud_size = 0; if (tpkt->m_state == TRANS_STATE_DONE && tpkt->m_res_size != 0 && tpkt->m_res_mtime != 0) { /* so far so good for truncation */ /* double check if the cache size matches the transfer size */ struct stat statbuf; if (lstat(tpkt->m_cache_fname, &statbuf) == -1) { berrno be; Dmsg2(dbglvl, "Failed to stat cache file %s. 
ERR=%s\n", tpkt->m_cache_fname, be.bstrerror()); allow_truncate = false; } else { /* if sizes do match, we can truncate */ cloud_size = (uint64_t)statbuf.st_size; allow_truncate = (cloud_size == tpkt->m_res_size); } } if (allow_truncate) { if (unlink(tpkt->m_cache_fname) != 0) { berrno be; Dmsg2(dbglvl, "Truncate cache option at end of job. Unable to truncate cache part %s. ERR=%s\n", tpkt->m_cache_fname, be.bstrerror()); } else { Dmsg1(dbglvl, "Truncate cache option at end of job. Truncated cache part %s OK\n", tpkt->m_cache_fname); } } else { Dmsg4(dbglvl, "Truncate cache option at end of job skipped. %s state=%d cache size=%lld cloud size =%lld\n", tpkt->m_cache_fname, tpkt->m_state, tpkt->m_res_size, cloud_size); } } if (ppkt == NULL) { ppkt = tpkt; continue; } if (strcmp(ppkt->m_volume_name, tpkt->m_volume_name) == 0) { ppkt = tpkt; continue; } /* vol name changed so update media for previous transfer */ update_volume_record(dcr, ppkt); ppkt = tpkt; } dcr->jcr->sendJobStatus(oldstatus); } /* Update the last (previous) one */ if (ppkt) { Dmsg3(dbglvl, "== Last part=%d size=%lld Volume=%s\n", ppkt->m_part, ppkt->m_stat_size, ppkt->m_volume_name); update_volume_record(dcr, ppkt); Dmsg3(dbglvl, "=== Very Last part=%d size=%lld Volume=%s\n", ppkt->m_part, ppkt->m_stat_size, ppkt->m_volume_name); } /* Now, clear our list and the global one if needed */ foreach_alist(tpkt, dcr->uploads) { upload_mgr.release(tpkt); } dcr->uploads->destroy(); if (driver) { driver->end_of_job(errmsg); } Leave(dbglvl); return true; } bool cloud_dev::wait_end_of_transfer(DCR *dcr, transfer *elem) { if (!elem) { return false; } Enter(dbglvl); struct timeval tv; tv.tv_usec = 0; tv.tv_sec = 30; int stat = ETIMEDOUT; while (stat == ETIMEDOUT) { if (dcr->jcr->is_canceled()) { elem->cancel(); break; } if (chk_dbglvl(dbglvl)) { POOL_MEM status(PM_FNAME); get_cloud_upload_transfer_status(status, false); Dmsg1(0, "%s",status.addr()); get_cloud_download_transfer_status(status, false); Dmsg1(0, "%s",status.addr()); } stat = elem->timedwait(tv); } Leave(dbglvl); return (stat == 0); } /* return the volumes list */ bool cloud_dev::get_cloud_volumes_list(DCR* dcr, alist *volumes, POOLMEM *&err) { cancel_callback cancel_cb; cancel_cb.fct = DCR_cancel_cb; cancel_cb.arg = dcr; if (!driver) { return false; } return driver->get_cloud_volumes_list(volumes, &cancel_cb, err); } /* return the list of parts contained in VolumeName */ bool cloud_dev::get_cloud_volume_parts_list(DCR *dcr, const char *VolumeName, ilist *parts, POOLMEM *&err) { cancel_callback cancel_cb; cancel_cb.fct = DCR_cancel_cb; cancel_cb.arg = dcr; if (!driver) { return false; } return driver->get_cloud_volume_parts_list(VolumeName, parts, &cancel_cb, err); } /* TODO: Add .api2 mode for the status message */ /* format a status message of the cloud transfers. Verbose gives details on each transfer */ uint32_t cloud_dev::get_cloud_upload_transfer_status(POOL_MEM& msg, bool verbose) { uint32_t ret = 0; ret = Mmsg(msg,_(" Uploads ")); ret += upload_mgr.append_status(msg, verbose); return ret; } void cloud_dev::get_api_cloud_upload_transfer_status(OutputWriter &ow, bool verbose) { ow.get_output(OT_LABEL, "uploads", OT_END); upload_mgr.append_api_status(ow, verbose); } /* format a status message of the cloud transfers. 
Verbose gives details on each transfer */ uint32_t cloud_dev::get_cloud_download_transfer_status(POOL_MEM& msg, bool verbose) { uint32_t ret = 0; ret = Mmsg(msg,_(" Downloads ")); ret += download_mgr.append_status(msg, verbose); return ret; } void cloud_dev::get_api_cloud_download_transfer_status(OutputWriter &ow, bool verbose) { ow.get_output(OT_LABEL, "downloads", OT_END); download_mgr.append_api_status(ow, verbose); } /* for a given volume VolumeName, return parts that is a list of the * cache parts within the volume */ bool cloud_dev::get_cache_volume_parts_list(DCR *dcr, const char* VolumeName, ilist *parts) { JCR *jcr = dcr->jcr; Enter(dbglvl); if (!parts || strlen(VolumeName) == 0) { return false; } POOLMEM *part_path = get_pool_memory(PM_NAME); POOLMEM *vol_dir = get_pool_memory(PM_NAME); /*NB : *** QUESTION *** : it works with examples but is archive_name() the kosher fct to call to get the cache path? */ pm_strcpy(vol_dir, archive_name()); if (!IsPathSeparator(vol_dir[strlen(vol_dir)-1])) { pm_strcat(vol_dir, "/"); } pm_strcat(vol_dir, VolumeName); DIR* dp = NULL; struct dirent *entry = NULL; struct stat statbuf; int name_max; bool ok = false; POOL_MEM dname(PM_FNAME); int status = 0; Enter(dbglvl); Dmsg1(dbglvl, "Searching for parts in: %s\n", VolumeName); if (!(dp = opendir(vol_dir))) { berrno be; Mmsg2(errmsg, "Cannot opendir to get parts list. Volume %s does not exist. ERR=%s\n", VolumeName, be.bstrerror()); Dmsg1(dbglvl, "%s", errmsg); goto get_out; } name_max = pathconf(".", _PC_NAME_MAX); if (name_max < 1024) { name_max = 1024; } entry = (struct dirent *)malloc(sizeof(struct dirent) + name_max + 1000); for ( ;; ) { if (jcr->is_canceled()) { goto get_out; } errno = 0; status = breaddir(dp, dname.addr()); if (status == -1) { break; } else if (status < 0) { Mmsg1(errmsg, "breaddir failed: status=%d", status); Dmsg1(dbglvl, "%s\n", errmsg); goto get_out; } /* Always ignore . and .. 
*/ if (strcmp(".", dname.c_str()) == 0 || strcmp("..", dname.c_str()) == 0) { continue; } /* Look only for part files */ if (strncmp("part.", dname.c_str(), 5) != 0) { continue; } char *ext = strrchr (dname.c_str(), '.'); if (!ext || strlen(ext) < 2) { continue; } cloud_part *part = (cloud_part*) malloc(sizeof(cloud_part)); if (!part) { berrno be; Dmsg1(dbglvl, "Failed to create part structure: %s\n", be.bstrerror()); goto get_out; } /* save extension (part number) to cloud_part struct index*/ part->index = atoi(&ext[1]); /* Bummer : caller is responsible for freeing label */ pm_strcpy(part_path, vol_dir); if (!IsPathSeparator(part_path[strlen(vol_dir)-1])) { pm_strcat(part_path, "/"); } pm_strcat(part_path, dname.c_str()); /* Get size of part */ if (lstat(part_path, &statbuf) == -1) { /* The part is no longer here, might be a truncate in an other thread, just * do like if the file wasn't here */ continue; } part->size = statbuf.st_size; part->mtime = statbuf.st_mtime; /* ***FIXME***: should get SHA512 from cloud */ bmemzero(part->hash64, 64); parts->put(part->index, part); } ok = true; get_out: if (dp) { closedir(dp); } if (entry) { free(entry); } free_pool_memory(vol_dir); free_pool_memory(part_path); return ok; } /* * Upload cache parts that are not in the cloud */ bool cloud_dev::upload_cache(DCR *dcr, const char *VolumeName, uint32_t truncate, POOLMEM *&err) { int i; Enter(dbglvl); bool ret=true; ilist cloud_parts; ilist cache_parts; POOLMEM *vol_dir = get_pool_memory(PM_NAME); POOLMEM *fname = get_pool_memory(PM_NAME); cancel_callback cancel_cb; cancel_cb.fct = DCR_cancel_cb; cancel_cb.arg = dcr; if (!driver->get_cloud_volume_parts_list(VolumeName, &cloud_parts, &cancel_cb, err)) { Qmsg2(dcr->jcr, M_ERROR, 0, "Error while uploading parts for volume %s. %s\n", VolumeName, err); ret = false; goto bail_out; } if (!get_cache_volume_parts_list(dcr, VolumeName, &cache_parts)) { Qmsg1(dcr->jcr, M_ERROR, 0, "Error while listing cache parts for volume %s.\n", VolumeName); ret = false; goto bail_out; } make_cache_volume_name(vol_dir, VolumeName); /* * Upload every part where cache_size > cloud_size */ for (i=1; i <= (int)cache_parts.last_index(); i++) { if (i <= (int)cloud_parts.last_index()) { /* not on the cloud, but exists in the cache */ cloud_part *cachep = (cloud_part *)cache_parts[i]; cloud_part *cloudp = (cloud_part *)cloud_parts[i]; if (!cachep || cachep->size == 0) { /* Not in the current cache */ continue; } if (cloudp && cloudp->size >= cachep->size) { continue; /* already uploaded */ } } Mmsg(fname, "%s/part.%d", vol_dir, i); Dmsg1(dbglvl, "Do upload of %s\n", fname); bool do_truncate = (truncate==TRUNC_AFTER_UPLOAD) || (truncate==TRUNC_CONF_DEFAULT && (trunc_opt == TRUNC_AFTER_UPLOAD)); if (cache_parts[i]) { if (!upload_part_to_cloud(dcr, VolumeName, i, do_truncate)) { if (errmsg[0]) { Qmsg(dcr->jcr, M_ERROR, 0, "%s", errmsg); } ret = false; } else { Qmsg(dcr->jcr, M_INFO, 0, "Uploaded cache %s\n", fname); } } else { Qmsg(dcr->jcr, M_WARNING, 0, "Part %s not found in cache. Upload skipped.\n", fname); } } bail_out: free_pool_memory(vol_dir); free_pool_memory(fname); Leave(dbglvl); return ret; } bacula-15.0.3/src/stored/cloud_parts.h0000644000175000017500000001024614771010173017444 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. 
You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Routines for managing Volumes contains and comparing parts * * Written by Norbert Bizet, May MMXVI * */ #ifndef _CLOUD_PARTS_H_ #define _CLOUD_PARTS_H_ #include "bacula.h" #include "stored.h" struct cloud_part { uint32_t index; utime_t mtime; uint64_t size; unsigned char hash64[64]; }; /* equality operators for cloud_part structure */ bool operator==(const cloud_part& lhs, const cloud_part& rhs); bool operator!=(const cloud_part& lhs, const cloud_part& rhs); /* more equality operators: when compared to int, we match only index */ bool operator==(const cloud_part& lhs, const uint32_t& rhs); bool operator!=(const cloud_part& lhs, const uint32_t& rhs); /* Check if a part p is contained in a parts list */ bool list_contains_part(ilist *parts, cloud_part *p); /* Check if a part index is contained in a parts list */ bool list_contains_part(ilist *parts, uint32_t part_idx); /* if parts1 and parts2 are synced, return true. false otherwise */ bool identical_lists(ilist *parts1, ilist *parts2); /* cloud_parts present in source but not in dest are appended to diff. * there's no cloud_part copy made. * Diff only holds references and shoudn't own them */ bool diff_lists(ilist *source, ilist *dest, ilist *diff); /* A proxy view of the cloud, providing existing parts * index/size/date of modification without accessing the cloud itself. * The basic proxy structure is a hash table of ilists: root | -[volume001]-----ilist | | | [01]-->cloud_part | [03]-->cloud_part | | | -[volume002]-----ilist | | | [01]-->cloud_part [02]-->cloud_part | */ class cloud_proxy : public SMARTALLOC { private: htable *m_hash; /* the root htable */ bool m_owns; /* determines if ilist own the cloud_parts */ pthread_mutex_t m_mutex; /* protect access*/ static cloud_proxy *m_pinstance; /* singleton instance */ static uint64_t m_count; /* static refcount */ ~cloud_proxy(); public: /* size: the default hash size * owns: determines if the ilists own the cloud_parts or not */ cloud_proxy(uint32_t size=100, bool owns=true); /* each time a part is added to the cloud, the corresponding cloud_part * should be set here */ /* either using a part ptr (part can be disposed afterward)... 
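 * (Illustrative usage sketch; the volume name and part values below are
 * placeholders:
 *    cloud_proxy *prox = cloud_proxy::get_instance();
 *    prox->set("Vol0001", 3, mtime, size, hash64);  // record cloud part.3
 *    uint64_t sz = prox->get_size("Vol0001", 3);
 *    uint32_t last = prox->last_index("Vol0001");
 *    prox->release();   // refcounted singleton: release, do not delete
 * )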
*/ bool set(const char *volume, cloud_part *part); /* ...or by passing basic part parameters (part is constructed internally) */ bool set(const char *volume, uint32_t index, utime_t mtime, uint64_t size, unsigned char *hash64); /* one can retrieve the proxied cloud_part using the get method */ cloud_part *get(const char *volume, uint32_t part_idx); /* direct access to part size */ uint64_t get_size(const char *volume, uint32_t part_idx); /* Check if the volume entry exists and return true if it's the case */ bool volume_lookup(const char *volume); /* reset the volume list content with the content of part_list */ bool reset(const char *volume, ilist *part_list); /* get the current last (max) index for a given volume */ uint32_t last_index(const char *volume); /* returns a ilist of elements present in the proxy but not in the exclusion list */ ilist *exclude(const char* volume, ilist *exclusion_lst); /* refcounted singleton */ static cloud_proxy *get_instance(); /* instead of deleting, release the cloud_proxy*/ void release(); void dump(); }; #endif /* _CLOUD_PARTS_H_ */ bacula-15.0.3/src/stored/dev.h0000644000175000017500000013563214771010173015712 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Definitions for using the Device functions in Bacula * Tape and File storage access * * Kern Sibbald, MM */ /* * Some details of how device reservations work * * class DEVICE: * set_load() set to load volume * needs_load() volume must be loaded (i.e. set_load done) * clear_load() load done. 
* set_unload() set to unload volume * needs_unload() volume must be unloaded * clear_unload() volume unloaded * * reservations are temporary until the drive is acquired * inc_reserved() increments num of reservations * dec_reserved() decrements num of reservations * num_reserved() number of reservations * * class DCR: * set_reserved() sets local reserve flag and calls dev->inc_reserved() * clear_reserved() clears local reserve flag and calls dev->dec_reserved() * is_reserved() returns local reserved flag * unreserve_device() much more complete unreservation * */ #ifndef __DEV_H #define __DEV_H 1 #undef DCR /* used by Bacula */ /* Return values from wait_for_sysop() */ enum { W_ERROR = 1, W_TIMEOUT = 2, W_POLL = 3, W_MOUNT = 4, W_WAKE = 5 }; /* Arguments to open_dev() */ enum { CREATE_READ_WRITE = 1, OPEN_READ_WRITE = 2, OPEN_READ_ONLY = 3, OPEN_WRITE_ONLY = 4 }; /* Argument to print tape alerts */ enum alert_list_type { list_codes = 1, list_short = 2, list_long = 3 }; enum alert_list_which { list_last = 1, list_all = 2 }; /* To open device readonly when appropriate, like for bls for dedup devices */ enum default_open_mode_for_device { omd_none = 0, omd_write = 1, omd_rdonly = 2 }; extern int device_default_open_mode; typedef void (alert_cb)(void *alert_ctx, const char *short_msg, const char *long_msg, char *Volume, int severity, int flags, int alert, utime_t alert_time); /* registered device metrics */ typedef struct { int bacula_storage_device_freespace; int bacula_storage_device_totalspace; int bacula_storage_device_readbytes; int bacula_storage_device_readtime; int bacula_storage_device_readspeed; int bacula_storage_device_writespeed; int bacula_storage_device_status; int bacula_storage_device_writebytes; int bacula_storage_device_writetime; } devstatmetrics_t; /* Aligned Data Disk Volume extension */ #define ADATA_EXTENSION ".add" /* Generic status bits returned from status_dev() */ #define BMT_TAPE (1<<0) /* is tape device */ #define BMT_EOF (1<<1) /* just read EOF */ #define BMT_BOT (1<<2) /* at beginning of tape */ #define BMT_EOT (1<<3) /* end of tape reached */ #define BMT_SM (1<<4) /* DDS setmark */ #define BMT_EOD (1<<5) /* DDS at end of data */ #define BMT_WR_PROT (1<<6) /* tape write protected */ #define BMT_ONLINE (1<<7) /* tape online */ #define BMT_DR_OPEN (1<<8) /* tape door open */ #define BMT_IM_REP_EN (1<<9) /* immediate report enabled */ /* Bits for device capabilities */ #define CAP_EOF (1<<0) /* has MTWEOF */ #define CAP_BSR (1<<1) /* has MTBSR */ #define CAP_BSF (1<<2) /* has MTBSF */ #define CAP_FSR (1<<3) /* has MTFSR */ #define CAP_FSF (1<<4) /* has MTFSF */ #define CAP_EOM (1<<5) /* has MTEOM */ #define CAP_REM (1<<6) /* is removable media */ #define CAP_RACCESS (1<<7) /* is random access device */ #define CAP_AUTOMOUNT (1<<8) /* Read device at start to see what is there */ #define CAP_LABEL (1<<9) /* Label blank tapes */ #define CAP_ANONVOLS (1<<10) /* Mount without knowing volume name */ #define CAP_ALWAYSOPEN (1<<11) /* always keep device open */ #define CAP_AUTOCHANGER (1<<12) /* AutoChanger */ #define CAP_OFFLINEUNMOUNT (1<<13) /* Offline before unmount */ #define CAP_STREAM (1<<14) /* Stream device */ #define CAP_BSFATEOM (1<<15) /* Backspace file at EOM */ #define CAP_FASTFSF (1<<16) /* Fast forward space file */ #define CAP_TWOEOF (1<<17) /* Write two eofs for EOM */ #define CAP_CLOSEONPOLL (1<<18) /* Close device on polling */ #define CAP_POSITIONBLOCKS (1<<19) /* Use block positioning */ #define CAP_MTIOCGET (1<<20) /* Basic support for fileno and 
blkno */ #define CAP_REQMOUNT (1<<21) /* Require mount and unmount */ #define CAP_CHECKLABELS (1<<22) /* Check for ANSI/IBM labels */ #define CAP_BLOCKCHECKSUM (1<<23) /* Create/test block checksum */ #define CAP_LSEEK (1<<24) /* Has lseek function defined i.e. basically File storage */ #define CAP_SYNCONCLOSE (1<<25) /* Need to call fsync() when releasing/closing the device */ #define CAP_LINTAPE (1<<26) /* If has the Lintape interface */ /* Test state */ #define dev_state(dev, st_state) ((dev)->state & (st_state)) /* Device state bits */ #define ST_XXXXXX (1<<0) /* was ST_OPENED */ #define ST_XXXXX (1<<1) /* was ST_TAPE */ #define ST_XXXX (1<<2) /* was ST_FILE */ #define ST_XXX (1<<3) /* was ST_FIFO */ #define ST_XX (1<<4) /* was ST_DVD */ #define ST_X (1<<5) /* was ST_PROG */ #define ST_LABEL (1<<6) /* label found */ #define ST_MALLOC (1<<7) /* dev packet malloc'ed in init_dev() */ #define ST_APPEND (1<<8) /* ready for Bacula append */ #define ST_READ (1<<9) /* ready for Bacula read */ #define ST_EOT (1<<10) /* at end of tape */ #define ST_WEOT (1<<11) /* Got EOT on write */ #define ST_EOF (1<<12) /* Read EOF i.e. zero bytes */ #define ST_NEXTVOL (1<<13) /* Start writing on next volume */ #define ST_SHORT (1<<14) /* Short block read */ #define ST_MOUNTED (1<<15) /* the device is mounted to the mount point */ #define ST_MEDIA (1<<16) /* Media found in mounted device */ #define ST_OFFLINE (1<<17) /* set offline by operator */ #define ST_PART_SPOOLED (1<<18) /* spooling part */ #define ST_FREESPACE_OK (1<<19) /* Have valid freespace */ #define ST_NOSPACE (1<<20) /* No space on device */ /* Volume Catalog Information structure definition */ struct VOLUME_CAT_INFO { /* Media info for the current Volume */ uint64_t VolCatBytes; /* Total bytes written */ uint64_t VolCatAmetaBytes; /* Ameta bytes written */ uint64_t VolCatAdataBytes; /* Adata bytes written */ uint64_t VolCatPadding; /* Total padding bytes written */ uint64_t VolCatAmetaPadding; /* Ameta zeros (padding) written */ uint64_t VolCatAdataPadding; /* Adata zeros (padding) written */ uint32_t VolCatBlocks; /* Total blocks */ uint32_t VolCatAmetaBlocks; /* Ameta blocks */ uint32_t VolCatAdataBlocks; /* Adata blocks */ uint32_t VolCatWrites; /* Total writes this volume */ uint32_t VolCatAmetaWrites; /* Ameta writes this volume */ uint32_t VolCatAdataWrites; /* Adata writes this volume */ uint32_t VolCatReads; /* Total reads this volume */ uint32_t VolCatAmetaReads; /* Ameta reads this volume */ uint32_t VolCatAdataReads; /* Adata reads this volume */ uint64_t VolCatRBytes; /* Total bytes read */ uint64_t VolCatAmetaRBytes; /* Ameta bytes read */ uint64_t VolCatAdataRBytes; /* Adata bytes read */ uint64_t VolCatHoleBytes; /* Total hole bytes */ uint64_t VolEndAddr; /* Last Volume address */ uint64_t VolLastPartBytes; /* Bytes in last part */ uint32_t VolCatHoles; /* Number of holes */ uint32_t VolCatJobs; /* Number of jobs on this Volume */ uint32_t VolCatFiles; /* Number of files */ uint32_t VolCatType; /* Volume drive type */ uint32_t VolCatParts; /* Max number of cache parts */ uint32_t VolCatCloudParts; /* Max number of cloud parts */ uint32_t VolCatMounts; /* Number of mounts this volume */ uint32_t VolCatErrors; /* Number of errors this volume */ uint32_t VolCatRecycles; /* Number of recycles this volume */ uint32_t EndFile; /* Last file number */ uint32_t EndBlock; /* Last block number */ uint32_t VolRetention; /* Volume Retention */ int32_t LabelType; /* Bacula/ANSI/IBM */ int32_t Slot; /* >0=Slot loaded, 0=nothing, -1=unknown */ 
uint32_t VolCatMaxJobs; /* Maximum Jobs to write to volume */ uint32_t VolCatMaxFiles; /* Maximum files to write to volume */ uint64_t VolCatMaxBytes; /* Max bytes to write to volume */ uint64_t VolCatCapacityBytes; /* capacity estimate */ btime_t VolReadTime; /* time spent reading */ btime_t VolWriteTime; /* time spent writing this Volume */ int64_t VolMediaId; /* MediaId */ int64_t VolScratchPoolId; /* ScratchPoolId */ int64_t MaxPoolBytes; /* Maximum Pool Bytes */ int64_t PoolBytes; /* Current Pool Bytes */ int64_t BytesWritten; /* Bytes written since last update */ int64_t ABytesWritten; /* Bytes written to Adata since last update */ utime_t VolFirstWritten; /* Time of first write */ utime_t VolLastWritten; /* Time of last write */ bool InChanger; /* Set if vol in current magazine */ bool Protected; /* Set if the vol is Readonly, worm or immutable */ bool UseProtect; /* Set if the device can set the volume Readonly, worm or immutable */ bool VolEncrypted; /* Set if the volume is encrypted */ bool is_valid; /* set if this data is valid */ bool VolEnabled; /* set if volume enabled */ bool VolRecycle; /* set if volume can be recycled */ char VolCatStatus[20]; /* Volume status */ char VolCatName[MAX_NAME_LENGTH]; /* Desired volume to mount */ }; class DEVRES; /* Device resource defined in stored_conf.h */ class DCR; /* forward reference */ class VOLRES; /* forward reference */ class STATUS_PKT; /* forward reference */ /* * Device structure definition. There is one of these for * each physical device. Everything here is "global" to * that device and effects all jobs using the device. */ class DEVICE: public SMARTALLOC { public: int m_blocked; /* set if we must wait (i.e. change tape) */ int m_count; /* Mutex use count -- DEBUG only */ int m_num_reserved; /* counter of device reservations */ pthread_t m_pid; /* Thread that locked -- DEBUG only */ int32_t m_slot; /* slot loaded in drive or -1 if none */ bool m_unload; /* set when Volume must be unloaded */ bool m_load; /* set when Volume must be loaded */ bool m_wait; /* must wait for device to free volume */ bool m_append_reserve; /* reserved for append or read in m_num_reserved set */ bthread_mutex_t m_mutex; /* access control */ bthread_mutex_t acquire_mutex; /* mutex for acquire code */ pthread_mutex_t read_acquire_mutex; /* mutex for acquire read code */ pthread_mutex_t adata_mutex; /* Lock for writing adata */ pthread_mutex_t volcat_mutex; /* VolCatInfo mutex */ pthread_mutex_t dcrs_mutex; /* Attached dcr mutex */ pthread_mutex_t freespace_mutex; /* mutex to compute the freespace */ public: DEVICE() {}; virtual ~DEVICE() {}; DEVICE * volatile swap_dev; /* Swap vol from this device */ dlist *attached_dcrs; /* attached DCR list */ bthread_mutex_t spool_mutex; /* mutex for updating spool_size */ pthread_cond_t wait; /* thread wait variable */ pthread_cond_t wait_next_vol; /* wait for tape to be mounted */ pthread_t no_wait_id; /* this thread must not wait */ int m_fd; /* file descriptor */ int dev_prev_blocked; /* previous blocked state */ int num_waiting; /* number of threads waiting */ int num_writers; /* number of writing threads */ int capabilities; /* capabilities mask */ int state; /* state mask */ int dev_errno; /* Our own errno */ int mode; /* read/write modes */ int openmode; /* parameter passed to open_dev (useful to reopen the device) */ int preserve; /* preserve open state */ int dev_type; /* device type */ uint32_t blocked_by; /* JobId that blocked */ bool enabled; /* Set when enabled */ bool autoselect; /* Autoselect in 
autochanger */ bool read_only; /* Device is read only */ bool initiated; /* set when init_dev() called */ bool m_is_worm; /* set for worm tape */ bool m_shstore; /* Shares storage can be used */ bool m_shstore_lock; /* set if shared lock set */ bool m_shstore_user_lock; /* set if user set shared lock */ bool m_shstore_register; /* set if register key set */ bool m_shstore_blocked; /* Set if I am blocked */ bool m_delay_adata; /* Set if we need to ask to read adata */ bool adata; /* set if adata device */ int label_type; /* Bacula/ANSI/IBM label types */ uint32_t drive_index; /* Autochanger drive index (base 0) */ POOLMEM *dev_name; /* Physical device name */ POOLMEM *adev_name; /* Aligned device name */ POOLMEM *prt_name; /* Name used for display purposes */ char *errmsg; /* nicely edited error message */ uint32_t block_num; /* current block number base 0 */ uint32_t LastBlock; /* last DEV_BLOCK number written to Volume */ uint32_t file; /* current file number base 0 */ uint64_t file_addr; /* Current file read/write address */ uint64_t file_size; /* Current file size */ uint32_t EndBlock; /* last block written */ uint32_t EndFile; /* last file written */ uint64_t EndAddr; /* Ending write addr */ uint32_t min_block_size; /* min block size */ uint32_t max_block_size; /* max block size */ uint32_t max_concurrent_jobs; /* maximum simultaneous jobs this drive */ uint64_t max_volume_size; /* max bytes to put on one volume */ uint64_t max_file_size; /* max file size to put in one file on volume */ uint64_t volume_capacity; /* advisory capacity */ uint64_t max_spool_size; /* maximum spool file size */ uint64_t spool_size; /* current spool size for this device */ uint32_t max_rewind_wait; /* max secs to allow for rewind */ uint32_t max_open_wait; /* max secs to allow for open */ uint32_t padding_size; /* adata block padding -- bytes */ uint32_t file_alignment; /* adata file alignment -- bytes */ uint32_t adata_size; /* adata block size */ uint64_t adata_addr; /* Next adata write address */ uint64_t max_part_size; /* max part size */ uint32_t max_vol_parts_num; /* Max number of parts in a cloud volume */ uint64_t part_size; /* current part size */ uint32_t part; /* current part number (starts at 0) */ /* state ST_FREESPACE_OK is set if free_space is valid */ uint64_t free_space; /* current free space on device */ uint64_t total_space; /* current used space on device */ uint64_t devno; /* device id */ uint64_t min_free_space; /* Minimum free disk space */ int free_space_errno; /* indicates errno getting freespace */ bool truncating; /* if set, we are currently truncating */ utime_t vol_poll_interval; /* interval between polling Vol mount */ DEVRES *device; /* pointer to Device Resource */ VOLRES *vol; /* Pointer to Volume reservation item */ btimer_t *tid; /* timer id */ VOLUME_CAT_INFO VolCatInfo; /* Volume Catalog Information */ VOLUME_LABEL VolHdr; /* Actual volume label (only needs to be correct when writing a new header) */ char pool_name[MAX_NAME_LENGTH]; /* pool name */ char pool_type[MAX_NAME_LENGTH]; /* pool type */ char reserved_pool_name[MAX_NAME_LENGTH]; /* pool name for reserves */ char LoadedVolName[MAX_NAME_LENGTH]; /* Last loaded Volume */ char lock_holder[12]; /* holder of SCSI lock */ bool poll; /* set to poll Volume */ /* Device wait times ***FIXME*** look at durations */ int min_wait; int max_wait; int max_num_wait; int wait_sec; int rem_wait_sec; int num_wait; btime_t last_timer; /* used by read/write/seek to get stats (usec) */ btime_t last_tick; /* contains last read/write time 
(usec) */ utime_t last_stat_timer; /* used by update_permanent_stats() to count throughput */ btime_t DevReadTime; btime_t DevWriteTime; uint64_t DevWriteBytes; uint64_t DevReadBytes; uint64_t usage; /* Drive usage read+write bytes */ uint64_t last_stat_DevWriteBytes; uint64_t last_stat_DevReadBytes; devstatmetrics_t devstatmetrics; /* these are a device metrics for every device */ bstatcollect *devstatcollector; /* a pointer to daemon's statcollector */ BLOCK_CIPHER_CONTEXT *crypto_device_ctx; /* Methods */ btime_t get_timer_count(); /* return the last timer interval (ms) */ void device_generic_init(JCR *jcr, DEVRES *device); int has_cap(int cap) const { return capabilities & cap; } void clear_cap(int cap) { capabilities &= ~cap; } void set_cap(int cap) { capabilities |= cap; } void set_worm(bool is_worm) { m_is_worm = is_worm; } bool do_checksum() const { return (capabilities & CAP_BLOCKCHECKSUM) != 0; } int is_autochanger() const { return capabilities & CAP_AUTOCHANGER; } bool is_virtual_autochanger() const; int requires_mount() const { return capabilities & CAP_REQMOUNT; } int is_removable() const { return capabilities & CAP_REM; } bool is_tape() const { return (dev_type == B_TAPE_DEV || dev_type == B_VTAPE_DEV); } bool is_file() const { return (dev_type == B_FILE_DEV) || is_aligned() || is_dedup() || is_cloud(); } bool is_cloud() const { return dev_type == B_CLOUD_DEV; } bool is_adata() const { return dev_type == B_ADATA_DEV; } bool is_aligned() const { return dev_type == B_ALIGNED_DEV; } bool is_dedup() const { return dev_type == B_DEDUP_DEV; } bool is_null() const { return dev_type == B_NULL_DEV; } bool is_fifo() const { return dev_type == B_FIFO_DEV; } bool is_vtl() const { return dev_type == B_VTL_DEV; } bool is_vtape() const { return dev_type == B_VTAPE_DEV; } bool is_worm() const { return m_is_worm; } bool is_open() const { return m_fd >= 0; } int is_offline() const { return state & ST_OFFLINE; } int is_labeled() const { return state & ST_LABEL; } int is_mounted() const { return state & ST_MOUNTED; } int is_unmountable() const { return ((is_file() && is_removable())); } int num_reserved() const { return m_num_reserved; }; int is_part_spooled() const { return state & ST_PART_SPOOLED; } int have_media() const { return state & ST_MEDIA; } int is_short_block() const { return state & ST_SHORT; } int is_busy() const { return (state & ST_READ) || num_writers || num_reserved(); } bool is_reserved_for_read() const { return num_reserved() && !m_append_reserve; } bool is_ateot() const { return (state & ST_EOF) && (state & ST_EOT) && (state & ST_WEOT); } int at_eof() const { return state & ST_EOF; } int at_eot() const { return state & ST_EOT; } int at_weot() const { return state & ST_WEOT; } int can_append() const { return state & ST_APPEND; } int is_freespace_ok() const { return state & ST_FREESPACE_OK; } int is_nospace() const { return (is_freespace_ok() && (state & ST_NOSPACE)); }; /* * can_write() is meant for checking at the end of a job to see * if we still have a tape (perhaps not if at end of tape * and the job is canceled). 
*/ int can_write() const { return is_open() && can_append() && is_labeled() && !at_weot(); } bool can_read() const { return (state & ST_READ) != 0; } bool can_obtain_block() const { return (m_blocked == BST_NOT_BLOCKED || m_blocked == BST_UNMOUNTED || m_blocked == BST_WAITING_FOR_SYSOP || m_blocked == BST_UNMOUNTED_WAITING_FOR_SYSOP); }; bool waiting_for_mount() const { return (m_blocked == BST_UNMOUNTED || m_blocked == BST_WAITING_FOR_SYSOP || m_blocked == BST_UNMOUNTED_WAITING_FOR_SYSOP); }; bool must_unload() const { return m_unload; }; bool must_load() const { return m_load; }; const char *strerror() const; const char *archive_name() const; const char *name() const; const char *print_name() const; /* Name for display purposes */ void set_ateot(); /* in dev.c */ void set_eot() { state |= ST_EOT; }; void set_eof() { state |= ST_EOF; }; void set_labeled() { state |= ST_LABEL; }; void set_offline() { state |= ST_OFFLINE; }; void set_mounted() { state |= ST_MOUNTED; }; void set_media() { state |= ST_MEDIA; }; void set_short_block() { state |= ST_SHORT; }; void set_freespace_ok() { state |= ST_FREESPACE_OK; } void set_part_spooled(int val) { if (val) state |= ST_PART_SPOOLED; \ else state &= ~ST_PART_SPOOLED; }; void get_freespace(uint64_t *freeval, uint64_t *totalval); /* in dev.c */ void set_freespace(uint64_t freeval, uint64_t totalval, int errnoval, bool valid); /* in dev.c */ virtual bool is_fs_nearly_full(uint64_t threshold); bool is_volume_to_unload() const { \ return m_unload && strcmp(VolHdr.VolumeName, LoadedVolName) == 0; }; void set_load() { m_load = true; }; void set_wait() { m_wait = true; }; void clear_wait() { m_wait = false; }; void set_delay_adata() { m_delay_adata = true;}; void clear_delay_adata() { m_delay_adata = false;}; bool must_wait() const { return m_wait; }; void inc_reserved() { m_num_reserved++; } void set_mounted(int val) { if (val) state |= ST_MOUNTED; \ else state &= ~ST_MOUNTED; }; void dec_reserved() { m_num_reserved--; ASSERT(m_num_reserved>=0); }; void set_read_reserve() { m_append_reserve = false; }; void set_append_reserve() { m_append_reserve = true; }; void clear_labeled() { state &= ~ST_LABEL; }; void clear_offline() { state &= ~ST_OFFLINE; }; void clear_eot() { state &= ~ST_EOT; }; void clear_eof() { state &= ~ST_EOF; }; void clear_opened() { m_fd = -1; }; void clear_mounted() { state &= ~ST_MOUNTED; }; void clear_media() { state &= ~ST_MEDIA; }; void clear_short_block() { state &= ~ST_SHORT; }; void clear_freespace_ok() { state &= ~ST_FREESPACE_OK; }; void clear_unload() { m_unload = false; }; void clear_load() { m_load = false; }; char *bstrerror(void) { return errmsg; }; char *print_errmsg() { return errmsg; }; int32_t get_slot() const { return m_slot; }; void setVolCatInfo(bool valid) { VolCatInfo.is_valid = valid; }; bool haveVolCatInfo() const { return VolCatInfo.is_valid; }; void clearVolCatBytes() { VolCatInfo.VolCatBytes = 0; VolCatInfo.VolCatAmetaBytes = 0; VolCatInfo.VolCatAdataBytes = 0; }; char *getVolCatName() { return VolCatInfo.VolCatName; }; void set_unload(); /* in dev.c */ void clear_volhdr(); /* in dev.c */ ssize_t read(void *buf, size_t len); /* in dev.c */ ssize_t write(const void *buf, size_t len); /* in dev.c */ void edit_mount_codes(POOL_MEM &omsg, const char *imsg); /* in dev.c */ bool offline_or_rewind(DCR *dcr); /* in dev.c */ bool fsr(int num); /* in dev.c */ bool bsr(int num); /* in dev.c */ int32_t get_os_tape_file(); /* in dev.c */ bool scan_dir_for_volume(DCR *dcr); /* in scan.c */ void clrerror(int func); /* in 
dev.c */ void set_slot(int32_t slot); /* in dev.c */ void clear_slot(); /* in dev.c */ bool update_freespace(); /* in dev.c */ virtual bool get_os_device_freespace(); /* in dev.c */ void notify_newvol_in_attached_dcrs(const char *VolumeName); /* in dev.c */ void notify_newfile_in_attached_dcrs();/* in dev.c */ void attach_dcr_to_dev(DCR *dcr); /* in acquire.c */ void detach_dcr_from_dev(DCR *dcr); /* in acquire.c */ uint32_t get_file(); /* in dev.c */ uint32_t get_block_num(); /* in dev.c */ int fd() const { return m_fd; }; bool mount_file(int mount, int dottimout); void dump_volume_label(); DEV_BLOCK *new_block(DCR *dcr, int size=0); /* Virtual functions that can be overridden */ virtual void set_file_size(uint64_t val) { file_size = val;}; virtual uint64_t update_file_size(uint64_t add) { file_size += add; return file_size; }; virtual void setVolCatName(const char *name); virtual void setVolCatStatus(const char *status); virtual void free_dcr_blocks(DCR *dcr); /* in block_util.c */ virtual void new_dcr_blocks(DCR *dcr); /* in block_util.c */ virtual boffset_t get_adata_size(DCR *dcr) { return (boffset_t)0; }; virtual void updateVolCatBytes(uint64_t); /* in dev.c */ virtual void updateVolCatExtraBytes(uint64_t); /* in dev.c */ virtual void updateVolCatBlocks(uint32_t); /* in dev.c */ virtual void updateVolCatWrites(uint32_t); /* in dev.c */ virtual void updateVolCatReads(uint32_t); /* in dev.c */ virtual void updateVolCatReadBytes(uint64_t); /* in dev.c */ virtual void updateVolCatPadding(uint64_t); /* in dev.c */ virtual void updateVolCatHoleBytes(uint64_t); /* in dev.c */ virtual bool setVolCatAdataBytes(uint64_t) { return false; }; virtual void set_volcatinfo_from_dcr(DCR *dcr); virtual bool allow_maxbytes_concurrency(DCR *dcr) { return true; }; virtual bool flush_before_eos(DCR *dcr) { return true; }; virtual void set_nospace(); /* in dev.c */ virtual void set_append(); /* in dev.c */ virtual void set_read(); /* in dev.c */ virtual void clear_nospace(); /* in dev.c */ virtual void clear_append(); /* in dev.c */ virtual void clear_read(); /* in dev.c */ virtual int device_specific_init(JCR *jcr, DEVRES *device); virtual void device_specific_open(DCR *dcr) { return; }; virtual int device_specific_close(DCR *dcr); virtual int d_ioctl(int fd, ioctl_req_t request, char *mt_com=NULL); virtual int d_open(const char *pathname, int flags); virtual int d_close(int fd); virtual ssize_t d_read(int fd, void *buffer, size_t count); virtual ssize_t d_write(int fd, const void *buffer, size_t count); virtual boffset_t lseek(DCR *dcr, boffset_t offset, int whence); virtual bool update_pos(DCR *dcr); virtual bool rewind(DCR *dcr); virtual bool truncate(DCR *dcr); virtual int truncate_cache(DCR *dcr, const char *VolName, int64_t *size, POOLMEM *&msg) { return 0; }; virtual bool get_cloud_volumes_list(DCR* dcr, alist *volumes, POOLMEM *&err) { pm_strcpy(err, _("Not implemented")); return false;}; virtual bool get_cloud_volume_parts_list(DCR *dcr, const char *VolumeName, ilist *parts, POOLMEM *&err) { pm_strcpy(err, _("Not implemented")); return false; }; virtual uint32_t get_cloud_upload_transfer_status(POOL_MEM &msg, bool verbose) { pm_strcpy(msg, _("Not implemented")); return 0; }; virtual void get_api_cloud_upload_transfer_status(OutputWriter &, bool) {}; virtual uint32_t get_cloud_download_transfer_status(POOL_MEM &msg, bool verbose) { pm_strcpy(msg, _("Not implemented")); return 0; }; virtual void get_api_cloud_download_transfer_status(OutputWriter &, bool) {}; virtual bool upload_cache(DCR *dcr, 
const char *VolName, uint32_t truncate, POOLMEM *&err) {return true; }; virtual bool open_device(DCR *dcr, int omode) = 0; virtual bool open_next_part(DCR *dcr); virtual bool close(DCR *dcr); /* in dev.c */ virtual bool sync_data(DCR *dcr); /* in dev.c */ virtual void term(DCR *dcr); /* in dev.c */ virtual bool close_part(DCR *dcr); virtual bool reposition(DCR *dcr, uint64_t raddr); virtual bool mount(int timeout); virtual bool unmount(int timeout); virtual int read_dev_volume_label(DCR *dcr); /* in label.c */ virtual bool write_volume_label_to_block(DCR *dcr); /* in label.c */ virtual bool write_volume_label(DCR *dcr, const char *VolName, const char *PoolName, bool relabel, bool no_prelabel); /* in label.c */ virtual bool write_volume_label_to_dev(DCR *dcr, const char *VolName, const char *PoolName, bool relabel, bool no_prelabel); /* in label.c */ virtual bool rewrite_volume_label(DCR *dcr, bool recycle); /* in label.c */ virtual bool is_eod_valid(DCR *dcr); /* in dev.c */ virtual bool eod(DCR *dcr); /* in dev.c */ virtual bool weof(DCR *dcr, int num); /* in dev.c */ virtual bool end_of_volume(DCR *dcr) { return true; }; virtual bool start_of_job(DCR *dcr) {return true; }; virtual bool end_of_job(DCR *dcr, uint32_t truncate) {return true; }; virtual bool is_indexed() { return true; }; virtual void set_ateof(); /* in dev.c */ /* Methods below are responsible for managing * the append and immutable flags on device-specific volumes */ virtual bool set_append_only(const char *vol_name, POOLMEM **error) { pm_strcpy(error, _("Not Implemented")); return false; }; virtual bool clear_append_only(const char *vol_name, POOLMEM **error) { pm_strcpy(error, _("Not Implemented")); return false; }; virtual bool set_immutable(const char *vol_name, POOLMEM **error) { pm_strcpy(error, _("Not Implemented")); return false; }; virtual bool clear_immutable(const char *vol_name, POOLMEM **error) { pm_strcpy(error, _("Not Implemented")); return false; }; virtual bool check_volume_protection_time(const char *vol_name) { return true; }; virtual bool check_for_immutable(const char *vol_name) { return false; }; virtual bool check_for_read_only(int fd, const char *vol_name) { return false; }; virtual int set_writable(int fd, const char *vol_name, POOLMEM **error) { pm_strcpy(error, "Not implemented"); errno=ENOSYS; return -1;}; virtual int set_readonly(int fd, const char *vol_name, POOLMEM **error) { pm_strcpy(error, "Not implemented"); errno=ENOSYS; return -1;}; virtual int set_atime(int fd, const char *vol_name, btime_t val, POOLMEM **error) { pm_strcpy(error, "Not implemented"); errno=ENOSYS; return -1;}; virtual int use_protect() { return 0; }; virtual int use_volume_encryption(); virtual const char *print_type() = 0; /* in dev.c */ virtual const char *print_driver_type() { return "";}; virtual const char *print_full_type() { return print_type();}; virtual DEVICE *get_dev(DCR *dcr); /* in dev.c */ virtual uint32_t get_hi_addr(); /* in dev.c */ virtual uint32_t get_low_addr(); /* in dev.c */ virtual uint32_t get_hi_addr(boffset_t addr); /* in dev.c */ virtual uint32_t get_low_addr(boffset_t addr); /* in dev.c */ virtual uint64_t get_full_addr(); /* in dev.c */ virtual uint64_t get_full_addr(boffset_t addr); /* in dev.c */ virtual uint64_t get_full_addr(uint32_t hi, uint32_t low); /* in dev.c */ virtual char *print_addr(char *buf, int32_t maxlen); virtual char *print_addr(char *buf, int32_t maxlen, boffset_t addr); virtual bool do_size_checks(DCR *dcr, DEV_BLOCK *block); /* in dev.c */ virtual bool 
get_tape_worm(DCR *dcr); virtual bool get_tape_alerts(DCR *dcr); virtual void show_tape_alerts(DCR *dcr, alert_list_type type, alert_list_which which, alert_cb alert_callback); virtual int delete_alerts(); /* These could probably be made tape_dev only */ virtual bool bsf(int count) { return true; } virtual bool fsf(int num) { return true; } virtual bool offline(DCR *dcr) { return true; } virtual void lock_door() { return; } virtual void unlock_door() { return; } virtual bool write_adata_label(DCR *dcr, DEV_RECORD *rec) { return false; }; virtual void write_adata(DCR *dcr, DEV_RECORD *rec) { return; }; virtual void write_cont_adata(DCR *dcr, DEV_RECORD *rec) { return; }; virtual int write_adata_rechdr(DCR *dcr, DEV_RECORD *rec) { return -1; }; virtual bool have_adata_header(DCR *dcr, DEV_RECORD *rec, int32_t FileIndex, int32_t Stream, uint32_t VolSessionId) { return false; }; virtual bool read_adata_record_header(DCR *dcr, DEV_BLOCK *block, DEV_RECORD *rec, bool *firstcall) { return false; }; virtual void read_adata_block_header(DCR *dcr) { return; }; virtual int read_adata(DCR *dcr, DEV_RECORD *rec) { return -1; }; virtual void select_data_stream(DCR *dcr, DEV_RECORD *rec) { return; }; virtual bool flush_block(DCR *dcr); /* in block_util.c */ virtual bool do_pre_write_checks(DCR *dcr, DEV_RECORD *rec) { return true; }; virtual void register_metrics(bstatcollect *collector); virtual void dbg_print(FILE *fp) { return; }; /* * Locking and blocking calls */ #ifdef DEV_DEBUG_LOCK virtual void dbg_Lock(const char *, int); /* in lock.c */ virtual void dbg_Unlock(const char *, int); /* in lock.c */ virtual void dbg_rLock(const char *, int, bool locked=false); /* in lock.c */ virtual void dbg_rUnlock(const char *, int); /* in lock.c */ #else virtual void Lock(); /* in lock.c */ virtual void Unlock(); /* in lock.c */ virtual void rLock(bool locked=false); /* in lock.c */ virtual void rUnlock(); /* in lock.c */ #endif #ifdef SD_DEBUG_LOCK virtual void dbg_Lock_acquire(const char *, int); /* in lock.c */ virtual void dbg_Unlock_acquire(const char *, int); /* in lock.c */ virtual void dbg_Lock_read_acquire(const char *, int); /* in lock.c */ virtual void dbg_Unlock_read_acquire(const char *, int); /* in lock.c */ virtual void dbg_Lock_VolCatInfo(const char *, int); /* in lock.c */ virtual void dbg_Unlock_VolCatInfo(const char *, int); /* in lock.c */ #else virtual void Lock_acquire(); /* in lock.c */ virtual void Unlock_acquire(); /* in lock.c */ virtual void Lock_read_acquire(); /* in lock.c */ virtual void Unlock_read_acquire(); /* in lock.c */ virtual void Lock_VolCatInfo(); /* in lock.c */ virtual void Unlock_VolCatInfo(); /* in lock.c */ #endif virtual void dblock(int why); /* in lock.c */ virtual void dunblock(bool locked=false); /* in lock.c */ /* use obtain_device_block() macro */ bool _obtain_device_block(const char *file, int line, bsteal_lock_t *hold, int retry, int state); int init_mutex(); /* in lock.c */ int init_acquire_mutex(); /* in lock.c */ int init_read_acquire_mutex(); /* in lock.c */ int init_volcat_mutex(); /* in lock.c */ int init_dcrs_mutex(); /* in lock.c */ int init_freespace_mutex(); /* in lock.c */ void set_mutex_priorities(); /* in lock.c */ int next_vol_timedwait(const struct timespec *timeout); /* in lock.c */ void Lock_dcrs() { P(dcrs_mutex); }; void Unlock_dcrs() { V(dcrs_mutex); }; bool is_device_unmounted(); /* in lock.c */ void set_blocked(int block) { m_blocked = block; }; int blocked() const { return m_blocked; }; bool is_blocked() const { return m_blocked != 
BST_NOT_BLOCKED; }; const char *print_blocked() const; /* in dev.c */ void open_tape_device(DCR *dcr, int omode); /* in dev.c */ void open_file_device(DCR *dcr, int omode); /* in dev.c */ virtual int rehydrate_record(DCR *dcr, DEV_RECORD *rec) { return 1;}; virtual bool setup_dedup_rehydration_interface(DCR *dcr) { return false; }; virtual void free_dedup_rehydration_interface(DCR *dcr) { }; virtual GetMsg *get_msg_queue(JCR *jcr, BSOCK *sock, int32_t bufsize) { return New(GetMsg(jcr, sock, NULL, bufsize)); }; virtual void *dedup_get_dedupengine() { return NULL; }; virtual void dedup_get_status(STATUS_PKT *sp, int options) { }; virtual bool dedup_cmd(JCR *jcr) { return false; }; virtual const char *dedup_get_dedupengine_name() { return "not_a_dedupengine"; }; private: bool mount_tape(int mount, int dotimeout); /* in dev.c */ protected: void set_mode(int omode); /* in dev.c */ bool load_encryption_key(DCR *dcr, const char *operation, const char *volume_name, /* in dev.c */ uint32_t *enc_cipher_key_size, unsigned char *enc_cipher_key, uint32_t *master_keyid_size, unsigned char *master_keyid); }; inline const char *DEVICE::strerror() const { return errmsg; } inline const char *DEVICE::archive_name() const { return dev_name; } inline const char *DEVICE::print_name() const { return NPRT(prt_name); } #define CHECK_BLOCK_NUMBERS true #define NO_BLOCK_NUMBER_CHECK false /* * Device Context (or Control) Record. * There is one of these records for each Job that is using * the device. Items in this record are "local" to the Job and * do not affect other Jobs. Note, a job can have multiple * DCRs open, each pointing to a different device. * Normally, there is only one JCR thread per DCR. However, the * big and important exception to this is when a Job is being * canceled. At that time, there may be two threads using the * same DCR. Consequently, when creating/attaching/detaching * and freeing the DCR we must lock it (m_mutex). 
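 *
 * Illustrative append-side sketch, using only methods declared below;
 * "rec" and the loop condition are placeholders, and error handling,
 * spooling and cancel checks are left out:
 *
 *    dcr->set_writing();
 *    if (dcr->mount_next_write_volume()) {      reserve and mount a Volume
 *       while (more_records) {
 *          dcr->write_record(rec);              append one DEV_RECORD
 *       }
 *       dcr->write_final_block_to_device();     flush the last buffered block
 *    }
 *    dcr->release_volume();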
*/ class DCR { private: bool m_dev_locked; /* set if dev already locked */ int m_dev_lock; /* non-zero if rLock already called */ bool m_reserved; /* set if reserved device */ bool m_found_in_use; /* set if a volume found in use */ bool m_writing; /* set when DCR used for writing */ public: dlink dev_link; /* link to attach to dev */ JCR *jcr; /* pointer to JCR */ DEVICE * volatile dev; /* pointer to device */ DEVICE *adata_dev; /* pointer to adata dev */ DEVICE *ameta_dev; /* pointer to ameta_dev */ DEVRES *device; /* pointer to device resource */ DEV_BLOCK *block; /* pointer to block */ DEV_BLOCK *adata_block; /* aligned data block */ DEV_BLOCK *ameta_block; /* aligned meta data block */ DEV_RECORD *rec; /* pointer to record */ BSR *dest_position; /* BSR used with do_interactive_reposition */ VOL_LIST *CurrentVol; /* From JCR::VolList, freed at the end, passed to records */ alist *uploads; /* Current upload transfers to the cloud */ alist *downloads; /* Current donwload transfers from the cloud */ pthread_t tid; /* Thread running this dcr */ int spool_fd; /* fd if spooling */ int crc32_type; /* Handle bad CRC32 on Solaris for volumes written with 8.4 */ bool adata_label; /* writing adata label block */ bool adata; /* writing aligned data block */ bool spool_data; /* set to spool data */ bool spooling; /* set when actually spooling */ bool despooling; /* set when despooling */ bool despool_wait; /* waiting for despooling */ bool NewVol; /* set if new Volume mounted */ bool WroteVol; /* set if Volume written */ bool NewFile; /* set when EOF written */ bool reserved_volume; /* set if we reserved a volume */ bool any_volume; /* Any OK for dir_find_next... */ bool attached_to_dev; /* set when attached to dev */ bool keep_dcr; /* do not free dcr in release_dcr */ bool no_mount_request; /* do not submit any mount request */ bool reading_label; /* Reading volume label */ bool discard_invalid_records; /* we should not try to assemble invalid records */ bool force_update_volume_info; /* update the volume information, no matter the job type */ bool session_interactive; /* set if we allow to seek in the restore stream */ bool do_interactive_reposition; /* Set if we want to seek */ int32_t FileMedia_FI; /* Last File Index used to generate a FileMedia record */ uint64_t FileMedia_Off; /* Last File Offset used to generate a FileMedia record */ uint64_t max_index_size; /* Max amount of data between two indexes */ uint64_t index_size; /* Amount of data without index */ uint32_t VolFirstIndex; /* First file index this Volume */ uint32_t VolLastIndex; /* Last file index this Volume */ int32_t FileIndex; /* Current File Index */ uint64_t StartAddr; /* Starting write addr */ uint64_t EndAddr; /* Ending write addr */ int64_t VolMediaId; /* MediaId */ int64_t job_spool_size; /* Current job spool size */ int64_t max_job_spool_size; /* Max job spool size */ char VolumeName[MAX_NAME_LENGTH]; /* Volume name */ char pool_name[MAX_NAME_LENGTH]; /* pool name */ char pool_type[MAX_NAME_LENGTH]; /* pool type */ char media_type[MAX_NAME_LENGTH]; /* media type */ char dev_name[MAX_NAME_LENGTH]; /* dev name */ int Copy; /* identical copy number */ int Stripe; /* RAIT stripe */ VOLUME_CAT_INFO VolCatInfo; /* Catalog info for desired volume */ /* *** BEEF *** */ uint32_t crc32(unsigned char *buf, int len, uint32_t expected_crc); /* *** SIR *** */ bool need_to_reposition(); void set_interactive_reposition(BSR *); BSR *clear_interactive_reposition(); void set_session_interactive(); /* Methods */ void set_no_mount_request() { 
no_mount_request = true; }; /* Just fail in case of mount request */ void set_dev(DEVICE *ndev) { dev = ndev; ameta_dev = ndev; }; void set_adata() { if (adata_dev) {dev = adata_dev; block = adata_block;} }; void set_ameta() { dev = ameta_dev; block = ameta_block; }; void set_dev_locked() { m_dev_locked = true; }; void set_writing() { m_writing = true; }; void clear_writing() { m_writing = false; }; bool is_reading() const { return !m_writing; }; bool is_writing() const { return m_writing; }; void clear_dev_locked() { m_dev_locked = false; }; void inc_dev_lock() { m_dev_lock++; }; void dec_dev_lock() { m_dev_lock--; }; bool found_in_use() const { return m_found_in_use; }; void set_found_in_use() { m_found_in_use = true; }; void clear_found_in_use() { m_found_in_use = false; }; bool is_reserved() const { return m_reserved; }; bool is_dev_locked() const { return m_dev_locked; } void setVolCatInfo(bool valid) { VolCatInfo.is_valid = valid; }; bool haveVolCatInfo() const { return VolCatInfo.is_valid; }; void setVolCatName(const char *name) { bstrncpy(VolCatInfo.VolCatName, name, sizeof(VolCatInfo.VolCatName)); setVolCatInfo(false); }; char *getVolCatName() { return VolCatInfo.VolCatName; }; bool write_final_block_to_device() { return write_block_to_device(true); }; /* Methods in autochanger.c */ bool is_virtual_autochanger(); /* Methods in lock.c */ void dblock(int why) { dev->dblock(why); } /* Methods in record.c */ bool write_record(DEV_RECORD *rec); /* Methods in read_record.c */ bool read_records( bool record_cb(DCR *dcr, DEV_RECORD *rec), bool mount_cb(DCR *dcr)); bool try_repositioning(DEV_RECORD *rec); BSR *position_to_first_file(); /* Methods in reserve.c */ void clear_reserved(); void set_reserved_for_read(); void set_reserved_for_append(); void unreserve_device(bool locked); /* Methods in vol_mgr.c */ bool can_i_use_volume(); bool can_i_write_volume(); /* Methods in mount.c */ bool mount_next_write_volume(); bool mount_next_read_volume(); void mark_volume_in_error(); void mark_volume_read_only(); void mark_volume_not_inchanger(); void volume_is_unavailable(); int try_autolabel(bool opened); bool find_a_volume(); bool is_suitable_volume_mounted(); int check_volume_label(bool &ask, bool &autochanger); void release_volume(); void do_swapping(bool is_writing); bool do_unload(); bool do_load(bool is_writing); bool is_tape_position_ok(); /* Methods in block.c */ bool write_block_to_device(bool final=false); bool write_block_to_dev(); bool read_block_from_device(bool check_block_numbers); bool read_block_from_dev(bool check_block_numbers); }; /* Get some definition of function to position * to the end of the medium in MTEOM. System * dependent. Arrgggg! */ #ifndef MTEOM #ifdef MTSEOD #define MTEOM MTSEOD #endif #ifdef MTEOD #undef MTEOM #define MTEOM MTEOD #endif #endif #endif /* __DEV_H */ bacula-15.0.3/src/stored/ebcdic.c0000644000175000017500000001316414771010173016333 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * Taken from the public domain ansitape program for * integration into Bacula. KES - Mar 2005 */ /* Mapping of EBCDIC codes to ASCII equivalents. */ static char to_ascii_table[256] = { '\000', '\001', '\002', '\003', '\234', '\011', '\206', '\177', '\227', '\215', '\216', '\013', '\014', '\015', '\016', '\017', '\020', '\021', '\022', '\023', '\235', '\205', '\010', '\207', '\030', '\031', '\222', '\217', '\034', '\035', '\036', '\037', '\200', '\201', '\202', '\203', '\204', '\012', '\027', '\033', '\210', '\211', '\212', '\213', '\214', '\005', '\006', '\007', '\220', '\221', '\026', '\223', '\224', '\225', '\226', '\004', '\230', '\231', '\232', '\233', '\024', '\025', '\236', '\032', '\040', '\240', '\241', '\242', '\243', '\244', '\245', '\246', '\247', '\250', '\133', '\056', '\074', '\050', '\053', '\041', '\046', '\251', '\252', '\253', '\254', '\255', '\256', '\257', '\260', '\261', '\135', '\044', '\052', '\051', '\073', '\136', '\055', '\057', '\262', '\263', '\264', '\265', '\266', '\267', '\270', '\271', '\174', '\054', '\045', '\137', '\076', '\077', '\272', '\273', '\274', '\275', '\276', '\277', '\300', '\301', '\302', '\140', '\072', '\043', '\100', '\047', '\075', '\042', '\303', '\141', '\142', '\143', '\144', '\145', '\146', '\147', '\150', '\151', '\304', '\305', '\306', '\307', '\310', '\311', '\312', '\152', '\153', '\154', '\155', '\156', '\157', '\160', '\161', '\162', '\313', '\314', '\315', '\316', '\317', '\320', '\321', '\176', '\163', '\164', '\165', '\166', '\167', '\170', '\171', '\172', '\322', '\323', '\324', '\325', '\326', '\327', '\330', '\331', '\332', '\333', '\334', '\335', '\336', '\337', '\340', '\341', '\342', '\343', '\344', '\345', '\346', '\347', '\173', '\101', '\102', '\103', '\104', '\105', '\106', '\107', '\110', '\111', '\350', '\351', '\352', '\353', '\354', '\355', '\175', '\112', '\113', '\114', '\115', '\116', '\117', '\120', '\121', '\122', '\356', '\357', '\360', '\361', '\362', '\363', '\134', '\237', '\123', '\124', '\125', '\126', '\127', '\130', '\131', '\132', '\364', '\365', '\366', '\367', '\370', '\371', '\060', '\061', '\062', '\063', '\064', '\065', '\066', '\067', '\070', '\071', '\372', '\373', '\374', '\375', '\376', '\377' }; /* Mapping of ASCII codes to EBCDIC equivalents. 
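   The table below maps the other direction; ascii_to_ebcdic() and
   ebcdic_to_ascii() at the end of this file simply index one table
   entry per input byte.  Illustrative use, with invented buffer and
   label names:

      char ebc[12], txt[12];
      ascii_to_ebcdic(ebc, (char *)"VOL1BACULA01", 12);    to EBCDIC
      ebcdic_to_ascii(txt, ebc, 12);                       back to ASCII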
*/ static char to_ebcdic_table[256] = { '\000', '\001', '\002', '\003', '\067', '\055', '\056', '\057', '\026', '\005', '\045', '\013', '\014', '\015', '\016', '\017', '\020', '\021', '\022', '\023', '\074', '\075', '\062', '\046', '\030', '\031', '\077', '\047', '\034', '\035', '\036', '\037', '\100', '\117', '\177', '\173', '\133', '\154', '\120', '\175', '\115', '\135', '\134', '\116', '\153', '\140', '\113', '\141', '\360', '\361', '\362', '\363', '\364', '\365', '\366', '\367', '\370', '\371', '\172', '\136', '\114', '\176', '\156', '\157', '\174', '\301', '\302', '\303', '\304', '\305', '\306', '\307', '\310', '\311', '\321', '\322', '\323', '\324', '\325', '\326', '\327', '\330', '\331', '\342', '\343', '\344', '\345', '\346', '\347', '\350', '\351', '\112', '\340', '\132', '\137', '\155', '\171', '\201', '\202', '\203', '\204', '\205', '\206', '\207', '\210', '\211', '\221', '\222', '\223', '\224', '\225', '\226', '\227', '\230', '\231', '\242', '\243', '\244', '\245', '\246', '\247', '\250', '\251', '\300', '\152', '\320', '\241', '\007', '\040', '\041', '\042', '\043', '\044', '\025', '\006', '\027', '\050', '\051', '\052', '\053', '\054', '\011', '\012', '\033', '\060', '\061', '\032', '\063', '\064', '\065', '\066', '\010', '\070', '\071', '\072', '\073', '\004', '\024', '\076', '\341', '\101', '\102', '\103', '\104', '\105', '\106', '\107', '\110', '\111', '\121', '\122', '\123', '\124', '\125', '\126', '\127', '\130', '\131', '\142', '\143', '\144', '\145', '\146', '\147', '\150', '\151', '\160', '\161', '\162', '\163', '\164', '\165', '\166', '\167', '\170', '\200', '\212', '\213', '\214', '\215', '\216', '\217', '\220', '\232', '\233', '\234', '\235', '\236', '\237', '\240', '\252', '\253', '\254', '\255', '\256', '\257', '\260', '\261', '\262', '\263', '\264', '\265', '\266', '\267', '\270', '\271', '\272', '\273', '\274', '\275', '\276', '\277', '\312', '\313', '\314', '\315', '\316', '\317', '\332', '\333', '\334', '\335', '\336', '\337', '\352', '\353', '\354', '\355', '\356', '\357', '\372', '\373', '\374', '\375', '\376', '\377' }; /* * Convert from ASCII to EBCDIC */ void ascii_to_ebcdic(char *dst, char *src, int count) { while (count--) { *dst++ = to_ebcdic_table[0377 & *src++]; } } /* * Convert from EBCDIC to ASCII */ void ebcdic_to_ascii(char *dst, char *src, int count) { while (count--) { *dst++ = to_ascii_table[0377 & *src++]; } } bacula-15.0.3/src/stored/cloud_parts.c0000644000175000017500000003666014771010173017447 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * Routines for writing Cloud drivers * * Written by Kern Sibbald, May MMXVI * */ #include "cloud_parts.h" bool operator==(const cloud_part& lhs, const cloud_part& rhs) { return (lhs.index == rhs.index && lhs.mtime == rhs.mtime && lhs.size == rhs.size /*not just yet */ /*&& (strcmp(reinterpret_cast(lhs.hash64), reinterpret_cast(rhs.hash64)) == 0)*/ ); } bool operator!=(const cloud_part& lhs, const cloud_part& rhs) { return !operator==(lhs, rhs); } bool operator==(const cloud_part& lhs, const uint32_t& rhs) { return (lhs.index == rhs); } bool operator!=(const cloud_part& lhs, const uint32_t& rhs) { return !operator==(lhs, rhs); } /* compares the all cloud_part, according to the operator==() above*/ bool list_contains_part(ilist *parts, cloud_part *p) { if (parts && p) { cloud_part *ap = (cloud_part *)parts->get(p->index); if (ap && *ap == *p) { return true; } } return false; } /* only checks if the part_idx part exists in the parts lst*/ bool list_contains_part(ilist *parts, uint32_t part_idx) { if (parts && part_idx > 0) { return parts->get(part_idx) != NULL; } return false; } bool identical_lists(ilist *parts1, ilist *parts2) { if (parts1 && parts2) { /* Using indexed ilist forces us to treat it differently (foreach not working b.e.) */ int max_size = parts1->last_index(); if (parts2->last_index() > parts1->last_index()) { max_size = parts2->last_index(); } for(int index=0; index<=max_size; index++ ) { cloud_part *p1 = (cloud_part *)parts1->get(index); cloud_part *p2 = (cloud_part *)parts2->get(index); if (!p1) { if (p2) return false; } else if (!p2) { if (p1) return false; } else if (*p1 != *p2) { return false; } } return true; } return false; } /* cloud_parts present in source but not in dest are appended to diff. * there's no cloud_part copy made. * Diff only holds references and shoudn't own them */ bool diff_lists(ilist *source, ilist *dest, ilist *diff) { if (source && dest && diff) { /* Using indexed list forces us to treat it differently (foreach not working b.e.) */ int max_size = source->last_index(); if (dest->last_index() > source->last_index()) { max_size = dest->last_index(); } for(int index=0; index<=max_size; index++ ) { cloud_part *p1 = (cloud_part *)source->get(index); cloud_part *p2 = (cloud_part *)dest->get(index); if (!p1) { if (p2) diff->put(index, p1); } else if (!p2) { if (p1) diff->put(index, p1); } else if (*p1 != *p2) { diff->put(index, p1); } } return true; } return false; } /*================================================= * cloud_proxy definitions ================================================= */ cloud_proxy * cloud_proxy::m_pinstance=NULL; uint64_t cloud_proxy::m_count=0; /* hash table node structure */ typedef struct { hlink hlnk; ilist *parts_lst; char *key_name; } VolHashItem; /* constructor * size: the default hash size * owns: determines if the ilists own the cloud_parts or not */ cloud_proxy::cloud_proxy(uint32_t size, bool owns) { pthread_mutex_init(&m_mutex, 0); VolHashItem *hitem=NULL; m_hash = New(htable(hitem, &hitem->hlnk, size)); m_owns = owns; } /* destructor * we need to go thru each htable node and manually delete * the associated alist before deleting the htable itself */ cloud_proxy::~cloud_proxy() { VolHashItem *hitem; foreach_htable(hitem, m_hash) { delete hitem->parts_lst; free (hitem->key_name); } delete m_hash; pthread_mutex_destroy(&m_mutex); } /* insert the cloud_part into the proxy. 
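 * Each call stores its own copy of the data (a fresh cloud_part is
 * malloc'ed), and an existing entry at the same index is freed and
 * replaced.  Minimal usage sketch, with an illustrative volume name
 * and placeholder mtime/size values:
 *
 *    cloud_proxy *prox = cloud_proxy::get_instance();
 *    prox->set("Volume0001", 1, now, part_size, NULL);    NULL hash is zeroed
 *    cloud_part *p = prox->get("Volume0001", 1);
 *    prox->release();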
* create the volume ilist if necessary */ bool cloud_proxy::set(const char *volume, cloud_part *part) { if (part) { return set(volume, part->index, part->mtime, part->size, part->hash64); } return false; } bool cloud_proxy::set(const char *volume, uint32_t index, utime_t mtime, uint64_t size, unsigned char* hash64) { if (!volume || index < 1) { return false; } lock_guard lg(m_mutex); /* allocate new part */ cloud_part *part = (cloud_part*) malloc(sizeof(cloud_part)); /* fill it with result info from the transfer */ part->index = index; part->mtime = mtime; part->size = size; if (hash64) { memcpy(part->hash64, hash64, 64); } else { bmemzero(part->hash64, 64); } VolHashItem *hitem = (VolHashItem*)m_hash->lookup(const_cast(volume)); if (hitem) { /* when the node already exist, put the cloud_part into the vol list */ /* free the existing part */ if (hitem->parts_lst->get(index)) { free(hitem->parts_lst->get(index)); } hitem->parts_lst->put(index, part); return true; } else { /* if the node doesnt exist for this key, create it */ ilist *new_lst = New(ilist(100,m_owns)); new_lst->put(part->index, part); /* use hashtable helper malloc */ VolHashItem *new_hitem = (VolHashItem *) m_hash->hash_malloc(sizeof(VolHashItem)); new_hitem->parts_lst = new_lst; new_hitem->key_name = bstrdup(volume); return m_hash->insert(new_hitem->key_name, new_hitem); } return false; } /* retrieve the cloud_part for the volume name at index part idx * can return NULL */ cloud_part *cloud_proxy::get(const char *volume, uint32_t index) { lock_guard lg(m_mutex); if (volume) { VolHashItem *hitem = (VolHashItem *)m_hash->lookup(const_cast(volume)); if (hitem) { ilist * ilst = hitem->parts_lst; if (ilst) { return (cloud_part*)ilst->get(index); } } } return NULL; } uint64_t cloud_proxy::get_size(const char *volume, uint32_t part_idx) { cloud_part *cld_part = get(volume, part_idx); return cld_part ? 
cld_part->size:0; } /* Check if the volume entry exists and return true if it's the case */ bool cloud_proxy::volume_lookup(const char *volume) { lock_guard lg(m_mutex); return ( (volume) && m_hash->lookup(const_cast(volume)) ); } /* reset the volume list content with the content of part_list */ bool cloud_proxy::reset(const char *volume, ilist *part_list) { lock_guard lg(m_mutex); if (volume && part_list) { VolHashItem *hitem = (VolHashItem*)m_hash->lookup(const_cast(volume)); if (hitem) { /* when the node already exist, recycle it */ delete hitem->parts_lst; } else { /* create the node */ hitem = (VolHashItem *) m_hash->hash_malloc(sizeof(VolHashItem)); hitem->key_name = bstrdup(volume); if (!m_hash->insert(hitem->key_name, hitem)) { return false; } } /* re-create the volume list */ hitem->parts_lst = New(ilist(100, m_owns)); /* feed it with cloud_part elements */ for(int index=1; index<=part_list->last_index(); index++ ) { cloud_part *part = (cloud_part *)part_list->get(index); if (part) { hitem->parts_lst->put(index, part); } } return true; } return false; } uint32_t cloud_proxy::last_index(const char *volume) { lock_guard lg(m_mutex); if (volume) { VolHashItem *hitem = (VolHashItem*)m_hash->lookup(const_cast(volume)); if (hitem && hitem->parts_lst) { return hitem->parts_lst->last_index(); } } return 0; } ilist *cloud_proxy::exclude(const char *volume, ilist *exclusion_lst) { lock_guard lg(m_mutex); if (volume && exclusion_lst) { VolHashItem *hitem = (VolHashItem*)m_hash->lookup(const_cast(volume)); if (hitem) { ilist *res_lst = New(ilist(100, false)); if (diff_lists(hitem->parts_lst, exclusion_lst, res_lst)) { return res_lst; } } } return NULL; } static pthread_mutex_t singleton_mutex = PTHREAD_MUTEX_INITIALIZER; cloud_proxy *cloud_proxy::get_instance() { lock_guard lg(singleton_mutex); if (!m_pinstance) { m_pinstance = New(cloud_proxy()); } ++m_count; return m_pinstance; } void cloud_proxy::release() { lock_guard lg(singleton_mutex); if (--m_count == 0) { delete m_pinstance; m_pinstance = NULL; } } void cloud_proxy::dump() { VolHashItem *hitem; foreach_htable(hitem, m_hash) { Dmsg2(0, "proxy (%d) Volume:%s\n", m_hash->size(), hitem->hlnk.key.key); for(int index=0; index<=hitem->parts_lst->last_index(); index++ ) { cloud_part *p = (cloud_part *)hitem->parts_lst->get(index); if (p) { Dmsg1(0, "part.%d\n", p->index); } } } } //================================================= #ifdef TEST_PROGRAM int main (int argc, char *argv[]) { pthread_attr_t attr; mark_heap(); setlocale(LC_ALL, ""); bindtextdomain("bacula", LOCALEDIR); textdomain("bacula"); init_stack_dump(); my_name_is(argc, argv, "cloud_parts_test"); init_msg(NULL, NULL); daemon_start_time = time(NULL); set_thread_concurrency(150); lmgr_init_thread(); /* initialize the lockmanager stack */ pthread_attr_init(&attr); berrno be; printf("Test0\n"); { cloud_part p1, p2, p3, p4; p1.index = 1; p1.mtime = 1000; p1.size = 1000; p2.index = 2; p2.mtime = 2000; p2.size = 2020; p3.index = 3; p3.mtime = 3000; p3.size = 3030; p4.index = 4; p4.mtime = 4000; p4.size = 4040; ilist l(10,false); l.put(p1.index,&p1); l.put(p2.index,&p2); l.put(p3.index,&p3); ASSERT(list_contains_part(&l, &p1)); ASSERT(list_contains_part(&l, &p2)); ASSERT(list_contains_part(&l, &p3)); ASSERT(!list_contains_part(&l, &p4)); ASSERT(list_contains_part(&l, 3)); ASSERT(list_contains_part(&l, 1)); ASSERT(list_contains_part(&l, 2)); ASSERT(!list_contains_part(&l, 4)); } printf("Test1\n"); { cloud_part p1, p2, p3; p1.index = 1; p1.mtime = 1000; p1.size = 1000; p2.index = 2; 
p2.mtime = 2000; p2.size = 2020; p3.index = 3; p3.mtime = 3000; p3.size = 3030; ilist cloud(10,false); cloud.put(p1.index, &p1); cloud.put(p2.index, &p2); ilist cache(10,false); cache.put(p3.index, &p3); ASSERT(!identical_lists(&cloud, &cache)); cache.put(p1.index, &p1); ASSERT(!identical_lists(&cloud, &cache)); } printf("Test2\n"); { cloud_part p1, p2, p3, p4; p1.index = 1; p1.mtime = 1000; p1.size = 1000; p2.index = 2; p2.mtime = 2000; p2.size = 2020; p3.index = 3; p3.mtime = 3000; p3.size = 3030; p4.index = 4; p4.mtime = 4000; p4.size = 4040; ilist cloud(10,false); cloud.put(p1.index, &p1); cloud.put(p2.index, &p2); ilist cache(10,false); cloud.put(p3.index, &p3); cloud.put(p4.index, &p4); ASSERT(!identical_lists(&cloud, &cache)); cache.put(p1.index, &p1); ASSERT(!identical_lists(&cloud, &cache)); } printf("Test3\n"); { cloud_part p1, p2, p3; p1.index = 1; p1.mtime = 1000; p1.size = 1000; p2.index = 2; p2.mtime = 2000; p2.size = 2020; p3.index = 3; p3.mtime = 3000; p3.size = 3030; ilist cloud(10,false); cloud.put(p1.index, &p1); cloud.put(p2.index, &p2); cloud.put(p3.index, &p3); ilist cache(10,false); cache.put(p3.index, &p3); cache.put(p1.index, &p1); cache.put(p2.index, &p2); ASSERT(identical_lists(&cloud, &cache)); } printf("Test4\n"); { cloud_part p1, p2, p3; p1.index = 1; p1.mtime = 1000; p1.size = 1000; p2.index = 2; p2.mtime = 2000; p2.size = 2020; p3.index = 3; p3.mtime = 3000; p3.size = 3030; ilist cloud(10,false); cloud.put(p1.index, &p1); cloud.put(p2.index, &p2); cloud.put(p3.index, &p3); ilist cache(10,false); cache.put(p2.index, &p2); cache.put(p1.index, &p1); ASSERT(!identical_lists(&cloud, &cache)); ilist diff(10,false); ASSERT(diff_lists(&cloud, &cache, &diff)); ASSERT(diff.size() == 1); cloud_part *dp = (cloud_part *)diff.get(3); ASSERT(*dp == p3); } printf("Test proxy set\\get\n"); { cloud_part p1, p2, p3; p1.index = 1; p1.mtime = 1000; p1.size = 1000; p2.index = 2; p2.mtime = 2000; p2.size = 2020; p3.index = 3; p3.mtime = 3000; p3.size = 3030; cloud_proxy *prox = cloud_proxy::get_instance(); /* add to the cloud proxy with no error */ /* in volume1 */ ASSERT(prox->set("volume1", &p1)); ASSERT(prox->set("volume1", &p2)); /* in volume2 */ ASSERT(prox->set("volume2", &p3)); /* retrieve the correct elements */ ASSERT(prox->get("volume1", 1) != NULL); ASSERT(prox->get("volume1", 1)->mtime == 1000); ASSERT(prox->get("volume1", 1)->size == 1000); ASSERT(prox->get("volume1", 2) != NULL); ASSERT(prox->get("volume1", 2)->mtime == 2000); ASSERT(prox->get("volume1", 2)->size == 2020); /* part3 is in volume2, not in volume1 */ ASSERT(prox->get("volume1", 3) == NULL); ASSERT(prox->get("volume2", 3) != NULL); ASSERT(prox->get("volume2", 3)->mtime == 3000); ASSERT(prox->get("volume2", 3)->size == 3030); /* there's no volume3 */ ASSERT(prox->get("volume3", 1) == NULL); /* there's no volume3 nor part4 */ ASSERT(prox->get("volume3", 4) == NULL); } printf("Test proxy reset\n"); { cloud_part p1, p2, p3, p4, p5; p1.index = 1; p1.mtime = 1000; p1.size = 1000; p2.index = 2; p2.mtime = 2000; p2.size = 2020; p3.index = 3; p3.mtime = 3000; p3.size = 3030; cloud_proxy *prox = cloud_proxy::get_instance(); /* add to the cloud proxy with no error */ /* in volume1 */ ASSERT(prox->set("volume1", &p1)); ASSERT(prox->set("volume1", &p2)); /* in volume2 */ ASSERT(prox->set("volume2", &p3)); p4.index = 3; p4.mtime = 4000; p4.size = 4040; p5.index = 50; p5.mtime = 5000; p5.size = 5050; ilist part_list(10,false); part_list.put(p4.index, &p4); part_list.put(p5.index, &p5); /* reset volume 1 */ 
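   /* reset() replaces volume1's whole list with the contents of
    * part_list: the old parts at index 1 and 2 must vanish, the new
    * ones must appear at index 3 and 50, and volume2 must stay
    * untouched, which is exactly what the ASSERTs below verify. */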
prox->reset("volume1", &part_list); /* old elements are gone */ ASSERT(prox->get("volume1", 1) == NULL); ASSERT(prox->get("volume1", 2) == NULL); /* new elements are at the correct index */ ASSERT(prox->get("volume1", 3) != NULL); ASSERT(prox->get("volume1", 3)->mtime == 4000); ASSERT(prox->get("volume1", 3)->size == 4040); ASSERT(prox->get("volume1", 50) != NULL); ASSERT(prox->get("volume1", 50)->mtime == 5000); ASSERT(prox->get("volume1", 50)->size == 5050); /* part3 is still in volume2 */ ASSERT(prox->get("volume2", 3) != NULL); ASSERT(prox->get("volume2", 3)->mtime == 3000); ASSERT(prox->get("volume2", 3)->size == 3030); /* there's no volume3 */ ASSERT(prox->get("volume3", 1) == NULL); /* there's no volume3 nor part.index 4 */ ASSERT(prox->get("volume3", 4) == NULL); prox->dump(); } return 0; } #endif /* TEST_PROGRAM */ bacula-15.0.3/src/stored/sd_plugins.c0000644000175000017500000003232114771010173017265 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Main program to test loading and running Bacula plugins. * Destined to become Bacula pluginloader, ... * * Kern Sibbald, October 2007 */ #include "bacula.h" #include "stored.h" #include "sd_plugins.h" const int dbglvl = 250; const char *plugin_type = "-sd.so"; /* Forward referenced functions */ static bRC baculaGetValue(bpContext *ctx, bsdrVariable var, void *value); static bRC baculaGetGlobal(bsdrGlobalVariable var, void *value); static bRC baculaSetValue(bpContext *ctx, bsdwVariable var, void *value); static bRC baculaRegisterEvents(bpContext *ctx, ...); static bRC baculaJobMsg(bpContext *ctx, const char *file, int line, int type, utime_t mtime, const char *fmt, ...); static bRC baculaDebugMsg(bpContext *ctx, const char *file, int line, int level, const char *fmt, ...); static void baculaEditDeviceCodes(DCR *dcr, POOLMEM **omsg, const char *imsg, const char *cmd); static bool is_plugin_compatible(Plugin *plugin); /* Bacula info */ static bsdInfo binfo = { sizeof(bsdFuncs), SD_PLUGIN_INTERFACE_VERSION }; /* Bacula entry points */ static bsdFuncs bfuncs = { sizeof(bsdFuncs), SD_PLUGIN_INTERFACE_VERSION, baculaRegisterEvents, baculaGetValue, baculaSetValue, baculaJobMsg, baculaDebugMsg, baculaEditDeviceCodes, baculaGetGlobal }; /* * Bacula private context */ struct bacula_ctx { JCR *jcr; /* jcr for plugin */ bRC rc; /* last return code */ bool disabled; /* set if plugin disabled */ }; static bool is_plugin_disabled(bpContext *plugin_ctx) { bacula_ctx *b_ctx; if (!plugin_ctx) { return true; } b_ctx = (bacula_ctx *)plugin_ctx->bContext; return b_ctx->disabled; } #ifdef needed static bool is_plugin_disabled(JCR *jcr) { return is_plugin_disabled(jcr->plugin_ctx); } #endif /* * Create a plugin event */ int generate_plugin_event(JCR *jcr, bsdEventType eventType, void *value) { bpContext *plugin_ctx; bsdEvent event; Plugin *plugin; int i = 0; bRC rc = bRC_OK; if (!b_plugin_list) { Dmsg0(dbglvl, "No b_plugin_list: generate_plugin_event ignored.\n"); return bRC_OK; } if (!jcr) { 
Dmsg0(dbglvl, "No jcr: generate_plugin_event ignored.\n"); return bRC_OK; } if (!jcr->plugin_ctx_list) { Dmsg0(dbglvl, "No plugin_ctx_list: generate_plugin_event ignored.\n"); return bRC_OK; /* Return if no plugins loaded */ } /* Always handle JobEnd and DeviceClose requests */ switch (eventType) { case bsdEventJobEnd: case bsdEventDeviceClose: break; /* pass these through even if canceled */ default: if (jcr->is_job_canceled()) { Dmsg0(dbglvl, "Cancel return from generate_plugin_event\n"); return bRC_Cancel; } } bpContext *plugin_ctx_list = (bpContext *)jcr->plugin_ctx_list; event.eventType = eventType; Dmsg2(dbglvl, "sd-plugin_ctx_list=%p JobId=%d\n", jcr->plugin_ctx_list, jcr->JobId); foreach_alist_index(i, plugin, b_plugin_list) { plugin_ctx = &plugin_ctx_list[i]; if (is_plugin_disabled(plugin_ctx)) { continue; } rc = sdplug_func(plugin)->handlePluginEvent(plugin_ctx, &event, value); if (rc != bRC_OK) { break; } } return rc; } /* * Create a global plugin event -- i.e. no context */ int generate_global_plugin_event(bsdGlobalEventType eventType, void *value) { bsdEvent event; Plugin *plugin; int i; bRC rc = bRC_OK; if (!b_plugin_list) { Dmsg0(dbglvl, "No b_plugin_list: generate_global_plugin_event ignored.\n"); return bRC_OK; } event.eventType = eventType; foreach_alist_index(i, plugin, b_plugin_list) { if (sdplug_func(plugin)->handleGlobalPluginEvent != NULL) { rc = sdplug_func(plugin)->handleGlobalPluginEvent(&event, value); if (rc != bRC_OK) { break; } } } return rc; } /* * Print to file the plugin info. */ void dump_sd_plugin(Plugin *plugin, FILE *fp) { if (!plugin) { return ; } psdInfo *info = (psdInfo *) plugin->pinfo; fprintf(fp, "\tversion=%d\n", info->version); fprintf(fp, "\tdate=%s\n", NPRTB(info->plugin_date)); fprintf(fp, "\tmagic=%s\n", NPRTB(info->plugin_magic)); fprintf(fp, "\tauthor=%s\n", NPRTB(info->plugin_author)); fprintf(fp, "\tlicence=%s\n", NPRTB(info->plugin_license)); fprintf(fp, "\tversion=%s\n", NPRTB(info->plugin_version)); fprintf(fp, "\tdescription=%s\n", NPRTB(info->plugin_description)); } /** * This entry point is called internally by Bacula to ensure * that the plugin IO calls come into this code. */ void load_sd_plugins(const char *plugin_dir) { Plugin *plugin; int i; Dmsg0(dbglvl, "Load sd plugins\n"); if (!plugin_dir) { Dmsg0(dbglvl, "No sd plugin dir!\n"); return; } b_plugin_list = New(alist(10, not_owned_by_alist)); if (!load_plugins((void *)&binfo, (void *)&bfuncs, plugin_dir, plugin_type, is_plugin_compatible)) { /* Either none found, or some error */ if (b_plugin_list->size() == 0) { delete b_plugin_list; b_plugin_list = NULL; Dmsg0(dbglvl, "No plugins loaded\n"); return; } } /* * Verify that the plugin is acceptable, and print information * about it. */ foreach_alist_index(i, plugin, b_plugin_list) { Jmsg(NULL, M_INFO, 0, _("Loaded plugin: %s\n"), plugin->file); Dmsg1(dbglvl, "Loaded plugin: %s\n", plugin->file); } Dmsg1(dbglvl, "num plugins=%d\n", b_plugin_list->size()); dbg_plugin_add_hook(dump_sd_plugin); } /** * Check if a plugin is compatible. Called by the load_plugin function * to allow us to verify the plugin. */ static bool is_plugin_compatible(Plugin *plugin) { psdInfo *info = (psdInfo *)plugin->pinfo; Dmsg0(50, "is_plugin_compatible called\n"); if (chk_dbglvl(50)) { dump_sd_plugin(plugin, stdin); } if (strcmp(info->plugin_magic, SD_PLUGIN_MAGIC) != 0) { Jmsg(NULL, M_ERROR, 0, _("Plugin magic wrong. Plugin=%s wanted=%s got=%s\n"), plugin->file, SD_PLUGIN_MAGIC, info->plugin_magic); Dmsg3(000, "Plugin magic wrong. 
Plugin=%s wanted=%s got=%s\n", plugin->file, SD_PLUGIN_MAGIC, info->plugin_magic); return false; } if (info->version != SD_PLUGIN_INTERFACE_VERSION) { Jmsg(NULL, M_ERROR, 0, _("Plugin version incorrect. Plugin=%s wanted=%d got=%d\n"), plugin->file, SD_PLUGIN_INTERFACE_VERSION, info->version); Dmsg3(000, "Plugin version incorrect. Plugin=%s wanted=%d got=%d\n", plugin->file, SD_PLUGIN_INTERFACE_VERSION, info->version); return false; } if (strcmp(info->plugin_license, "Bacula AGPLv3") != 0 && strcmp(info->plugin_license, "AGPLv3") != 0 && strcmp(info->plugin_license, BPLUGIN_LICENSE) != 0) { Jmsg(NULL, M_ERROR, 0, _("Plugin license incompatible. Plugin=%s license=%s\n"), plugin->file, info->plugin_license); Dmsg2(000, "Plugin license incompatible. Plugin=%s license=%s\n", plugin->file, info->plugin_license); return false; } if (info->size != sizeof(psdInfo)) { Jmsg(NULL, M_ERROR, 0, _("Plugin size incorrect. Plugin=%s wanted=%d got=%d\n"), plugin->file, sizeof(psdInfo), info->size); return false; } return true; } /* * Create a new instance of each plugin for this Job */ void new_plugins(JCR *jcr) { Plugin *plugin; int i = 0;; Dmsg0(dbglvl, "=== enter new_plugins ===\n"); if (!b_plugin_list) { Dmsg0(dbglvl, "No sd plugin list!\n"); return; } if (jcr->is_job_canceled()) { return; } /* * If plugins already loaded, just return */ if (jcr->plugin_ctx_list) { return; } int num = b_plugin_list->size(); Dmsg1(dbglvl, "sd-plugin-list size=%d\n", num); if (num == 0) { return; } jcr->plugin_ctx_list = (bpContext *)malloc(sizeof(bpContext) * num); bpContext *plugin_ctx_list = jcr->plugin_ctx_list; Dmsg2(dbglvl, "Instantiate sd-plugin_ctx_list=%p JobId=%d\n", jcr->plugin_ctx_list, jcr->JobId); foreach_alist_index(i, plugin, b_plugin_list) { /* Start a new instance of each plugin */ bacula_ctx *b_ctx = (bacula_ctx *)malloc(sizeof(bacula_ctx)); memset(b_ctx, 0, sizeof(bacula_ctx)); b_ctx->jcr = jcr; plugin_ctx_list[i].bContext = (void *)b_ctx; plugin_ctx_list[i].pContext = NULL; if (sdplug_func(plugin)->newPlugin(&plugin_ctx_list[i]) != bRC_OK) { b_ctx->disabled = true; } } } /* * Free the plugin instances for this Job */ void free_plugins(JCR *jcr) { Plugin *plugin; int i = 0; if (!b_plugin_list || !jcr->plugin_ctx_list) { return; } bpContext *plugin_ctx_list = (bpContext *)jcr->plugin_ctx_list; Dmsg2(dbglvl, "Free instance sd-plugin_ctx_list=%p JobId=%d\n", jcr->plugin_ctx_list, jcr->JobId); foreach_alist_index(i, plugin, b_plugin_list) { /* Free the plugin instance */ sdplug_func(plugin)->freePlugin(&plugin_ctx_list[i]); free(plugin_ctx_list[i].bContext); /* free Bacula private context */ } free(plugin_ctx_list); jcr->plugin_ctx_list = NULL; } /* ============================================================== * * Callbacks from the plugin * * ============================================================== */ /* Get a global. 
No job context */ static bRC baculaGetGlobal(bsdrGlobalVariable var, void *value) { if (!value) { return bRC_Error; } switch (var) { #if 0 case bsdVarDevTypes: *((s_kw **)value) = dev_types; #endif default: break; } return bRC_OK; } static bRC baculaGetValue(bpContext *ctx, bsdrVariable var, void *value) { JCR *jcr; if (!ctx) { return bRC_Error; } jcr = ((bacula_ctx *)ctx->bContext)->jcr; if (!jcr) { return bRC_Error; } if (!value) { return bRC_Error; } switch (var) { case bsdVarJobId: *((int *)value) = jcr->JobId; Dmsg1(dbglvl, "sd-plugin: return bVarJobId=%d\n", jcr->JobId); break; case bsdVarJobName: *((char **)value) = jcr->Job; Dmsg1(dbglvl, "Bacula: return Job name=%s\n", jcr->Job); break; #if 0 case bsdVarDevTypes: *((s_kw **)value) = dev_types; #endif default: break; } return bRC_OK; } static bRC baculaSetValue(bpContext *ctx, bsdwVariable var, void *value) { JCR *jcr; if (!value || !ctx) { return bRC_Error; } // Dmsg1(dbglvl, "bacula: baculaSetValue var=%d\n", var); jcr = ((bacula_ctx *)ctx->bContext)->jcr; if (!jcr) { return bRC_Error; } // Dmsg1(dbglvl, "Bacula: jcr=%p\n", jcr); /* Nothing implemented yet */ Dmsg1(dbglvl, "sd-plugin: baculaSetValue var=%d\n", var); return bRC_OK; } static bRC baculaRegisterEvents(bpContext *ctx, ...) { va_list args; uint32_t event; va_start(args, ctx); while ((event = va_arg(args, uint32_t))) { Dmsg1(dbglvl, "sd-Plugin wants event=%u\n", event); } va_end(args); return bRC_OK; } static bRC baculaJobMsg(bpContext *ctx, const char *file, int line, int type, utime_t mtime, const char *fmt, ...) { va_list arg_ptr; char buf[2000]; JCR *jcr; if (ctx) { jcr = ((bacula_ctx *)ctx->bContext)->jcr; } else { jcr = NULL; } va_start(arg_ptr, fmt); bvsnprintf(buf, sizeof(buf), fmt, arg_ptr); va_end(arg_ptr); Jmsg(jcr, type, mtime, "%s", buf); return bRC_OK; } static bRC baculaDebugMsg(bpContext *ctx, const char *file, int line, int level, const char *fmt, ...) { va_list arg_ptr; char buf[2000]; va_start(arg_ptr, fmt); bvsnprintf(buf, sizeof(buf), fmt, arg_ptr); va_end(arg_ptr); d_msg(file, line, level, "%s", buf); return bRC_OK; } static void baculaEditDeviceCodes(DCR *dcr, POOLMEM **omsg, const char *imsg, const char *cmd) { edit_device_codes(dcr, omsg, imsg, cmd); } #ifdef TEST_PROGRAM int main(int argc, char *argv[]) { char plugin_dir[1000]; JCR mjcr1, mjcr2; JCR *jcr1 = &mjcr1; JCR *jcr2 = &mjcr2; strcpy(my_name, "test-dir"); getcwd(plugin_dir, sizeof(plugin_dir)-1); load_sd_plugins(plugin_dir); jcr1->JobId = 111; new_plugins(jcr1); jcr2->JobId = 222; new_plugins(jcr2); generate_plugin_event(jcr1, bsdEventJobStart, (void *)"Start Job 1"); generate_plugin_event(jcr1, bsdEventJobEnd); generate_plugin_event(jcr2, bsdEventJobStart, (void *)"Start Job 1"); free_plugins(jcr1); generate_plugin_event(jcr2, bsdEventJobEnd); free_plugins(jcr2); unload_plugins(); Dmsg0(dbglvl, "sd-plugin: Test OK ...\n"); close_memory_pool(); sm_dump(false); return 0; } #endif /* TEST_PROGRAM */ bacula-15.0.3/src/stored/sdcollect.c0000644000175000017500000002377614771010173017110 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. 
This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Copyright (c) 2018 by Inteos sp. z o.o. * All rights reserved. IP transfered to Bacula Systems according to agreement. * * This is a Bacula statistics Storage Daemon collector routines. * Author: Radosław Korzeniewski, radekk@inteos.pl, Inteos Sp. z o.o. * */ #include "bacula.h" #include "stored.h" /* imported functions and variables */ extern const char collect_all_cmd[]; extern const char collect_metrics_cmd[]; extern bool init_done; static bool collector_threads_started = false; /* * Initialize a daemon wide Bacula statcollector. * Creates a global daemon collector and registers all global metrics. * This is a statcollector initialization function specific for StorageDaemon. */ void initialize_statcollector() { POOL_MEM met(PM_NAME); /* create a statistics collector */ statcollector = New(bstatcollect); /* register config metrics */ Mmsg(met, "bacula.storage.%s.config.devices", me->hdr.name); sdstatmetrics.bacula_storage_config_devices = statcollector->registration_int64(met.c_str(), METRIC_UNIT_DEVICE, ((rblist *)res_head[R_DEVICE-r_first]->res_list)->size(), "The number of defined devices in Storage."); Mmsg(met, "bacula.storage.%s.config.autochangers", me->hdr.name); sdstatmetrics.bacula_storage_config_autochangers = statcollector->registration_int64(met.c_str(), METRIC_UNIT_AUTOCHANGER, ((rblist *)res_head[R_AUTOCHANGER-r_first]->res_list)->size(), "The number of defined autochangers in Storage."); /* memory utilization */ Mmsg(met, "bacula.storage.%s.memory.bufs", me->hdr.name); sdstatmetrics.bacula_storage_memory_bufs = statcollector->registration(met.c_str(), METRIC_INT, METRIC_UNIT_NUMBER, "The number of buffers allocated."); Mmsg(met, "bacula.storage.%s.memory.heap", me->hdr.name); sdstatmetrics.bacula_storage_memory_heap = statcollector->registration(met.c_str(), METRIC_INT, METRIC_UNIT_BYTE, "The size of the heap."); Mmsg(met, "bacula.storage.%s.memory.maxbufs", me->hdr.name); sdstatmetrics.bacula_storage_memory_maxbufs = statcollector->registration(met.c_str(), METRIC_INT, METRIC_UNIT_NUMBER, "The maximum buffers allocated."); Mmsg(met, "bacula.storage.%s.memory.maxbytes", me->hdr.name); sdstatmetrics.bacula_storage_memory_maxbytes = statcollector->registration(met.c_str(), METRIC_INT, METRIC_UNIT_BYTE, "The allocated memory size."); Mmsg(met, "bacula.storage.%s.memory.smbytes", me->hdr.name); sdstatmetrics.bacula_storage_memory_smbytes = statcollector->registration(met.c_str(), METRIC_INT, METRIC_UNIT_BYTE, "The allocated memory size."); // statcollector->dump(); }; /* * This is a callback function executed by updatecollector_thread() every time some permanent and hard to count * metrics should be collected. This include a memory allocator statistics and DEVICE throughput. 
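 * Device throughput is a plain difference quotient: with
 * delta = now - last_stat_timer, the read and write speed metrics are
 * set to (current bytes - last_stat_DevReadBytes/DevWriteBytes) / delta,
 * and they are only recomputed when last_stat_timer was already set and
 * delta is positive, so the first pass merely seeds the counters.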
*/ bool update_permanent_stats(void *data) { DEVRES *device; DEVICE *dev; float speed; utime_t t; utime_t delta; uint64_t rd; uint64_t wd; uint64_t fs, ts; /* update memory statistics */ statcollector->set_value_int64(sdstatmetrics.bacula_storage_memory_bufs, sm_buffers); #ifdef HAVE_WIN32 uint64_t memused; memused = get_memory_info(NULL, 0); statcollector->set_value_int64(sdstatmetrics.bacula_storage_memory_heap, memused); #else statcollector->set_value_int64(sdstatmetrics.bacula_storage_memory_heap, heap_used()); #endif statcollector->set_value_int64(sdstatmetrics.bacula_storage_memory_maxbufs, sm_max_buffers); statcollector->set_value_int64(sdstatmetrics.bacula_storage_memory_maxbytes, sm_max_bytes); statcollector->set_value_int64(sdstatmetrics.bacula_storage_memory_smbytes, sm_bytes); /* update device throughput */ t = time(NULL); LockRes(); if (init_done){ foreach_res(device, R_DEVICE) { dev = device->dev; if (!dev){ continue; } rd = statcollector->get_int(dev->devstatmetrics.bacula_storage_device_readbytes); wd = statcollector->get_int(dev->devstatmetrics.bacula_storage_device_writebytes); if (dev->last_stat_timer > 0){ /* count throughput */ delta = t - dev->last_stat_timer; if (delta > 0){ /* only when a time passed */ speed = ((float)rd - dev->last_stat_DevReadBytes) / (float)delta; statcollector->set_value_float(dev->devstatmetrics.bacula_storage_device_readspeed, speed); speed = ((float)wd - dev->last_stat_DevWriteBytes) / (float)delta; statcollector->set_value_float(dev->devstatmetrics.bacula_storage_device_writespeed, speed); } } dev->last_stat_timer = t; dev->last_stat_DevReadBytes = rd; dev->last_stat_DevWriteBytes = wd; /* count total/free space */ dev->get_freespace(&fs, &ts); statcollector->set_value_int64(dev->devstatmetrics.bacula_storage_device_freespace, fs); statcollector->set_value_int64(dev->devstatmetrics.bacula_storage_device_totalspace, ts); }; } UnlockRes(); return true; }; /* * Starts a single thread for every defined Statistics resource and one globally single thread for * Update Statistics if at least one Statistics resource is available. */ void start_collector_threads() { COLLECTOR *collect; utime_t interval = 24*3600; // the minimal update interval is 1 day UPDATE_COLLECTOR_INIT_t initdata; LockRes(); foreach_res(collect, R_COLLECTOR){ interval = MIN(interval, collect->interval); collect->statcollector = statcollector; collect->spool_directory = working_directory; collect->daemon = "bacula-sd"; collect->jcr = new_jcr(sizeof(JCR), stored_free_jcr); Dmsg1(100, "Starting statistics thread for \"%s\"\n", collect->name()); start_collector_thread(collect); collector_threads_started = true; }; if (((rblist *)res_head[R_COLLECTOR-r_first]->res_list)->size() > 0){ initdata.interval = interval; initdata.jcr = new_jcr(sizeof(JCR), stored_free_jcr); initdata.routine = &update_permanent_stats; initdata.data = NULL; /* start update thread */ Dmsg0(100, "Starting update collector thread.\n"); start_updcollector_thread(initdata); } UnlockRes(); }; /* * Terminates all Statistics's threads executed. */ void terminate_collector_threads() { COLLECTOR *collect; if (collector_threads_started){ foreach_res(collect, R_COLLECTOR){ stop_collector_thread(collect); }; stop_updcollector_thread(); collector_threads_started = false; } }; /* * Display a text representation of the metric for UA. 
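 * The metric is rendered with rendermetric() into a POOL_MEM buffer,
 * copied into user->msg after growing it with check_pool_memory_size(),
 * and pushed to the UA with user->send(), one socket message per metric.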
* * in: * user - a BSOCK class for sending rendered text to UA * m - metric to display * format - a display format * out: * metric rendered and sent to the UA */ void display_metric(BSOCK *user, bstatmetric *m, display_format_t format, int nr) { POOL_MEM out(PM_MESSAGE); int len; rendermetric(out, m, format, nr); len = strlen(out.c_str()) + 1; user->msg = check_pool_memory_size(user->msg, len); memcpy(user->msg, out.c_str(), len); user->msglen = len; user->send(); }; /* * Collect command from Director */ bool collect_cmd(JCR *jcr) { BSOCK *dir = jcr->dir_bsock; POOLMEM *cmd; POOLMEM *fmt; POOLMEM *metrics = NULL; char *ch; char *met; bool doall = true; display_format_t format; bstatmetric *item; alist *list; int nr; cmd = get_memory(dir->msglen+1); fmt = get_memory(dir->msglen+1); Dmsg1(100, "statistics cmd: %s", dir->msg); if (sscanf(dir->msg, collect_all_cmd, fmt) != 1) { if (sscanf(dir->msg, collect_metrics_cmd, fmt, cmd) != 2) { pm_strcpy(&jcr->errmsg, dir->msg); Jmsg1(jcr, M_FATAL, 0, _("Bad collect command: %s\n"), jcr->errmsg); dir->fsend(_("2900 Bad statistics command, missing argument.\n")); dir->signal(BNET_EOD); free_memory(cmd); free_memory(fmt); return false; } else { doall = false; metrics = strstr(dir->msg, cmd); pm_strcpy(cmd, metrics); metrics = cmd; } } format = scandisplayformat(fmt); if (!doall) Dmsg1(100, "statistics metrics: %s\n", metrics); if (format == COLLECT_JSON){ dir->fsend("["); } update_permanent_stats(NULL); nr = 0; if (doall){ /* we should display all the metrics found */ list = statcollector->get_all(); foreach_alist(item, list){ display_metric(dir, item, format, nr++); } free_metric_alist(list); } else { /* we should display selected metrics only */ met = metrics; /* eliminate the EOL char in metrics */ ch = strchr(met, '\n'); if (ch){ *ch = ' '; } while (*met != 0 && (ch = strchr(met, ' ')) != NULL){ /* prepare and get a single metric */ *ch = '\0'; Dmsg1(100, "statistics get metric: %s\n", met); item = statcollector->get_metric(met); if (item){ Dmsg0(200, "metric found\n"); display_metric(dir, item, format, nr++); delete(item); } met = ch + 1; } } if (format == COLLECT_JSON){ dir->fsend("\n]\n"); } dir->signal(BNET_EOD); free_memory(cmd); free_memory(fmt); return true; } bacula-15.0.3/src/bc_types.h0000644000175000017500000001336414771010173015441 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* Define integer types for Bacula -- Kern Sibbald Integer types. These types should be be used in all contexts in which the length of an integer stored on removable media must be known regardless of the architecture of the platform. Bacula types are: int8_t, int16_t, int32_t, int64_t uint8_t, uint16_t, uint32_t, uint64_t Also, we define types such as file address lengths. */ #ifndef __bc_types_INCLUDED #define __bc_types_INCLUDED /* * These are the sizes of the current definitions of database * Ids. In general, FileId_t can be set to uint64_t and it * *should* work. 
Users have reported back that it does work * for PostgreSQL. For the other types, all places in Bacula * have been converted, but no one has actually tested it. * In principle, the only field that really should need to be * 64 bits is the FileId_t */ typedef uint64_t FileId_t; typedef uint32_t DBId_t; /* general DB id type */ typedef uint32_t JobId_t; typedef char POOLMEM; /* Types */ /* If sys/types.h does not supply intXX_t, supply them ourselves */ /* (or die trying) */ #ifndef HAVE_U_INT typedef unsigned int u_int; #endif #ifndef HAVE_INTXX_T # if (SIZEOF_CHAR == 1) typedef signed char int8_t; # else # error "8 bit int type not found." # endif # if (SIZEOF_SHORT_INT == 2) typedef short int int16_t; # else # error "16 bit int type not found." # endif # if (SIZEOF_INT == 4) typedef int int32_t; # else # error "32 bit int type not found." # endif #endif /* If sys/types.h does not supply u_intXX_t, supply them ourselves */ #ifndef HAVE_U_INTXX_T # ifdef HAVE_UINTXX_T typedef uint8_t u_int8_t; typedef uint16_t u_int16_t; typedef uint32_t u_int32_t; # define HAVE_U_INTXX_T 1 # else # if (SIZEOF_CHAR == 1) typedef unsigned char u_int8_t; # else # error "8 bit int type not found. Required!" # endif # if (SIZEOF_SHORT_INT == 2) typedef unsigned short int u_int16_t; # else # error "16 bit int type not found. Required!" # endif # if (SIZEOF_INT == 4) typedef unsigned int u_int32_t; # else # error "32 bit int type not found. Required!" # endif # endif #endif /* 64-bit types */ #ifndef HAVE_INT64_T # if (SIZEOF_LONG_LONG_INT == 8) typedef long long int int64_t; # define HAVE_INT64_T 1 # else # if (SIZEOF_LONG_INT == 8) typedef long int int64_t; # define HAVE_INT64_T 1 # endif # endif #endif #ifndef HAVE_INTMAX_T # ifdef HAVE_INT64_T typedef int64_t intmax_t; # else # error "64 bit type not found. Required!" # endif #endif #ifndef HAVE_U_INT64_T # if (SIZEOF_LONG_LONG_INT == 8) typedef unsigned long long int u_int64_t; # define HAVE_U_INT64_T 1 # else # if (SIZEOF_LONG_INT == 8) typedef unsigned long int u_int64_t; # define HAVE_U_INT64_T 1 # else # error "64 bit type not found. Required!" # endif # endif #endif #ifndef HAVE_U_INTMAX_T # ifdef HAVE_U_INT64_T typedef u_int64_t u_intmax_t; # else # error "64 bit type not found. Required!" # endif #endif #ifndef HAVE_INTPTR_T #define HAVE_INTPTR_T 1 # if (SIZEOF_INT_P == 4) typedef int32_t intptr_t; # else # if (SIZEOF_INT_P == 8) typedef int64_t intptr_t; # else # error "Can't find sizeof pointer. Required!" # endif # endif #endif #ifndef HAVE_UINTPTR_T #define HAVE_UINTPTR_T 1 # if (SIZEOF_INT_P == 4) typedef uint32_t uintptr_t; # else # if (SIZEOF_INT_P == 8) typedef uint64_t uintptr_t; # else # error "Can't find sizeof pointer. Required!" # endif # endif #endif /* Limits for the above types. 
*/ #undef INT8_MIN #undef INT8_MAX #undef UINT8_MAX #undef INT16_MIN #undef INT16_MAX #undef UINT16_MAX #undef INT32_MIN #undef INT32_MAX #undef UINT32_MAX #define INT8_MIN (-127-1) #define INT8_MAX (127) #define UINT8_MAX (255u) #define INT16_MIN (-32767-1) #define INT16_MAX (32767) #define UINT16_MAX (65535u) #define INT32_MIN (-2147483647-1) #define INT32_MAX (2147483647) #define UINT32_MAX (4294967295u) typedef double float64_t; typedef float float32_t; /* Define the uint versions actually used in Bacula */ #ifndef uint8_t #define uint8_t u_int8_t #define uint16_t u_int16_t #define uint32_t u_int32_t #define uint64_t u_int64_t #define uintmax_t u_intmax_t #endif /* Bacula time -- Unix time with microseconds */ #define btime_t int64_t /* Unix time (time_t) widened to 64 bits */ #define utime_t int64_t #ifndef HAVE_SOCKLEN_T #define socklen_t int #endif #ifndef HAVE_WIN32 #ifndef SOCKET_ERROR #define SOCKET_ERROR (-1) #endif #endif #ifdef HAVE_OLD_SOCKOPT #define sockopt_val_t char * #else #define sockopt_val_t void * #endif /* * Status codes returned by create_file() * Used in findlib, filed, and plugins */ enum { CF_SKIP = 1, /* skip file (not newer or something) */ CF_ERROR, /* error creating file */ CF_EXTRACT, /* file created, data to extract */ CF_CREATED, /* file created, no data to extract */ CF_CORE /* let bacula core handle the file creation */ }; #ifndef MAX #define MAX(a, b) ((a) > (b) ? (a) : (b)) #endif #ifndef MIN #define MIN(a, b) ((a) < (b) ? (a) : (b)) #endif #endif /* __bc_types_INCLUDED */ bacula-15.0.3/src/version.h0000644000175000017500000001761114771010173015315 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ #ifndef VERSION_H #define VERSION_H #undef VERSION #define COMMUNITY 1 /* Define to create a Windows community binary */ /* Note: there can be only *one* VERSION in this file */ #define VERSION "15.0.3" #define BDATE "25 March 2025" #define LSMDATE "25Mar25" #define RELEASE 1 /* Use ONLY in rpms */ #define PROG_COPYRIGHT "Copyright (C) %d-2025 Kern Sibbald.\n" #define BYEAR "2025" /* year for copyright messages in progs */ /* * Versions of packages needed to build Bacula components */ #define DEPKGS_QT_VERSION "30Nov17" #define DEPKGS_VERSION "02Nov23" #define BQT4_VERSION "5.9.3" #define VIX_VERSION "29Mar21" #define JAVA_VERSION "26Aug20" #define NDMP_VERSION "20Apr20" #define EXTRAJS_VERSION "20Apr20" #define EXPAT_VERSION "1.95.8" #define ACSLS_VERSION "20Mar18" /* Normally included in the base depkgs */ #define LIBRSYNC_VERSION "0.9.7b" #define LIBLZO_VERSION "2.06" #define TOKYOCABINET_VERSION "1.5.1" #define MSSQL_VERSION "24Jun21" #define DOCKER_TAR_IMAGE "07Dec22" #define KUBERNETES_IMAGE_VERSION "07Dec22" #define HADOOP_VERSION "15Dec21" /* Debug flags */ #undef DEBUG #define DEBUG 1 #define TRACEBACK 1 #define TRACE_FILE 1 #define ENTER_LEAVE 1 //#define FORCE_ALIGNED 1 /* If this is set stdout will not be closed on startup */ /* #define DEVELOPER 1 */ /* adjust DEVELOPER_MODE for status command */ #ifdef DEVELOPER # define DEVELOPER_MODE 1 #else # define DEVELOPER_MODE 0 #endif /* * SMCHECK does orphaned buffer checking (memory leaks) * it can always be turned on, but has some minor performance * penalties. */ #ifdef DEVELOPER # define SMCHECK #endif /* * _USE_LOCKMGR does lock/unlock mutex tracking (dead lock) * it can always be turned on, but we advise to use it only * for debug */ # ifndef USE_LOCKMGR # define USE_LOCKMGR # endif /* USE_LOCKMGR */ /* * Enable priority management with the lock manager * * Note, turning this on will cause the Bacula SD to abort if * mutexes are executed out of order, which could lead to a * deadlock. However, note that this is not necessarily a * deadlock, so turn this on only for debugging. */ #define USE_LOCKMGR_PRIORITY /* * Enable thread verification before kill * * Note, this extra check have a high cost when using * dozens of thread, so turn this only for debugging. */ #define USE_LOCKMGR_SAFEKILL #if !HAVE_LINUX_OS && !HAVE_SUN_OS && !HAVE_DARWIN_OS && !HAVE_FREEBSD_OS && !HAVE_KFREEBSD_OS # undef USE_LOCKMGR #endif /* * USE_VTAPE is a dummy tape driver. This is useful to * run regress test. */ #ifdef HAVE_LINUX_OS # define USE_VTAPE #endif /* * USE_FTP is a ftp driver for the FD using curl. */ // #define USE_FTP /* * for fastest speed but you must have a UPS to avoid unwanted shutdowns */ //#define SQLITE3_INIT_QUERY "PRAGMA synchronous = OFF" /* * for more safety, but is 30 times slower than above */ #define SQLITE3_INIT_QUERY "PRAGMA synchronous = NORMAL" /* * This should always be on. It enables data encryption code * providing it is configured. */ #define DATA_ENCRYPTION 1 /* * This uses a Bacula specific bsnprintf rather than the sys lib * version because it is much more secure. It should always be * on. 
*/ #define USE_BSNPRINTF 1 /* Debug flags not normally turned on */ /* #define TRACE_JCR_CHAIN 1 */ /* #define TRACE_RES 1 */ /* #define DEBUG_MEMSET 1 */ /* #define DEBUG_MUTEX 1 */ /* #define DEBUG_BLOCK_CHECKSUM 1 */ #define BEEF 0 #define BDEMO "" /* * SD_VERSION history Enterprise * Note: Enterprise versions now numbered in 30000 * and community is at SD version 3 * None prior to 06Aug13 * 1 06Aug13 - added comm line compression * 2 13Dec13 - added api version to status command * 3 22Feb14 - Added SD->SD with SD_Calls_Client * 4 22Jun14 - Added capabilities comm protocol * 30005 04Jun15 - Added JobMedia queueing * 30006 11Apr17 - Added PoolBytes, MaxPoolBytes and Recycle * 30007 06Feb20 - Added can_create to the Find media request * 30008 04Apr23 - Added VolumeRetention to Find_Media * * Community: * 305 04Jun15 - Added JobMedia queueing * 306 20Mar15 - Added comm line compression * 307 06Feb20 - Added can_create to the Find media request * 30007 02Dec20 - Sync with Enterprise * 30008 04Apr23 - Added VolumeRetention to Find_Media */ #ifdef COMMUNITY #define SD_VERSION 30008 /* Community SD version */ #else #define SD_VERSION 30008 /* Enterprise SD version */ #endif /* FD_VERSION history Enterprise * None prior to 10Mar08 * 1 10Mar08 * 2 13Mar09 - added the ability to restore from multiple storages * 3 03Sep10 - added the restore object command for vss plugin 4.0 * 4 25Nov10 - added bandwidth command 5.1 * 5 24Nov11 - added new restore object command format (pluginname) 6.0 * 6 15Feb12 - added Component selection information list * 7 19Feb12 - added Expected files to restore * 8 22Mar13 - added restore options + version for SD * 9 06Aug13 - added comm line compression * 10 01Jan14 - added SD Calls Client and api version to status command * 11 O4May14 - added dedup aware FD * 12 22Jun14 - added new capabilities comm protocol with the SD * 13 04Feb15 - added snapshot protocol with the DIR * 14 06Sep17 - added send file list during restore * * Community: * 213 04Feb15 - added snapshot protocol with the DIR * 214 20Mar17 - added comm line compression * 14 02Dec20 - Sync with Enterprise */ #ifdef COMMUNITY #define FD_VERSION 15 /* make same as community Linux FD */ #else #define FD_VERSION 15 /* Enterprise FD version */ #endif /* * Set SMALLOC_SANITY_CHECK to zero to turn off, otherwise * it is the maximum memory malloced before Bacula will * abort. Except for debug situations, this should be zero */ #define SMALLOC_SANITY_CHECK 0 /* 500000000 0.5 GB max */ /* Check if header of tape block is zero before writing */ /* #define DEBUG_BLOCK_ZEROING 1 */ /* #define FULL_DEBUG 1 */ /* normally on for testing only */ /* Turn this on ONLY if you want all Dmsg() to append to the * trace file. Implemented mainly for Win32 ... */ /* #define SEND_DMSG_TO_FILE 1 */ /* The following are turned on for performance testing */ /* * If you turn on the NO_ATTRIBUTES_TEST and rebuild, the SD * will receive the attributes from the FD, will write them * to disk, then when the data is written to tape, it will * read back the attributes, but they will not be sent to * the Director. So this will eliminate: 1. the comm time * to send the attributes to the Director. 2. the time it * takes the Director to put them in the catalog database. */ /* #define NO_ATTRIBUTES_TEST 1 */ /* * If you turn on NO_TAPE_WRITE_TEST and rebuild, the SD * will do all normal actions, but will not write to the * Volume. 
Note, this means a lot of functions such as * labeling will not work, so you must use it only when * Bacula is going to append to a Volume. This will eliminate * the time it takes to write to the Volume (not the time * it takes to do any positioning). */ /* #define NO_TAPE_WRITE_TEST 1 */ /* * If you turn on FD_NO_SEND_TEST and rebuild, the FD will * not send any attributes or data to the SD. This will * eliminate the comm time sending to the SD. */ /* #define FD_NO_SEND_TEST 1 */ #ifndef COMMUNITY #include "bee_version.h" #endif #endif /* VERSION_H */ bacula-15.0.3/src/baconfig.h0000644000175000017500000007435014771010173015403 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /** * General header file configurations that apply to * all daemons. System dependent stuff goes here. * */ #ifndef _BACONFIG_H #define _BACONFIG_H 1 /* Bacula common configuration defines */ #undef TRUE #undef FALSE #define TRUE 1 #define FALSE 0 #ifdef HAVE_TLS #define have_tls 1 #ifdef HAVE_TLS_PSK #define tls_psk_default 1 #else #define tls_psk_default 0 #endif #else #define have_tls 0 #define tls_psk_default 0 #endif #ifndef ETIME #define ETIME ETIMEDOUT #endif #ifdef HAVE_IOCTL_ULINT_REQUEST #define ioctl_req_t unsigned long int #else #define ioctl_req_t int #endif #define MANUAL_AUTH_URL "http://www.bacula.org/rel-manual/en/problems/Bacula_Frequently_Asked_Que.html" #ifdef PROTOTYPES # define __PROTO(p) p #else # define __PROTO(p) () #endif #ifdef DEBUG #define ASSERT(x) if (!(x)) { \ char *tjcr = NULL; \ Emsg1(M_ERROR, 0, _("Failed ASSERT: %s\n"), #x); \ Pmsg1(000, _("Failed ASSERT: %s\n"), #x); \ tjcr[0] = 0; } #define ASSERT2(x,y) if (!(x)) { \ set_assert_msg(__FILE__, __LINE__, y); \ Emsg1(M_ERROR, 0, _("Failed ASSERT: %s\n"), #x); \ Pmsg1(000, _("Failed ASSERT: %s\n"), #x); \ char *tjcr = NULL; \ tjcr[0] = 0; } #else #define ASSERT(x) #define ASSERT2(x, y) #endif #ifdef DEVELOPER #define ASSERTD(x, y) if (!(x)) { \ set_assert_msg(__FILE__, __LINE__, y); \ Emsg1(M_ERROR, 0, _("Failed ASSERT: %s\n"), #x); \ Pmsg1(000, _("Failed ASSERT: %s\n"), #x); \ char *jcr = NULL; \ jcr[0] = 0; } #else #define ASSERTD(x, y) #endif /* Until DDE has its own logging, use Tmsg, with following debug levels */ #define DDE_MSG 0 #define VACUUM_MSG 0 #define DDEERR_MSG 0 #define VACERR_MSG 0 #define SCRUB_MSG 0 #define SCRUBERR_MSG 0 /* Allow printing of NULL pointers */ #define NPRT(x) (x)?(x):_("*None*") #define NPRTB(x) (x)?(x):"" /* stored in statp.st_rdev */ #define WIN32_REPARSE_NONE 0 /* Not a reparse point */ #define WIN32_REPARSE_POINT 1 /* Can be any reparse point except one of the following */ #define WIN32_MOUNT_POINT 2 /* Directory link to Volume */ #define WIN32_JUNCTION_POINT 3 /* Directory link to a directory */ #define WIN32_SYMLINK_POINT 4 /* A Symlink to a file OR a directory */ #define WIN32_ROOT_POINT 5 /* Root of a volume, like C: */ #if defined(HAVE_WIN32) /* Reduce compiler warnings from Windows vss code */ #define uuid(x) void 
InitWinAPIWrapper(); #define OSDependentInit() InitWinAPIWrapper() #define clear_thread_id(x) memset(&(x), 0, sizeof(x)) #if defined(BUILDING_DLL) # define DLL_IMP_EXP _declspec(dllexport) #elif defined(USING_DLL) # define DLL_IMP_EXP _declspec(dllimport) #else # define DLL_IMP_EXP #endif #if defined(USING_CATS) # define CATS_IMP_EXP _declspec(dllimport) #else # define CATS_IMP_EXP #endif #else /* HAVE_WIN32 */ #define clear_thread_id(x) x = 0 #define DLL_IMP_EXP #define CATS_IMP_EXP #define OSDependentInit() #endif /* HAVE_WIN32 */ #ifdef ENABLE_NLS #include #include #ifndef _ #define _(s) gettext((s)) #endif /* _ */ #ifndef N_ #define N_(s) (s) #endif /* N_ */ #else /* !ENABLE_NLS */ #undef _ #undef N_ #undef textdomain #undef bindtextdomain #undef setlocale #ifndef _ #define _(s) (s) #endif #ifndef N_ #define N_(s) (s) #endif #ifndef textdomain #define textdomain(d) #endif #ifndef bindtextdomain #define bindtextdomain(p, d) #endif #ifndef setlocale #define setlocale(p, d) #endif #endif /* ENABLE_NLS */ /* Use the following for strings not to be translated */ #define NT_(s) (s) /* Maximum number of fileset options * FileSet definitions very similar to the resource * contained in the Director because the components * of the structure are passed by the Director to the * File daemon and recompiled back into this structure */ #define MAX_FOPTS 50 /* This should go away! ****FIXME***** */ #define MAXSTRING 500 /* Maximum length to edit time/date */ #define MAX_TIME_LENGTH 50 /* Maximum Name length including EOS */ #define MAX_NAME_LENGTH 128 /* Maximum Name length for Plugin Object values as UUID, Category, SRC, etc. including EOS */ #define MAX_PLUGINOBJ_NAME_LENGTH 512 /* Maximum escaped Name lenght including EOS 2*MAX_NAME_LENGTH+1 */ #define MAX_ESCAPE_NAME_LENGTH 257 /* Maximume number of user entered command args */ #define MAX_CMD_ARGS 30 /* All tape operations MUST be a multiple of this */ #define TAPE_BSIZE 1024 #ifdef DEV_BSIZE #define B_DEV_BSIZE DEV_BSIZE #endif #if !defined(B_DEV_BSIZE) & defined(BSIZE) #define B_DEV_BSIZE BSIZE #endif #ifndef B_DEV_BSIZE #define B_DEV_BSIZE 512 #endif /** * Set to time limit for other end to respond to * authentication. Normally 10 minutes is *way* * more than enough. The idea is to keep the Director * from hanging because there is a dead connection on * the other end. */ #define AUTH_TIMEOUT 60 * 10 /* * Default network buffer size */ #define DEFAULT_NETWORK_BUFFER_SIZE (64 * 1024) /* * Size of a buffer when editing numbers via edit_uint64/edit_int * Normally, it's around 20. */ #define SIZE_EDIT_INT 50 /** * Tape label types -- stored in catalog */ #define B_BACULA_LABEL 0 #define B_ANSI_LABEL 1 #define B_IBM_LABEL 2 /* * Device types * If you update this table, be sure to add an * entry in prt_dev_types[] in stored/dev.c * This number is stored in the Catalog as VolType or VolParts, do not change. 
*/ enum { B_FILE_DEV = 1, B_TAPE_DEV = 2, B_DVD_DEV = 3, B_FIFO_DEV = 4, B_VTAPE_DEV= 5, /* change to B_TAPE_DEV after init */ B_FTP_DEV = 6, B_VTL_DEV = 7, /* Virtual tape library device */ B_ADATA_DEV = 8, /* Aligned data Data file */ B_ALIGNED_DEV = 9, /* Aligned data Meta file */ B_DEDUP_OLD_DEV = 10, /* Old Deduplication device */ B_NULL_DEV = 11, /* /dev/null for testing */ B_VALIGNED_DEV = 12, /* Virtual for Aligned device (not stored) */ B_VDEDUP_DEV = 13, /* Virtual for Dedup device (not stored) */ B_CLOUD_DEV = 14, /* New Cloud device type (available in 8.8) */ B_DEDUP_DEV = 15 /* New Deduplication Device (do not split ref records) */ }; /** * Actions on purge (bit mask) */ #define ON_PURGE_TRUNCATE 1 /* Size of File Address stored in STREAM_SPARSE_DATA. Do NOT change! */ #define OFFSET_FADDR_SIZE (sizeof(uint64_t)) /* Size of crypto length stored at head of crypto buffer. Do NOT change! */ #define CRYPTO_LEN_SIZE ((int)sizeof(uint32_t)) /* Plugin Features */ #define PLUGIN_FEATURE_RESTORELISTFILES "RestoreListFiles" /** * This is for dumb compilers/libraries like Solaris. Linux GCC * does it correctly, so it might be worthwhile * to remove the isascii(c) with ifdefs on such * "smart" systems. */ #define B_ISSPACE(c) (isascii((int)(c)) && isspace((int)(c))) #define B_ISALPHA(c) (isascii((int)(c)) && isalpha((int)(c))) #define B_ISUPPER(c) (isascii((int)(c)) && isupper((int)(c))) #define B_ISDIGIT(c) (isascii((int)(c)) && isdigit((int)(c))) #define B_ISXDIGIT(c) (isascii((int)(c)) && isxdigit((int)(c))) /** For multiplying by 10 with shift and addition */ #define B_TIMES10(d) ((d<<3)+(d<<1)) typedef void (HANDLER)(); typedef int (INTHANDLER)(); #ifdef SETPGRP_VOID # define SETPGRP_ARGS(x, y) /* No arguments */ #else # define SETPGRP_ARGS(x, y) (x, y) #endif #ifndef S_ISLNK #define S_ISLNK(m) (((m) & S_IFM) == S_IFLNK) #endif /** Added by KES to deal with Win32 systems */ #ifndef S_ISWIN32 #define S_ISWIN32 020000 #endif #ifndef INADDR_NONE #define INADDR_NONE ((unsigned long) -1) #endif #ifdef TIME_WITH_SYS_TIME # include # include #else # ifdef HAVE_SYS_TIME_H # include # else # include # endif #endif #ifndef O_BINARY #define O_BINARY 0 #endif #ifndef O_NOFOLLOW #define O_NOFOLLOW 0 #endif #ifndef MODE_RW #define MODE_RW 0666 #endif #if defined(HAVE_WIN32) typedef int64_t boffset_t; #define caddr_t char * #else typedef off_t boffset_t; #endif /* These probably should be subroutines */ #define Pw(x) \ do { int errstat; if ((errstat=rwl_writelock(&(x)))) \ e_msg(__FILE__, __LINE__, M_ABORT, 0, "Write lock lock failure. ERR=%s\n",\ strerror(errstat)); \ } while(0) #define Vw(x) \ do { int errstat; if ((errstat=rwl_writeunlock(&(x)))) \ e_msg(__FILE__, __LINE__, M_ABORT, 0, "Write lock unlock failure. 
ERR=%s\n",\ strerror(errstat)); \ } while(0) #define LockRes() b_LockRes(__FILE__, __LINE__) #define UnlockRes() b_UnlockRes(__FILE__, __LINE__) #ifdef DEBUG_MEMSET #define memset(a, v, n) b_memset(__FILE__, __LINE__, a, v, n) void b_memset(const char *file, int line, void *mem, int val, size_t num); #endif /* we look for simple debug level * then finally we check if tags are set on debug_level and lvl */ /* lvl | debug_level | tags | result -------+----------------+-------+------- 0 | 0 | | OK T1|0 | 0 | | NOK T1|0 | 0 | T1 | OK 10 | 0 | | NOK 10 | 10 | | OK T1|10 | 10 | | NOK T1|10 | 10 | T1 | OK T1|10 | 10 | T2 | NOK */ /* The basic test is working because tags are on high bits */ #if 1 #define chk_dbglvl(lvl) ((lvl) <= debug_level || \ (((lvl) & debug_level_tags) && (((lvl) & ~DT_ALL) <= debug_level))) #else /* Alain's macro for debug */ #define chk_dbglvl(lvl) (((lvl) & debug_level_tags) || (((lvl) & ~DT_ALL) <= debug_level)) #endif /** * The digit following Dmsg and Emsg indicates the number of substitutions in * the message string. We need to do this kludge because non-GNU compilers * do not handle varargs #defines. */ /** Debug Messages that are printed */ #ifdef DEBUG #define Dmsg0(lvl, msg) if (chk_dbglvl(lvl)) d_msg(__FILE__, __LINE__, lvl, msg) #define Dmsg1(lvl, msg, a1) if (chk_dbglvl(lvl)) d_msg(__FILE__, __LINE__, lvl, msg, a1) #define Dmsg2(lvl, msg, a1, a2) if (chk_dbglvl(lvl)) d_msg(__FILE__, __LINE__, lvl, msg, a1, a2) #define Dmsg3(lvl, msg, a1, a2, a3) if (chk_dbglvl(lvl)) d_msg(__FILE__, __LINE__, lvl, msg, a1, a2, a3) #define Dmsg4(lvl, msg, arg1, arg2, arg3, arg4) if (chk_dbglvl(lvl)) d_msg(__FILE__, __LINE__, lvl, msg, arg1, arg2, arg3, arg4) #define Dmsg5(lvl, msg, a1, a2, a3, a4, a5) if (chk_dbglvl(lvl)) d_msg(__FILE__, __LINE__, lvl, msg, a1, a2, a3, a4, a5) #define Dmsg6(lvl, msg, a1, a2, a3, a4, a5, a6) if (chk_dbglvl(lvl)) d_msg(__FILE__, __LINE__, lvl, msg, a1, a2, a3, a4, a5, a6) #define Dmsg7(lvl, msg, a1, a2, a3, a4, a5, a6, a7) if (chk_dbglvl(lvl)) d_msg(__FILE__, __LINE__, lvl, msg, a1, a2, a3, a4, a5, a6, a7) #define Dmsg8(lvl, msg, a1, a2, a3, a4, a5, a6, a7, a8) if (chk_dbglvl(lvl)) d_msg(__FILE__, __LINE__, lvl, msg, a1, a2, a3, a4, a5, a6, a7, a8) #define Dmsg9(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9) if (chk_dbglvl(lvl)) d_msg(__FILE__,__LINE__,lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9) #define Dmsg10(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) if (chk_dbglvl(lvl)) d_msg(__FILE__,__LINE__,lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) #define Dmsg11(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11) if (chk_dbglvl(lvl)) d_msg(__FILE__,__LINE__,lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11) #define Dmsg12(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12) if (chk_dbglvl(lvl)) d_msg(__FILE__,__LINE__,lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12) #define Dmsg13(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13) if (chk_dbglvl(lvl)) d_msg(__FILE__,__LINE__,lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13) #else #define Dmsg0(lvl, msg) #define Dmsg1(lvl, msg, a1) #define Dmsg2(lvl, msg, a1, a2) #define Dmsg3(lvl, msg, a1, a2, a3) #define Dmsg4(lvl, msg, arg1, arg2, arg3, arg4) #define Dmsg5(lvl, msg, a1, a2, a3, a4, a5) #define Dmsg6(lvl, msg, a1, a2, a3, a4, a5, a6) #define Dmsg7(lvl, msg, a1, a2, a3, a4, a5, a6, a7) #define Dmsg8(lvl, msg, a1, a2, a3, a4, a5, a6, a7, a8) #define Dmsg11(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11) #define Dmsg12(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12) #define Dmsg13(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13) #endif /* DEBUG */ 
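/*
 * A minimal usage sketch of the Dmsg* debug macros defined above, added for
 * illustration only (the variable names nbytes and host below are
 * hypothetical, not part of the original header).  The digit suffix must
 * match the number of format substitutions, and the message is printed only
 * when chk_dbglvl(level) is true, i.e. when the requested level is at or
 * below the global debug_level (with additional handling for the tag bits
 * described in the table above):
 *
 *   Dmsg0(50, "Entering backup loop\n");
 *   Dmsg2(100, "Read %d bytes from %s\n", nbytes, host);
 *
 * In a build without DEBUG these macros expand to nothing, so such calls
 * compile away with no runtime cost.
 */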
#ifdef TRACE_FILE #define Tmsg0(lvl, msg) t_msg(__FILE__, __LINE__, lvl, msg) #define Tmsg1(lvl, msg, a1) t_msg(__FILE__, __LINE__, lvl, msg, a1) #define Tmsg2(lvl, msg, a1, a2) t_msg(__FILE__, __LINE__, lvl, msg, a1, a2) #define Tmsg3(lvl, msg, a1, a2, a3) t_msg(__FILE__, __LINE__, lvl, msg, a1, a2, a3) #define Tmsg4(lvl, msg, arg1, arg2, arg3, arg4) t_msg(__FILE__, __LINE__, lvl, msg, arg1, arg2, arg3, arg4) #define Tmsg5(lvl, msg, a1, a2, a3, a4, a5) t_msg(__FILE__, __LINE__, lvl, msg, a1, a2, a3, a4, a5) #define Tmsg6(lvl, msg, a1, a2, a3, a4, a5, a6) t_msg(__FILE__, __LINE__, lvl, msg, a1, a2, a3, a4, a5, a6) #define Tmsg7(lvl, msg, a1, a2, a3, a4, a5, a6, a7) t_msg(__FILE__, __LINE__, lvl, msg, a1, a2, a3, a4, a5, a6, a7) #define Tmsg8(lvl, msg, a1, a2, a3, a4, a5, a6, a7, a8) t_msg(__FILE__, __LINE__, lvl, msg, a1, a2, a3, a4, a5, a6, a7, a8) #define Tmsg9(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9) t_msg(__FILE__,__LINE__,lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9) #define Tmsg10(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) t_msg(__FILE__,__LINE__,lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) #define Tmsg11(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11) t_msg(__FILE__,__LINE__,lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11) #define Tmsg12(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12) t_msg(__FILE__,__LINE__,lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12) #define Tmsg13(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13) t_msg(__FILE__,__LINE__,lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13) #define Tmsg14(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14) t_msg(__FILE__,__LINE__,lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14) #define Tmsg15(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14,a15) t_msg(__FILE__,__LINE__,lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14,a15) #else #define Tmsg0(lvl, msg) #define Tmsg1(lvl, msg, a1) #define Tmsg2(lvl, msg, a1, a2) #define Tmsg3(lvl, msg, a1, a2, a3) #define Tmsg4(lvl, msg, arg1, arg2, arg3, arg4) #define Tmsg5(lvl, msg, a1, a2, a3, a4, a5) #define Tmsg6(lvl, msg, a1, a2, a3, a4, a5, a6) #define Tmsg7(lvl, msg, a1, a2, a3, a4, a5, a6, a7) #define Tmsg8(lvl, msg, a1, a2, a3, a4, a5, a6, a7, a8) #define Tmsg11(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11) #define Tmsg12(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12) #define Tmsg13(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13) #define Tmsg14(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14) #define Tmsg15(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14,a15) #endif /* TRACE_FILE */ /** Messages that are printed (uses d_msg) */ #define Pmsg0(lvl, msg) p_msg(__FILE__, __LINE__, lvl, msg) #define Pmsg1(lvl, msg, a1) p_msg(__FILE__, __LINE__, lvl, msg, a1) #define Pmsg2(lvl, msg, a1, a2) p_msg(__FILE__, __LINE__, lvl, msg, a1, a2) #define Pmsg3(lvl, msg, a1, a2, a3) p_msg(__FILE__, __LINE__, lvl, msg, a1, a2, a3) #define Pmsg4(lvl, msg, arg1, arg2, arg3, arg4) p_msg(__FILE__, __LINE__, lvl, msg, arg1, arg2, arg3, arg4) #define Pmsg5(lvl, msg, a1, a2, a3, a4, a5) p_msg(__FILE__, __LINE__, lvl, msg, a1, a2, a3, a4, a5) #define Pmsg6(lvl, msg, a1, a2, a3, a4, a5, a6) p_msg(__FILE__, __LINE__, lvl, msg, a1, a2, a3, a4, a5, a6) #define Pmsg7(lvl, msg, a1, a2, a3, a4, a5, a6, a7) p_msg(__FILE__, __LINE__, lvl, msg, a1, a2, a3, a4, a5, a6, a7) #define Pmsg8(lvl, msg, a1, a2, a3, a4, a5, a6, a7, a8) p_msg(__FILE__, __LINE__, lvl, msg, a1, a2, a3, a4, a5, a6, a7, a8) #define Pmsg9(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9) p_msg(__FILE__,__LINE__,lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9) #define 
Pmsg10(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) p_msg(__FILE__,__LINE__,lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) #define Pmsg11(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11) p_msg(__FILE__,__LINE__,lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11) #define Pmsg12(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12) p_msg(__FILE__,__LINE__,lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12) #define Pmsg13(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13) p_msg(__FILE__,__LINE__,lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13) #define Pmsg14(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14) p_msg(__FILE__,__LINE__,lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14) #define Pmsg15(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14,a15) p_msg(__FILE__,__LINE__,lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14,a15) /** Daemon Error Messages that are delivered according to the message resource */ #define Emsg0(typ, lvl, msg) e_msg(__FILE__, __LINE__, typ, lvl, msg) #define Emsg1(typ, lvl, msg, a1) e_msg(__FILE__, __LINE__, typ, lvl, msg, a1) #define Emsg2(typ, lvl, msg, a1, a2) e_msg(__FILE__, __LINE__, typ, lvl, msg, a1, a2) #define Emsg3(typ, lvl, msg, a1, a2, a3) e_msg(__FILE__, __LINE__, typ, lvl, msg, a1, a2, a3) #define Emsg4(typ, lvl, msg, a1, a2, a3, a4) e_msg(__FILE__, __LINE__, typ, lvl, msg, a1, a2, a3, a4) #define Emsg5(typ, lvl, msg, a1, a2, a3, a4, a5) e_msg(__FILE__, __LINE__, typ, lvl, msg, a1, a2, a3, a4, a5) #define Emsg6(typ, lvl, msg, a1, a2, a3, a4, a5, a6) e_msg(__FILE__, __LINE__, typ, lvl, msg, a1, a2, a3, a4, a5, a6) /** Job Error Messages that are delivered according to the message resource */ #define Jmsg0(jcr, typ, lvl, msg) j_msg(__FILE__, __LINE__, jcr, typ, lvl, msg) #define Jmsg1(jcr, typ, lvl, msg, a1) j_msg(__FILE__, __LINE__, jcr, typ, lvl, msg, a1) #define Jmsg2(jcr, typ, lvl, msg, a1, a2) j_msg(__FILE__, __LINE__, jcr, typ, lvl, msg, a1, a2) #define Jmsg3(jcr, typ, lvl, msg, a1, a2, a3) j_msg(__FILE__, __LINE__, jcr, typ, lvl, msg, a1, a2, a3) #define Jmsg4(jcr, typ, lvl, msg, a1, a2, a3, a4) j_msg(__FILE__, __LINE__, jcr, typ, lvl, msg, a1, a2, a3, a4) #define Jmsg5(jcr, typ, lvl, msg, a1, a2, a3, a4, a5) j_msg(__FILE__, __LINE__, jcr, typ, lvl, msg, a1, a2, a3, a4, a5) #define Jmsg6(jcr, typ, lvl, msg, a1, a2, a3, a4, a5, a6) j_msg(__FILE__, __LINE__, jcr, typ, lvl, msg, a1, a2, a3, a4, a5, a6) #define Jmsg7(jcr, typ, lvl, msg, a1, a2, a3, a4, a5, a6, a7) j_msg(__FILE__, __LINE__, jcr, typ, lvl, msg, a1, a2, a3, a4, a5, a6, a7) #define Jmsg8(jcr, typ, lvl, msg, a1, a2, a3, a4, a5, a6, a7, a8) j_msg(__FILE__, __LINE__, jcr, typ, lvl, msg, a1, a2, a3, a4, a5, a6, a7, a8) /** Queued Job Error Messages that are delivered according to the message resource */ #define Qmsg0(jcr, typ, mtime, msg) q_msg(__FILE__, __LINE__, jcr, typ, mtime, msg) #define Qmsg1(jcr, typ, mtime, msg, a1) q_msg(__FILE__, __LINE__, jcr, typ, mtime, msg, a1) #define Qmsg2(jcr, typ, mtime, msg, a1, a2) q_msg(__FILE__, __LINE__, jcr, typ, mtime, msg, a1, a2) #define Qmsg3(jcr, typ, mtime, msg, a1, a2, a3) q_msg(__FILE__, __LINE__, jcr, typ, mtime, msg, a1, a2, a3) #define Qmsg4(jcr, typ, mtime, msg, a1, a2, a3, a4) q_msg(__FILE__, __LINE__, jcr, typ, mtime, msg, a1, a2, a3, a4) #define Qmsg5(jcr, typ, mtime, msg, a1, a2, a3, a4, a5) q_msg(__FILE__, __LINE__, jcr, typ, mtime, msg, a1, a2, a3, a4, a5) #define Qmsg6(jcr, typ, mtime, msg, a1, a2, a3, a4, a5, a6) q_msg(__FILE__, __LINE__, jcr, typ, mtime, msg, a1, a2, a3, a4, a5, a6) /** Memory Messages that are edited into a Pool Memory 
buffer */ #define Mmsg0(buf, msg) m_msg(__FILE__, __LINE__, buf, msg) #define Mmsg1(buf, msg, a1) m_msg(__FILE__, __LINE__, buf, msg, a1) #define Mmsg2(buf, msg, a1, a2) m_msg(__FILE__, __LINE__, buf, msg, a1, a2) #define Mmsg3(buf, msg, a1, a2, a3) m_msg(__FILE__, __LINE__, buf, msg, a1, a2, a3) #define Mmsg4(buf, msg, a1, a2, a3, a4) m_msg(__FILE__, __LINE__, buf, msg, a1, a2, a3, a4) #define Mmsg5(buf, msg, a1, a2, a3, a4, a5) m_msg(__FILE__, __LINE__, buf, msg, a1, a2, a3, a4, a5) #define Mmsg6(buf, msg, a1, a2, a3, a4, a5, a6) m_msg(__FILE__, __LINE__, buf, msg, a1, a2, a3, a4, a5, a6) #define Mmsg7(buf, msg, a1, a2, a3, a4, a5, a6, a7) m_msg(__FILE__, __LINE__, buf, msg, a1, a2, a3, a4, a5, a6) #define Mmsg8(buf,msg,a1,a2,a3,a4,a5,a6,a7,a8) m_msg(__FILE__,__LINE__,buf,msg,a1,a2,a3,a4,a5,a6) #define Mmsg11(buf,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11) m_msg(__FILE__,__LINE__,buf,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11) #define Mmsg15(buf,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14,a15) m_msg(__FILE__,__LINE__,buf,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14,a15) class POOL_MEM; /* Edit message into Pool Memory buffer -- no __FILE__ and __LINE__ */ int Mmsg(POOLMEM **msgbuf, const char *fmt,...); int Mmsg(POOLMEM *&msgbuf, const char *fmt,...); int Mmsg(POOL_MEM &msgbuf, const char *fmt,...); #define MmsgD0(level, msgbuf, fmt) \ { Mmsg(msgbuf, fmt); Dmsg1(level, "%s", msgbuf); } #define MmsgD1(level, msgbuf, fmt, a1) \ { Mmsg(msgbuf, fmt, a1); Dmsg1(level, "%s", msgbuf); } #define MmsgD2(level, msgbuf, fmt, a1, a2) \ { Mmsg(msgbuf, fmt, a1, a2); Dmsg1(level, "%s", msgbuf); } #define MmsgD3(level, msgbuf, fmt, a1, a2, a3) \ { Mmsg(msgbuf, fmt, a1, a2, a3); Dmsg1(level, "%s", msgbuf); } #define MmsgD4(level, msgbuf, fmt, a1, a2, a3, a4) \ { Mmsg(msgbuf, fmt, a1, a2, a3, a4); Dmsg1(level, "%s", msgbuf); } #define MmsgD5(level, msgbuf, fmt, a1, a2, a3, a4, a5) \ { Mmsg(msgbuf, fmt, a1, a2, a3, a4, a5); Dmsg1(level, "%s", msgbuf); } #define MmsgD6(level, msgbuf, fmt, a1, a2, a3, a4, a5, a6) \ { Mmsg(msgbuf, fmt, a1, a2, a3, a4, a5, a6); Dmsg1(level, "%s", msgbuf); } #define MmsgD7(level, msgbuf, fmt, a1, a2, a3, a4, a5, a6, a7) \ { Mmsg(msgbuf, fmt, a1, a2, a3, a4, a5, a6, a7); Dmsg1(level, "%s", msgbuf); } #define MmsgD8(level, msgbuf, fmt, a1, a2, a3, a4, a5, a6, a7, a8) \ { Mmsg(msgbuf, fmt, a1, a2, a3, a4, a5, a6, a7, a8); Dmsg1(level, "%s", msgbuf); } #define MmsgT0(level, msgbuf, fmt) \ { Mmsg(msgbuf, fmt); Tmsg1(level, "%s", msgbuf); } #define MmsgT1(level, msgbuf, fmt, a1) \ { Mmsg(msgbuf, fmt, a1); Tmsg1(level, "%s", msgbuf); } #define MmsgT2(level, msgbuf, fmt, a1, a2) \ { Mmsg(msgbuf, fmt, a1, a2); Tmsg1(level, "%s", msgbuf); } #define MmsgT3(level, msgbuf, fmt, a1, a2, a3) \ { Mmsg(msgbuf, fmt, a1, a2, a3); Tmsg1(level, "%s", msgbuf); } #define MmsgT4(level, msgbuf, fmt, a1, a2, a3, a4) \ { Mmsg(msgbuf, fmt, a1, a2, a3, a4); Tmsg1(level, "%s", msgbuf); } #define MmsgT5(level, msgbuf, fmt, a1, a2, a3, a4, a5) \ { Mmsg(msgbuf, fmt, a1, a2, a3, a4, a5); Tmsg1(level, "%s", msgbuf); } #define MmsgT6(level, msgbuf, fmt, a1, a2, a3, a4, a5, a6) \ { Mmsg(msgbuf, fmt, a1, a2, a3, a4, a5, a6); Tmsg1(level, "%s", msgbuf); } #define MmsgT7(level, msgbuf, fmt, a1, a2, a3, a4, a5, a6, a7) \ { Mmsg(msgbuf, fmt, a1, a2, a3, a4, a5, a6, a7); Tmsg1(level, "%s", msgbuf); } #define MmsgT8(level, msgbuf, fmt, a1, a2, a3, a4, a5, a6, a7, a8) \ { Mmsg(msgbuf, fmt, a1, a2, a3, a4, a5, a6, a7, a8); Tmsg1(level, "%s", msgbuf); } class JCR; void d_msg(const char *file, int line, int64_t level, 
const char *fmt,...); void p_msg(const char *file, int line, int level, const char *fmt,...); void e_msg(const char *file, int line, int type, int level, const char *fmt,...); void j_msg(const char *file, int line, JCR *jcr, int type, utime_t mtime, const char *fmt,...); void q_msg(const char *file, int line, JCR *jcr, int type, utime_t mtime, const char *fmt,...); int m_msg(const char *file, int line, POOLMEM **msgbuf, const char *fmt,...); int m_msg(const char *file, int line, POOLMEM *&pool_buf, const char *fmt, ...); void t_msg(const char *file, int line, int64_t level, const char *fmt,...); /* Helper macro e.g. for comparison of file patch with allowed directory * On Linux/Unix OSes, paths are case sensitive while on Windows it is not the case */ #ifdef HAVE_WIN32 #define b_path_match bstrcasestr #else #define b_path_match strstr #endif // HAVE_WIN32 /* Use bstrncpy instead of strncpy because it ensures last char is a 0 */ #define strncpy bad_call_on_strncpy_use_bstrncpy /** Use our strdup with smartalloc */ #ifdef HAVE_WXCONSOLE /* Groan, WxWidgets has its own way of doing NLS so cleanup */ #ifndef ENABLE_NLS #undef _ #undef setlocale #undef textdomain #undef bindtextdomain #endif #endif /** Use our fgets which handles interrupts */ #undef fgets #define fgets(x,y,z) bfgets((x), (y), (z)) /** Use our sscanf, which is safer and works with known sizes */ #define sscanf bsscanf /* Use our fopen, which uses the CLOEXEC flag */ #define fopen bfopen #ifdef DEBUG #define bstrdup(str) strcpy((char *)b_malloc(__FILE__,__LINE__,strlen((str))+1),(str)) #else #define bstrdup(str) strcpy((char *)bmalloc(strlen((str))+1),(str)) #endif #ifdef DEBUG #define bmalloc(size) b_malloc(__FILE__, __LINE__, (size)) #endif /** Macro to simplify free/reset pointers */ #define bfree_and_null(a) do{if(a){void *b__=(void*)a; (a)=NULL; bfree(b__);}} while(0) /* If changed, please run the tests in tools/test-cpp.c */ #if __cplusplus >= 201103L # define bdelete_and_null(a) do{if(a){auto b__ = a; (a)=NULL; delete b__;}} while(0) #else # ifdef HAVE_TYPEOF # define bdelete_and_null(a) do{if(a){typeof(a) b__ = a; (a)=NULL; delete b__;}} while(0) # else # define bdelete_and_null(a) do{if(a){delete a; (a)=NULL;}} while(0) # endif #endif /** * Replace codes needed in both file routines and non-file routines * Job replace codes -- in "replace" */ #define REPLACE_ALWAYS 'a' #define REPLACE_IFNEWER 'w' #define REPLACE_NEVER 'n' #define REPLACE_IFOLDER 'o' /** This probably should be done on a machine by machine basis, but it works */ /** This is critical for the smartalloc routines to properly align memory */ #define ALIGN_SIZE (sizeof(double)) #define BALIGN(x) (((x) + ALIGN_SIZE - 1) & ~(ALIGN_SIZE -1)) /* ============================================================= * OS Dependent defines * ============================================================= */ #if defined (__digital__) && defined (__unix__) /* Tru64 - has 64 bit fseeko and ftello */ #define fseeko fseek #define ftello ftell #endif /* digital stuff */ #ifndef HAVE_FSEEKO /* This OS does not handle 64 bit fseeks and ftells */ #define fseeko fseek #define ftello ftell #endif #ifdef HAVE_WIN32 /* * Windows */ #define PathSeparator '\\' #define PathSeparatorUp "..\\" #define PathSeparatorCur ".\\" inline bool IsPathSeparator(int ch) { return ch == '/' || ch == '\\'; } inline bool IsWPathSeparator(wchar_t ch) { return ch == L'/' || ch == L'\\'; } inline char *first_path_separator(char *path) { return strpbrk(path, "/\\"); } inline const char 
*first_path_separator(const char *path) { return strpbrk(path, "/\\"); } extern void pause_msg(const char *file, const char *func, int line, const char *msg); #define pause(msg) if (debug_level) pause_msg(__FILE__, __func__, __LINE__, (msg)) #else /* Unix/Linux */ #define PathSeparator '/' #define PathSeparatorUp "../" #define PathSeparatorCur "./" /* Define Winsock functions if we aren't on Windows */ #define WSA_Init() 0 /* 0 = success */ #define WSACleanup() 0 /* 0 = success */ inline bool IsPathSeparator(int ch) { return ch == '/'; } inline char *first_path_separator(char *path) { return strchr(path, '/'); } inline const char *first_path_separator(const char *path) { return strchr(path, '/'); } #define pause(msg) #endif /* HAVE_WIN32 */ #ifdef HAVE_DARWIN_OS /* Apparently someone forgot to wrap getdomainname as a C function */ #ifdef __cplusplus extern "C" { #endif int getdomainname(char *name, int namelen); #ifdef __cplusplus } #endif #endif /* HAVE_DARWIN_OS */ /* **** Unix Systems **** */ #ifdef HAVE_SUN_OS /* * On Solaris 2.5/2.6/7 and 8, threads are not timesliced by default, * so we need to explictly increase the conncurrency level. */ #ifdef USE_THR_SETCONCURRENCY #include #define set_thread_concurrency(x) thr_setconcurrency(x) extern int thr_setconcurrency(int); #define SunOS 1 #else #define set_thread_concurrency(x) #define thr_setconcurrency(x) #endif #else #define set_thread_concurrency(x) #endif /* HAVE_SUN_OS */ #ifdef HAVE_OSF1_OS #ifdef __cplusplus extern "C" { #endif int fchdir(int filedes); long gethostid(void); int getdomainname(char *name, int namelen); #ifdef __cplusplus } #endif #endif /* HAVE_OSF1_OS */ #ifdef HAVE_HPUX_OS # undef h_errno extern int h_errno; /** the {get,set}domainname() functions exist in HPUX's libc. * the configure script detects that correctly. * the problem is no system headers declares the prototypes for these functions * this is done below */ #ifdef __cplusplus extern "C" { #endif int getdomainname(char *name, int namlen); int setdomainname(char *name, int namlen); #ifdef __cplusplus } #endif #endif /* HAVE_HPUX_OS */ /** Disabled because it breaks internationalisation... #undef HAVE_SETLOCALE #ifdef HAVE_SETLOCALE #include #else #define setlocale(x, y) ("ANSI_X3.4-1968") #endif #ifdef HAVE_NL_LANGINFO #include #else #define nl_langinfo(x) ("ANSI_X3.4-1968") #endif */ /** Determine endianes */ static inline bool bigendian() { return htonl(1) == 1L; } #ifndef __GNUC__ #define __PRETTY_FUNCTION__ __func__ #endif #ifdef HAVE_SUN_OS #undef ENTER_LEAVE #endif #ifdef ENTER_LEAVE #define Enter(lvl) Dmsg1(lvl, "Enter: %s\n", __PRETTY_FUNCTION__) #define Leave(lvl) Dmsg1(lvl, "Leave: %s\n", __PRETTY_FUNCTION__) #else #define Enter(lvl) #define Leave(lvl) #endif #ifdef __GNUC__x # define CHECK_FORMAT(fun, f, a) __attribute__ ((format (fun, f, a))) #else # define CHECK_FORMAT(fun, f, a) #endif /* error_t is a type that can be used for the return value of functions to tell * the developer how to handle the result * value == 0 : means no error * value > 0 : are for expected or controlled conditions, like to notify that * the end of file is reached or that the last element has been processed * value < 0 : are for exceptional or unexpected errors like memory allocation * failures or failing open("/dev/null"), .... 
*/ typedef int error_t; #endif /* _BACONFIG_H */ bacula-15.0.3/src/console/0000755000175000017500000000000014771010173015113 5ustar bsbuildbsbuildbacula-15.0.3/src/console/console_conf.h0000644000175000017500000000721214771010173017735 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Bacula User Agent specific configuration and defines * * Kern Sibbald, Sep MM * */ /* * Resource codes -- they must be sequential for indexing */ bool parse_cons_config(CONFIG *config, const char *configfile, int exit_code); enum { R_CONSOLE = 1001, R_DIRECTOR, R_FIRST = R_CONSOLE, R_LAST = R_DIRECTOR /* Keep this updated */ }; /* * Some resource attributes */ enum { R_NAME = 1020, R_ADDRESS, R_PASSWORD, R_TYPE, R_BACKUP }; /* Definition of the contents of each Resource */ /* Console "globals" */ struct CONRES { RES hdr; char *rc_file; /* startup file */ char *password; /* UA server password */ bool comm_compression; /* Enable comm line compression */ bool tls_authenticate; /* Authenticate with TLS */ bool tls_enable; /* Enable TLS on all connections */ bool tls_psk_enable; /* Enable TLS-PSK on all connections */ bool tls_require; /* Require TLS on all connections */ char *tls_ca_certfile; /* TLS CA Certificate File */ char *tls_ca_certdir; /* TLS CA Certificate Directory */ char *tls_certfile; /* TLS Client Certificate File */ char *tls_keyfile; /* TLS Client Key File */ char *director; /* bind to director */ utime_t heartbeat_interval; /* Interval to send heartbeats to Dir */ bool require_fips; /* Check for FIPS module */ TLS_CONTEXT *tls_ctx; /* Shared TLS Context */ TLS_CONTEXT *psk_ctx; /* Shared TLS-PSK Context */ }; /* Director */ struct DIRRES { RES hdr; uint32_t DIRport; /* UA server port */ char *address; /* UA server address */ char *password; /* UA server password */ bool tls_authenticate; /* Authenticate with TLS */ bool tls_enable; /* Enable TLS */ bool tls_psk_enable; /* Enable TLS-PSK */ bool tls_require; /* Require TLS */ bool require_fips; /* Check for FIPS module */ char *tls_ca_certfile; /* TLS CA Certificate File */ char *tls_ca_certdir; /* TLS CA Certificate Directory */ char *tls_certfile; /* TLS Client Certificate File */ char *tls_keyfile; /* TLS Client Key File */ utime_t heartbeat_interval; /* Interval to send heartbeats to Dir */ char *hist_file; /* command history file */ int32_t hist_file_size; /* command history file size */ TLS_CONTEXT *tls_ctx; /* Shared TLS Context */ TLS_CONTEXT *psk_ctx; /* Shared TLS-PSK Context */ }; /* Define the Union of all the above * resource structure definitions. 
*/ union URES { DIRRES res_dir; CONRES res_cons; RES hdr; }; /* Get the size of a give resource */ int get_resource_size(int type); bacula-15.0.3/src/console/bbconsjson.c0000644000175000017500000003723314771010173017427 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. The licensor of Bacula is the Free Software Foundation Europe (FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich, Switzerland, email:ftf@fsfeurope.org. */ /* * * Bacula Console .conf to Json program. * * Kern Sibbald, September MMXII * */ #include "bacula.h" #include "lib/breg.h" #include "console_conf.h" #include "jcr.h" /* Imported variables */ #if defined(_MSC_VER) extern "C" { // work around visual compiler mangling variables extern URES res_all; } #else extern URES res_all; #endif extern s_kw msg_types[]; extern RES_TABLE resources[]; /* Exported functions */ void senditf(const char *fmt, ...); void sendit(const char *buf); /* Imported functions */ extern bool parse_cons_config(CONFIG *config, const char *configfile, int exit_code); typedef struct { /* default { { "Director": { "Name": aa, ...} }, { "Job": {..} */ bool do_list; /* [ {}, {}, ..] or { "aa": {}, "bb": {}, ...} */ bool do_one; /* { "Name": "aa", "Description": "test, ... } */ bool do_only_data; /* [ {}, {}, {}, ] */ char *resource_type; char *resource_name; regex_t directive_reg; } display_filter; /* Forward referenced functions */ static void terminate_console(int sig); static int check_resources(); //static void ressendit(void *ua, const char *fmt, ...); //static void dump_resource_types(); //static void dump_directives(); static void dump_json(display_filter *filter); /* Static variables */ static char *configfile = NULL; static FILE *output = stdout; static int numdir; static POOLMEM *args; static CONFIG *config; #define CONFIG_FILE "bconsole.conf" /* default configuration file */ static void usage() { fprintf(stderr, _( PROG_COPYRIGHT "\n%sVersion: " VERSION " (" BDATE ") %s %s %s\n\n" "Usage: bconsjson [options] [config_file]\n" " -r get resource type \n" " -n get resource \n" " -l get only directives matching dirs (use with -r)\n" " -D get only data\n" " -c set configuration file to file\n" " -d set debug level to \n" " -dt print timestamp in debug output\n" " -t test - read configuration and exit\n" " -v verbose\n" " -? 
print this message.\n" "\n"), 2012, BDEMO, HOST_OS, DISTNAME, DISTVER); exit(1); } /********************************************************************* * * Bacula console conf to Json * */ int main(int argc, char *argv[]) { int ch; bool test_config = false; display_filter filter; memset(&filter, 0, sizeof(filter)); int rtn = 0; setlocale(LC_ALL, ""); bindtextdomain("bacula", LOCALEDIR); textdomain("bacula"); if (init_crypto() != 0) { Emsg0(M_ERROR_TERM, 0, _("Cryptography library initialization failed.\n")); } init_stack_dump(); init_signals(terminate_console); lmgr_init_thread(); my_name_is(argc, argv, "bconsole"); init_msg(NULL, NULL); working_directory = "/tmp"; args = get_pool_memory(PM_FNAME); while ((ch = getopt(argc, argv, "n:vDabc:d:jl:r:t?")) != -1) { switch (ch) { case 'D': filter.do_only_data = true; break; case 'a': // list_all = true; break; case 'c': /* configuration file */ if (configfile != NULL) { free(configfile); } configfile = bstrdup(optarg); break; break; case 'd': if (*optarg == 't') { dbg_timestamp = true; } else { debug_level = atoi(optarg); if (debug_level <= 0) { debug_level = 1; } } case 'l': filter.do_list = true; if (regcomp(&filter.directive_reg, optarg, REG_EXTENDED) != 0) { Jmsg((JCR *)NULL, M_ERROR_TERM, 0, _("Please use valid -l argument: %s\n"), optarg); } break; case 'r': filter.resource_type = optarg; break; case 'n': filter.resource_name = optarg; break; case 't': test_config = true; break; case 'v': /* verbose */ verbose++; break; case '?': default: usage(); exit(1); } } argc -= optind; argv += optind; OSDependentInit(); if (argc) { usage(); exit(1); } if (filter.do_list && !filter.resource_type) { usage(); } if (filter.resource_type && filter.resource_name) { filter.do_one = true; } if (configfile == NULL || configfile[0] == 0) { configfile = bstrdup(CONFIG_FILE); } if (test_config && verbose > 0) { char buf[1024]; find_config_file(configfile, buf, sizeof(buf)); printf("config_file=%s\n", buf); } config = New(CONFIG()); config->encode_password(false); parse_cons_config(config, configfile, M_ERROR_TERM); if (!check_resources()) { Emsg1(M_ERROR_TERM, 0, _("Please correct configuration file: %s\n"), configfile); } if (test_config) { terminate_console(0); exit(0); } dump_json(&filter); terminate_console(0); return rtn; } /* Cleanup and then exit */ static void terminate_console(int sig) { static bool already_here = false; if (already_here) { /* avoid recursive temination problems */ exit(1); } already_here = true; stop_watchdog(); delete config; config = NULL; free(res_head); res_head = NULL; free_pool_memory(args); lmgr_cleanup_main(); if (sig != 0) { exit(1); } return; } /* * Dump out all resources in json format. * Note!!!! This routine must be in this file rather * than in src/lib/parser_conf.c otherwise the pointers * will be all messed up. */ static void dump_json(display_filter *filter) { int resinx, item, sz; int directives; bool first_res; bool first_directive; RES_ITEM *items; RES *res; HPKT hpkt; regmatch_t pmatch[32]; init_hpkt(hpkt); /* List resources and directives */ if (filter->do_only_data) { printf("["); /* { "aa": { "Name": "aa",.. }, "bb": { "Name": "bb", ... } * or print a single item */ } else if (filter->do_one || filter->do_list) { printf("{"); } else { /* [ { "Client": { "Name": "aa",.. } }, { "Director": { "Name": "bb", ... 
} } ]*/ printf("["); } first_res = true; /* Loop over all resource types */ for (resinx=0; resources[resinx].name; resinx++) { /* Skip this resource type */ if (filter->resource_type && strcasecmp(filter->resource_type, resources[resinx].name) != 0) { continue; } directives = 0; /* Loop over all resources of this type */ foreach_rblist(res, res_head[resinx]->res_list) { hpkt.res = res; items = resources[resinx].items; if (!items) { break; } sz = get_resource_size(resinx + r_first); if (sz < 0) { Dmsg1(10, "Unknown resource type %d\n", resinx); continue; } /* Copy the resource into res_all */ memcpy(&res_all, res, sz); if (filter->resource_name) { bool skip=true; /* The Name should be at the first place, so this is not a real loop */ for (item=0; items[item].name; item++) { if (strcasecmp(items[item].name, "Name") == 0) { if (strcasecmp(*(items[item].value), filter->resource_name) == 0) { skip = false; } break; } } if (skip) { /* The name doesn't match, so skip it */ continue; } } if (first_res) { printf("\n"); } else { printf(",\n"); } if (filter->do_only_data) { printf(" {"); } else if (filter->do_one) { /* Nothing to print */ /* When sending the list, the form is: * { aa: { Name: aa, Description: aadesc...}, bb: { Name: bb */ } else if (filter->do_list) { /* Search and display Name, should be the first item */ for (item=0; items[item].name; item++) { if (strcmp(items[item].name, "Name") == 0) { printf("%s: {\n", quote_string(hpkt.edbuf, *items[item].value)); break; } } } else { /* Begin new resource */ printf("{\n \"%s\": {", resources[resinx].name); } first_res = false; first_directive = true; directives = 0; for (item=0; items[item].name; item++) { /* Check user argument -l */ if (filter->do_list && regexec(&filter->directive_reg, items[item].name, 32, pmatch, 0) != 0) { continue; } hpkt.ritem = &items[item]; if (bit_is_set(item, res_all.hdr.item_present)) { if (!first_directive) printf(","); /* 1: found, 0: not found, -1 found but empty */ int ret = display_global_item(hpkt); if (ret == -1) { /* Do not print a comma after this empty directive */ first_directive = 0; } else if (ret == 1) { /* Fall-through wanted */ } else { printf("\n \"%s\": null", items[item].name); } directives++; first_directive = false; } if (items[item].flags & ITEM_LAST) { display_last(hpkt); /* If last bit set always call to cleanup */ } } /* { "aa": { "Name": "aa",.. }, "bb": { "Name": "bb", ... } */ if (filter->do_only_data || filter->do_list) { printf("\n }"); /* Finish the Resource with a single } */ } else { if (filter->do_one) { /* don't print anything */ } else if (directives) { printf("\n }\n}"); /* end of resource */ } else { printf("}\n}"); } } } /* End loop over all resources of this type */ } /* End loop all resource types */ if (filter->do_only_data) { printf("\n]\n"); } else if (filter->do_one || filter->do_list) { printf("\n}\n"); } else { printf("\n]\n"); } term_hpkt(hpkt); } /* * Make a quick check to see that we have all the * resources needed. 
*/ static int check_resources() { bool OK = true; DIRRES *director; bool tls_needed; LockRes(); numdir = 0; foreach_res(director, R_DIRECTOR) { numdir++; /* tls_require implies tls_enable */ if (director->tls_require) { if (have_tls) { if (director->tls_ca_certfile || director->tls_ca_certdir) { director->tls_enable = true; } } else { Jmsg(NULL, M_FATAL, 0, _("TLS required but not configured in Bacula.\n")); OK = false; continue; } } tls_needed = director->tls_enable || director->tls_authenticate; if ((!director->tls_ca_certfile && !director->tls_ca_certdir) && tls_needed) { Emsg2(M_FATAL, 0, _("Neither \"TLS CA Certificate File\"" " or \"TLS CA Certificate Dir\" are defined for Director \"%s\" in %s." " At least one CA certificate store is required.\n"), director->hdr.name, configfile); OK = false; } } if (numdir == 0) { Emsg1(M_FATAL, 0, _("No Director resource defined in %s\n" "Without that I don't how to speak to the Director :-(\n"), configfile); OK = false; } CONRES *cons; /* Loop over Consoles */ foreach_res(cons, R_CONSOLE) { /* tls_require implies tls_enable */ if (cons->tls_require) { if (have_tls) { cons->tls_enable = true; } else { Jmsg(NULL, M_FATAL, 0, _("TLS required but not configured in Bacula.\n")); OK = false; continue; } } tls_needed = cons->tls_enable || cons->tls_authenticate; if ((!cons->tls_ca_certfile && !cons->tls_ca_certdir) && tls_needed) { Emsg2(M_FATAL, 0, _("Neither \"TLS CA Certificate File\"" " or \"TLS CA Certificate Dir\" are defined for Console \"%s\" in %s.\n"), cons->hdr.name, configfile); OK = false; } } UnlockRes(); return OK; } #ifdef needed static void ressendit(void *sock, const char *fmt, ...) { char buf[3000]; va_list arg_ptr; va_start(arg_ptr, fmt); bvsnprintf(buf, sizeof(buf), (char *)fmt, arg_ptr); va_end(arg_ptr); sendit(buf); } static void dump_resource_types() { int i; bool first; /* List resources and their code */ printf("[\n"); first = true; for (i=0; resources[i].name; i++) { if (!first) { printf(",\n"); } printf(" \"%s\": %d", resources[i].name, resources[i].rcode); first = false; } printf("\n]\n"); } static void dump_directives() { int i, j; bool first_res; bool first_directive; RES_ITEM *items; /* List resources and directives */ printf("[\n"); first_res = true; for (i=0; resources[i].name; i++) { if (!first_res) { printf(",\n"); } printf("{\n \"%s\": {\n", resources[i].name); first_res = false; first_directive = true; items = resources[i].items; for (j=0; items[j].name; j++) { if (!first_directive) { printf(",\n"); } printf(" \"%s\": null", items[j].name); first_directive = false; } printf("\n }"); /* end of resource */ } printf("\n]\n"); } /* * Send a line to the output file and or the terminal */ void senditf(const char *fmt,...) { char buf[3000]; va_list arg_ptr; va_start(arg_ptr, fmt); bvsnprintf(buf, sizeof(buf), (char *)fmt, arg_ptr); va_end(arg_ptr); sendit(buf); } static bool teeout = false; /* output to output and stdout */ void sendit(const char *buf) { #ifdef CONIO_FIX char obuf[3000]; if (output == stdout || teeout) { const char *p, *q; /* * Here, we convert every \n into \r\n because the * terminal is in raw mode when we are using * conio. 
*/ for (p=q=buf; (p=strchr(q, '\n')); ) { int len = p - q; if (len > 0) { memcpy(obuf, q, len); } memcpy(obuf+len, "\r\n", 3); q = ++p; /* point after \n */ fputs(obuf, output); } if (*q) { fputs(q, output); } fflush(output); } if (output != stdout) { fputs(buf, output); } #else fputs(buf, output); fflush(output); if (teeout) { fputs(buf, stdout); fflush(stdout); } #endif } #endif bacula-15.0.3/src/console/console_conf.c0000644000175000017500000002603514771010173017734 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Main configuration file parser for Bacula User Agent * some parts may be split into separate files such as * the schedule configuration (sch_config.c). * * Note, the configuration file parser consists of three parts * * 1. The generic lexical scanner in lib/lex.c and lib/lex.h * * 2. The generic config scanner in lib/parse_config.c and * lib/parse_config.h. * These files contain the parser code, some utility * routines, and the common store routines (name, int, * string). * * 3. The daemon specific file, which contains the Resource * definitions as well as any specific store routines * for the resource records. * * Kern Sibbald, January MM, September MM */ #include "bacula.h" #include "console_conf.h" /* Define the first and last resource ID record * types. Note, these should be unique for each * daemon though not a requirement. */ int32_t r_first = R_FIRST; int32_t r_last = R_LAST; RES_HEAD **res_head; /* Forward referenced subroutines */ /* We build the current resource here as we are * scanning the resource configuration definition, * then move it to allocated memory when the resource * scan is complete. */ #if defined(_MSC_VER) extern "C" { // work around visual compiler mangling variables URES res_all; } #else URES res_all; #endif int32_t res_all_size = sizeof(res_all); /* Definition of records permitted within each * resource with the routine to process the record * information. 
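 *
 * As a hedged illustration (directive names are taken from the tables
 * below, the values are invented), these tables accept bconsole.conf
 * text of the form:
 *
 *    Console {
 *      Name = "restricted-cons"
 *      Password = "console-secret"
 *      Director = "mydir-dir"
 *    }
 *    Director {
 *      Name = "mydir-dir"
 *      DirPort = 9101
 *      Address = "dir.example.com"
 *      Password = "dir-secret"
 *    }
 *
 * Each RES_ITEM row names one directive, the store_xxx routine that
 * parses its value, and the res_all field that receives it.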
*/ /* Console "globals" */ static RES_ITEM cons_items[] = { {"Name", store_name, ITEM(res_cons.hdr.name), 0, ITEM_REQUIRED, 0}, {"Description", store_str, ITEM(res_cons.hdr.desc), 0, 0, 0}, {"RCFile", store_dir, ITEM(res_cons.rc_file), 0, 0, 0}, {"Password", store_password, ITEM(res_cons.password), 0, ITEM_REQUIRED, 0}, #if BEEF {"FipsRequire", store_bool, ITEM(res_cons.require_fips), 0, 0, 0}, #endif {"TlsAuthenticate",store_bool, ITEM(res_cons.tls_authenticate), 0, 0, 0}, {"TlsEnable", store_bool, ITEM(res_cons.tls_enable), 0, 0, 0}, {"TlsPskEnable", store_bool, ITEM(res_cons.tls_psk_enable), 0, ITEM_DEFAULT, tls_psk_default}, {"TlsRequire", store_bool, ITEM(res_cons.tls_require), 0, 0, 0}, {"TlsCaCertificateFile", store_dir, ITEM(res_cons.tls_ca_certfile), 0, 0, 0}, {"TlsCaCertificateDir", store_dir, ITEM(res_cons.tls_ca_certdir), 0, 0, 0}, {"TlsCertificate", store_dir, ITEM(res_cons.tls_certfile), 0, 0, 0}, {"TlsKey", store_dir, ITEM(res_cons.tls_keyfile), 0, 0, 0}, {"Director", store_str, ITEM(res_cons.director), 0, 0, 0}, {"HeartbeatInterval", store_time, ITEM(res_cons.heartbeat_interval), 0, ITEM_DEFAULT, 5 * 60}, {"CommCompression", store_bool, ITEM(res_cons.comm_compression), 0, ITEM_DEFAULT, true}, {NULL, NULL, {0}, 0, 0, 0} }; /* Director's that we can contact */ static RES_ITEM dir_items[] = { {"Name", store_name, ITEM(res_dir.hdr.name), 0, ITEM_REQUIRED, 0}, {"Description", store_str, ITEM(res_dir.hdr.desc), 0, 0, 0}, {"DirPort", store_pint32, ITEM(res_dir.DIRport), 0, ITEM_DEFAULT, 9101}, /* ***BEE*** In BWeb/BConfig, the store_str is replaced by store_addresses_address to check more carefully user inputs */ {"Address", store_str, ITEM(res_dir.address), 0, ITEM_REQUIRED, 0}, {"Password", store_password, ITEM(res_dir.password), 0, ITEM_REQUIRED, 0}, {"FipsRequire", store_bool, ITEM(res_dir.require_fips), 0, 0, 0}, {"TlsAuthenticate",store_bool, ITEM(res_dir.tls_authenticate), 0, 0, 0}, {"TlsEnable", store_bool, ITEM(res_dir.tls_enable), 0, 0, 0}, {"TlsPskEnable", store_bool, ITEM(res_dir.tls_psk_enable), 0, ITEM_DEFAULT, tls_psk_default}, {"TlsRequire", store_bool, ITEM(res_dir.tls_require), 0, 0, 0}, {"TlsCaCertificateFile", store_dir, ITEM(res_dir.tls_ca_certfile), 0, 0, 0}, {"TlsCaCertificateDir", store_dir, ITEM(res_dir.tls_ca_certdir), 0, 0, 0}, {"TlsCertificate", store_dir, ITEM(res_dir.tls_certfile), 0, 0, 0}, {"TlsKey", store_dir, ITEM(res_dir.tls_keyfile), 0, 0, 0}, {"HeartbeatInterval", store_time, ITEM(res_dir.heartbeat_interval), 0, ITEM_DEFAULT, 5 * 60}, {"HistoryFile", store_dir, ITEM(res_dir.hist_file), 0, 0, 0}, {"HistoryFileSize",store_int32, ITEM(res_dir.hist_file_size), 0, ITEM_DEFAULT, 100}, {NULL, NULL, {0}, 0, 0, 0} }; /* * This is the master resource definition. * It must have one item for each of the resources. 
* * name item rcode */ RES_TABLE resources[] = { {"Console", cons_items, R_CONSOLE}, {"Director", dir_items, R_DIRECTOR}, {NULL, NULL, 0} }; /* Dump contents of resource */ void dump_resource(int type, RES *rres, void sendit(void *sock, const char *fmt, ...), void *sock) { URES *res = (URES *)rres; bool recurse = true; if (res == NULL) { printf(_("No record for %d %s\n"), type, res_to_str(type)); return; } if (type < 0) { /* no recursion */ type = - type; recurse = false; } switch (type) { case R_CONSOLE: printf(_("Console: name=%s rcfile=%s\n"), rres->name, res->res_cons.rc_file); break; case R_DIRECTOR: printf(_("Director: name=%s address=%s DIRport=%d histfile=%s\n"), rres->name, res->res_dir.address, res->res_dir.DIRport, NPRTB(res->res_dir.hist_file)); break; default: printf(_("Unknown resource type %d\n"), type); } rres = GetNextRes(type, rres); if (recurse && rres) { dump_resource(type, rres, sendit, sock); } } /* * Free memory of resource. * NB, we don't need to worry about freeing any references * to other resources as they will be freed when that * resource chain is traversed. Mainly we worry about freeing * allocated strings (names). */ void free_resource(RES *rres, int type) { URES *res = (URES *)rres; if (res == NULL) { return; } /* common stuff -- free the resource name */ if (res->res_dir.hdr.name) { free(res->res_dir.hdr.name); } if (res->res_dir.hdr.desc) { free(res->res_dir.hdr.desc); } switch (type) { case R_CONSOLE: if (res->res_cons.rc_file) { free(res->res_cons.rc_file); } if (res->res_cons.tls_ctx) { free_tls_context(res->res_cons.tls_ctx); } if (res->res_cons.psk_ctx) { free_psk_context(res->res_cons.psk_ctx); } if (res->res_cons.tls_ca_certfile) { free(res->res_cons.tls_ca_certfile); } if (res->res_cons.tls_ca_certdir) { free(res->res_cons.tls_ca_certdir); } if (res->res_cons.tls_certfile) { free(res->res_cons.tls_certfile); } if (res->res_cons.tls_keyfile) { free(res->res_cons.tls_keyfile); } if (res->res_cons.director) { free(res->res_cons.director); } if (res->res_cons.password) { free(res->res_cons.password); } break; case R_DIRECTOR: if (res->res_dir.hist_file) { free(res->res_dir.hist_file); } if (res->res_dir.address) { free(res->res_dir.address); } if (res->res_dir.tls_ctx) { free_tls_context(res->res_dir.tls_ctx); } if (res->res_dir.psk_ctx) { free_psk_context(res->res_dir.psk_ctx); } if (res->res_dir.tls_ca_certfile) { free(res->res_dir.tls_ca_certfile); } if (res->res_dir.tls_ca_certdir) { free(res->res_dir.tls_ca_certdir); } if (res->res_dir.tls_certfile) { free(res->res_dir.tls_certfile); } if (res->res_dir.tls_keyfile) { free(res->res_dir.tls_keyfile); } if (res->res_dir.password) { free(res->res_dir.password); } break; default: printf(_("Unknown resource type %d\n"), type); } /* Common stuff again -- free the resource, recurse to next one */ free(res); } /* Get the size of the resource object */ int get_resource_size(int type) { int size = -1; switch (type) { case R_CONSOLE: size = sizeof(CONRES); break; case R_DIRECTOR: size = sizeof(DIRRES); break; default: break; } return size; } /* Save the new resource by chaining it into the head list for * the resource. If this is pass 2, we update any resource * pointers (currently only in the Job resource). 
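 *
 * (In this console build, pass 1 stores the scalar directive values and
 * the resource name; the pass-2 switch below has nothing to resolve,
 * since neither the Console nor the Director resource defined here
 * references another resource.)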
 */
bool save_resource(CONFIG *config, int type, RES_ITEM *items, int pass)
{
   int rindex = type - r_first;
   int i, size;
   int error = 0;

   /*
    * Ensure that all required items are present
    */
   for (i=0; items[i].name; i++) {
      if (items[i].flags & ITEM_REQUIRED) {
         if (!bit_is_set(i, res_all.res_dir.hdr.item_present)) {
            Mmsg(config->m_errmsg, _("\"%s\" directive is required in \"%s\" resource, but not found.\n"),
                 items[i].name, resources[rindex].name);
            return false;
         }
      }
   }

   /* During pass 2, we looked up pointers to all the resources
    * referenced in the current resource, now we
    * must copy their address from the static record to the allocated
    * record.
    */
   if (pass == 2) {
      switch (type) {
      /* Resources not containing a resource */
      case R_CONSOLE:
      case R_DIRECTOR:
         break;
      default:
         Emsg1(M_ERROR, 0, _("Unknown resource type %d\n"), type);
         error = 1;
         break;
      }
      /* Note, the resource name was already saved during pass 1,
       * so here, we can just release it.
       */
      if (res_all.res_dir.hdr.name) {
         free(res_all.res_dir.hdr.name);
         res_all.res_dir.hdr.name = NULL;
      }
      if (res_all.res_dir.hdr.desc) {
         free(res_all.res_dir.hdr.desc);
         res_all.res_dir.hdr.desc = NULL;
      }
      return true;
   }

   /* The following code is only executed during pass 1 */
   size = get_resource_size(type);
   if (size < 0) {
      printf(_("Unknown resource type %d\n"), type);
      error = 1;
   }
   /* Common */
   if (!error) {
      if (!config->insert_res(rindex, size)) {
         return false;
      }
   }
   return true;
}

bool parse_cons_config(CONFIG *config, const char *configfile, int exit_code)
{
   config->init(configfile, NULL, exit_code, (void *)&res_all, res_all_size,
                r_first, r_last, resources, &res_head);
   return config->parse_config();
}
bacula-15.0.3/src/console/conio.c0000755000175000017500000007647414771010173016403 0ustar bsbuildbsbuild/*
   Bacula(R) - The Network Backup Solution

   Copyright (C) 2000-2025 Kern Sibbald

   The original author of Bacula is Kern Sibbald, with contributions
   from many others, a complete list can be found in the file AUTHORS.

   You may use this file and others of this release according to the
   license defined in the LICENSE file, which includes the Affero General
   Public License, v3.0 ("AGPLv3") and some additional permissions and
   terms pursuant to its AGPLv3 Section 7.

   This notice must be preserved when any source code is
   conveyed and/or propagated.

   Bacula(R) is a registered trademark of Kern Sibbald.
*/
/*
   Generalized console input/output handler
   A maintainable replacement for readline()

   Updated for Bacula, Kern Sibbald, December MMIII

   This code is in part derived from code that I wrote in
   1981, so some of it is a bit old and could use a cleanup.
*/
/*
 * UTF-8
 *  If the top bit of a UTF-8 string is 0 (8 bits), then it
 *    is a normal ASCII character.
 *  If the top two bits are 11 (i.e. (c & 0xC0) == 0xC0) then
 *    it is the start of a series of chars (up to 5)
 *  Each subsequent character starts with 10 (i.e.
(c & 0xC0) == 0x80) */ #ifdef TEST_PROGRAM #include #include #include #include #include #include #define HAVE_CONIO 1 #else /* We are in Bacula */ #include "bacula.h" #endif #ifdef HAVE_CONIO #include #include #ifdef HAVE_SUN_OS #ifndef _TERM_H extern "C" int tgetent(void *, const char *); extern "C" int tgetnum(const char *); extern "C" char *tgetstr (const char*, char**); # Note: the following on older (Solaris 10) systems # may need to be moved to after the #endif extern "C" char *tgoto (const char *, int, int); #endif #elif defined(__sgi) extern "C" int tgetent(char *, char *); extern "C" int tgetnum(char id[2]); extern "C" char *tgetstr(char id[2], char **); extern "C" char *tgoto(char *, int, int); #elif defined (__digital__) && defined (__unix__) extern "C" int tgetent(void *, const char *); extern "C" int tgetnum(const char *); extern "C" char *tgetstr (const char*, char**); extern "C" char *tgoto (const char *, int, int); #endif #include "func.h" /* From termios library */ #if defined(HAVE_HPUX_OS) || defined(HAVE_AIX_OS) static char *BC; static char *UP; #else extern char *BC; extern char *UP; #endif static void add_smap(char *str, int func); /* Global variables */ static const char *t_up = "\n"; /* scroll up character */ static const char *t_honk = "\007"; /* sound beep */ static char *t_il; /* insert line */ static char *t_dl; /* delete line */ static char *t_cs; /* clear screen */ static char *t_cl; /* clear line */ static int t_width = 79; /* terminal width */ static int t_height = 24; /* terminal height */ static int linsdel_ok = 0; /* set if term has line insert & delete fncs */ static char *t_cm; /* cursor positioning */ static char *t_ti; /* init sequence */ static char *t_te; /* end sequence */ static char *t_do; /* down one line */ static char *t_sf; /* scroll screen one line up */ /* Keypad and Function Keys */ static char *kl; /* left key */ static char *kr; /* right */ static char *ku; /* up */ static char *kd; /* down */ static char *kh; /* home */ static char *kb; /* backspace */ static char *kD; /* delete key */ static char *kI; /* insert */ static char *kN; /* next page */ static char *kP; /* previous page */ static char *kH; /* home */ static char *kE; /* end */ #ifndef EOS #define EOS '\0' /* end of string terminator */ #endif #define TRUE 1 #define FALSE 0 /* * Stab entry. Input chars (str), the length, and the desired * func code. */ typedef struct s_stab { struct s_stab *next; char *str; int len; int func; } stab_t; #define MAX_STAB 30 static stab_t **stab = NULL; /* array of stabs by length */ static int num_stab; /* size of stab array */ static bool old_term_params_set = false; static struct termios old_term_params; /* Maintain lines in a doubly linked circular pool of lines. 
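 *
 * (Rough picture, not to scale -- the pool is a ring of variable-length
 * entries, each one starting with an lstr header:
 *
 *    pool: [hdr|line A...][hdr|line B.][hdr|line C.....][hdr|unused....]
 *
 * prevl/nextl chain the entries in a circle, and putline() reuses
 * existing entries when the pool fills up.)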
Each line is preceded by a header defined by the lstr structure */ struct lstr { /* line pool structure */ struct lstr *prevl; /* link to previous line */ struct lstr *nextl; /* link to next line */ long len; /* length of line+header */ char used; /* set if line valid */ char line; /* line is actually varying length */ }; #ifdef unix #define POOLEN 128000 /* bytes in line pool */ #else #define POOLEN 500 /* bytes in line pool */ #endif char pool[POOLEN]; /* line pool */ #define PHDRL ((int)sizeof(struct lstr)) /* length of line header */ static struct lstr *lptr; /* current line pointer */ static struct lstr *slptr; /* store line pointer */ static int cl, cp; static char *getnext(), *getprev(); static int first = 1; static int mode_insert = 1; static int mode_wspace = 1; /* words separated by spaces */ static short char_map[600]= { 0, F_SOL, /* ^a Line start */ F_PRVWRD, /* ^b Previous word */ F_BREAK, /* ^C break */ F_DELCHR, /* ^D Delete character */ F_EOL, /* ^e End of line */ F_CSRRGT, /* ^f Right */ F_TABBAK, /* ^G Back tab */ F_CSRLFT, /* ^H Left */ F_TAB, /* ^I Tab */ F_CSRDWN, /* ^J Down */ F_DELEOL, /* ^K kill to eol */ F_CLRSCRN,/* ^L clear screen */ F_RETURN, /* ^M Carriage return */ F_RETURN, /* ^N enter line */ F_CONCAT, /* ^O Concatenate lines */ F_CSRUP, /* ^P cursor up */ F_TINS, /* ^Q Insert character mode */ F_PAGUP, /* ^R Page up */ F_CENTER, /* ^S Center text */ F_PAGDWN, /* ^T Page down */ F_DELSOL, /* ^U delete to start of line */ F_DELWRD, /* ^V Delete word */ F_PRVWRD, /* ^W Previous word */ F_NXTMCH, /* ^X Next match */ F_DELEOL, /* ^Y Delete to end of line */ F_BACKGND,/* ^Z Background */ 0x1B, /* ^[=ESC escape */ F_TENTRY, /* ^\ Entry mode */ F_PASTECB,/* ^]=paste clipboard */ F_HOME, /* ^^ Home */ F_ERSLIN, /* ^_ Erase line */ ' ','!','"','#','$','%','&','\047', '(',')','*','+','\054','-','.','/', '0','1','2','3','4','5','6','7', '8','9',':',';','<','=','>','?', '@','A','B','C','D','E','F','G', 'H','I','J','K','L','M','N','O', 'P','Q','R','S','T','U','V','W', 'X','Y','Z','[','\\',']','^','_', '\140','a','b','c','d','e','f','g', 'h','i','j','k','l','m','n','o', 'p','q','r','s','t','u','v','w', 'x','y','z','{','|','}','\176',F_ERSCHR /* erase character */ }; /* Local variables */ #define CR '\r' /* carriage return */ /* Function Prototypes */ static unsigned int input_char(void); static unsigned int t_gnc(void); static void insert_space(char *curline, int line_len); static void insert_hole(char *curline, int line_len); static void forward(char *str, int str_len); static void backup(char *curline); static void delchr(int cnt, char *curline, int line_len); static int iswordc(char c); static int next_word(char *ldb_buf); static int prev_word(char *ldb_buf); static void prtcur(char *str); static void poolinit(void); static char * getnext(void); static char * getprev(void); static void putline(char *newl, int newlen); static void t_honk_horn(void); static void t_insert_line(void); static void t_delete_line(void); static void t_clrline(int pos, int width); void t_sendl(const char *msg, int len); void t_send(const char *msg); void t_char(char c); static void asclrs(); static void ascurs(int y, int x); static void rawmode(FILE *input); static void normode(void); static unsigned t_getch(); static void asclrl(int pos, int width); static void asinsl(); static void asdell(); int input_line(char *string, int length); void con_term(); void trapctlc(); int usrbrk(); void clrbrk(); void con_init(FILE *input) { atexit(con_term); rawmode(input); trapctlc(); } /* * Zed control keys 
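 * (ZED-style key bindings, installed by the @zed_keys console command,
 * which calls con_set_zed_keys() below.)
 */

/*
 * Hedged usage sketch, added for illustration only and guarded out of the
 * build (the macro CONIO_USAGE_SKETCH is invented here and never defined):
 * the typical calling sequence for this module.  con_init() above puts the
 * terminal in raw mode and registers con_term() with atexit(); input_line()
 * edits one line with history recall; con_term() restores the saved modes.
 */
#ifdef CONIO_USAGE_SKETCH
static void conio_usage_sketch(void)
{
   char line[2000];

   con_init(stdin);                      /* raw mode + SIGINT trap */
   while (input_line(line, sizeof(line)) == 0) {
      if (strcmp(line, "quit") == 0) {   /* caller-defined exit word */
         break;
      }
      /* handle one edited line here */
   }
   con_term();                           /* restore terminal modes */
}
#endif /* CONIO_USAGE_SKETCH */

/* Remap char_map[] to the ZED editor's control-key bindings.  After
 * @zed_keys, for example, ^U jumps to the start of the line instead of
 * deleting back to the start of the line.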
*/ void con_set_zed_keys(void) { char_map[1] = F_NXTWRD; /* ^A Next Word */ char_map[2] = F_SPLIT; /* ^B Split line */ char_map[3] = F_EOI; /* ^C Quit */ char_map[4] = F_DELCHR; /* ^D Delete character */ char_map[5] = F_EOF; /* ^E End of file */ char_map[6] = F_INSCHR; /* ^F Insert character */ char_map[7] = F_TABBAK; /* ^G Back tab */ char_map[8] = F_CSRLFT; /* ^H Left */ char_map[9] = F_TAB; /* ^I Tab */ char_map[10] = F_CSRDWN; /* ^J Down */ char_map[11] = F_CSRUP; /* ^K Up */ char_map[12] = F_CSRRGT; /* ^L Right */ char_map[13] = F_RETURN; /* ^M Carriage return */ char_map[14] = F_EOL; /* ^N End of line */ char_map[15] = F_CONCAT; /* ^O Concatenate lines */ char_map[16] = F_MARK; /* ^P Set marker */ char_map[17] = F_TINS; /* ^Q Insert character mode */ char_map[18] = F_PAGUP; /* ^R Page up */ char_map[19] = F_CENTER; /* ^S Center text */ char_map[20] = F_PAGDWN; /* ^T Page down */ char_map[21] = F_SOL; /* ^U Line start */ char_map[22] = F_DELWRD; /* ^V Delete word */ char_map[23] = F_PRVWRD; /* ^W Previous word */ char_map[24] = F_NXTMCH; /* ^X Next match */ char_map[25] = F_DELEOL; /* ^Y Delete to end of line */ char_map[26] = F_DELLIN; /* ^Z Delete line */ /* 27 = ESC */ char_map[28] = F_TENTRY; /* ^\ Entry mode */ char_map[29] = F_PASTECB;/* ^]=paste clipboard */ char_map[30] = F_HOME; /* ^^ Home */ char_map[31] = F_ERSLIN; /* ^_ Erase line */ } void con_term() { normode(); } #ifdef TEST_PROGRAM /* * Guarantee that the string is properly terminated */ char *bstrncpy(char *dest, const char *src, int maxlen) { strncpy(dest, src, maxlen-1); dest[maxlen-1] = 0; return dest; } #endif /* * New style string mapping to function code */ static unsigned do_smap(unsigned c) { char str[MAX_STAB]; int len = 0; stab_t *tstab; int i, found; unsigned cm; len = 1; str[0] = c; str[1] = 0; cm = char_map[c]; if (cm == 0) { return c; } else { c = cm; } for ( ;; ) { found = 0; for (i=len-1; inext) { if (strncmp(str, tstab->str, len) == 0) { if (len == tstab->len) { return tstab->func; } found = 1; break; /* found possibility continue searching */ } } } if (!found) { return len==1?c:0; } /* found partial match, so get next character and retry */ str[len++] = t_gnc(); str[len] = 0; } } #ifdef DEBUG_x static void dump_stab() { int i, j, c; stab_t *tstab; char buf[100]; for (i=0; inext) { for (j=0; jlen; j++) { c = tstab->str[j]; if (c < 0x20 || c > 0x7F) { sprintf(buf, " 0x%x ", c); t_send(buf); } else { buf[0] = c; buf[1] = 0; t_sendl(buf, 1); } } sprintf(buf, " func=%d len=%d\n\r", tstab->func, tstab->len); t_send(buf); } } } #endif /* * New routine. Add string to string->func mapping table. */ static void add_smap(char *str, int func) { stab_t *tstab; int len; if (!str) { return; } len = strlen(str); if (len == 0) { /* errmsg("String for func %d is zero length\n", func); */ return; } tstab = (stab_t *)malloc(sizeof(stab_t)); memset(tstab, 0, sizeof(stab_t)); tstab->len = len; tstab->str = (char *)malloc(tstab->len + 1); bstrncpy(tstab->str, str, tstab->len + 1); tstab->func = func; if (tstab->len > num_stab) { printf("stab string too long %d. 
Max is %d\n", tstab->len, num_stab); exit(1); } tstab->next = stab[tstab->len-1]; stab[tstab->len-1] = tstab; /* printf("Add_smap tstab=%x len=%d func=%d tstab->next=%x\n\r", tstab, len, func, tstab->next); */ } /* Get the next character from the terminal - performs table lookup on the character to do the desired translation */ static unsigned int input_char() { unsigned c; if ((c=t_gnc()) <= 599) { /* IBM generates codes up to 260 */ c = do_smap(c); } else if (c > 1000) { /* stuffed function */ c -= 1000; /* convert back to function code */ } if (c <= 0) { t_honk_horn(); } /* if we got a screen size escape sequence, read height, width */ if (c == F_SCRSIZ) { t_gnc(); /* - 0x20 = y */ t_gnc(); /* - 0x20 = x */ c = input_char(); } return c; } /* Get a complete input line */ int input_line(char *string, int length) { char curline[2000]; /* edit buffer */ int noline; unsigned c; int more; int i; if (first) { poolinit(); /* build line pool */ first = 0; } noline = 1; /* no line fetched yet */ for (cl=cp=0; cl cp) cl = cp; break; case F_NXTWRD: i = next_word(curline); while (i--) { forward(curline, sizeof(curline)); } break; case F_PRVWRD: i = prev_word(curline); while (i--) { backup(curline); } break; case F_DELWRD: delchr(next_word(curline), curline, sizeof(curline)); /* delete word */ break; case F_NXTMCH: /* Ctl-X */ if (cl==0) { *string = EOS; /* terminate string */ return(c); /* give it to him */ } /* Note fall through */ case F_DELLIN: case F_ERSLIN: while (cp > 0) { backup(curline); /* backup to beginning of line */ } t_clrline(0, t_width); /* erase line */ cp = 0; cl = 0; /* reset cursor counter */ t_char(' '); t_char(0x8); break; case F_SOL: while (cp > 0) { backup(curline); } break; case F_EOL: while (cp < cl) { forward(curline, sizeof(curline)); } while (cp > cl) { backup(curline); } break; case F_TINS: /* toggle insert mode */ mode_insert = !mode_insert; /* flip bit */ break; default: if (c > 255) { /* function key hit */ if (cl==0) { /* if first character then */ *string = EOS; /* terminate string */ return c; /* return it */ } t_honk_horn(); /* complain */ } else { if ((c & 0xC0) == 0xC0) { if ((c & 0xFC) == 0xFC) { more = 5; } else if ((c & 0xF8) == 0xF8) { more = 4; } else if ((c & 0xF0) == 0xF0) { more = 3; } else if ((c & 0xE0) == 0xE0) { more = 2; } else { more = 1; } } else { more = 0; } if (mode_insert) { insert_space(curline, sizeof(curline)); } curline[cp++] = c; /* store character in line being built */ t_char(c); /* echo character to terminal */ while (more--) { c= input_char(); insert_hole(curline, sizeof(curline)); curline[cp++] = c; /* store character in line being built */ t_char(c); /* echo character to terminal */ } if (cp > cl) { cl = cp; /* keep current length */ curline[cp] = 0; } } break; } /* end switch */ } /* If we fall through here rather than goto done, the line is too long simply return what we have now. */ done: curline[cl++] = EOS; /* terminate */ bstrncpy(string,curline,length); /* return line to caller */ /* Save non-blank lines. Note, put line zaps curline */ if (curline[0] != EOS) { putline(curline,cl); /* save line for posterity */ } return 0; /* give it to him/her */ } /* Insert a space at the current cursor position */ static void insert_space(char *curline, int curline_len) { int i; if (cp >= cl || cl+1 > curline_len) { return; } /* Note! 
source and destination overlap */ memmove(&curline[cp+1],&curline[cp],i=cl-cp); cl++; curline[cp] = ' '; i = 0; while (cl > cp) { forward(curline, curline_len); i++; } while (i--) { backup(curline); } } static void insert_hole(char *curline, int curline_len) { int i; if (cp > cl || cl+1 > curline_len) { return; } /* Note! source and destination overlap */ memmove(&curline[cp+1], &curline[cp], i=cl-cp); cl++; curline[cl] = 0; } /* Move cursor forward keeping characters under it */ static void forward(char *str, int str_len) { if (cp > str_len) { return; } if (cp >= cl) { t_char(' '); str[cp+1] = ' '; str[cp+2] = 0; } else { t_char(str[cp]); if ((str[cp] & 0xC0) == 0xC0) { cp++; while ((str[cp] & 0xC0) == 0x80) { t_char(str[cp]); cp++; } cp--; } } cp++; } /* How many characters under the cursor */ static int char_count(int cptr, char *str) { int cnt = 1; if (cptr > cl) { return 0; } if ((str[cptr] & 0xC0) == 0xC0) { cptr++; while ((str[cptr] & 0xC0) == 0x80) { cnt++; cptr++; } } return cnt; } /* Backup cursor keeping characters under it */ static void backup(char *str) { if (cp == 0) { return; } while ((str[cp] & 0xC0) == 0x80) { cp--; } t_char('\010'); cp--; } /* Delete the character under the cursor */ static void delchr(int del, char *curline, int line_len) { int i, cnt; if (cp > cl || del == 0) { return; } while (del-- && cl > 0) { cnt = char_count(cp, curline); if ((i=cl-cp-cnt) > 0) { memcpy(&curline[cp], &curline[cp+cnt], i); } cl -= cnt; curline[cl] = EOS; t_clrline(0, t_width); i = 0; while (cl > cp) { forward(curline, line_len); i++; } while (i--) { backup(curline); } } } /* Determine if character is part of a word */ static int iswordc(char c) { if (mode_wspace) return !isspace(c); if (c >= '0' && c <= '9') return true; if (c == '$' || c == '%') return true; return isalpha(c); } /* Return number of characters to get to next word */ static int next_word(char *ldb_buf) { int ncp; if (cp > cl) return 0; ncp = cp; for ( ; ncp cl) /* if past eol start at eol */ ncp=cl+1; else ncp = cp; /* backup to end of previous word - i.e. skip special chars */ for (i=ncp-1; i && !iswordc(*(ldb_buf+i)); i--) ; if (i == 0) { /* at beginning of line? */ return cp; /* backup to beginning */ } /* now move back through word to beginning of word */ for ( ; i && iswordc(*(ldb_buf+i)); i--) ; ncp = i+1; /* position to first char of word */ if (i==0 && iswordc(*ldb_buf)) /* check for beginning of line */ ncp = 0; return cp-ncp; /* return count */ } /* Display new current line */ static void prtcur(char *str) { while (cp > 0) { backup(str); } t_clrline(0,t_width); cp = cl = strlen(str); t_sendl(str, cl); } /* Initialize line pool. Split pool into two pieces. 
*/ static void poolinit() { slptr = lptr = (struct lstr *)pool; lptr->nextl = lptr; lptr->prevl = lptr; lptr->used = 1; lptr->line = 0; lptr->len = POOLEN; } /* Return pointer to next line in the pool and advance current line pointer */ static char * getnext() { do { /* find next used line */ lptr = lptr->nextl; } while (!lptr->used); return (char *)&lptr->line; } /* Return pointer to previous line in the pool */ static char * getprev() { do { /* find previous used line */ lptr = lptr->prevl; } while (!lptr->used); return (char *)&lptr->line; } static void putline(char *newl, int newlen) { struct lstr *nptr; /* points to next line */ char *p; lptr = slptr; /* get ptr to last line stored */ lptr = lptr->nextl; /* advance pointer */ if ((char *)lptr-pool+newlen+PHDRL > POOLEN) { /* not enough room */ lptr->used = 0; /* delete line */ lptr = (struct lstr *)pool; /* start at beginning of buffer */ } while (lptr->len < newlen+PHDRL) { /* concatenate buffers */ nptr = lptr->nextl; /* point to next line */ lptr->nextl = nptr->nextl; /* unlink it from list */ nptr->nextl->prevl = lptr; lptr->len += nptr->len; } if (lptr->len > newlen + 2 * PHDRL + 7) { /* split buffer */ nptr = (struct lstr *)((char *)lptr + newlen + PHDRL); /* Appropriate byte alignment - for Intel 2 byte, but on Sparc we need 8 byte alignment, so we always do 8 */ if (((long unsigned)nptr & 7) != 0) { /* test eight byte alignment */ p = (char *)nptr; nptr = (struct lstr *)((((long unsigned) p) & ~7) + 8); } nptr->len = lptr->len - ((char *)nptr - (char *)lptr); lptr->len -= nptr->len; nptr->nextl = lptr->nextl; /* link in new buffer */ lptr->nextl->prevl = nptr; lptr->nextl = nptr; nptr->prevl = lptr; nptr->used = 0; } memcpy(&lptr->line,newl,newlen); lptr->used = 1; /* mark line used */ slptr = lptr; /* save as stored line */ } #ifdef DEBUGOUT static void dump(struct lstr *ptr, char *msg) { printf("%s buf=%x nextl=%x prevl=%x len=%d used=%d\n", msg,ptr,ptr->nextl,ptr->prevl,ptr->len,ptr->used); if (ptr->used) printf("line=%s\n",&ptr->line); } #endif /* DEBUGOUT */ /* Honk horn on terminal */ static void t_honk_horn() { t_send(t_honk); } /* Insert line on terminal */ static void t_insert_line() { asinsl(); } /* Delete line from terminal */ static void t_delete_line() { asdell(); } /* clear line from pos to width */ static void t_clrline(int pos, int width) { asclrl(pos, width); /* clear to end of line */ } /* Helper function to add string preceded by * ESC to smap table */ static void add_esc_smap(const char *str, int func) { char buf[1000]; buf[0] = 0x1B; /* esc */ bstrncpy(buf+1, str, sizeof(buf)-1); add_smap(buf, func); } /* Set raw mode on terminal file. Basically, get the terminal into a mode in which all characters can be read as they are entered. CBREAK mode is not sufficient. 
*/ static void rawmode(FILE *input) { struct termios t; static char term_buf[2048]; static char *term_buffer = term_buf; char *termtype = (char *)getenv("TERM"); /* Make sure we are dealing with a terminal */ if (!isatty(fileno(input))) { return; } if (tcgetattr(0, &old_term_params) != 0) { printf("conio: Cannot tcgetattr()\n"); exit(1); } old_term_params_set = true; t = old_term_params; t.c_cc[VMIN] = 1; /* satisfy read after 1 char */ t.c_cc[VTIME] = 0; t.c_iflag &= ~(BRKINT | IGNPAR | PARMRK | INPCK | ISTRIP | ICRNL | IXON | IXOFF | INLCR | IGNCR); t.c_iflag |= IGNBRK; t.c_oflag |= ONLCR; t.c_lflag &= ~(ECHO | ECHOE | ECHOK | ECHONL | ICANON | NOFLSH | TOSTOP); tcflush(0, TCIFLUSH); if (tcsetattr(0, TCSANOW, &t) == -1) { printf("Cannot tcsetattr()\n"); } /* Defaults, the main program can override these */ signal(SIGQUIT, SIG_IGN); signal(SIGHUP, SIG_IGN); trapctlc(); signal(SIGWINCH, SIG_IGN); if (!termtype) { printf("Cannot get terminal type.\n"); normode(); exit(1); } if (tgetent(term_buffer, termtype) < 0) { printf("Cannot get terminal termcap entry.\n"); normode(); exit(1); } t_width = t_height = -1; /* Note (char *)casting is due to really stupid compiler warnings */ t_width = tgetnum((char *)"co") - 1; t_height = tgetnum((char *)"li"); BC = NULL; UP = NULL; t_cm = (char *)tgetstr((char *)"cm", &term_buffer); t_cs = (char *)tgetstr((char *)"cl", &term_buffer); /* clear screen */ t_cl = (char *)tgetstr((char *)"ce", &term_buffer); /* clear line */ t_dl = (char *)tgetstr((char *)"dl", &term_buffer); /* delete line */ t_il = (char *)tgetstr((char *)"al", &term_buffer); /* insert line */ t_honk = (char *)tgetstr((char *)"bl", &term_buffer); /* beep */ t_ti = (char *)tgetstr((char *)"ti", &term_buffer); t_te = (char *)tgetstr((char *)"te", &term_buffer); t_up = (char *)tgetstr((char *)"up", &term_buffer); t_do = (char *)tgetstr((char *)"do", &term_buffer); t_sf = (char *)tgetstr((char *)"sf", &term_buffer); num_stab = MAX_STAB; /* get default stab size */ stab = (stab_t **)malloc(sizeof(stab_t *) * num_stab); memset(stab, 0, sizeof(stab_t *) * num_stab); /* Key bindings */ kl = (char *)tgetstr((char *)"kl", &term_buffer); kr = (char *)tgetstr((char *)"kr", &term_buffer); ku = (char *)tgetstr((char *)"ku", &term_buffer); kd = (char *)tgetstr((char *)"kd", &term_buffer); kh = (char *)tgetstr((char *)"kh", &term_buffer); kb = (char *)tgetstr((char *)"kb", &term_buffer); kD = (char *)tgetstr((char *)"kD", &term_buffer); kI = (char *)tgetstr((char *)"kI", &term_buffer); kN = (char *)tgetstr((char *)"kN", &term_buffer); kP = (char *)tgetstr((char *)"kP", &term_buffer); kH = (char *)tgetstr((char *)"kH", &term_buffer); kE = (char *)tgetstr((char *)"kE", &term_buffer); add_smap(kl, F_CSRLFT); add_smap(kr, F_CSRRGT); add_smap(ku, F_CSRUP); add_smap(kd, F_CSRDWN); add_smap(kI, F_TINS); add_smap(kN, F_PAGDWN); add_smap(kP, F_PAGUP); add_smap(kH, F_HOME); add_smap(kE, F_EOF); add_esc_smap("[A", F_CSRUP); add_esc_smap("[B", F_CSRDWN); add_esc_smap("[C", F_CSRRGT); add_esc_smap("[D", F_CSRLFT); add_esc_smap("[1~", F_HOME); add_esc_smap("[2~", F_TINS); add_esc_smap("[3~", F_DELCHR); add_esc_smap("[4~", F_EOF); add_esc_smap("f", F_NXTWRD); add_esc_smap("b", F_PRVWRD); } /* Restore tty mode */ static void normode() { if (old_term_params_set) { tcsetattr(0, TCSANOW, &old_term_params); old_term_params_set = false; } } /* Get next character from terminal/script file/unget buffer */ static unsigned t_gnc() { return t_getch(); } /* Get next character from OS */ static unsigned t_getch(void) { unsigned char 
c; if (read(0, &c, 1) != 1) { c = 0; } return (unsigned)c; } /* Send message to terminal - primitive routine */ void t_sendl(const char *msg, int len) { write(1, msg, len); } void t_send(const char *msg) { if (msg == NULL) { return; } t_sendl(msg, strlen(msg)); /* faster than one char at time */ } /* Send single character to terminal - primitive routine - */ void t_char(char c) { (void)write(1, &c, 1); } /* ASCLRL() -- Clear to end of line from current position */ static void asclrl(int pos, int width) { int i; if (t_cl) { t_send(t_cl); /* use clear to eol function */ return; } if (pos==1 && linsdel_ok) { t_delete_line(); /* delete line */ t_insert_line(); /* reinsert it */ return; } for (i=1; i<=width-pos+1; i++) t_char(' '); /* last resort, blank it out */ for (i=1; i<=width-pos+1; i++) /* backspace to original position */ t_char(0x8); return; } /* ASCURS -- Set cursor position */ static void ascurs(int y, int x) { t_send((char *)tgoto(t_cm, x, y)); } /* ASCLRS -- Clear whole screen */ static void asclrs() { ascurs(0,0); t_send(t_cs); } /* ASINSL -- insert new line after cursor */ static void asinsl() { t_clrline(0, t_width); t_send(t_il); /* insert before */ } /* ASDELL -- Delete line at cursor */ static void asdell() { t_send(t_dl); } #endif bacula-15.0.3/src/console/console.c0000644000175000017500000013452414771010173016732 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * * Bacula Console interface to the Director * * Kern Sibbald, September MM * */ #include "bacula.h" #include "console_conf.h" #include "jcr.h" #if defined(HAVE_CONIO) #include "conio.h" //#define CONIO_FIX 1 #else /* defined(HAVE_READLINE) || "DUMB" */ #define con_init(x) #define con_term() #define con_set_zed_keys(); #endif void trapctlc(); void clrbrk(); int usrbrk(); static int brkflg = 0; /* set on user break */ static int require_fips = 0; #if defined(HAVE_WIN32) #define isatty(fd) (fd==0) #endif /* Exported variables */ //extern int rl_catch_signals; /* Imported functions */ int authenticate_director(BSOCK *dir, DIRRES *director, CONRES *cons, bool avoid_getpass); /* Forward referenced functions */ static void terminate_console(int sig); static int check_resources(); int get_cmd(FILE *input, const char *prompt, BSOCK *sock, int sec); static int do_outputcmd(FILE *input, BSOCK *UA_sock); void senditf(const char *fmt, ...); void sendit(const char *buf); extern "C" void got_sigstop(int sig); extern "C" void got_sigcontinue(int sig); extern "C" void got_sigtout(int sig); extern "C" void got_sigtin(int sig); /* Static variables */ static char *configfile = NULL; static BSOCK *UA_sock = NULL; static FILE *output = stdout; static bool teeout = false; /* output to output and stdout */ static bool teein = false; /* input to output and stdout */ static bool stop = false; static bool no_conio = false; static int timeout = 0; static int argc; static POOLMEM *args; static char *argk[MAX_CMD_ARGS]; static char *argv[MAX_CMD_ARGS]; static CONFIG *config; /* Command prototypes */ static int versioncmd(FILE *input, BSOCK *UA_sock); static int inputcmd(FILE *input, BSOCK *UA_sock); static int outputcmd(FILE *input, BSOCK *UA_sock); static int teecmd(FILE *input, BSOCK *UA_sock); static int teeallcmd(FILE *input, BSOCK *UA_sock); static int quitcmd(FILE *input, BSOCK *UA_sock); static int helpcmd(FILE *input, BSOCK *UA_sock); static int echocmd(FILE *input, BSOCK *UA_sock); static int timecmd(FILE *input, BSOCK *UA_sock); static int sleepcmd(FILE *input, BSOCK *UA_sock); static int execcmd(FILE *input, BSOCK *UA_sock); static int putfilecmd(FILE *input, BSOCK *UA_sock); static int encodecmd(FILE *input, BSOCK *UA_sock); #ifdef HAVE_READLINE static int eolcmd(FILE *input, BSOCK *UA_sock); # ifndef HAVE_REGEX_H # include "lib/bregex.h" # else # include # endif #endif #define CONFIG_FILE "bconsole.conf" /* default configuration file */ static void usage() { fprintf(stderr, _( PROG_COPYRIGHT "\n%sVersion: " VERSION " (" BDATE ") %s %s %s\n\n" "Usage: bconsole [-s] [-c config_file] [-d debug_level]\n" " -D select a Director\n" " -l list Directors defined\n" " -L list Consoles defined\n" " -C select a console\n" " -c set configuration file to file\n" " -d set debug level to \n" " -dt print timestamp in debug output\n" " -n no conio\n" " -s no signals\n" " -u set command execution timeout to seconds\n" " -t test - read configuration and exit\n" #if defined(DEVELOPER) " -p regress test - avoid getpass()\n" #endif " -? 
print this message.\n" "\n"), 2000, BDEMO, HOST_OS, DISTNAME, DISTVER); } extern "C" void got_sigstop(int sig) { stop = true; } extern "C" void got_sigcontinue(int sig) { stop = false; } extern "C" void got_sigtout(int sig) { // printf("Got tout\n"); } extern "C" void got_sigtin(int sig) { // printf("Got tin\n"); } static int zed_keyscmd(FILE *input, BSOCK *UA_sock) { con_set_zed_keys(); return 1; } /* * These are the @command */ struct cmdstruct { const char *key; int (*func)(FILE *input, BSOCK *UA_sock); const char *help; }; static struct cmdstruct commands[] = { { N_("input"), inputcmd, _("input from file")}, { N_("output"), outputcmd, _("output to file")}, { N_("quit"), quitcmd, _("quit")}, { N_("tee"), teecmd, _("output to file and terminal")}, { N_("tall"), teeallcmd, _("output everything to file and terminal (tee all)")}, { N_("sleep"), sleepcmd, _("sleep specified time")}, { N_("time"), timecmd, _("print current time")}, { N_("version"), versioncmd, _("print Console's version")}, { N_("echo"), echocmd, _("echo command string")}, { N_("encode"), encodecmd, _("encode command string")}, { N_("exec"), execcmd, _("execute an external command")}, { N_("exit"), quitcmd, _("exit = quit")}, { N_("putfile"), putfilecmd, _("send a file to the director")}, { N_("zed_keys"), zed_keyscmd, _("zed_keys = use zed keys instead of bash keys")}, { N_("help"), helpcmd, _("help listing")}, #ifdef HAVE_READLINE { N_("separator"), eolcmd, _("set command separator")}, #endif }; #define comsize ((int)(sizeof(commands)/sizeof(struct cmdstruct))) static int do_a_command(FILE *input, BSOCK *UA_sock) { unsigned int i; int stat; int found; int len; char *cmd; found = 0; stat = 1; Dmsg1(120, "Command: %s\n", UA_sock->msg); if (argc == 0) { return 1; } cmd = argk[0]+1; if (*cmd == '#') { /* comment */ return 1; } len = strlen(cmd); for (i=0; imsg, _(": is an invalid command\n")); UA_sock->msglen = strlen(UA_sock->msg); sendit(UA_sock->msg); } return stat; } /* When getting .api command, we can ignore some signals, so we set * api_mode=true */ static bool api_mode=false; static bool ignore_signal(int stat, BSOCK *s) { /* Not in API mode */ if (!api_mode) { return false; } /* not a signal */ if (stat != -1) { return false; } /* List signal that should not stop the read loop */ Dmsg1(100, "Got signal %s\n", bnet_sig_to_ascii(s->msglen)); switch(s->msglen) { case BNET_CMD_BEGIN: case BNET_CMD_FAILED: /* might want to print **ERROR** */ case BNET_CMD_OK: /* might want to print **OK** */ case BNET_MSGS_PENDING: return true; default: break; } /* The signal should break the read loop */ return false; } static void read_and_process_input(FILE *input, BSOCK *UA_sock) { const char *prompt = "*"; bool at_prompt = false; int tty_input = isatty(fileno(input)); int stat; btimer_t *tid=NULL; for ( ;; ) { if (at_prompt) { /* don't prompt multiple times */ prompt = ""; } else { prompt = "*"; at_prompt = true; } if (tty_input) { stat = get_cmd(input, prompt, UA_sock, 30); if (usrbrk() >= 1) { clrbrk(); } if (usrbrk()) { break; } } else { /* Reading input from a file */ if (usrbrk()) { break; } if (bfgets(UA_sock->msg, input) == NULL) { stat = -1; } else { sendit(UA_sock->msg); /* echo to terminal */ strip_trailing_junk(UA_sock->msg); UA_sock->msglen = strlen(UA_sock->msg); stat = 1; } } if (stat < 0) { break; /* error or interrupt */ } else if (stat == 0) { /* timeout */ if (strcmp(prompt, "*") == 0) { tid = start_bsock_timer(UA_sock, timeout); UA_sock->fsend(".messages"); stop_bsock_timer(tid); } else { continue; } } else { 
at_prompt = false; /* @ => internal command for us */ if (UA_sock->msg[0] == '@') { parse_args(UA_sock->msg, &args, &argc, argk, argv, MAX_CMD_ARGS); if (!do_a_command(input, UA_sock)) { break; } continue; } tid = start_bsock_timer(UA_sock, timeout); if (!UA_sock->send()) { /* send command */ stop_bsock_timer(tid); break; /* error */ } stop_bsock_timer(tid); } if (strncasecmp(UA_sock->msg, ".api", 4) == 0) { api_mode = true; } if (strcasecmp(UA_sock->msg, ".quit") == 0 || strcasecmp(UA_sock->msg, ".exit") == 0) { break; } tid = start_bsock_timer(UA_sock, timeout); while (1) { stat = UA_sock->recv(); if (ignore_signal(stat, UA_sock)) { continue; } if (stat < 0) { break; } if (at_prompt) { if (!stop) { sendit("\n"); } at_prompt = false; } /* Suppress output if running in background or user hit ctl-c */ if (!stop && !usrbrk()) { sendit(UA_sock->msg); } } stop_bsock_timer(tid); if (usrbrk() > 1) { break; } else { clrbrk(); } if (!stop) { fflush(stdout); } if (UA_sock->is_stop()) { break; /* error or term */ } else if (stat == BNET_SIGNAL) { if (UA_sock->msglen == BNET_SUB_PROMPT) { at_prompt = true; } Dmsg1(100, "Got poll %s\n", bnet_sig_to_ascii(UA_sock->msglen)); } } } /* * Call-back for reading a passphrase for an encrypted PEM file * This function uses getpass(), * which uses a static buffer and is NOT thread-safe. */ static int tls_pem_callback(char *buf, int size, const void *userdata) { #ifdef HAVE_TLS const char *prompt = (const char *)userdata; # if defined(HAVE_WIN32) sendit(prompt); if (win32_cgets(buf, size) == NULL) { buf[0] = 0; return 0; } else { return strlen(buf); } # else char *passwd; passwd = getpass(prompt); bstrncpy(buf, passwd, size); return strlen(buf); # endif #else buf[0] = 0; return 0; #endif } #ifdef HAVE_READLINE #define READLINE_LIBRARY 1 #include "readline.h" #include "history.h" static int history_lines_added=0; /* Number of lines added to the history file */ /* Get the first keyword of the line */ static char * get_first_keyword() { char *ret=NULL; int len; char *first_space = strchr(rl_line_buffer, ' '); if (first_space) { len = first_space - rl_line_buffer; ret = (char *) malloc((len + 1) * sizeof(char)); memcpy(ret, rl_line_buffer, len); ret[len]=0; } return ret; } /* * Return the command before the current point. 
* Set nb to the number of command to skip */ static char * get_previous_keyword(int current_point, int nb) { int i; int end = -1; int start = -1; int inquotes = 0; char *s = NULL; while (nb-- >= 0) { /* first we look for a space before the current word */ for (i = current_point; i >= 0; i--) { if (rl_line_buffer[i] == ' ' || rl_line_buffer[i] == '=') { break; } } /* find the end of the command */ for (; i >= 0; i--) { if (rl_line_buffer[i] != ' ') { end = i; break; } } /* no end of string */ if (end == -1) { return NULL; } /* look for the start of the command */ for (start = end; start > 0; start--) { if (rl_line_buffer[start] == '"') { inquotes = !inquotes; } if ((rl_line_buffer[start - 1] == ' ') && inquotes == 0) { break; } current_point = start; } } if (start == -1 || end == -1) { // something went wrong during scan return NULL; } s = (char *)malloc(end - start + 2); memcpy(s, rl_line_buffer + start, end - start + 1); s[end - start + 1] = 0; // printf("=======> %i:%i <%s>\n", start, end, s); return s; } /* Simple structure that will contain the completion list */ struct ItemList { alist list; }; static ItemList *items = NULL; void init_items() { if (!items) { items = (ItemList*)bmalloc(sizeof(ItemList)); /* bmalloc clears memory */ } else { items->list.destroy(); } items->list.init(); } /* Match a regexp and add the result to the items list * This function is recursive */ static void match_kw(regex_t *preg, const char *what, int len, POOLMEM **buf) { int rc, size; int nmatch=20; regmatch_t pmatch[nmatch]; if (len <= 0) { return; } rc = regexec(preg, what, nmatch, pmatch, 0); if (rc == 0) { #if 0 Pmsg1(0, "\n\n%s\n0123456789012345678901234567890123456789\n 10 20 30\n", what); Pmsg2(0, "%i-%i\n", pmatch[0].rm_so, pmatch[0].rm_eo); Pmsg2(0, "%i-%i\n", pmatch[1].rm_so, pmatch[1].rm_eo); Pmsg2(0, "%i-%i\n", pmatch[2].rm_so, pmatch[2].rm_eo); Pmsg2(0, "%i-%i\n", pmatch[3].rm_so, pmatch[3].rm_eo); #endif size = pmatch[1].rm_eo - pmatch[1].rm_so; *buf = check_pool_memory_size(*buf, size + 1); memcpy(*buf, what+pmatch[1].rm_so, size); (*buf)[size] = 0; items->list.append(bstrdup(*buf)); /* We search for the next keyword in the line */ match_kw(preg, what + pmatch[1].rm_eo, len - pmatch[1].rm_eo, buf); } } /* fill the items list with the output of the help command */ void get_arguments(const char *what) { regex_t preg; POOLMEM *buf; int rc; init_items(); rc = regcomp(&preg, "(([a-z]+=)|([a-z]+)( |$))", REG_EXTENDED); if (rc != 0) { return; } buf = get_pool_memory(PM_MESSAGE); UA_sock->fsend(".help item=%s", what); while (UA_sock->recv() > 0) { strip_trailing_junk(UA_sock->msg); match_kw(&preg, UA_sock->msg, UA_sock->msglen, &buf); } free_pool_memory(buf); regfree(&preg); } /* retreive a simple list (.pool, .client) and store it into items */ void get_items(const char *what) { init_items(); UA_sock->fsend("%s", what); while (UA_sock->recv() > 0) { strip_trailing_junk(UA_sock->msg); items->list.append(bstrdup(UA_sock->msg)); } } typedef enum { ITEM_ARG, /* item with simple list like .jobs */ ITEM_HELP /* use help item=xxx and detect all arguments */ } cpl_item_t; /* Generator function for command completion. STATE lets us know whether * to start from scratch; without any state (i.e. STATE == 0), then we * start at the top of the list. */ static char *item_generator(const char *text, int state, const char *item, cpl_item_t type) { static int list_index, len; char *name; /* If this is a new word to complete, initialize now. 
    * This includes saving the length of TEXT for efficiency, and
    * initializing the index variable to 0. */
   if (!state) {
      list_index = 0;
      len = strlen(text);
      switch(type) {
      case ITEM_ARG:
         get_items(item);
         break;
      case ITEM_HELP:
         get_arguments(item);
         break;
      }
   }

   /* Return the next name which partially matches from the command list. */
   while (items && list_index < items->list.size()) {
      name = (char *)items->list[list_index];
      list_index++;
      if (strncmp(name, text, len) == 0) {
         char *ret = (char *) actuallymalloc(strlen(name)+1);
         strcpy(ret, name);
         return ret;
      }
   }

   /* If no names matched, then return NULL. */
   return ((char *)NULL);
}

/* global variables for the type and the item to search;
 * the readline API doesn't permit passing user data.
 */
static const char *cpl_item;
static cpl_item_t cpl_type;

static char *cpl_generator(const char *text, int state)
{
   return item_generator(text, state, cpl_item, cpl_type);
}

/* this function is used to disable the default filename completion */
static char *dummy_completion_function(const char *text, int state)
{
   return NULL;
}

struct cpl_keywords_t {
   const char *key;
   const char *cmd;
};

static struct cpl_keywords_t cpl_keywords[] = {
   {"pool=",          ".pool"          },
   {"fileset=",       ".fileset"       },
   {"client=",        ".client"        },
   {"job=",           ".jobs"          },
   {"restore_job=",   ".jobs type=R"   },
   {"level=",         ".level"         },
   {"storage=",       ".storage"       },
   {"schedule=",      ".schedule"      },
   {"volume=",        ".media"         },
   {"oldvolume=",     ".media"         },
   {"volstatus=",     ".volstatus"     },
   {"ls",             ".ls"            },
   {"cd",             ".lsdir"         },
   {"mark",           ".ls"            },
   {"m",              ".ls"            },
   {"unmark",         ".lsmark"        },
   {"catalog=",       ".catalogs"      },
   {"actiononpurge=", ".actiononpurge" },
   {"tags=",          ".tags"          },
   {"recyclepool=",   ".pool"          },
   {"allfrompool=",   ".pool"          },
   {"nextpool=",      ".pool"          }
};
#define key_size ((int)(sizeof(cpl_keywords)/sizeof(struct cpl_keywords_t)))

/* Attempt to complete on the contents of TEXT.  START and END bound the
 * region of rl_line_buffer that contains the word to complete.  TEXT is
 * the word to complete.  We can use the entire contents of rl_line_buffer
 * in case we want to do some simple parsing.  Return the array of matches,
 * or NULL if there aren't any. */
static char **readline_completion(const char *text, int start, int end)
{
   bool found=false;
   char **matches;
   char *s, *cmd;

   matches = (char **)NULL;

   /* If this word is at the start of the line, then it is a command
    * to complete.  Otherwise it is the name of a file in the current
    * directory.
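    *
    * (For example, completing "restore client=<TAB>" yields s = "client=",
    * which cpl_keywords[] maps to the ".client" dot command; the names that
    * command returns become the completion candidates.  Illustrative only.)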
*/ s = get_previous_keyword(start, 0); cmd = get_first_keyword(); if (s) { for (int i=0; i < key_size; i++) { if (!strcasecmp(s, cpl_keywords[i].key)) { cpl_item = cpl_keywords[i].cmd; cpl_type = ITEM_ARG; matches = rl_completion_matches(text, cpl_generator); found=true; break; } } if (!found) { /* we try to get help with the first command */ cpl_item = cmd; cpl_type = ITEM_HELP; /* we don't want to append " " at the end */ rl_completion_suppress_append=true; matches = rl_completion_matches(text, cpl_generator); } free(s); } else { /* nothing on the line, display all commands */ cpl_item = ".help all"; cpl_type = ITEM_ARG; matches = rl_completion_matches(text, cpl_generator); } if (cmd) { free(cmd); } return (matches); } static char eol = '\0'; static int eolcmd(FILE *input, BSOCK *UA_sock) { if ((argc > 1) && (strchr("!$%&'()*+,-/:;<>?[]^`{|}~", argk[1][0]) != NULL)) { eol = argk[1][0]; } else if (argc == 1) { eol = '\0'; } else { sendit(_("Illegal separator character.\n")); } return 1; } /* * Return 1 if OK * 0 if no input * -1 error (must stop) */ int get_cmd(FILE *input, const char *prompt, BSOCK *sock, int sec) { static char *line = NULL; static char *next = NULL; static int do_history = 0; char *command; if (line == NULL) { do_history = 0; rl_catch_signals = 0; /* do it ourselves */ /* Here, readline does ***real*** malloc * so, be we have to use the real free */ line = readline((char *)prompt); /* cast needed for old readlines */ if (!line) { return -1; /* error return and exit */ } strip_trailing_junk(line); command = line; } else if (next) { command = next + 1; } else { sendit(_("Command logic problem\n")); sock->msglen = 0; sock->msg[0] = 0; return 0; /* No input */ } /* * Split "line" into multiple commands separated by the eol character. * Each part is pointed to by "next" until finally it becomes null. */ if (eol == '\0') { next = NULL; } else { next = strchr(command, eol); if (next) { *next = '\0'; } } if (command != line && isatty(fileno(input))) { senditf("%s%s\n", prompt, command); } else { /* Send the intput to the output file if needed */ if (teein && output != stdout) { fputs(prompt, output); fputs(command, output); fputs("\n", output); } } sock->msglen = pm_strcpy(&sock->msg, command); if (sock->msglen) { do_history++; } if (!next) { if (do_history) { add_history(line); /* Count the number of lines added, we use it to truncate the history * file correctly */ history_lines_added++; } actuallyfree(line); /* allocated by readline() malloc */ line = NULL; } return 1; /* OK */ } #else /* no readline, do it ourselves */ #ifdef HAVE_CONIO static bool bisatty(int fd) { if (no_conio) { return false; } return isatty(fd); } #endif /* * Returns: 1 if data available * 0 if timeout * -1 if error */ static int wait_for_data(int fd, int sec) { #if defined(HAVE_WIN32) return 1; #else for ( ;; ) { switch(fd_wait_data(fd, WAIT_READ, sec, 0)) { case 0: /* timeout */ return 0; case -1: if (errno == EINTR || errno == EAGAIN) { continue; } return -1; /* error return */ default: return 1; } } #endif } /* * Get next input command from terminal. 
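 *
 * (read_and_process_input() relies on the timeout return value: when the
 * console is idle at the "*" prompt, a timeout triggers a ".messages"
 * poll of the Director.)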
* * Returns: 1 if got input * 0 if timeout * -1 if EOF or error */ int get_cmd(FILE *input, const char *prompt, BSOCK *sock, int sec) { int len; if (!stop) { if (output == stdout || teeout) { sendit(prompt); } } again: switch (wait_for_data(fileno(input), sec)) { case 0: return 0; /* timeout */ case -1: return -1; /* error */ default: len = sizeof_pool_memory(sock->msg) - 1; if (stop) { sleep(1); goto again; } #ifdef HAVE_CONIO if (bisatty(fileno(input))) { input_line(sock->msg, len); break; } #endif #ifdef HAVE_WIN32 /* use special console for input on win32 */ if (input == stdin) { if (win32_cgets(sock->msg, len) == NULL) { return -1; } } else #endif if (bfgets(sock->msg, input) == NULL) { return -1; } break; } if (usrbrk()) { clrbrk(); } strip_trailing_junk(sock->msg); sock->msglen = strlen(sock->msg); /* Send input to log file if needed */ if (teein && output != stdout) { fputs(sock->msg, output); fputs("\n", output); } return 1; } #endif /* ! HAVE_READLINE */ /* Routine to return true if user types break */ int usrbrk() { return brkflg; } /* Clear break flag */ void clrbrk() { brkflg = 0; } /* Interrupt caught here */ static void sigintcatcher(int sig) { brkflg++; if (brkflg > 3) { terminate_console(sig); } signal(SIGINT, sigintcatcher); } /* Trap Ctl-C */ void trapctlc() { signal(SIGINT, sigintcatcher); } #ifdef HAVE_READLINE static int histfile_size = 100; #endif static int console_update_history(const char *histfile) { int ret=0; #ifdef HAVE_READLINE /* * first, try to truncate the history file, and if it * fails, the file is probably not present, and we * can use write_history to create it */ int nlines = MAX(histfile_size - history_lines_added, 0); if (history_truncate_file(histfile, nlines) == 0) { nlines = MIN(histfile_size, history_lines_added); ret = append_history(nlines, histfile); } else { ret = write_history(histfile); } #endif return ret; } static int console_init_history(const char *histfile, int size) { int ret=0; #ifdef HAVE_READLINE histfile_size = size; using_history(); ret = read_history(histfile); /* Tell the completer that we want a complete . */ rl_completion_entry_function = dummy_completion_function; rl_attempted_completion_function = readline_completion; rl_filename_completion_desired = 0; stifle_history(histfile_size); #endif return ret; } static bool select_director(const char *director, const char *console, DIRRES **ret_dir, CONRES **ret_cons) { int numcon=0, numdir=0; int i=0, item=0; BSOCK *UA; DIRRES *dir = NULL; CONRES *cons = NULL; *ret_cons = NULL; *ret_dir = NULL; LockRes(); numdir = 0; foreach_res(dir, R_DIRECTOR) { numdir++; } numcon = 0; foreach_res(cons, R_CONSOLE) { numcon++; } UnlockRes(); if (numdir == 1) { /* No choose */ dir = (DIRRES *)GetNextRes(R_DIRECTOR, NULL); } if (director) { /* Command line choice overwrite the no choose option */ LockRes(); foreach_res(dir, R_DIRECTOR) { if (bstrcasecmp(dir->hdr.name, director)) { break; } } UnlockRes(); if (!dir) { /* Can't find Director used as argument */ senditf(_("Can't find %s in Director list\n"), director); return 0; } } if (dir == NULL) { /* prompt for director */ UA = new_bsock(); try_again: sendit(_("Available Directors:\n")); LockRes(); numdir = 0; foreach_res(dir, R_DIRECTOR) { senditf( _("%2d: %s at %s:%d\n"), 1+numdir++, dir->hdr.name, dir->address, dir->DIRport); } UnlockRes(); if (get_cmd(stdin, _("Select Director by entering a number: "), UA, 600) < 0) { (void)WSACleanup(); /* Cleanup Windows sockets */ return 0; } if (!is_a_number(UA->msg)) { senditf(_("%s is not a number. 
You must enter a number between " "1 and %d\n"), UA->msg, numdir); goto try_again; } item = atoi(UA->msg); if (item < 0 || item > numdir) { senditf(_("You must enter a number between 1 and %d\n"), numdir); goto try_again; } free_bsock(UA); LockRes(); for (i=0; ihdr.name, console) == 0) { break; } } else if (cons->director && strcasecmp(cons->director, dir->hdr.name) == 0) { break; } if (i == (numcon - 1)) { cons = NULL; } } if (cons == NULL && console != NULL) { UnlockRes(); senditf(_("Can't find %s in Console list\n"), console); return 0; } /* Look for the first non-linked console */ if (cons == NULL) { for (i=0; idirector == NULL) { break; } if (i == (numcon - 1)) { cons = NULL; } } } /* If no console, take first one */ if (!cons) { cons = (CONRES *)GetNextRes(R_CONSOLE, (RES *)NULL); } UnlockRes(); *ret_dir = dir; *ret_cons = cons; return 1; } /********************************************************************* * * Main Bacula Console -- User Interface Program * */ int main(int argc, char *argv[]) { int ch; char *director = NULL; char *console = NULL; bool list_directors=false, list_consoles=false; bool no_signals = false; bool test_config = false; utime_t heart_beat; bool avoid_getpass = false; DIRRES *dir = NULL; CONRES *cons = NULL; setlocale(LC_ALL, ""); bindtextdomain("bacula", LOCALEDIR); textdomain("bacula"); init_stack_dump(); lmgr_init_thread(); my_name_is(argc, argv, "bconsole"); init_msg(NULL, NULL); working_directory = "/tmp"; args = get_pool_memory(PM_FNAME); while ((ch = getopt(argc, argv, "D:lc:d:npstu:?C:L")) != -1) { switch (ch) { case 'D': /* Director */ if (director) { free(director); } director = bstrdup(optarg); break; case 'C': /* Console */ if (console) { free(console); } console = bstrdup(optarg); break; case 'L': /* Console */ list_consoles = true; test_config = true; break; case 'l': list_directors = true; test_config = true; break; case 'c': /* configuration file */ if (configfile != NULL) { free(configfile); } configfile = bstrdup(optarg); break; case 'd': if (*optarg == 't') { dbg_timestamp = true; } else { debug_level = atoi(optarg); if (debug_level <= 0) { debug_level = 1; } } break; case 'n': /* no conio */ no_conio = true; break; case 's': /* turn off signals */ no_signals = true; break; case 't': test_config = true; break; case 'u': timeout = atoi(optarg); break; #if defined(DEVELOPER) case 'p': avoid_getpass = true; break; #endif case '?': default: usage(); exit(1); } } argc -= optind; argv += optind; if (!no_signals) { init_signals(terminate_console); } #if !defined(HAVE_WIN32) /* Override Bacula default signals */ signal(SIGQUIT, SIG_IGN); signal(SIGTSTP, got_sigstop); signal(SIGCONT, got_sigcontinue); signal(SIGTTIN, got_sigtin); signal(SIGTTOU, got_sigtout); trapctlc(); #endif OSDependentInit(); if (argc) { usage(); exit(1); } if (configfile == NULL) { configfile = bstrdup(CONFIG_FILE); } config = New(CONFIG()); parse_cons_config(config, configfile, M_ERROR_TERM); /* TODO: Get the FIPS settings from the configuration file */ if (init_crypto() != 0) { Emsg0(M_ERROR_TERM, 0, _("Cryptography library initialization failed.\n")); } if (!check_resources()) { Emsg1(M_ERROR_TERM, 0, _("Please correct configuration file: %s\n"), configfile); } if (crypto_check_fips(require_fips) < 0) { Emsg0(M_ERROR_TERM, 0, _("Enable to set FIPS mode\n")); } if (!no_conio) { con_init(stdin); } if (list_directors) { LockRes(); foreach_res(dir, R_DIRECTOR) { senditf("%s\n", dir->hdr.name); } UnlockRes(); } if (list_consoles) { LockRes(); foreach_res(cons, R_CONSOLE) { 
senditf("%s\n", cons->hdr.name); } UnlockRes(); } if (test_config) { terminate_console(0); exit(0); } (void)WSA_Init(); /* Initialize Windows sockets */ start_watchdog(); /* Start socket watchdog */ if (!select_director(director, console, &dir, &cons)) { terminate_console(0); return 1; } senditf(_("Connecting to Director %s:%d\n"), dir->address,dir->DIRport); char buf[1024]; /* Initialize Console TLS context */ if (cons && cons->tls_enable) { /* Generate passphrase prompt */ bsnprintf(buf, sizeof(buf), "Passphrase for Console \"%s\" TLS private key: ", cons->hdr.name); /* Initialize TLS context: * Args: CA certfile, CA certdir, Certfile, Keyfile, * Keyfile PEM Callback, Keyfile CB Userdata, DHfile, Verify Peer */ cons->tls_ctx = new_tls_context(cons->tls_ca_certfile, cons->tls_ca_certdir, cons->tls_certfile, cons->tls_keyfile, tls_pem_callback, &buf, NULL, true); if (!cons->tls_ctx) { senditf(_("Failed to initialize TLS context for Console \"%s\".\n"), cons->hdr.name); terminate_console(0); return 1; } } /* Initialize a TLS-PSK context (even if a TLS context already exists) */ if (cons) { cons->psk_ctx = new_psk_context(cons->password); /* In this case, we have TLS Require=Yes and TLS not setup and no PSK */ if (cons->tls_enable == false && cons->tls_require && cons->psk_ctx == NULL) { senditf(_("Failed to initialize TLS PSK context for Console \"%s\".\n"), cons->hdr.name); terminate_console(0); return 1; } } /* Initialize Director TLS context */ if (dir->tls_enable) { /* Generate passphrase prompt */ bsnprintf(buf, sizeof(buf), "Passphrase for Director \"%s\" TLS private key: ", dir->hdr.name); /* Initialize TLS context: * Args: CA certfile, CA certdir, Certfile, Keyfile, * Keyfile PEM Callback, Keyfile CB Userdata, DHfile, Verify Peer */ dir->tls_ctx = new_tls_context(dir->tls_ca_certfile, dir->tls_ca_certdir, dir->tls_certfile, dir->tls_keyfile, tls_pem_callback, &buf, NULL, true); if (!dir->tls_ctx) { senditf(_("Failed to initialize TLS context for Director \"%s\".\n"), dir->hdr.name); terminate_console(0); return 1; } } /* Initialize a TLS-PSK context (even if a TLS context already exists) */ dir->psk_ctx = new_psk_context(dir->password); /* In this case, we have TLS Require=Yes and TLS not setup and no PSK */ if (dir->tls_enable == false && dir->tls_require && dir->psk_ctx == NULL) { senditf(_("Failed to initialize TLS PSK context for Director \"%s\".\n"), dir->hdr.name); terminate_console(0); return 1; } if (dir->heartbeat_interval) { heart_beat = dir->heartbeat_interval; } else if (cons) { heart_beat = cons->heartbeat_interval; } else { heart_beat = 0; } if (!UA_sock) { UA_sock = new_bsock(); } if (!UA_sock->connect(NULL, 5, 15, heart_beat, "Director daemon", dir->address, NULL, dir->DIRport, 0)) { senditf("%s", UA_sock->errmsg); UA_sock->destroy(); UA_sock = NULL; terminate_console(0); return 1; } /* If cons==NULL, default console will be used */ if (!authenticate_director(UA_sock, dir, cons, avoid_getpass)) { terminate_console(0); return 1; } Dmsg0(40, "Opened connection with Director daemon\n"); sendit(_("Enter a period to cancel a command.\n")); /* Read/Update history file if HOME exists */ POOL_MEM history_file; int hist_file_size=0; /* Run commands in ~/.bconsolerc if any */ char *env = getenv("HOME"); if (env) { FILE *fd; pm_strcpy(&UA_sock->msg, env); pm_strcat(&UA_sock->msg, "/.bconsolerc"); fd = bfopen(UA_sock->msg, "rb"); if (fd) { read_and_process_input(fd, UA_sock); fclose(fd); } } hist_file_size = dir->hist_file_size; if (dir->hist_file) { pm_strcpy(history_file, 
dir->hist_file); console_init_history(history_file.c_str(), hist_file_size); } else if (env) { pm_strcpy(history_file, env); pm_strcat(history_file, "/.bconsole_history"); console_init_history(history_file.c_str(), hist_file_size); } read_and_process_input(stdin, UA_sock); if (UA_sock) { UA_sock->signal(BNET_TERMINATE); /* send EOF */ UA_sock->close(); } if (env) { console_update_history(history_file.c_str()); } terminate_console(0); return 0; } /* Cleanup and then exit */ static void terminate_console(int sig) { static bool already_here = false; if (already_here) { /* avoid recursive temination problems */ exit(1); } already_here = true; stop_watchdog(); delete(config); config = NULL; cleanup_crypto(); free(res_head); res_head = NULL; free_pool_memory(args); #if defined(HAVE_CONIO) if (!no_conio) { con_term(); } #elif defined(HAVE_READLINE) rl_cleanup_after_signal(); #else /* !HAVE_CONIO && !HAVE_READLINE */ #endif (void)WSACleanup(); /* Cleanup Windows sockets */ lmgr_cleanup_main(); if (sig != 0) { exit(1); } return; } /* * Make a quick check to see that we have all the * resources needed. */ static int check_resources() { bool OK = true; DIRRES *director; bool tls_needed; int numdir = 0; LockRes(); foreach_res(director, R_DIRECTOR) { numdir++; /* tls_require implies tls_enable */ if (director->tls_require) { if (have_tls) { if (director->tls_ca_certfile || director->tls_ca_certdir) { director->tls_enable = true; } } else { Jmsg(NULL, M_FATAL, 0, _("TLS required but not configured in Bacula.\n")); OK = false; continue; } } tls_needed = director->tls_enable || director->tls_authenticate; if ((!director->tls_ca_certfile && !director->tls_ca_certdir) && tls_needed) { Emsg2(M_FATAL, 0, _("Neither \"TLS CA Certificate File\"" " or \"TLS CA Certificate Dir\" are defined for Director \"%s\" in %s." " At least one CA certificate store is required.\n"), director->hdr.name, configfile); OK = false; } if (director->require_fips) { require_fips = 1; } } if (numdir == 0) { Emsg1(M_FATAL, 0, _("No Director resource defined in %s\n" "Without that I don't how to speak to the Director :-(\n"), configfile); OK = false; } CONRES *cons; /* Loop over Consoles */ foreach_res(cons, R_CONSOLE) { /* tls_require implies tls_enable */ if (cons->tls_require) { if (have_tls) { if (cons->tls_ca_certfile || cons->tls_ca_certdir) { cons->tls_enable = true; } } else { Jmsg(NULL, M_FATAL, 0, _("TLS required but not configured in Bacula.\n")); OK = false; continue; } } tls_needed = cons->tls_enable || cons->tls_authenticate; if ((!cons->tls_ca_certfile && !cons->tls_ca_certdir) && tls_needed) { Emsg2(M_FATAL, 0, _("Neither \"TLS CA Certificate File\"" " or \"TLS CA Certificate Dir\" are defined for Console \"%s\" in %s.\n"), cons->hdr.name, configfile); OK = false; } if (cons->require_fips) { require_fips = 1; } } UnlockRes(); return OK; } /* @version */ static int versioncmd(FILE *input, BSOCK *UA_sock) { senditf("Version: " VERSION " (" BDATE ") %s %s %s\n", /* TODO: Add FIPS setting here */ HOST_OS, DISTNAME, DISTVER); if (crypto_get_fips()) { senditf("Crypto: fips=%s crypto=%s\n", crypto_get_fips_enabled(), crypto_get_version()); } return 1; } /* @input */ static int inputcmd(FILE *input, BSOCK *UA_sock) { FILE *fd; if (argc > 2) { sendit(_("Too many arguments on input command.\n")); return 1; } if (argc == 1) { sendit(_("First argument to input command must be a filename.\n")); return 1; } fd = bfopen(argk[1], "rb"); if (!fd) { berrno be; senditf(_("Cannot open file %s for input. 
ERR=%s\n"), argk[1], be.bstrerror()); return 1; } read_and_process_input(fd, UA_sock); fclose(fd); return 1; } /* @tall */ /* Send input/output to both terminal and specified file */ static int teeallcmd(FILE *input, BSOCK *UA_sock) { teeout = true; teein = true; return do_outputcmd(input, UA_sock); } /* @tee */ /* Send output to both terminal and specified file */ static int teecmd(FILE *input, BSOCK *UA_sock) { teeout = true; teein = false; return do_outputcmd(input, UA_sock); } /* @output */ /* Send output to specified "file" */ static int outputcmd(FILE *input, BSOCK *UA_sock) { teeout = false; teein = false; return do_outputcmd(input, UA_sock); } static int do_outputcmd(FILE *input, BSOCK *UA_sock) { FILE *fd; const char *mode = "a+b"; if (argc > 3) { sendit(_("Too many arguments on output/tee command.\n")); return 1; } if (argc == 1) { if (output != stdout) { fclose(output); output = stdout; teeout = false; teein = false; } return 1; } if (argc == 3) { mode = argk[2]; } fd = bfopen(argk[1], mode); if (!fd) { berrno be; senditf(_("Cannot open file %s for output. ERR=%s\n"), argk[1], be.bstrerror(errno)); return 1; } output = fd; return 1; } /* * @exec "some-command" [wait-seconds] */ static int execcmd(FILE *input, BSOCK *UA_sock) { BPIPE *bpipe; char line[5000]; int stat; int wait = 0; char *cmd; if (argc > 3) { sendit(_("Too many arguments. Enclose command in double quotes.\n")); return 1; } /* old syntax */ if (argc == 3) { wait = atoi(argk[2]); } cmd = argk[1]; /* handle cmd=XXXX and wait=XXXX */ for (int i=1; irfd)) { senditf("%s", line); } stat = close_bpipe(bpipe); if (stat != 0) { berrno be; be.set_errno(stat); senditf(_("@exec error: ERR=%s\n"), be.bstrerror()); } return 1; } /* Encode a string with a simple algorithm */ static int encodecmd(FILE *input, BSOCK *UA_sock) { int len; int salt; char pepper; if (argc == 2) { POOLMEM *buf = get_pool_memory(PM_NAME); POOLMEM *buf2 = get_pool_memory(PM_NAME); char *str = argk[1]; salt = rand() % 1000; /* Display max 3 digits */ pepper = salt % 256; /* salt for password itself */ if (argv[1]) { if (strcmp(argk[1], "string") == 0) { str = argv[1]; } else { senditf("Unable to encode the given string. Try to use @encode string=\"yourstring\"\n"); return 1; } } len = strlen(str); if (len > MAX_NAME_LENGTH) { sendit(_("The String to encode is too long\n")); return 1; } Mmsg(buf, "%03d:%03d:%s", salt, len ^ salt, str); for(char *p = buf + 8 ; *p ; p++) { *p = *p ^ pepper; } buf2 = check_pool_memory_size(buf2, len*3); bin_to_base64(buf2, sizeof_pool_memory(buf2), buf, 8 + len + 1, 1); sendit(buf2); free_pool_memory(buf); free_pool_memory(buf2); sendit("\n"); } return 1; } /* @echo xxx yyy */ static int echocmd(FILE *input, BSOCK *UA_sock) { for (int i=1; i < argc; i++) { senditf("%s ", argk[i]); } sendit("\n"); return 1; } /* @quit */ static int quitcmd(FILE *input, BSOCK *UA_sock) { return 0; } /* @help */ static int helpcmd(FILE *input, BSOCK *UA_sock) { int i; for (i=0; i 1) { sleep(atoi(argk[1])); } return 1; } /* @putfile key /path/to/file * * The Key parameter is needed to use the file on the director side. */ static int putfilecmd(FILE *input, BSOCK *UA_sock) { const char *key = "putfile"; const char *fname; FILE *fp; if (argc != 3) { sendit("Usage: @putfile key file\n"); return 1; } key = argk[1]; fname = argk[2]; if (!key || !fname) { senditf("Syntax error in @putfile command\n"); return 1; } fp = bfopen(fname, "r"); if (!fp) { berrno be; senditf("Unable to open %s. 
ERR=%s\n", fname, be.bstrerror(errno)); return 1; } UA_sock->fsend(".putfile key=\"%s\"", key); /* Just read the file and send it to the director */ while (!feof(fp)) { int i = fread(UA_sock->msg, 1, sizeof_pool_memory(UA_sock->msg) - 1, fp); if (i > 0) { UA_sock->msg[i] = 0; UA_sock->msglen = i; UA_sock->send(); } } UA_sock->signal(BNET_EOD); fclose(fp); /* Get the file name associated */ while (UA_sock->recv() > 0) { senditf("%s", UA_sock->msg); } return 1; } /* @time */ static int timecmd(FILE *input, BSOCK *UA_sock) { char sdt[50]; time_t ttime = time(NULL); struct tm tm; (void)localtime_r(&ttime, &tm); strftime(sdt, sizeof(sdt), "%d-%b-%Y %H:%M:%S", &tm); sendit("\n"); return 1; } /* * Send a line to the output file and or the terminal */ void senditf(const char *fmt,...) { char buf[3000]; va_list arg_ptr; va_start(arg_ptr, fmt); bvsnprintf(buf, sizeof(buf), (char *)fmt, arg_ptr); va_end(arg_ptr); sendit(buf); } void sendit(const char *buf) { #ifdef CONIO_FIX char obuf[3000]; if (output == stdout || teeout) { const char *p, *q; /* * Here, we convert every \n into \r\n because the * terminal is in raw mode when we are using * conio. */ for (p=q=buf; (p=strchr(q, '\n')); ) { int len = p - q; if (len > 0) { memcpy(obuf, q, len); } memcpy(obuf+len, "\r\n", 3); q = ++p; /* point after \n */ fputs(obuf, output); } if (*q) { fputs(q, output); } fflush(output); } if (output != stdout) { fputs(buf, output); } #else fputs(buf, output); fflush(output); if (teeout) { fputs(buf, stdout); fflush(stdout); } #endif } bacula-15.0.3/src/console/authenticate.c0000644000175000017500000002424014771010173017737 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * * Bacula UA authentication. Provides authentication with * the Director. * * Kern Sibbald, June MMI * * This routine runs as a thread and must be thread reentrant. 
* * Basic tasks done here: * */ #include "bacula.h" #include "console_conf.h" /* * Version at end of Hello Enterprise: * prior to 06Aug13 no version * 1 06Aug13 - added comm line compression * Community * prior to 06Aug13 no version * 100 14Feb17 - added comm line compression * * Starting from version 9.7 both Hellos become the same: * 200 1/10/2020 - added `auth interactive` functionality */ #define UA_VERSION UA_VERSION_PLUGINAUTH /* Enterprise and Community */ void senditf(const char *fmt, ...); void sendit(const char *buf); /* Commands sent to Director */ static char hello[] = "Hello %s calling %d tlspsk=%d\n"; /* Response from Director */ static char oldOKhello[] = "1000 OK:"; static char newOKhello[] = "1000 OK: %d"; static char FDOKhello[] = "2000 OK Hello %d"; class ConsoleAuthenticate: public AuthenticateBase { public: ConsoleAuthenticate(BSOCK *dir): AuthenticateBase(NULL, dir, dtCli, dcCON, dcDIR), avoid_getpass(false) { } virtual ~ConsoleAuthenticate() {}; virtual void TLSFailure() { sendit(_("TLS negotiation failed\n")); } virtual bool CheckTLSRequirement(); int authenticate_director(DIRRES *director, CONRES *cons); bool ClientAuthenticate(CONRES *cons, const char *password); bool avoid_getpass; // this is for regress testing }; int authenticate_director(BSOCK *dir, DIRRES *director, CONRES *cons, bool avoid_getpass = false) { ConsoleAuthenticate auth(dir); auth.avoid_getpass = avoid_getpass; return auth.authenticate_director(director, cons); } bool ConsoleAuthenticate::CheckTLSRequirement() { /* Verify that the connection is willing to meet our TLS requirements */ switch (TestTLSRequirement()) { case TLS_REQ_ERR_LOCAL: sendit(_("Authorization problem:" " Remote server requires TLS.\n")); return false; case TLS_REQ_ERR_REMOTE: sendit(_("Authorization problem:" " Remote server did not advertise required TLS support.\n")); return false; case TLS_REQ_OK: break; } return true; } bool ConsoleAuthenticate::ClientAuthenticate(CONRES *cons, const char *password) { BSOCK *dir = bsock; bool legacy = true; // by default we should execute legacy authentication POOL_MEM msg(PM_MESSAGE); // at first handle optional startTLS if (!ClientEarlyTLS()) { return false; } // auth interactive is handled in named console only if (cons){ // if starttls does not left our auth interactive command get it from director if (!check_early_tls){ if (dir->recv() <= 0) { bmicrosleep(5, 0); // original cram_md5_respond() wait for 5s here return false; } } // temporary buffer pm_strcpy(msg, dir->msg); // the check_early_tls=false means legacy authentication should do standard recv() else it can get command already in the bsock->msg check_early_tls = true; if (strncmp(msg.c_str(), UA_AUTH_INTERACTIVE, strlen(UA_AUTH_INTERACTIVE)) == 0){ // do challenge/response chatting check_early_tls = false; // "tell" cram_md5_respond to do a recv() legacy = false; // we do the authentication // first display a welcome string if available char *welcome = msg.c_str() + strlen(UA_AUTH_INTERACTIVE) + 1; if (strlen(welcome) > 2){ sendit("> "); sendit(welcome); } for (;;){ // loop over operations if (dir->recv() <= 0) { bmicrosleep(5, 0); // original cram_md5_respond() wait for 5s here return false; } // early check if auth interaction finish if (dir->msg[0] == UA_AUTH_INTERACTIVE_FINISH){ // break the loop break; } pm_strcpy(msg, NULL); pm_strcpy(msg, dir->msg + 1); strip_trailing_junk(msg.c_str()); char *data = msg.c_str(); POOL_MEM buf(PM_MESSAGE); char *input; // the command is encoded as a first char of the message switch 
(dir->msg[0]){
case UA_AUTH_INTERACTIVE_PLAIN: sendit(data); input = fgets(buf.c_str(), buf.size(), stdin); if (!input){ Dmsg0(1, "Error reading user input!\n"); return false; } // now return it to the director
strip_trailing_junk(buf.c_str()); dir->fsend("%c%s", UA_AUTH_INTERACTIVE_RESPONSE, buf.c_str()); break;
case UA_AUTH_INTERACTIVE_HIDDEN:
#if defined(DEVELOPER)
if (!avoid_getpass){ // normal pass handling
#endif
#if defined(HAVE_WIN32)
sendit(data); if (win32_cgets(buf.c_str(), buf.size()) == NULL) { pm_strcpy(buf, NULL); }
#else
bstrncpy(buf.c_str(), getpass(data), buf.size());
#endif
#if defined(DEVELOPER)
} else { sendit(data); input = fgets(buf.c_str(), buf.size(), stdin); if (!input){ Dmsg0(1, "Error reading user input!\n"); return false; } strip_trailing_junk(buf.c_str()); }
#endif
// the hidden response is now in buf; return it to the director
dir->fsend("%c%s", UA_AUTH_INTERACTIVE_RESPONSE, buf.c_str()); break;
case UA_AUTH_INTERACTIVE_MESSAGE: sendit(data); sendit("\n"); break;
case UA_AUTH_INTERACTIVE_FINISH: // we should never reach this code here, so report insanity
return false;
default: bmicrosleep(5, 0); // original cram_md5_respond() waits 5s here
return false; } }
// now check if authorized
if (bsock->wait_data(180) <= 0 || bsock->recv() <= 0) { Dmsg1(1, "Receive auth confirmation failed. ERR=%s\n", bsock->bstrerror()); bmicrosleep(5, 0); return false; }
if (strcmp(bsock->msg, "1000 OK auth\n") == 0) { // authorization ok
return true; }
Dmsg1(1, "Received bad response: %s\n", bsock->msg); bmicrosleep(5, 0); return false; } }
/* standard md5 handle */
if (legacy && !ClientCramMD5AuthenticateBase(password)) { return false; } return true; }
/* * Authenticate Director */
int ConsoleAuthenticate::authenticate_director(DIRRES *director, CONRES *cons) { BSOCK *dir = bsock; int dir_version = 0; char bashed_name[MAX_NAME_LENGTH]; bool skip_msg = false;
/* * Send my name to the Director then do authentication */
if (cons) { bstrncpy(bashed_name, cons->hdr.name, sizeof(bashed_name)); bash_spaces(bashed_name); CalcLocalTLSNeedFromRes(cons->tls_enable, cons->tls_require, cons->tls_authenticate, false, NULL, cons->tls_ctx, cons->tls_psk_enable, cons->psk_ctx, cons->password); } else { bstrncpy(bashed_name, "*UserAgent*", sizeof(bashed_name)); CalcLocalTLSNeedFromRes(director->tls_enable, director->tls_require, director->tls_authenticate, false, NULL, director->tls_ctx, director->tls_psk_enable, director->psk_ctx, director->password); }
/* Timeout Hello after 15 secs */
StartAuthTimeout(1500);
dir->fsend(hello, bashed_name, UA_VERSION, tlspsk_local_need);
if (!ClientAuthenticate(cons, password)){ if (dir->is_timed_out()) { sendit(_("The Director is busy or the MaximumConsoleConnections limit is reached.\n")); skip_msg = true; } goto bail_out; }
if (!HandleTLS()) { goto bail_out; }
/* * It's possible that the TLS connection will * be dropped here if an invalid client certificate was presented */
Dmsg1(6, ">dird: %s", dir->msg);
if (dir->recv() <= 0) { senditf(_("Bad response to Hello command: ERR=%s\n"), dir->bstrerror()); goto bail_out; }
Dmsg1(10, "<dird: %s", dir->msg);
if (strncmp(dir->msg, oldOKhello, sizeof(oldOKhello)-1) == 0) { /* If Dir version exists, get it */ sscanf(dir->msg, newOKhello, &dir_version); sendit(dir->msg); /* We do not check the last %d */ } else if (strncmp(dir->msg, FDOKhello, sizeof(FDOKhello)-3) == 0) { sscanf(dir->msg, FDOKhello, &dir_version); sendit(dir->msg); } else { sendit(_("Director rejected Hello command\n")); goto 
bail_out; } /* Turn on compression for newer Directors */ if (dir_version >= 1 && (!cons || cons->comm_compression)) { dir->set_compress(); } else { dir->clear_compress(); } return 1; bail_out: if (!skip_msg) { sendit( _("Director authorization problem.\n" "Most likely the passwords do not agree.\n" "If you are using TLS, there may have been a certificate validation error during the TLS handshake.\n" "For help, please see " MANUAL_AUTH_URL "\n")); } return 0; } bacula-15.0.3/src/console/bconsole.conf.in0000644000175000017500000000040214771010173020167 0ustar bsbuildbsbuild# # Bacula User Agent (or Console) Configuration File # # Copyright (C) 2000-2023 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # Director { Name = @basename@-dir DIRport = @dir_port@ address = @hostname@ Password = "@dir_password@" } bacula-15.0.3/src/console/Makefile.in0000644000175000017500000001130614771010173017161 0ustar bsbuildbsbuild# # Copyright (C) 2000-2023 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # @MCOMMON@ srcdir = . VPATH = . .PATH: . # one up basedir = .. # top dir topdir = ../.. # this dir relative to top dir thisdir = src/console DEBUG=@DEBUG@ first_rule: all dummy: # CONSSRCS = console.c console_conf.c authenticate.c @CONS_SRC@ CONSOBJS = console.o console_conf.o authenticate.o @CONS_OBJ@ JSONOBJS = bbconsjson.o console_conf.o # these are the objects that are changed by the .configure process EXTRAOBJS = @OBJLIST@ GETTEXT_LIBS = @LIBINTL@ CONS_INC=@CONS_INC@ CONS_LIBS=@CONS_LIBS@ CONS_LDFLAGS=@CONS_LDFLAGS@ .SUFFIXES: .c .o .PHONY: .DONTCARE: # inference rules .c.o: @echo "Compiling $<" $(NO_ECHO)$(DO_CHECK_DMSG) $(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) -E $< | $(CHECK_DMSG) $(NO_ECHO)$(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) $(CONS_INC) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) $< #------------------------------------------------------------------------- all: Makefile bconsole @STATIC_CONS@ bbconsjson @echo "==== Make of console is good ====" @echo " " bconsole: Makefile $(CONSOBJS) ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) ../lib/libbaccfg$(DEFAULT_ARCHIVE_TYPE) $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) $(CONS_LDFLAGS) -L../lib -L../cats -o $@ $(CONSOBJS) \ $(DLIB) $(CONS_LIBS) -lbaccfg -lbac -lm $(LIBS) $(GETTEXT_LIBS) \ $(OPENSSL_LIBS) bbconsjson: Makefile $(JSONOBJS) ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) ../lib/libbaccfg$(DEFAULT_ARCHIVE_TYPE) $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) $(CONS_LDFLAGS) -L../lib -L../cats -o $@ $(JSONOBJS) \ $(DLIB) $(CONS_LIBS) -lbaccfg -lbac -lm $(LIBS) $(GETTEXT_LIBS) \ $(OPENSSL_LIBS) static-bconsole: Makefile $(CONSOBJS) ../lib/libbac.a ../lib/libbaccfg$(DEFAULT_ARCHIVE_TYPE) $(LIBTOOL_LINK) $(CXX) -static $(LDFLAGS) $(CONS_LDFLAGS) -L../lib -L../cats -o $@ $(CONSOBJS) \ $(DLIB) $(CONS_LIBS) -lbaccfg -lbac -lm $(LIBS) $(GETTEXT_LIBS) \ $(OPENSSL_LIBS) strip $@ Makefile: $(srcdir)/Makefile.in $(topdir)/config.status cd $(topdir) \ && CONFIG_FILES=$(thisdir)/$@ CONFIG_HEADERS= $(SHELL) ./config.status libtool-clean: @$(RMF) -r .libs _libs clean: libtool-clean @$(RMF) console bconsole core core.* a.out *.o *.bak *~ *.intpro *.extpro 1 2 3 @$(RMF) static-console static-bconsole gmon.out realclean: clean @$(RMF) tags console.conf bconsole.conf distclean: realclean if test $(srcdir) = .; then $(MAKE) realclean; fi (cd $(srcdir); $(RMF) Makefile) devclean: realclean if test $(srcdir) = .; then $(MAKE) realclean; fi (cd $(srcdir); $(RMF) Makefile) install: all @if test -f ${DESTDIR}${sbindir}/console; 
then \ echo " "; \ echo "Warning!!! ${DESTDIR}${sbindir}/console found."; \ echo " console has been renamed bconsole, so console"; \ echo " is no longer used, and you might want to delete it."; \ echo " "; \ fi $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM_ALL) bconsole $(DESTDIR)$(sbindir)/bconsole $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) bbconsjson $(DESTDIR)$(sbindir)/bbconsjson @srcconf=bconsole.conf; \ if test -f ${DESTDIR}${sysconfdir}/$$srcconf; then \ destconf=$$srcconf.new; \ echo " ==> Found existing $$srcconf, installing new conf file as $$destconf"; \ else \ destconf=$$srcconf; \ if test -f ${DESTDIR}${sysconfdir}/console.conf; then \ echo "Existing console.conf moved to bconsole.conf"; \ @$(MV) ${DESTDIR}${sysconfdir}/console.conf ${DESTDIR}${sysconfdir}/bconsole.conf; \ destconf=$$srcconf.new; \ fi; \ fi; \ echo "${INSTALL_CONFIG} $$srcconf ${DESTDIR}${sysconfdir}/$$destconf"; \ ${INSTALL_CONFIG} $$srcconf ${DESTDIR}${sysconfdir}/$$destconf if test -f static-bconsole; then \ $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) static-bconsole $(DESTDIR)$(sbindir)/static-bconsole; \ fi uninstall: (cd $(DESTDIR)$(sbindir); $(RMF) bconsole static-bconsole bbconsjson) (cd $(DESTDIR)$(sysconfdir); $(RMF) console.conf bconsole.conf bconsole.conf.new) # Semi-automatic generation of dependencies: # Use gcc -MM because X11 `makedepend' doesn't work on all systems # and it also includes system headers. # `semi'-automatic since dependencies are generated at distribution time. depend: @$(MV) Makefile Makefile.bak @$(SED) "/^# DO NOT DELETE:/,$$ d" Makefile.bak > Makefile @$(ECHO) "# DO NOT DELETE: nice dependency list follows" >> Makefile @$(CXX) -S -M $(CPPFLAGS) $(CONS_INC) -I$(srcdir) -I$(basedir) *.c >> Makefile @if test -f Makefile ; then \ $(RMF) Makefile.bak; \ else \ $(MV) Makefile.bak Makefile; \ echo " ===== Something went wrong in make depend ====="; \ fi # ----------------------------------------------------------------------- # DO NOT DELETE: nice dependency list follows bacula-15.0.3/src/console/func.h0000755000175000017500000001316214771010173016225 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* Definitions of internal function codes */ /* Functions that work on current line */ #define F_CSRRGT 301 /* cursor right */ #define F_CSRLFT 302 /* cursor left */ #define F_ERSCHR 303 /* erase character */ #define F_INSCHR 304 /* insert character */ #define F_DELCHR 305 /* delete next character character */ #define F_SOL 306 /* go to start of line */ #define F_EOL 307 /* go to end of line */ #define F_DELEOL 308 /* delete to end of line */ #define F_NXTWRD 309 /* go to next word */ #define F_PRVWRD 310 /* go to previous word */ #define F_DELWRD 311 /* delete word */ #define F_ERSLIN 312 /* erase line */ #define F_TAB 313 /* tab */ #define F_TABBAK 314 /* tab backwards */ #define F_DELSOL 315 /* delete from start of line to cursor */ #define F_LEFT40 316 /* move cursor left 40 cols */ #define F_RIGHT40 317 /* move cursor right 40 cols */ #define F_CASE 318 /* change case of next char advance csr */ #define F_CENTERL 319 /* center line */ /* Functions that move the cursor line or work on groups of lines */ #define F_CSRDWN 401 /* cursor down */ #define F_CSRUP 402 /* cursor up */ #define F_HOME 403 /* home cursor */ #define F_EOF 404 /* go to end of file */ #define F_PAGDWN 405 /* page down */ #define F_PAGUP 406 /* page up */ #define F_CENTER 407 /* center cursor on screen */ #define F_SPLIT 408 /* split line at cursor */ #define F_DELLIN 409 /* delete line */ #define F_CONCAT 410 /* concatenate next line to current */ #define F_RETURN 411 /* carriage return */ #define F_NXTMCH 412 /* next match */ #define F_DWN5 413 /* cursor down 5 lines */ #define F_UP5 414 /* cursor up 5 lines */ #define F_PUSH 415 /* push current location */ #define F_POP 416 /* pop previous location */ #define F_PAREN 417 /* find matching paren */ #define F_POPVIEW 418 /* pop to saved view */ #define F_OOPS 419 /* restore last oops buffer */ #define F_PARENB 420 /* find matching paren backwards */ #define F_BOTSCR 421 /* cursor to bottom of screen */ #define F_TOPSCR 422 /* cursor to top of screen */ #define F_TOPMARK 423 /* cursor to top marker line */ #define F_BOTMARK 424 /* cursor to bottom marker line */ #define F_CPYMARK 425 /* copy marked lines */ #define F_MOVMARK 426 /* move marked lines */ #define F_DELMARK 427 /* delete marked lines */ #define F_SHFTLEFT 428 /* shift marked text left one char */ #define F_SHFTRIGHT 429 /* shift marked text right one char */ /* Miscellaneous */ #define F_ESCAPE 501 /* escape character */ #define F_ESC 501 /* escape character */ #define F_EOI 502 /* end of input */ #define F_TENTRY 503 /* toggle entry mode */ #define F_TINS 504 /* toggle insert mode */ #define F_MARK 505 /* set marker on lines */ #define F_CRESC 506 /* carriage return, escape */ #define F_MACDEF 507 /* begin "macro" definition */ #define F_MACEND 508 /* end "macro" definition */ #define F_ZAPESC 509 /* clear screen, escape */ #define F_CLRMARK 510 /* clear marked text */ #define F_MARKBLK 511 /* mark blocks */ #define F_MARKCHR 512 /* mark characters */ #define F_HOLD 513 /* hold line */ #define F_DUP 514 /* duplicate line */ #define F_CHANGE 515 /* apply last change command */ #define F_RCHANGE 516 /* reverse last change command */ #define F_NXTFILE 517 /* next file */ #define F_INCLUDE 518 /* include */ #define F_FORMAT 519 /* format paragraph */ #define F_HELP 520 /* help */ #define F_JUSTIFY 521 /* justify paragraph */ #define F_SAVE 522 /* save file -- not implemented */ #define F_MOUSEI 523 /* mouse input coming -- not completed */ #define F_SCRSIZ 524 /* Screen size coming */ #define F_PASTECB 
525 /* Paste clipboard */ #define F_CLRSCRN 526 /* Clear the screen */ #define F_CRNEXT 527 /* Send line, get next line */ #define F_BREAK 528 /* Break */ #define F_BACKGND 529 /* go into background */ bacula-15.0.3/src/console/conio.h0000644000175000017500000000162514771010173016377 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #ifndef __CONIO_H #define __CONIO_H extern int input_line(char *line, int len); extern void con_init(FILE *input); extern void con_term(); extern void con_set_zed_keys(); extern void t_sendl(char *buf, int len); extern void t_send(char *buf); extern void t_char(char c); #endif bacula-15.0.3/src/.indent.pro0000644000175000017500000000124714771010173015536 0ustar bsbuildbsbuild--dont-format-comments --procnames-start-lines --parameter-indentation4 --indent-level4 --line-comments-indentation4 --cuddle-else --brace-indent0 --start-left-side-of-comments --no-blank-lines-after-commas --blank-lines-after-declarations --blank-lines-after-procedures --comment-indentation33 --declaration-comment-column33 --no-comment-delimiters-on-blank-lines --continuation-indentation4 --case-indentation0 --else-endif-column33 --no-space-after-casts --no-blank-before-sizeof --declaration-indentation16 --continue-at-parentheses --no-space-after-function-call-names --swallow-optional-blank-lines --space-special-semicolon --tab-size8 --line-length79 --braces-on-if-line bacula-15.0.3/src/c0000644000175000017500000000117314771010173013620 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ bacula-15.0.3/src/plugins/0000755000175000017500000000000014771010173015132 5ustar bsbuildbsbuildbacula-15.0.3/src/plugins/sd/0000755000175000017500000000000014771010173015540 5ustar bsbuildbsbuildbacula-15.0.3/src/plugins/sd/main.c0000644000175000017500000000563614771010173016642 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * Main program to test loading and running Bacula plugins. * Destined to become Bacula pluginloader, ... * * Kern Sibbald, October 2007 */ #include "bacula.h" #include #include "lib/plugin.h" #include "plugin-sd.h" const char *plugin_type = "-sd.so"; /* Forward referenced functions */ static bpError baculaGetValue(bpContext *ctx, bVariable var, void *value); static bpError baculaSetValue(bpContext *ctx, bVariable var, void *value); /* Bacula entry points */ static bFuncs bfuncs = { sizeof(bFuncs), PLUGIN_INTERFACE, baculaGetValue, baculaSetValue, NULL, NULL }; int main(int argc, char *argv[]) { char plugin_dir[1000]; bpContext ctx; bEvent event; Plugin *plugin; b_plugin_list = New(alist(10, not_owned_by_alist)); ctx.bContext = NULL; ctx.pContext = NULL; getcwd(plugin_dir, sizeof(plugin_dir)-1); load_plugins((void *)&bfuncs, plugin_dir, plugin_type); foreach_alist(plugin, b_plugin_list) { printf("bacula: plugin_size=%d plugin_version=%d\n", pref(plugin)->size, pref(plugin)->interface); printf("License: %s\nAuthor: %s\nDate: %s\nVersion: %s\nDescription: %s\n", pref(plugin)->plugin_license, pref(plugin)->plugin_author, pref(plugin)->plugin_date, pref(plugin)->plugin_version, pref(plugin)->plugin_description); /* Start a new instance of the plugin */ pref(plugin)->newPlugin(&ctx); event.eventType = bEventNewVolume; pref(plugin)->handlePluginEvent(&ctx, &event); /* Free the plugin instance */ pref(plugin)->freePlugin(&ctx); /* Start a new instance of the plugin */ pref(plugin)->newPlugin(&ctx); event.eventType = bEventNewVolume; pref(plugin)->handlePluginEvent(&ctx, &event); /* Free the plugin instance */ pref(plugin)->freePlugin(&ctx); } unload_plugins(); printf("bacula: OK ...\n"); close_memory_pool(); sm_dump(false); return 0; } static bpError baculaGetValue(bpContext *ctx, bVariable var, void *value) { printf("bacula: baculaGetValue var=%d\n", var); if (value) { *((int *)value) = 100; } return 0; } static bpError baculaSetValue(bpContext *ctx, bVariable var, void *value) { printf("bacula: baculaSetValue var=%d\n", var); return 0; } bacula-15.0.3/src/plugins/sd/Makefile.in0000644000175000017500000000245214771010173017610 0ustar bsbuildbsbuild# # Simple Makefile for building test SD plugins for Bacula # # Copyright (C) 2000-2023 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # @MCOMMON@ # No optimization for now for easy debugging SDDIR=../../stored SRCDIR=../.. LIBDIR=../../lib .SUFFIXES: .c .lo .c.lo: $(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(CPPFLAGS) $(CFLAGS) -I${SRCDIR} -I${SDDIR} -DTEST_PROGRAM -c $< all: example-plugin-sd.la example-plugin-sd.lo: example-plugin-sd.c $(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(CFLAGS) -I../.. -I${SDDIR} -c example-plugin-sd.c example-plugin-sd.la: Makefile example-plugin-sd$(DEFAULT_OBJECT_TYPE) $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -shared example-plugin-sd.lo -o $@ -rpath $(plugindir) -module -export-dynamic -avoid-version install: all $(MKDIR) $(DESTDIR)$(plugindir) $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) example-plugin-sd$(DEFAULT_SHARED_OBJECT_TYPE) $(DESTDIR)$(plugindir) $(RMF) $(DESTDIR)$(plugindir)/example-plugin-sd.la libtool-clean: find . 
-name '*.lo' -print | xargs $(LIBTOOL_CLEAN) $(RMF) $(RMF) *.la $(RMF) -r .libs _libs clean: @LIBTOOL_CLEAN_TARGET@ rm -f main *.la *.so *.o 1 2 3 distclean: clean rm -f Makefile libtool-uninstall: $(LIBTOOL_UNINSTALL) $(RMF) $(DESTDIR)$(plugindir)/example-plugin-sd.so uninstall: @LIBTOOL_UNINSTALL_TARGET@ depend: bacula-15.0.3/src/plugins/sd/example-plugin-sd.c0000644000175000017500000001171214771010173021241 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Sample Storage daemon Plugin program * * Kern Sibbald, October 2007 */ #include "bacula.h" /* General Bacula headers */ #include "stored.h" /* Pull in storage daemon headers */ #ifdef __cplusplus extern "C" { #endif #define PLUGIN_LICENSE "AGPLv3" #define PLUGIN_AUTHOR "Kern Sibbald" #define PLUGIN_DATE "November 2011" #define PLUGIN_VERSION "2" #define PLUGIN_DESCRIPTION "Test Storage Daemon Plugin" /* Forward referenced functions */ static bRC newPlugin(bpContext *ctx); static bRC freePlugin(bpContext *ctx); static bRC getPluginValue(bpContext *ctx, psdVariable var, void *value); static bRC setPluginValue(bpContext *ctx, psdVariable var, void *value); static bRC handlePluginEvent(bpContext *ctx, bsdEvent *event, void *value); static bRC handleGlobalPluginEvent(bsdEvent *event, void *value); /* Pointers to Bacula functions */ static bsdFuncs *bfuncs = NULL; static bsdInfo *binfo = NULL; static psdInfo pluginInfo = { sizeof(pluginInfo), SD_PLUGIN_INTERFACE_VERSION, SD_PLUGIN_MAGIC, PLUGIN_LICENSE, PLUGIN_AUTHOR, PLUGIN_DATE, PLUGIN_VERSION, PLUGIN_DESCRIPTION }; static psdFuncs pluginFuncs = { sizeof(pluginFuncs), SD_PLUGIN_INTERFACE_VERSION, /* Entry points into plugin */ newPlugin, /* new plugin instance */ freePlugin, /* free plugin instance */ getPluginValue, setPluginValue, handlePluginEvent, handleGlobalPluginEvent }; /* * loadPlugin() and unloadPlugin() are entry points that are * exported, so Bacula can directly call these two entry points * they are common to all Bacula plugins. * * External entry point called by Bacula to "load the plugin */ bRC DLL_IMP_EXP loadPlugin(bsdInfo *lbinfo, bsdFuncs *lbfuncs, psdInfo **pinfo, psdFuncs **pfuncs) { bfuncs = lbfuncs; /* set Bacula funct pointers */ binfo = lbinfo; printf("example-plugin-sd: Loaded: size=%d version=%d\n", bfuncs->size, bfuncs->version); *pinfo = &pluginInfo; /* return pointer to our info */ *pfuncs = &pluginFuncs; /* return pointer to our functions */ printf("example-plugin-sd: Loaded\n"); return bRC_OK; } /* * External entry point to unload the plugin */ bRC DLL_IMP_EXP unloadPlugin() { printf("example-plugin-sd: Unloaded\n"); return bRC_OK; } /* * The following entry points are accessed through the function * pointers we supplied to Bacula. Each plugin type (dir, fd, sd) * has its own set of entry points that the plugin must define. */ /* * Create a new instance of the plugin i.e. 
allocate our private storage */ static bRC newPlugin(bpContext *ctx) { int JobId = 0; bfuncs->getBaculaValue(ctx, bsdVarJobId, (void *)&JobId); printf("example-plugin-sd: newPlugin JobId=%d\n", JobId); bfuncs->registerBaculaEvents(ctx, 1, 2, 0); return bRC_OK; } /* * Free a plugin instance, i.e. release our private storage */ static bRC freePlugin(bpContext *ctx) { int JobId = 0; bfuncs->getBaculaValue(ctx, bsdVarJobId, (void *)&JobId); printf("example-plugin-sd: freePlugin JobId=%d\n", JobId); return bRC_OK; } /* * Return some plugin value (none defined) */ static bRC getPluginValue(bpContext *ctx, psdVariable var, void *value) { printf("example-plugin-sd: getPluginValue var=%d\n", var); return bRC_OK; } /* * Set a plugin value (none defined) */ static bRC setPluginValue(bpContext *ctx, psdVariable var, void *value) { printf("example-plugin-sd: setPluginValue var=%d\n", var); return bRC_OK; } /* * Handle an event that was generated in Bacula */ static bRC handlePluginEvent(bpContext *ctx, bsdEvent *event, void *value) { char *name; switch (event->eventType) { case bsdEventJobStart: printf("example-plugin-sd: HandleEvent JobStart :%s:\n", (char *)value); break; case bsdEventJobEnd: printf("example-plugin-sd: HandleEvent JobEnd\n"); break; } bfuncs->getBaculaValue(ctx, bsdVarJobName, (void *)&name); printf("Job Name=%s\n", name); bfuncs->JobMessage(ctx, __FILE__, __LINE__, 1, 0, "JobMesssage message"); bfuncs->DebugMessage(ctx, __FILE__, __LINE__, 1, "DebugMesssage message"); return bRC_OK; } /* * Handle a Global event -- no context */ static bRC handleGlobalPluginEvent(bsdEvent *event, void *value) { return bRC_OK; } #ifdef __cplusplus } #endif bacula-15.0.3/src/plugins/dir/0000755000175000017500000000000014771010173015710 5ustar bsbuildbsbuildbacula-15.0.3/src/plugins/dir/ldap/0000755000175000017500000000000014771010173016630 5ustar bsbuildbsbuildbacula-15.0.3/src/plugins/dir/ldap/ldap-dir.c0000644000175000017500000004205114771010173020472 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Bacula Pluggable Authentication Modules - LDAP Plugin * * Author: Radosław Korzeniewski, radoslaw@korzeniewski.net, Inteos Sp. z o.o. */ #include "ldap-dir.h" /* * This is a BPAM (Bacula Pluggable Authentication Modules) which authenticate users * found in any LDAP Directory Server. It support OpenLDAP and Microsoft Active Directory * among others standard LDAPs. 
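 *
 * The parameters parsed by parse_plugin_params() below are supplied through the
 * "Authentication Plugin" directive of a named Console resource. An illustrative value
 * (host name and credentials are made up; the exact prefix/quoting follows the BPAM
 * documentation) could look like:
 *
 *    ldap url=ldap://ldap.example.com binddn=cn=admin,dc=example,dc=com bindpass=secret
 *         query=dc=example,dc=com/(cn=%u) starttls
 *
 * where "query" is split at the first '/' into basedn and filter, and %u / %p in the
 * filter are replaced with the username / password typed at the bconsole prompts.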
*/ // a list of authentication operations we expect and handle before final authenticate static bDirAuthenticationData bpamldapquestions[] = { // operation; question; seqdata; {bDirAuthenticationOperationLogin, "Username:", 0}, {bDirAuthenticationOperationPassword, "Password:", 1}, }; #define LDAP_WELCOME "LDAP Authentication" // plugin authentication registration struct static bDirAuthenticationRegister bpamldapregister = { #if __cplusplus > 201103L .name = PLUGIN_NAME, .welcome = LDAP_WELCOME, // our plugin name which should correspond to plugin filename .num = 2, // it shows the number of defined auth operations .data = bpamldapquestions, // our auth operations list .nsTTL = 0, // future usage #else PLUGIN_NAME, LDAP_WELCOME, 2, bpamldapquestions, 0 #endif }; #ifdef __cplusplus extern "C" { #endif /* Forward referenced functions */ static bRC newPlugin(bpContext *ctx); static bRC freePlugin(bpContext *ctx); static bRC handlePluginEvent(bpContext *ctx, bDirEvent *event, void *value); static bRC getAuthenticationData(bpContext *ctx, const char *console, const char *param, void **data); static bRC getAuthorizationData(bpContext *ctx, const char *console, const char *param, void **data); /* Pointers to Bacula functions */ bDirFuncs *bfuncs = NULL; bDirInfo *binfo = NULL; static pDirInfo pluginInfo = { sizeof(pluginInfo), DIR_PLUGIN_INTERFACE_VERSION, DIR_PLUGIN_MAGIC, PLUGIN_LICENSE, PLUGIN_AUTHOR, PLUGIN_DATE, PLUGIN_VERSION, PLUGIN_DESCRIPTION }; static pDirFuncs pluginFuncs = { sizeof(pluginFuncs), DIR_PLUGIN_INTERFACE_VERSION, /* Entry points into plugin */ newPlugin, /* new plugin instance */ freePlugin, /* free plugin instance */ NULL, // we do not provide this callback NULL, // we do not provide this callback handlePluginEvent, getAuthenticationData, getAuthorizationData, }; bRC loadPlugin(bDirInfo *lbinfo, bDirFuncs *lbfuncs, pDirInfo **pinfo, pDirFuncs **pfuncs) { bfuncs = lbfuncs; /* set Bacula funct pointers */ binfo = lbinfo; Dmsg2(DINFO, "Loaded: size=%d version=%d\n", bfuncs->size, bfuncs->version); *pinfo = &pluginInfo; /* return pointer to our info */ *pfuncs = &pluginFuncs; /* return pointer to our functions */ return bRC_OK; } bRC unloadPlugin() { Dmsg0(DINFO, "plugin: Unloaded\n"); return bRC_OK; } static bRC newPlugin(bpContext *ctx) { BPAMLDAP *self = New (BPAMLDAP(ctx)); DMSG0(ctx, DINFO, "newPlugin\n"); ctx->pContext = (void*)self; return bRC_OK; } static bRC freePlugin(bpContext *ctx) { DMSG0(ctx, DINFO, "freePlugin\n"); if (!ctx){ return bRC_Error; } BPAMLDAP *self = (BPAMLDAP*)ctx->pContext; if (!self){ return bRC_Error; } delete self; return bRC_OK; } static bRC handlePluginEvent(bpContext *ctx, bDirEvent *event, void *value) { DMSG(ctx, D1, "handlePluginEvent (%i)\n", event->eventType); BPAMLDAP *self = (BPAMLDAP*)ctx->pContext; return self->handlePluginEvent(event, value); } static bRC getAuthenticationData(bpContext *ctx, const char *console, const char *param, void **data) { DMSG(ctx, D1, "getAuthenticationData (%s)\n", param); BPAMLDAP *self = (BPAMLDAP*)ctx->pContext; return self->getAuthenticationData(param, data); } static bRC getAuthorizationData(bpContext *ctx, const char *console, const char *param, void **data) { DMSG(ctx, D1, "getAuthorizationData (%s)\n", param); BPAMLDAP *self = (BPAMLDAP*)ctx->pContext; return self->getAuthorizationData(param, data); } #ifdef __cplusplus } #endif /** * @brief display common connection error message and diagnose * * @param ret the return code from ldap_* functions to analize and display */ void 
BPAMLDAP::ldapserverconnectionerror(int ret) { char *errmsg = NULL; ldap_get_option(ld, LDAP_OPT_DIAGNOSTIC_MESSAGE, (void*)&errmsg); DMSG1(ctx, D1, "LDAP Server connection error: %s\n", ldap_err2string(ret)); if (errmsg){ DMSG1(ctx, D1, "diagnose error: %s\n", errmsg); } ldap_memfree(errmsg); } /** * @brief connect to LDAP service * * @return bRC on success bRC_OK, on error bRC_Error */ bRC BPAMLDAP::ldapconnect() { const int desired_version = LDAP_VERSION3; POOL_MEM tmp(PM_FNAME); int ret; struct berval cred; struct berval *msgidp = NULL; bool starttls = false; bool starttlsforce = false; // first initialize ldap connection context `LDAP` #ifdef __sun__ // In Solaris our `url` parameter is a host name only and not a real URL. // It should be notice in documentation if ((ld = ldap_init(url.c_str(), LDAP_PORT)) == NULL) { return bRC_Error; } #else if (ldap_initialize(&ld, url.c_str()) != LDAP_SUCCESS) { return bRC_Error; } #endif // required a desired LDAP protocol version #ifdef __sun__ if ((ret = ldap_set_option(ld, LDAP_OPT_PROTOCOL_VERSION, &desired_version)) != LDAP_SUCCESS) #else if ((ret = ldap_set_option(ld, LDAP_OPT_PROTOCOL_VERSION, &desired_version)) != LDAP_OPT_SUCCESS) #endif { ldapserverconnectionerror(ret); return bRC_Error; } // disable referals return in queries - we do not support it #ifdef __sun__ if ((ret = ldap_set_option(ld, LDAP_OPT_REFERRALS, LDAP_OPT_OFF)) != LDAP_SUCCESS) #else if ((ret = ldap_set_option(ld, LDAP_OPT_REFERRALS, LDAP_OPT_OFF)) != LDAP_OPT_SUCCESS) #endif { ldapserverconnectionerror(ret); return bRC_Error; } // handle starttls if requested in configuration #ifdef HAVE_LDAP_START_TLS // // Start TLS support // // To enable StartTLS for LDAP connection you have to setup 'starttls' or 'starttlsforce' plugin options // where 'starttlsforce' will fail connection if server do not support it. 
// if (starttls || starttlsforce){ DMSG0(ctx, D2, "executing ldap_start_tls_s\n"); if ((ret = ldap_start_tls_s(ld, NULL, NULL)) != LDAP_SUCCESS) { ldapserverconnectionerror(ret); if (starttlsforce) { DMSG0(ctx, D1, "STARTTLSFORCE set, cannot continue!\n"); return bRC_Error; } } } #endif /* HAVE_LDAP_START_TLS */ DMSG2(ctx, D1, "credentials to connect: binddn='%s' pass='%s'\n", binddn.c_str(), bindpass.c_str()); cred.bv_val = bindpass.c_str(); cred.bv_len = strlen(cred.bv_val); if ((ret = ldap_sasl_bind_s(ld, binddn.c_str(), LDAP_SASL_SIMPLE, &cred, NULL, NULL, &msgidp)) != LDAP_SUCCESS) { ldapserverconnectionerror(ret); if (strcasestr(url.c_str(), "ldaps://") != NULL || starttls || starttlsforce){ DMSG0(ctx, M_INFO, "Using TLS/SSL for LDAP service require CA certificate configuration on the backup server.\n"); DMSG0(ctx, M_INFO, "If it is not configured properly a connection over TLS/SSL will fail.\n"); } return bRC_Error; } DMSG0(ctx, DDEBUG, "LDAP connection successful\n"); return bRC_OK; } /** * @brief disconnect from LDAP service * * @return bRC on success bRC_OK, on error bRC_Error */ bRC BPAMLDAP::ldapdisconnect() { int rc; rc = ldap_unbind_ext(ld, NULL, NULL); if (rc != LDAP_SUCCESS) { return bRC_Error; } return bRC_OK; } /** * @brief check if supplied dn exist in directory * * @param dn checked dn * @return true dn exist in directory * @return false dn does not exist */ bool BPAMLDAP::ldapsearchonedn() { int rc; bool ret = false; int type; const char *attrs[] = { "cn", NULL }; LDAPMessage *msg = NULL; const char *dn; DMSG2(ctx, D3, "ldapsearchonedn for: %s and filter: %s\n", basedn.c_str(), filter.c_str()); rc = ldap_search_ext_s(ld, basedn.c_str(), LDAP_SCOPE_SUB, filter.c_str(), (char **)attrs, 0, NULL, NULL, NULL, LDAP_NO_LIMIT, &msg); switch (rc){ case LDAP_NO_SUCH_OBJECT: /* object not found */ case LDAP_REFERRAL: /* for LDAP we reached an out of scope query */ DMSG0(ctx, DDEBUG, "no such object or referral found\n"); break; case LDAP_SUCCESS: type = ldap_msgtype(msg); DMSG(ctx, D3, "ldapsearchonedn resulting msgtype: %i\n", type); if (type == LDAP_RES_SEARCH_ENTRY){ ret = true; dn = ldap_get_dn(ld, msg); if (!dn){ DMSG0(ctx, DERROR, "ldapsearchonedn cannot get entry DN!\n"); ret = false; } else { DMSG1(ctx, DDEBUG, "ldapsearchonedn get DN: %s\n", dn); pm_strcpy(userdn, dn); } } break; default: /* we reach some error */ DMSG2(ctx, D1, "ldapsearchonedn search error: %s for: %s\n", ldap_err2string(rc), basedn.c_str()); break; } ldap_msgfree(msg); return ret; } /** * @brief This method perform a variable substitute in filter expression. * Substituted variables are: * * %p - for user password response * * %u - for username response * Any single `%` character without next `p` or `u` will be simple rewritten without change. * If you want to rewrite '%p' or '%u' without substituting it with predefined variables then * you have to write it as: '%%p' or '%%u' respectively. 
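 *
 * Worked example (illustrative values only): with the username response "jsmith", the filter
 *    "(&(objectClass=person)(uid=%u))"   becomes   "(&(objectClass=person)(uid=jsmith))"
 * while an escaped percent sign is rewritten literally:
 *    "(description=100%%u)"              becomes   "(description=100%u)"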
*/ void BPAMLDAP::substitute_filter_parameters() { if (strlen(filter.c_str()) > 0){ POOL_MEM tmp(PM_MESSAGE); char *p; char *s; const char *c = NULL; char *q = s = filter.c_str(); tmp.c_str()[0] = '\0'; while ((p = strchr(q, '%')) != NULL) { switch (p[1]){ case 'u': *p = '\0'; c = username.c_str(); p++; break; case 'p': *p = '\0'; c = password.c_str(); p++; break; case '%': p[1] = '\0'; p++; c = ""; break; default: c = NULL; break; } q = p + 1; if (c){ pm_strcat(tmp, s); pm_strcat(tmp, c); s = q; } } if (s){ pm_strcat(tmp, s); } pm_strcpy(filter, tmp.c_str()); DMSG1(ctx, DINFO, "filter after substitute: %s\n", filter.c_str()); } } /** * @brief Perform a standard LDAP authentication using authentication parameters and user credentials. * The The LDAP authentication procedure is similar to the one implemented by Apache mod_authnz_ldap * (check: https://httpd.apache.org/docs/2.4/mod/mod_authnz_ldap.html) which is known as "search/bind". * In this case Plugin first connects to LDAP Service to search for authenticated user definition * (using binddn and bindpass) as it requires a proper user DN to do the second part of authentication. * If found then it will bind to LDAP on behalf of the user with user credentials from bconsole * interactive query/response session. * * @return bRC bRC_OK on successful authentication, bRC_Error when not */ bRC BPAMLDAP::do_ldap_authenticate() { // prepare search filter based on response variables substitute_filter_parameters(); // connect using bind credentials // TODO: we should handle first search without credentials if (ldapconnect() != bRC_OK){ return bRC_Error; } // check if user query rerurn required object if (!ldapsearchonedn()){ return bRC_Error; } // its all with bind credentials if (ldapdisconnect() != bRC_OK){ return bRC_Error; } // now we should connect to LDAP using user credentials pm_strcpy(binddn, userdn.c_str()); pm_strcpy(bindpass, password.c_str()); // login as user if (ldapconnect() != bRC_OK){ return bRC_Error; } // disconnect if (ldapdisconnect() != bRC_OK){ return bRC_Error; } // now we should report login success! DMSG0(ctx, DINFO, "LDAP Authentication Successfull!\n"); return bRC_OK; } /** * @brief Hndles plugin events defined in BPAM API. * * @param event the event to handle * @param value a pointer to opional value for selected event * @return bRC bRC_OK on success, bRC_Error on any error */ bRC BPAMLDAP::handlePluginEvent(bDirEvent *event, void *value) { bDirAuthValue *pvalue; switch (event->eventType) { case bDirEventAuthenticationQuestion: break; case bDirEventAuthenticationResponse: pvalue = (bDirAuthValue *)value; DMSG_EVENT_STR(event, pvalue->response); switch (pvalue->seqdata){ case 0: // username pm_strcpy(username, pvalue->response); break; case 1: // password pm_strcpy(password, pvalue->response); break; default: return bRC_Error; } break; case bDirEventAuthenticate: DMSG_EVENT_PTR(event, value); return do_ldap_authenticate(); default: break; } return bRC_OK; } /** * @brief Handles Plugin parameters defined in "Authentication Plugin = ..." Console resource parameter. * * @param param a string from "Authentication Plugin = ..." 
Console resource parameter * @return bRC bRC_OK when all parameters are valid and parsed, bRC_Error on any error */ bRC BPAMLDAP::parse_plugin_params(const char *param) { cmd_parser parser; if (!param){ return bRC_Error; } // and parse command if (parser.parse_cmd(param) != bRC_OK) { DMSG0(ctx, DERROR, "Unable to parse Plugin parameters.\n"); return bRC_Error; } // the first (zero) parameter is a plugin name if (!bstrcmp(parser.argk[0], PLUGIN_NAME)){ return bRC_Error; } // iterate over next parameters for (int i = 1; i < parser.argc; i++) { if (bstrcmp(parser.argk[i], "url")){ pm_strcpy(url, parser.argv[i]); DMSG1(ctx, DDEBUG, "parsed url: %s\n", url.c_str()); continue; } else if (bstrcmp(parser.argk[i], "binddn")){ pm_strcpy(binddn, parser.argv[i]); DMSG1(ctx, DDEBUG, "parsed binddn: %s\n", binddn.c_str()); continue; } else if (bstrcmp(parser.argk[i], "bindpass")) { pm_strcpy(bindpass, parser.argv[i]); DMSG1(ctx, DDEBUG, "parsed bindpass: %s\n", bindpass.c_str()); continue; } else if (bstrcmp(parser.argk[i], "query")) { POOL_MEM tmp(PM_MESSAGE); pm_strcpy(tmp, parser.argv[i]); char *d = strchr(tmp.c_str(), '/'); if (!d){ DMSG1(ctx, DERROR, "Cannot find basedn delimiter in query=%s\n", tmp.c_str()); return bRC_Error; } // separate basedn and filter *d = '\0'; pm_strcpy(basedn, tmp.c_str()); pm_strcpy(filter, d + 1); DMSG2(ctx, DDEBUG, "parsed query - basedn:%s filter:%s \n", basedn.c_str(), filter.c_str()); continue; } else if (bstrcmp(parser.argk[i], "starttls")) { starttls = true; DMSG0(ctx, DDEBUG, "parsed starttls\n"); continue; } else if (bstrcmp(parser.argk[i], "starttlsforce")) { starttlsforce = true; DMSG0(ctx, DDEBUG, "parsed starttlsforce\n"); continue; } else { DMSG1(ctx, DERROR, "unknown parameter: %s\n", parser.argk[i]); return bRC_Error; } } return bRC_OK; } /** * @brief Register BPAM LDAP Plugin authentication operations. * * @param param a string from "Authentication Plugin = ..." Console resource parameter * @param data a double pointer used for return value * @return bRC bRC_OK on success, bRC_Error on any error */ bRC BPAMLDAP::getAuthenticationData(const char *param, void **data) { bDirAuthenticationRegister **padata = (bDirAuthenticationRegister **)data; DMSG1(ctx, DINFO, "registering with: %s\n", NPRT(param)); if (parse_plugin_params(param) != bRC_OK){ return bRC_Error; } *padata = &bpamldapregister; return bRC_OK; } /** * @brief Unimplemented * * @param param Unimplemented * @param data Unimplemented * @return bRC Unimplemented */ bRC BPAMLDAP::getAuthorizationData(const char *param, void **data) { return bRC_OK; } bacula-15.0.3/src/plugins/dir/ldap/ldap-util.h0000644000175000017500000000305614771010173020700 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Common definitions and utility functions for Inteos plugins. * Functions defines a common framework used in our utilities and plugins. * LDAP plugin specific functions. * * Author: Radosław Korzeniewski, radoslaw@korzeniewski.net, Inteos Sp. z o.o. 
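 *
 * Typical call sequence (an illustrative sketch only, based on the declarations below;
 * the URL, DNs and the bindpass/userdn variables are made up and error handling is omitted):
 *
 *    ldap_paramlist params = { "ldap://ldap.example.com",
 *                              "cn=admin,dc=example,dc=com", bindpass, false, false };
 *    LDAP *ld = ldapconnect(ctx, &params);
 *    if (ld) {
 *       if (ldapcheckonedn(ctx, ld, userdn)) {
 *          // the DN exists in the directory
 *       }
 *       ldapdisconnect(ctx, ld);
 *    }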
*/ #ifndef _LDAP_UTIL_H_ #define _LDAP_UTIL_H_ #include "bacula.h" #include "dir_plugins.h" #ifdef __WIN32__ #include #else #include #endif /* Plugin compile time variables */ #define PLUGINPREFIX "ldap:" #define PLUGINNAME "LDAP" typedef struct s_ldap_paramlist { const char *ldapuri; const char *binddn; char *bindpass; bool starttls; bool starttlsforce; } ldap_paramlist; /* util definitions */ LDAP * ldapconnect(bpContext * ctx, ldap_paramlist * paramlist); int ldapdisconnect(bpContext * ctx, LDAP * ld); bool ldapcheckonedn(bpContext * ctx, LDAP * ld, char *dn); #endif /* _LDAP_UTIL_H_ */ bacula-15.0.3/src/plugins/dir/ldap/ldap-dir.h0000644000175000017500000000656614771010173020512 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * This is a Bacula plugin for making a backup and restore of individual LDAP objects. * Author: Radosław Korzeniewski, radekk@inteos.pl, Inteos Sp. z o.o. */ #ifndef _LDAP_DIR_H_ #define _LDAP_DIR_H_ #include "bacula.h" #include "dir_plugins.h" #include "dir_authplugin.h" // #include "lib/ini.h" #include "lib/cmd_parser.h" /* it is an LDAP authentication plugin, so we need a libldap library */ #ifdef __WIN32__ #include #else #include #endif #include #include #include #include "dirpluglib.h" #include "ldap-util.h" /* * libbac uses its own sscanf implementation which is not compatible with * libc implementation, unfortunately. usage of bsscanf require format string rewriting. */ #ifdef sscanf #undef sscanf #endif /* Plugin compile time variables */ #define PLUGIN_LICENSE "Bacula AGPLv3" #define PLUGIN_AUTHOR "Inteos Sp. z o.o." 
#define PLUGIN_DATE "September 2020" #define PLUGIN_VERSION "0.1.0" #define PLUGIN_DESCRIPTION "BPAM LDAP plugin (c) Inteos" #define PLUGIN_NAME "ldap" class BPAMLDAP : public SMARTALLOC { private: POOL_MEM url; // This is our LDAP server url POOL_MEM binddn; // This is a binddn (user) to make queries at LDAP server POOL_MEM bindpass; // This is a binddn (user) password POOL_MEM basedn; // This is a LDAP basedn query starting point POOL_MEM filter; // This is a LDAP query string for selecting required username bool starttls; bool starttlsforce; LDAP *ld; // This is out connection to LDAP server POOL_MEM userdn; POOL_MEM username; // here we will save user response to our queries POOL_MEM password; // here we will save user response to our queries bpContext *ctx; bRC ldapconnect(); bRC ldapdisconnect(); bool ldapsearchonedn(); void ldapserverconnectionerror(int ret); bRC parse_plugin_params(const char *param); bRC do_ldap_authenticate(); void substitute_filter_parameters(); public: #if __cplusplus > 201103L BPAMLDAP() = delete; ~BPAMLDAP() = default; #else BPAMLDAP() {}; ~BPAMLDAP() {}; #endif BPAMLDAP(bpContext *pctx) : url(PM_FNAME), binddn(PM_FNAME), bindpass(PM_NAME), basedn(PM_FNAME), filter(PM_FNAME), starttls(false), starttlsforce(false), ld(NULL), userdn(PM_NAME), username(PM_NAME), password(PM_NAME), ctx(pctx) {}; bRC handlePluginEvent(bDirEvent *event, void *value); bRC getAuthenticationData(const char *param, void **data); bRC getAuthorizationData(const char *param, void **data); }; #endif /* _LDAP_FD_H_ */ bacula-15.0.3/src/plugins/dir/ldap/ldap-util.c0000644000175000017500000001456514771010173020702 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Common definitions and utility functions for Inteos plugins. * Functions defines a common framework used in our utilities and plugins. * LDAP plugin specific functions. * * Author: Radosław Korzeniewski, radoslaw@korzeniewski.net, Inteos Sp. z o.o. */ #include "ldap-util.h" #include "dirpluglib.h" /* * libbac uses its own sscanf implementation which is not compatible with * libc implementation, unfortunately. usage of bsscanf require format string rewriting. 
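 *
 * That incompatibility is why the sscanf macro (which the Bacula headers
 * point at bsscanf) is undefined just below: this file then falls back to
 * the plain libc sscanf if it is ever needed.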
*/ #ifdef sscanf #undef sscanf #endif /* Pointers to Bacula functions used in plugins */ extern bDirFuncs *bfuncs; extern bDirInfo *binfo; /* * display common connection error message and diagnose */ void ldapserverconnectionerror(bpContext * ctx, LDAP *ldapconn, int ret) { char *errmsg = NULL; ldap_get_option(ldapconn, LDAP_OPT_DIAGNOSTIC_MESSAGE, (void*)&errmsg); DMSG1(ctx, D1, "LDAP Server connection error: %s\n", ldap_err2string(ret)); if (errmsg){ DMSG1(ctx, D1, "diagnose error: %s\n", errmsg); } JMSG1(ctx, M_WARNING, "LDAP Server connection error: %s\n", ldap_err2string(ret)); if (errmsg){ JMSG1(ctx, M_WARNING, "diagnose error: %s\n", errmsg); } ldap_memfree(errmsg); } /* * connect to LDAP service * * in: * paramlist - list of parameters from config file * required params: LDAPURI, BINDDN, BINDPASS, BINDMODE * out: * on success: LDAP * - working LDAP connection * on error: NULL */ LDAP *ldapconnect(bpContext *ctx, ldap_paramlist *paramlist) { LDAP *ldapconn = NULL; const char *ldapuri; const int desired_version = LDAP_VERSION3; const char *binddn; char *bindpass; POOLMEM *tmp = NULL; int ret; struct berval cred; struct berval *msgidp = NULL; bool starttls = false; bool starttlsforce = false; ldapuri = paramlist->ldapuri; #ifdef __sun__ /* TODO: change ldapuri to ldaphost + ldapport */ if ((ldapconn = ldap_init(ldapuri, LDAP_PORT)) == NULL) { return NULL; } #else if (ldap_initialize(&ldapconn, ldapuri) != LDAP_SUCCESS) { return NULL; } #endif #ifdef __sun__ if ((ret = ldap_set_option(ldapconn, LDAP_OPT_PROTOCOL_VERSION, &desired_version)) != LDAP_SUCCESS) #else if ((ret = ldap_set_option(ldapconn, LDAP_OPT_PROTOCOL_VERSION, &desired_version)) != LDAP_OPT_SUCCESS) #endif { ldapserverconnectionerror(ctx, ldapconn, ret); return NULL; } #ifdef __sun__ if ((ret = ldap_set_option(ldapconn, LDAP_OPT_REFERRALS, LDAP_OPT_OFF)) != LDAP_SUCCESS) #else if ((ret = ldap_set_option(ldapconn, LDAP_OPT_REFERRALS, LDAP_OPT_OFF)) != LDAP_OPT_SUCCESS) #endif { ldapserverconnectionerror(ctx, ldapconn, ret); return NULL; } #ifdef HAVE_LDAP_START_TLS /* * Start TLS support * * To enable StartTLS for LDAP connection you have to setup 'starttls' * or 'starttlsforce' plugin options * where 'starttlsforce' will fail connection if server do not support it. 
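 *
 * Behaviour summary, matching the code below: with 'starttls' alone a
 * failed ldap_start_tls_s() call is only logged and the bind continues
 * without TLS, while 'starttlsforce' makes the same failure abort the
 * connection attempt (the function returns NULL).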
*/ starttls = paramlist->starttls; starttlsforce = paramlist->starttlsforce; if (starttls || starttlsforce){ DMSG0(ctx, D2, "executing ldap_start_tls_s\n"); if ((ret = ldap_start_tls_s(ldapconn, NULL, NULL)) != LDAP_SUCCESS) { ldapserverconnectionerror(ctx, ldapconn, ret); if (starttlsforce) { DMSG0(ctx, D1, "STARTTLSFORCE set, cannot continue!\n"); JMSG0(ctx, M_ERROR, "STARTTLSFORCE set, cannot continue!\n"); return NULL; } } } #endif /* HAVE_LDAP_START_TLS */ binddn = paramlist->binddn; /* The password might be obfuscated */ bindpass = paramlist->bindpass; DMSG2(ctx, D1, "credentials to connect: binddn='%s' pass='%s'\n", binddn, bindpass); cred.bv_val = bindpass; cred.bv_len = strlen(cred.bv_val); if ((ret = ldap_sasl_bind_s(ldapconn, binddn, LDAP_SASL_SIMPLE, &cred, NULL, NULL, &msgidp)) != LDAP_SUCCESS) { free_and_null_pool_memory(tmp); ldapserverconnectionerror(ctx, ldapconn, ret); if (strcasestr(ldapuri, "ldaps://") != NULL || starttls || starttlsforce){ JMSG0(ctx, M_INFO, "Using TLS/SSL for LDAP service require CA certificate configuration on the backup server.\n"); JMSG0(ctx, M_INFO, "If it is not configured properly to do a job over TLS/SSL it will fail.\n"); } return NULL; } free_and_null_pool_memory(tmp); return ldapconn; } /* * disconnect from LDAP service * * in: * ld - ldap connection handle to unbind * out: * 0 - on success * 1 - on error */ int ldapdisconnect(bpContext * ctx, LDAP * ld) { int rc; rc = ldap_unbind_ext(ld, NULL, NULL); if (rc != LDAP_SUCCESS) { return 1; } return 0; } /* * check if supplied dn exist in directory * * in: * ld - ldap connection * dn - checked dn * out: * True - dn exist in directory * False - dn does not exist */ bool ldapcheckonedn(bpContext * ctx, LDAP * ld, char *dn) { int rc; bool ret = false; int type; LDAPMessage *msg = NULL; DMSG(ctx, D3, "ldapcheckonedn for: %s\n", dn); rc = ldap_search_ext_s(ld, dn, LDAP_SCOPE_BASE, NULL, NULL, 0, NULL, NULL, NULL, LDAP_NO_LIMIT, &msg); switch (rc){ case LDAP_NO_SUCH_OBJECT: /* object not found */ case LDAP_REFERRAL: /* for LDAP we reached an out of scope query */ break; case LDAP_SUCCESS: type = ldap_msgtype(msg); DMSG(ctx, D3, "ldapcheckonedn: resulting msgtype: %i\n", type); ret = type == LDAP_RES_SEARCH_ENTRY; break; default: /* we reach some error */ JMSG2(ctx, M_ERROR, "ldapcheckonedn: search error: %s for: %s\n", ldap_err2string(rc),dn); DMSG2(ctx, D1, "ldapcheckonedn: search error: %s for: %s\n", ldap_err2string(rc),dn); } ldap_msgfree(msg); return ret; } bacula-15.0.3/src/plugins/dir/ldap/.gitignore0000644000175000017500000000001614771010173020615 0ustar bsbuildbsbuild.libs ldaptestbacula-15.0.3/src/plugins/dir/ldap/ldaptest.c0000644000175000017500000001424514771010173020622 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ #include #include #include #include "config.h" /* gcc/g++ -Wall ldaptest.c -o ldaptest -lldap ./ldaptest "" "" "" */ const char *extensionStartTLS_OID = "1.3.6.1.4.1.1466.20037"; const char *extensionTLSattrib = "supportedExtension"; const char *extensionSASLattrib = "supportedSASLMechanisms"; /* * connect to LDAP service */ LDAP *ldapconnect(char *ldapuri, char *binddn, char *bindpass, bool starttls) { LDAP *ldapconn = NULL; int desired_version = LDAP_VERSION3; int ret; char *errmsg = NULL; int debug = 0xffff; struct berval cred; struct berval *msgidp = NULL; if ((ret = ldap_initialize(&ldapconn, ldapuri)) != LDAP_SUCCESS) { ldap_get_option(ldapconn, LDAP_OPT_DIAGNOSTIC_MESSAGE, (void*)&errmsg); printf("LDAP initialize error %s\n", ldap_err2string(ret)); printf("diagnose error: %s\n", errmsg); ldap_memfree(errmsg); return NULL; } if ((ret = ldap_set_option(ldapconn, LDAP_OPT_PROTOCOL_VERSION, &desired_version)) != LDAP_OPT_SUCCESS) { ldap_get_option(ldapconn, LDAP_OPT_DIAGNOSTIC_MESSAGE, (void*)&errmsg); printf("LDAP set option error %s\n", ldap_err2string(ret)); printf("diagnose error: %s\n", errmsg); ldap_memfree(errmsg); return NULL; } if ((ret = ldap_set_option(ldapconn, LDAP_OPT_REFERRALS, LDAP_OPT_OFF)) != LDAP_OPT_SUCCESS) { ldap_get_option(ldapconn, LDAP_OPT_DIAGNOSTIC_MESSAGE, (void*)&errmsg); printf("LDAP set option error %s\n", ldap_err2string(ret)); printf("diagnose error: %s\n", errmsg); ldap_memfree(errmsg); return NULL; } #ifdef HAVE_LDAP_START_TLS if (starttls){ if ((ret = ldap_start_tls_s(ldapconn, NULL, NULL)) != LDAP_SUCCESS) { ldap_get_option(ldapconn, LDAP_OPT_DIAGNOSTIC_MESSAGE, (void*)&errmsg); printf("LDAP start TLS error %s\n", ldap_err2string(ret)); printf("diagnose error: %s\n", errmsg); ldap_memfree(errmsg); return NULL; } } #endif if( ldap_set_option(NULL, LDAP_OPT_DEBUG_LEVEL, &debug) != LDAP_OPT_SUCCESS) { ldap_get_option(ldapconn, LDAP_OPT_DIAGNOSTIC_MESSAGE, (void*)&errmsg); printf("LDAP could not set LDAP_OPT_DEBUG_LEVEL %d\n", debug); printf("diagnose error: %s\n", errmsg); ldap_memfree(errmsg); } cred.bv_val = bindpass; cred.bv_len = strlen(cred.bv_val); if ((ret = ldap_sasl_bind_s(ldapconn, binddn, LDAP_SASL_SIMPLE, &cred, NULL, NULL, &msgidp) != LDAP_SUCCESS)) { ldap_get_option(ldapconn, LDAP_OPT_DIAGNOSTIC_MESSAGE, (void*)&errmsg); printf("LDAP Server bind error: %s\n", ldap_err2string(ret)); printf("diagnose error: %s\n", errmsg); ldap_memfree(errmsg); return NULL; } printf("LDAP Server connection OK\n"); return ldapconn; } /* * disconnect from LDAP service * * in: * ld - ldap connection handle to unbind * out: * 0 - on success * 1 - on error */ int ldapdisconnect(LDAP * ld) { int rc; rc = ldap_unbind_ext(ld, NULL, NULL); if (rc != LDAP_SUCCESS) { return 1; } return 0; } int main(int argc, char **argv) { LDAP *ld; /* all and extended attributes required */ const char *attrs[] = { "+", "*", extensionTLSattrib, extensionSASLattrib, NULL }; const char *filter = "(objectClass=*)"; LDAPMessage *msg = NULL; LDAPMessage *entry; BerElement *berdata; struct berval **values; char *attr; int rc, i; bool have_start_tls = false; bool test_start_tls = false; if (argc < 4) { printf("Usage: %s ", argv[0]); #ifdef HAVE_LDAP_START_TLS printf ("[starttls]"); #endif printf ("\n"); exit(1); } #ifdef HAVE_LDAP_START_TLS if (argc > 4 && strcmp(argv[4], "starttls") == 0) { printf ("> Will test with start tls option.\n"); test_start_tls = true; } #endif ld = ldapconnect(argv[1], argv[2], argv[3], test_start_tls); if (ld) { i = 0; ldap_set_option(NULL, 
LDAP_OPT_DEBUG_LEVEL, &i); rc = ldap_search_ext_s(ld, "", LDAP_SCOPE_BASE, filter, (char **)attrs, 0, NULL, NULL, NULL, LDAP_NO_LIMIT, &msg); switch (rc){ case LDAP_SUCCESS: entry = ldap_first_entry(ld, msg); if (entry != NULL) { printf("\nRootDSE:\n"); for (attr = ldap_first_attribute(ld, entry, &berdata); attr; attr = ldap_next_attribute(ld, entry, berdata)){ if ((values = ldap_get_values_len(ld, entry, attr)) != NULL) { for (i = 0; values[i] != NULL; i++) { printf ("\t %s: %s\n", attr, values[i]->bv_val); if (strcmp(values[i]->bv_val, extensionStartTLS_OID) == 0 && strcmp(attr, extensionTLSattrib) == 0) { have_start_tls = true; } } } ldap_value_free_len(values); ldap_memfree(attr); } } default: ldap_msgfree(msg); } ldapdisconnect(ld); printf ("\nTest successful!\n"); #ifdef HAVE_LDAP_START_TLS if (have_start_tls && !test_start_tls) { printf ("\nIt seems your LDAP server supports Start TLS Extension, so you could use 'starttls'\n"); printf ("parameter to verify if you can use it in your configuration.\n\n"); } #endif } else { printf("\nTest failed. Please check the log above!\n"); } exit(0); } bacula-15.0.3/src/plugins/dir/ldap/Makefile0000644000175000017500000000406414771010173020274 0ustar bsbuildbsbuild# # Simple Makefile for building test FD plugins for Bacula # # Copyright (C) 2000-2015 by Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # # include ../Makefile.inc LDAPSRC = ldap-dir.c LDAPTESTSRC = ldaptest.c LDAPOBJ = $(LDAPSRC:.c=.lo) LDAPTESTOBJ = $(LDAPTESTSRC:.c=.lo) # ldap-dir.la ldaptest all: $(BPAM_LDAP_TARGET) .c.lo: @echo "Compiling $< ..." $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(CPPFLAGS) -I${SRCDIR} -I${DIRDIR} -I${DIRPLUGDIR} $(LDAP_INC) -c $< %.lo: %.c %.h Makefile @echo "Compiling $< ..." $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(CPPFLAGS) $(CFLAGS) -I${SRCDIR} -I${DIRDIR} -I${DIRPLUGDIR} $(LDAP_INC) -c $(@:.lo=.c) ldap-dir.la: Makefile $(LDAPOBJ) $(COMMONOBJ) $(DIRPLUGDIR)/dirpluglib.lo @echo "Linking $(@:.la=.so) ..." $(NO_ECHO)$(LIBTOOL_LINK) --silent $(CXX) $(LDFLAGS) $(LDAP_LDFLAGS) $(LDAP_LIBS) -shared $^ -o $@ -rpath $(plugindir) -module -export-dynamic -avoid-version $(DIRPLUGDIR)/dirpluglib.lo: $(MAKE) -C $(DIRPLUGDIR) dirpluglib.lo ldaptest: $(LDAPTESTOBJ) Makefile @echo "Building ldaptest ..." $(NO_ECHO)$(LIBTOOL_LINK) --silent $(CXX) $(LDFLAGS) $(LDAP_LDFLAGS) $(LDAP_LIBS) ldaptest.lo -o $@ install: $(BPAM_LDAP_TARGET_INSTALL) install-ldap: ldap-dir.la @echo "Installing plugin $(^:.la=.so) ..." $(NO_ECHO)$(MKDIR) $(DESTDIR)$(plugindir) $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) ldap-dir.la $(DESTDIR)$(plugindir) $(NO_ECHO)$(RMF) $(DESTDIR)$(plugindir)/ldap-dir.la install-ldaptest: ldaptest @echo "Installing $< ..." $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) $< $(DESTDIR)$(sbindir) $(LIBTOOL_CLEAN_TARGET): $(NO_ECHO)find . 
-name '*.lo' -print | xargs $(LIBTOOL_CLEAN) $(RMF) $(NO_ECHO)$(RMF) *.la $(NO_ECHO)$(RMF) -r .libs _libs clean: $(LIBTOOL_CLEAN_TARGET) $(NO_ECHO)rm -f main ldaptest *.so *.o distclean: clean $(NO_ECHO)rm -f Makefile *.la *.lo $(NO_ECHO)rm -rf .libs $(LIBTOOL_UNINSTALL_TARGET): $(LIBTOOL_UNINSTALL) $(RMF) $(DESTDIR)$(plugindir)/ldap-dir.so $(LIBTOOL_UNINSTALL) $(RMF) $(DESTDIR)$(plugindir)/msad-fd.so uninstall: $(LIBTOOL_UNINSTALL_TARGET) depend:bacula-15.0.3/src/plugins/dir/Makefile.inc.in0000644000175000017500000000140214771010173020522 0ustar bsbuildbsbuild# # Makefile for building FD plugins PluginLibrary for Bacula # # Copyright (C) 2000-2023 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # # Author: Radoslaw Korzeniewski, radoslaw@korzeniewski.net # @MCOMMON@ topdir = @BUILD_DIR@ working_dir = @working_dir@ LIBTOOL_CLEAN_TARGET = @LIBTOOL_CLEAN_TARGET@ LIBTOOL_UNINSTALL_TARGET = @LIBTOOL_UNINSTALL_TARGET@ LDAP_LIBS=@LDAP_LIBS@ LDAP_LDFLAGS=@LDAP_LDFLAGS@ LDAP_INC=@LDAP_INC@ BPAM_LDAP_TARGET = @BPAM_LDAP_TARGET@ BPAM_LDAP_TARGET_INSTALL = @BPAM_LDAP_TARGET_INSTALL@ SRCDIR = $(topdir)/src DIRDIR = $(SRCDIR)/dird LIBDIR = $(SRCDIR)/lib DIRPLUGDIR = $(SRCDIR)/plugins/dir DIRPLUG_INSTALL_TARGET = @DIRPLUG_INSTALL_TARGET@ CURL_INC = @CURL_INC@ CURL_LIBS = @CURL_LIBS@ .SUFFIXES: .c .cpp .lo bacula-15.0.3/src/plugins/dir/totp/0000755000175000017500000000000014771010173016676 5ustar bsbuildbsbuildbacula-15.0.3/src/plugins/dir/totp/totp-dir.c0000644000175000017500000006206614771010173020616 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * TOTP Director Authentication Plugin * Eric Bollengier Aug 2021 */ #include #include "bacula.h" #include "dir_plugins.h" #include "dir_authplugin.h" #include "dirpluglib.h" #define USE_CMD_PARSER #define USE_MAKEDIR #include "../fd/fd_common.h" /* * At the first connection of the console, a key is generated * and stored in sysconfdir/conf.d/totp/BASE32(ConsoleName) */ /* * TODO: We can send the QR code via email or telegram * to an email configured into the plugin command line. 
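 *
 * Note that the 'sendcommand' plugin option below already covers part of
 * this: edit_codes() expands %a (QR code PNG path), %c (console name),
 * %d (Director name) and %P (pid) in the configured command line before it
 * is run. A purely illustrative value, using a hypothetical helper script:
 *    sendcommand="/usr/local/bin/send-totp-qr.sh %c %a"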
*/ /****************************************************************/ /* Implementation of the Time-Based One Time Password Algorithm * defined in rfc6238 * * The implementation relies on OpenSSL for HMAC SHA1 * https://github.com/google/google-authenticator/wiki/Key-Uri-Format * otpauth://totp/Bacula:consoleA?secret=ORSXG5BR&issuer=Bacula * * qrencode -t ansiutf8 * dlksflkdslkflksdlfklsklkdkfslkdlfkslklfd * * █████████████████████████████████████ * ████ ▄▄▄▄▄ █▀ █▀▀ █▀▄▄▄▄ █ ▄▄▄▄▄ ████ * ████ █ █ █▀ ▄ █▀ ▄▄▀▄ ██ █ █ ████ * ████ █▄▄▄█ █▀█ █▄▄▀▀ ▄ █ █▄▄▄█ ████ * ████▄▄▄▄▄▄▄█▄█▄█ █▄█▄█▄▀▄█▄▄▄▄▄▄▄████ * ████ ▄ ▄▄█▄ ▄█▄▄▄▄ ▄▀▀ ▀ ▀▄█▄▀████ * █████ ▄█▀█▄ ▀ ▀ ▄▄▄▄▄█▀█▀▀▄▀▀ ▄█▀████ * ████▄▄▀█ ▄▄▀▄▀▄▀▄▄█▀ ▀ ▀▀▀▀▀▀▄ ████ * ██████ ██ ▄ ▀█▀█▀█▄▄▀▄▄▀▄▄ ▄█▀▄▀█████ * ████ ▄████▄ ▄ █▄▀█▄▀▄▀ ▀█▀▀▀▀ ▀ ████ * ████ █ ▀█ ▄▄█▄▀ ▄▄█ ▀▄▀▀▀▄▀▀▄▀█▀█████ * ████▄██▄█▄▄█▀█▄▄▀▀██▀▄██ ▄▄▄ █ ▀▀████ * ████ ▄▄▄▄▄ █▄▄██▀ ▄ ▀▄▄ █▄█ ▄██████ * ████ █ █ █ ▀█▄█▄▄▀ ▀█▄▄▄▄▄▀▄ ████ * ████ █▄▄▄█ █ ▀▄ ▄█▀▄▄█▀▄ ▄▄ ▄▀█ █████ * ████▄▄▄▄▄▄▄█▄█▄▄█████▄█▄█▄██▄▄▄██████ * */ #ifdef HAVE_OPENSSL #define T0 0 #define VALIDITY 30 #define DIGITS 6 #define DIGEST EVP_sha1() /* Generate a hash code for a key */ static uint8_t *hmac(const uint8_t *data, uint32_t datalen, uint64_t atime, uint8_t *dest, uint32_t destlen) { if (destlen < EVP_MAX_MD_SIZE) { return NULL; } return (uint8_t *)HMAC(DIGEST, data, datalen, (uint8_t *)&atime, sizeof(atime), dest, &destlen); } /* Generate the code from the digest */ static uint32_t totp_get_code(uint8_t *digest) { uint64_t offset; uint32_t bin_code; /* dynamically choose what to return */ offset = digest[19] & 0x0f; bin_code = (digest[offset] & 0x7f) << 24 | (digest[offset+1] & 0xff) << 16 | (digest[offset+2] & 0xff) << 8 | (digest[offset+3] & 0xff); return bin_code; } /* Truncate the result to a given number of digits */ static uint32_t mod_hotp(uint32_t bin_code, int digits) { int power = pow(10, digits); uint32_t otp = bin_code % power; return otp; } /* The secret is in "clear" form. When we generate the code for the * authenticator app, it should be encoded in base32 */ static bool totp(uint8_t *key, uint32_t kl, uint64_t atime, int digits, uint32_t *code) { uint8_t *digest; uint8_t dest[EVP_MAX_MD_SIZE]; /* converts atime to big endian if system is little endian */ uint32_t endianness = 0xbacbac01; if ((*(const uint8_t *)&endianness) == 0x01) { atime = ((atime & 0x00000000ffffffff) << 32) | ((atime & 0xffffffff00000000) >> 32); atime = ((atime & 0x0000ffff0000ffff) << 16) | ((atime & 0xffff0000ffff0000) >> 16); atime = ((atime & 0x00ff00ff00ff00ff) << 8) | ((atime & 0xff00ff00ff00ff00) >> 8); } /* Get the digest of the message using the provided key and the timestamp */ digest = (uint8_t *)hmac(key, kl, atime, dest, sizeof(dest)); if (!digest) { return false; } /* Get the code from the hash. 
The portion used will change * dynamically */ uint32_t dbc = totp_get_code(digest); /* calculate the mod_k of the code to get the correct number */ *code = mod_hotp(dbc, digits); return true; } /* Generate a TOTP code from a password */ static bool totp_from_secret(uint8_t *k, size_t keylen, uint32_t *code) { time_t t = floor((time(NULL) - T0) / VALIDITY); if (totp(k, keylen, t, DIGITS, code)) { return true; } return false; } /* Read a secret from a file */ static bool totp_read_secretfile(const char *fname, char *secret, uint32_t len, uint32_t *slen) { uint32_t nb; FILE *fp = bfopen(fname, "r"); if (!fp) { return false; } nb = fread(secret, 1, len-1, fp); secret[nb] = 0; *slen = nb; fclose(fp); return true; } /* Generate a TOTP code from a secretfile */ static bool totp_from_secretfile(const char *fname, uint32_t *code) { uint32_t nb; uint8_t buf[512]; if (!totp_read_secretfile(fname, (char *)buf, sizeof(buf), &nb)) { return false; } return totp_from_secret(buf, nb, code); } #else static uint32_t totp_from_secret(uint8_t *k, size_t keylen, uint32_t *code) { Dmsg0(10, "TOTP not available without OpenSSL\n"); return false; } static uint32_t totp_from_secretfile(const char *fname, uint32_t *code) { Dmsg0(10, "TOTP not available without OpenSSL\n"); return false; } #endif // HAVE_OPENSSL /* buf len should be 1.6*strlen(secret) + 50 */ static char *totp_get_url(const char *name, const char *secret, int len, POOLMEM **ret) { POOL_MEM buf; buf.check_size(len * 8 / 5 + 1); if (bin_to_base32((uint8_t*)secret, len, buf.c_str(), buf.size()) < 0) { return NULL; } Mmsg(ret, "otpauth://totp/Bacula:%s?secret=%s&issuer=Bacula", name, buf.c_str()); return *ret; } #ifdef TEST_PROGRAM #include "lib/unittests.h" int main(int argc, char **argv) { Unittests t("totp"); uint32_t code; uint8_t *secret = (uint8_t*)"test1"; POOL_MEM b; ok(totp_from_secret(secret, strlen((char *)secret), &code), "get code"); Dmsg1(0, "%d\n", code); Dmsg1(0, "%s\n", totp_get_url("console", (char *)secret, strlen((char *)secret), b.handle())); return report(); } #endif /****************************************************************/ #ifdef __cplusplus extern "C" { #endif #define PLUGIN_LICENSE BPLUGIN_LICENSE #define PLUGIN_AUTHOR "Eric Bollengier" #define PLUGIN_DATE "August 2021" #define PLUGIN_VERSION "1" #define PLUGIN_DESCRIPTION "Director TOTP Plugin" /* Forward referenced functions */ static bRC newPlugin(bpContext *ctx); static bRC freePlugin(bpContext *ctx); static bRC getPluginValue(bpContext *ctx, pDirVariable var, void *value); static bRC setPluginValue(bpContext *ctx, pDirVariable var, void *value); static bRC handlePluginEvent(bpContext *ctx, bDirEvent *event, void *value); static bRC getAuthenticationData(bpContext *ctx, const char *console, const char *param, void **data); static bRC getAuthorizationData(bpContext *ctx, const char *console, const char *param, void **data); /* Plugin compile time variables */ #define PLUGINPREFIX "totp:" #define PLUGIN_NAME "totp" /* Pointers to Bacula functions */ bDirFuncs *bfuncs = NULL; bDirInfo *binfo = NULL; static pDirInfo pluginInfo = { sizeof(pluginInfo), DIR_PLUGIN_INTERFACE_VERSION, DIR_PLUGIN_MAGIC, PLUGIN_LICENSE, PLUGIN_AUTHOR, PLUGIN_DATE, PLUGIN_VERSION, PLUGIN_DESCRIPTION }; static pDirFuncs pluginFuncs = { sizeof(pluginFuncs), DIR_PLUGIN_INTERFACE_VERSION, /* Entry points into plugin */ newPlugin, /* new plugin instance */ freePlugin, /* free plugin instance */ getPluginValue, setPluginValue, handlePluginEvent, getAuthenticationData, getAuthorizationData }; static 
bDirAuthenticationData totpquestions0[] = { // operation; question; data; {bDirAuthenticationOperationPlain, "Code:", 0} }; static bDirAuthenticationRegister totpregister0 = { .name = PLUGIN_NAME, .welcome = "Code Authentication", .num = 1, .data = totpquestions0, .nsTTL = 0, }; // TODO: Use the location of bacula-dir or a user defined variable to set it #ifdef SYSCONFDIR #define KEYDIR SYSCONFDIR "/conf.d/totp/" #else #define KEYDIR "/tmp/key/" #endif class totp_api : public SMARTALLOC { public: POOLMEM *code; POOLMEM *keyname; uint32_t input_code; char *keydir; char *sendcommand; bool gen_qrcode; bDirAuthenticationRegister totpregisterQR; totp_api() : code(NULL), keyname(NULL), input_code(0), keydir(NULL), sendcommand(NULL), gen_qrcode(true) { code = get_pool_memory(PM_FNAME); keyname = get_pool_memory(PM_FNAME); *code = *keyname = 0; // The bDirAuthenticationRegister struct is used to display // the QR code at the first connection totpregisterQR.name = PLUGIN_NAME; totpregisterQR.welcome = NULL; // To fill during the session totpregisterQR.num = 1; totpregisterQR.data = totpquestions0; totpregisterQR.nsTTL = 0; }; ~totp_api() { free_and_null_pool_memory(code); free_and_null_pool_memory(keyname); bfree_and_null(keydir); bfree_and_null(sendcommand); }; POOLMEM *edit_codes(POOLMEM *&omsg, const char *imsg, const char *name, const char *qrcode_file) { const char *p; const char *str; char add[50]; extern char my_name[]; /* From libbac */ *omsg = 0; for (p=imsg; *p; p++) { if (*p == '%') { switch (*++p) { case '%': str = "%"; break; case 'a': str = qrcode_file; break; case 'd': str = my_name; /* Director's name */ break; case 'c': str = name; /* Console name */ break; case 'P': edit_uint64(getpid(), add); str = add; break; default: add[0] = '%'; add[1] = *p; add[2] = 0; str = add; break; } } else { add[0] = *p; add[1] = 0; str = add; } pm_strcat(omsg, str); } return omsg; }; bool parse_param(const char *param) { cmd_parser parser; bfree_and_null(keydir); bfree_and_null(sendcommand); gen_qrcode = true; if (!param) { return true; } if (parser.parse_cmd(param) == bRC_OK) { for(int i = 1; i < parser.argc ; i++) { if (strcasecmp(parser.argk[i], "no_qrcode") == 0) { gen_qrcode = false; } else if (!parser.argv[i]) { Dmsg0(0, "Incorrect configuration for totp plugin.\n"); } else if (strcasecmp(parser.argk[i], "keydir") == 0) { keydir = bstrdup(parser.argv[i]); } else if (strcasecmp(parser.argk[i], "sendcommand") == 0) { sendcommand = bstrdup(parser.argv[i]); gen_qrcode = false; } else { Dmsg0(0, "Unknown parameter for totp plugin\n"); } } } else { Dmsg0(0, "Unable to decode totp command line\n"); return false; } return true; } /* Generate a filename for the current console */ bool compute_keyfile(const char *name, POOLMEM **key) { if (!keydir) { keydir = bstrdup(KEYDIR); } int len = strlen(keydir) + 1; int nlen = strlen(name); *key = check_pool_memory_size(*key, len + nlen * 5 / 8 + 10); Mmsg(key, "%s/", keydir); if (bin_to_base32((uint8_t *)name, nlen, *key + len, sizeof_pool_memory(code) - (len + 1)) < 0) { Dmsg1(10, "Unable to encode %s to base32\n", name); *key[0] = 0; return false; } Dmsg1(200, "keyname=%s\n", *key); return true; }; /* Check if we have a key for this console */ bool has_key(const char *name) { if (!compute_keyfile(name, &keyname)) { return false; } if (access(keyname, R_OK) < 0) { return false; } return true; }; bool getQRCode(const char *name, char *k64, POOLMEM **ret) { char urlfile[128]; POOL_MEM tmp; if (totp_get_url(name, k64, strlen(k64), ret) == NULL) { Dmsg0(DINFO, 
"Unable to generate the totp url from the key\n"); return false; } // Write URL to disk to generate the QR code bstrncpy(urlfile, "/tmp/key.XXXXXX", sizeof(urlfile)); int fpt = mkstemp(urlfile); if (fpt < 0) { berrno be; Dmsg1(DINFO, "Unable to create a new key. ERR=%s\n", be.bstrerror()); return false; } FILE *fp = fdopen(fpt, "w"); fprintf(fp, "%s", *ret); fclose(fp); // Call qrencode to generate the QR code, else display the URL Mmsg(tmp, "sh -c 'cat \"%s\" | qrencode -t ansiutf8'", urlfile); if (run_program_full_output (tmp.c_str(), 0, *ret, NULL) != 0) { berrno be; Dmsg1(DINFO, "Unable to call qrencode on a new key. ERR=%s\n", be.bstrerror()); if (totp_get_url(name, k64, strlen(k64), ret)) { pm_strcat(ret, _("\nUse this URL into your TOTP client and close this screen.\n")); } else { // Should not happen, the first call to totp_get_url() was OK Dmsg0(DINFO, "Unable to generate the totp url from the key\n"); return false; } } else { Mmsg(tmp, "\n%s\nScan the QR code into your TOTP client and close this screen.\n", *ret); pm_strcpy(ret, tmp.c_str()); } unlink(urlfile); return true; }; bool sendQRCode(const char *name, char *k64, POOLMEM **ret) { bool rcode=false; char urlfile[128]; char pngfile[128]; POOL_MEM tmp, tmp2; if (totp_get_url(name, k64, strlen(k64), ret) == NULL) { Dmsg0(DINFO, "Unable to generate the totp url from the key\n"); return false; } // Write URL to disk to generate the QR code bstrncpy(urlfile, "/tmp/key.XXXXXX", sizeof(urlfile)); int fpt = mkstemp(urlfile); if (fpt < 0) { berrno be; Dmsg1(0, "Unable to create a new key. ERR=%s\n", be.bstrerror()); return false; } FILE *fp = fdopen(fpt, "w"); fprintf(fp, "%s", *ret); fclose(fp); // Write png to disk to generate the QR code bstrncpy(pngfile, "/tmp/qrcode.XXXXXX.png", sizeof(pngfile)); fpt = mkstemps(pngfile, 4); if (fpt < 0) { berrno be; Dmsg1(0, "Unable to create a new key. ERR=%s\n", be.bstrerror()); unlink(urlfile); return false; } // Call qrencode to generate the QR code, else display the URL Mmsg(tmp, "sh -c 'cat \"%s\" | qrencode -s 10 -t png -o \"%s\"'", urlfile, pngfile); if (run_program_full_output (tmp.c_str(), 0, *ret, NULL) != 0) { Dmsg0(0, "Unable to generate the totp png file from the key\n"); goto bail_out; } if (sendcommand) { edit_codes(tmp.addr(), sendcommand, name, pngfile); if (run_program_full_output (tmp.c_str(), 10, *ret, NULL) != 0) { Dmsg1(0, "Unable to call the mail program to send the totp key %s\n", *ret); goto bail_out; } } else { pm_strcpy(ret, pngfile); } rcode = true; bail_out: // Run the email command unlink(urlfile); /* cleanup if we are in error or if we have send the file */ if (!rcode || sendcommand) { unlink(pngfile); } return rcode; }; /* Must call has_key() first */ bDirAuthenticationRegister *gen_key(const char *name) { char k[24]; char k64[2*sizeof(k)]; // Normally 33% larger // Generate a random string that will be used as key if (RAND_bytes((uint8_t*)k, sizeof(k)-1) == 0) { return NULL; } k[sizeof(k)-1]=0; bin_to_base64(k64, sizeof(k64), k, sizeof(k)-1, 1); if (!compute_keyfile(name, &keyname)) { return NULL; } umask(0077); makedir(keyname, false, 0700); FILE *fp = bfopen(keyname, "w"); if (!fp) { berrno be; Dmsg2(DINFO, "Unable to create a new key %s. ERR=%s\n", keyname, be.bstrerror()); return NULL; } fprintf(fp, "%s", k64); if (fclose(fp) != 0) { berrno be; Dmsg2(DINFO, "Unable to create a new key %s. 
ERR=%s\n", keyname, be.bstrerror()); unlink(keyname); return NULL; } if (gen_qrcode) { if (!getQRCode(name, k64, &code)) { unlink(keyname); return NULL; } } else if (sendcommand) { if (!sendQRCode(name, k64, &code)) { unlink(keyname); return NULL; } } else { pm_strcpy(code, "\nTOTP code generated. Ask the QR Code to your Bacula Administrator.\n"); } totpregisterQR.welcome = code; return &totpregisterQR; }; }; bRC loadPlugin(bDirInfo *lbinfo, bDirFuncs *lbfuncs, pDirInfo **pinfo, pDirFuncs **pfuncs) { #ifndef HAVE_OPENSSL Dmsg2(DINFO, "Unable to load TOTP plugin. Requires OpenSSL\n"); return bRC_Error; #endif bfuncs = lbfuncs; /* set Bacula funct pointers */ binfo = lbinfo; Dmsg2(DINFO, "Loaded: size=%d version=%d\n", bfuncs->size, bfuncs->version); *pinfo = &pluginInfo; /* return pointer to our info */ *pfuncs = &pluginFuncs; /* return pointer to our functions */ return bRC_OK; } bRC unloadPlugin() { Dmsg0(DINFO, "plugin: Unloaded\n"); return bRC_OK; } static bRC newPlugin(bpContext *ctx) { totp_api *self = New (totp_api); DMSG0(ctx, DINFO, "newPlugin\n"); ctx->pContext = self; return bRC_OK; } static bRC freePlugin(bpContext *ctx) { DMSG0(ctx, DINFO, "freePlugin\n"); if (ctx->pContext) { totp_api *self = (totp_api *)ctx->pContext; delete self; } return bRC_OK; } static bRC getPluginValue(bpContext *ctx, pDirVariable var, void *value) { DMSG1(ctx, DINFO, "plugin: getPluginValue var=%d\n", var); return bRC_OK; } static bRC setPluginValue(bpContext *ctx, pDirVariable var, void *value) { DMSG1(ctx, DINFO, "plugin: setPluginValue var=%d\n", var); return bRC_OK; } static bRC handlePluginEvent(bpContext *ctx, bDirEvent *event, void *value) { bDirAuthValue *pvalue; totp_api *self = (totp_api *)ctx->pContext; uint32_t c=999; switch (event->eventType) { case bDirEventAuthenticationQuestion: pvalue = (bDirAuthValue *)value; pvalue->authdata = totpquestions0; break; case bDirEventAuthenticationResponse: pvalue = (bDirAuthValue *)value; DMSG_EVENT_STR(event, pvalue->response); switch (pvalue->seqdata) { case 0: self->input_code = str_to_uint64((char *)pvalue->response); break; default: break; } break; case bDirEventAuthenticate: DMSG_EVENT_PTR(event, value); if (self->keyname[0] == 0) { return bRC_Error; } if (totp_from_secretfile(self->keyname, &c)) { if (self->input_code == c) { return bRC_OK; } Dmsg3(200, "Incorrect code for %s %ld == %ld\n", self->keyname, self->input_code, c); } return bRC_Error; break; default: break; } return bRC_OK; } static bRC getAuthenticationData(bpContext *ctx, const char *console, const char *param, void **data) { totp_api *self = (totp_api *)ctx->pContext; bDirAuthenticationRegister **padata = (bDirAuthenticationRegister **)data; Dmsg2(DINFO, "console=%s param=%s\n", console, param); if (!self->parse_param(param)) { return bRC_Error; } if (self->has_key(console)) { *padata = &totpregister0; } else { *padata = self->gen_key(console); } return *padata ? 
bRC_OK : bRC_Error; } static bRC getAuthorizationData(bpContext *ctx, const char *console, const char *param, void **data) { return bRC_OK; } #ifdef BTOTP_PROGRAM static void usage(int ret) { fprintf(stderr, _( "Usage: btotp [-k /path/to/keydir] [-d100] [-c] [-r] [-u] [-q] -n name\n" " -d int Set debug level\n" " -c Create a key if needed\n" " -n name Name of the console (or via BTOTP_NAME env)\n" " -u Display otpauth URL\n" " -q Display qrcode\n" " -r Remove key on disk\n" " -k dir Path to the keydir\n\n")); exit(ret); } int main(int argc, char **argv) { setlocale(LC_ALL, ""); bindtextdomain("bacula", LOCALEDIR); textdomain("bacula"); init_stack_dump(); lmgr_init_thread(); totp_api totp; int ch; bool docreate=false, displayQR=false, displayURL=false,remove=false; char *name = NULL; my_name_is(argc, argv, "btotp"); init_msg(NULL, NULL); OSDependentInit(); while ((ch = getopt(argc, argv, "rd:ck:n:?qu")) != -1) { switch (ch) { case 'r': remove=true; break; case 'n': name=optarg; break; case 'q': displayQR=true; break; case 'u': displayURL=true; break; case 'd': /* debug level */ if (*optarg == 't') { dbg_timestamp = true; } else { debug_level = atoi(optarg); if (debug_level <= 0) { debug_level = 1; } } break; case 'c': docreate=true; break; case 'k': totp.keydir = bstrdup(optarg); break; case '?': default: usage(0); } } argc -= optind; argv += optind; if (!name) { name = getenv("BTOTP_NAME"); } if (argc != 0 || !name) { Pmsg0(0, _("Wrong number of arguments: \n")); usage(1); } if (!totp.has_key(name)) { if (docreate) { if (!totp.gen_key(name)) { Pmsg0(0, _("Unable to generate the key\n")); usage(2); } } else { Pmsg0(0, _("Unable to read the key. Use -c to create a key.\n")); usage(2); } } if (docreate) { printf("%s\n", totp.keyname); return 0; } if (remove) { POOL_MEM tmp; if (!totp.compute_keyfile(name, tmp.handle())) { Pmsg0(0, _("Unable to find the key file\n")); usage(2); } if (unlink(tmp.c_str()) < 0) { berrno be; Pmsg3(0, _("Unable to remove disk key %s for %s file. ERR=%s\n"), tmp.c_str(), name, be.bstrerror()); usage(2); } printf("Disk storage %s for key %s has been deleted\n", tmp.c_str(), name); return 0; } if (displayURL || displayQR) { POOL_MEM tmp; char buf[512]; uint32_t len; if (!totp.compute_keyfile(name, tmp.handle())) { Pmsg0(0, _("Unable to find the key file\n")); usage(2); } if (!totp_read_secretfile(tmp.c_str(), buf, sizeof(buf), &len)) { berrno be; Pmsg1(0, _("Unable to read the key. 
ERR=%s\n"), be.bstrerror()); usage(2); } if (displayURL) { if (totp_get_url(name, buf, len, tmp.handle())) { Pmsg1(0, "\n%s\n", tmp.c_str()); } else { Pmsg0(0, "Unable to generate the totp url from the key\n"); usage(2); } } if (displayQR) { if (totp.getQRCode(name, buf, tmp.handle())) { Pmsg1(0, "\n%s\n", tmp.c_str()); } else { Pmsg0(0, "Unable to generate the QRcode from the key\n"); usage(2); } } return 0; } uint32_t c=99; if (totp_from_secretfile(totp.keyname, &c)) { printf("%06u\n", (unsigned int)c); } else { Pmsg0(0, _("Unable to generate the code from the key\n")); return 2; } return 0; } #endif // BTOTP_PROGRAM #ifdef __cplusplus } #endif bacula-15.0.3/src/plugins/dir/totp/Makefile0000644000175000017500000000605014771010173020337 0ustar bsbuildbsbuild# # Makefile for building FD plugins PluginLibrary for Bacula # # Copyright (C) 2000-2020 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # # Author: Radoslaw Korzeniewski, radoslaw@korzeniewski.net # include ../../fd/Makefile.inc TOTPSRC = totp-dir.c TOTPOBJ = $(TOTPSRC:.c=.lo) .SUFFIXES: .c .lo all: totp-dir.la .c.lo: @echo "Compiling $< ..." $(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(CPPFLAGS) $(CFLAGS) -I${SRCDIR} -I${LIBDIR} -I${SRCDIR}/dird -I.. -DSYSCONFDIR=\"$(sysconfdir)\" -DWORKDIR=\"$(DESTDIR)$(working_dir)\" -c $< %.lo: %.c %.h @echo "Pattern compiling $< ..." $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(CPPFLAGS) $(CFLAGS) -I${SRCDIR} -I${LIBDIR} -I${SRCDIR}/dird -I.. -DSYSCONFDIR=\"$(sysconfdir)\" -DWORKDIR=\"$(DESTDIR)$(working_dir)\" -c $(@:.lo=.c) ../dirpluglib.lo: $(MAKE) -C ../ dirpluglib.lo totp-dir.la: $(TOTPOBJ) @echo "Linking $(@:.la=.so) ..." $(LIBTOOL_LINK) --silent $(CXX) $(LDFLAGS) -shared $^ ../dirpluglib.lo -o $@ -rpath $(plugindir) -module -export-dynamic -avoid-version btotp: $(TOTPSRC) ../dirpluglib.lo $(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(CPPFLAGS) $(CFLAGS) -I${SRCDIR} -I${LIBDIR} -I${SRCDIR}/dird -I.. -DSYSCONFDIR=\"$(sysconfdir)\" -DWORKDIR=\"$(DESTDIR)$(working_dir)\" -DBTOTP_PROGRAM=1 -c $< -o btotp.lo $(LIBTOOL_LINK) $(CXX) $(DEFS) $(DEBUG) -I${SRCDIR} -I${LIBDIR} -I${SRCDIR}/dird -I.. -DSYSCONFDIR=\"$(sysconfdir)\" -DWORKDIR=\"$(DESTDIR)$(working_dir)\" -o $@ btotp.lo ../dirpluglib.lo -L${LIBDIR} -lbac ${OPENSSL_LIBS} btotp_test: $(TOTPSRC) ../dirpluglib.lo $(MAKE) -C ${LIBDIR} alist_test $(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(CPPFLAGS) $(CFLAGS) -I${SRCDIR} -I${LIBDIR} -I${SRCDIR}/dird -I.. -DSYSCONFDIR=\"$(sysconfdir)\" -DWORKDIR=\"$(DESTDIR)$(working_dir)\" -DTEST_PROGRAM=1 -c $< -o btotp_test.lo $(LIBTOOL_LINK) $(CXX) -I${SRCDIR} -I${LIBDIR} -I${SRCDIR}/dird -I.. -DSYSCONFDIR=\"$(sysconfdir)\" -DWORKDIR=\"$(DESTDIR)$(working_dir)\" -o $@ btotp_test.lo ../dirpluglib.lo ${LIBDIR}/unittests.o -L${LIBDIR} -lbac ${OPENSSL_LIBS} install-test: btotp_test $(NO_ECHO)$(MKDIR) $(DESTDIR)$(sbindir) $(NO_ECHO)$(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) btotp_test $(DESTDIR)$(sbindir) install: install-totp install-btotp install-btotp: btotp @echo "Installing btotp" $(NO_ECHO)$(MKDIR) $(DESTDIR)$(sbindir) $(NO_ECHO)$(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) btotp $(DESTDIR)$(sbindir) install-totp: totp-dir.la @echo "Installing plugin ... $(^:.la=.so)" $(NO_ECHO)$(MKDIR) $(DESTDIR)$(plugindir) $(NO_ECHO)$(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) totp-dir.la $(DESTDIR)$(plugindir) $(NO_ECHO)$(RMF) $(DESTDIR)$(plugindir)/totp-dir.la $(LIBTOOL_CLEAN_TARGET): $(NO_ECHO)find . 
-name '*.lo' -print | xargs $(LIBTOOL_CLEAN) $(RMF) $(NO_ECHO)$(RMF) *.la $(NO_ECHO)$(RMF) -r .libs _libs clean: $(LIBTOOL_CLEAN_TARGET) $(NO_ECHO)$(RMF) main btotp btotp_test *.so *.o distclean: clean $(NO_ECHO)rm -f *.la *.lo $(NO_ECHO)rm -rf .libs $(LIBTOOL_UNINSTALL_TARGET): $(LIBTOOL_UNINSTALL) $(RMF) $(DESTDIR)$(plugindir)/totp-dir.so uninstall: $(LIBTOOL_UNINSTALL_TARGET) depend: bacula-15.0.3/src/plugins/dir/dirpluglib.c0000644000175000017500000001607114771010173020216 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Common definitions and utility functions for Inteos plugins. * Functions defines a common framework used in our utilities and plugins. * This a Director Plugins flavor. * * Author: Radosław Korzeniewski, MMXIX * radoslaw@korzeniewski.net, radekk@inteos.pl * Inteos Sp. z o.o. http://www.inteos.pl/ */ #include "dirpluglib.h" /* Pointers to Bacula functions used in plugins */ extern bDirFuncs *bfuncs; extern bDirInfo *binfo; /* Events that are passed to plugin typedef enum { bDirEventJobStart = 1, bDirEventJobEnd = 2, bDirEventJobInit = 3, bDirEventJobRun = 4, bDirEventVolumePurged = 5, bDirEventNewVolume = 6, bDirEventNeedVolume = 7, bDirEventVolumeFull = 8, bDirEventRecyle = 9, bDirEventGetScratch = 10, bDirEventAuthenticationParam = 1000, // *value is a char* to console resource param value bDirEventAuthorizationACLParam = 1001, // *value is a char* to console resource param value bDirEventAuthenticationQuestion = 1002, // *value is a bDirAuthValue struct allocated by Dir // to get return value from bDirEventAuthenticationResponse = 1003, // *value is a char* to user response bDirEventAuthenticate = 1004, // return bRC_OK when authenticate is successful } bDirEventsType; */ const char *eventtype2str(bDirEvent *event){ switch (event->eventType){ case bDirEventJobStart: return "bDirEventJobStart"; case bDirEventJobEnd: return "bDirEventJobEnd"; case bDirEventJobInit: return "bDirEventJobInit"; case bDirEventJobRun: return "bDirEventJobRun"; case bDirEventVolumePurged: return "bDirEventVolumePurged"; case bDirEventNewVolume: return "bDirEventNewVolume"; case bDirEventNeedVolume: return "bDirEventNeedVolume"; case bDirEventVolumeFull: return "bDirEventVolumeFull"; case bDirEventRecyle: return "bDirEventRecyle"; case bDirEventGetScratch: return "bDirEventGetScratch"; case bDirEventAuthenticationQuestion: return "bDirEventAuthenticationQuestion"; case bDirEventAuthenticationResponse: return "bDirEventAuthenticationResponse"; case bDirEventAuthenticate: return "bDirEventAuthenticate"; default: return "Unknown"; } } /* * Return the real size of the disk based on the size suffix. 
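 *
 * Example (illustrative): pluglib_size_suffix(2, 'G') yields
 * 2 * 1024 * 1048576 = 2147483648 bytes, while an unrecognized suffix
 * returns the value unchanged.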
* * in: * disksize - the numeric value of the disk size to compute * suff - the suffix for a disksize value * out: * uint64_t - the size of the disk computed with suffix */ uint64_t pluglib_size_suffix(int disksize, char suff) { uint64_t size; switch (suff){ case 'G': size = (uint64_t)disksize * 1024 * 1048576; break; case 'M': size = (uint64_t)disksize * 1048576; break; case 'T': size = (uint64_t)disksize * 1048576 * 1048576; break; case 'K': case 'k': size = (uint64_t)disksize * 1024; break; default: size = disksize; } return size; } /* * Return the real size of the disk based on the size suffix. * This version uses a floating point numbers (double) for computation. * * in: * disksize - the numeric value of the disk size to compute * suff - the suffix for a disksize value * out: * uint64_t - the size of the disk computed with suffix */ uint64_t pluglib_size_suffix(double disksize, char suff) { uint64_t size; switch (suff){ case 'G': size = disksize * 1024.0 * 1048576.0; break; case 'M': size = disksize * 1048576.0; break; case 'T': size = disksize * 1048576.0 * 1048576.0; break; case 'K': case 'k': size = disksize * 1024.0; break; default: size = disksize; } return size; } /* * Creates a path hierarchy on local FS. * It is used for local restore mode to create a required directory. * The functionality is similar to 'mkdir -p'. * * TODO: make a support for relative path * TODO: check if we can use findlib/makepath implementation instead * * in: * bpContext - for Bacula debug and jobinfo messages * path - a full path to create, does not check if the path is relative, * could fail in this case * out: * bRC_OK - path creation was successful * bRC_Error - on any error */ bRC pluglib_mkpath(bpContext* ctx, char* path, bool isfatal) { #ifdef PLUGINPREFIX #define _OLDPREFIX PLUGINPREFIX #endif #define PLUGINPREFIX "pluglibmkpath:" struct stat statp; POOL_MEM dir(PM_FNAME); char *p, *q; if (!path){ return bRC_Error; } if (stat(path, &statp) == 0){ if (S_ISDIR(statp.st_mode)){ return bRC_OK; } else { DMSG(ctx, DERROR, "Path %s is not directory\n", path); JMSG(ctx, isfatal ? M_FATAL : M_ERROR, "Path %s is not directory\n", path); return bRC_Error; } } DMSG(ctx, DDEBUG, "mkpath verify dir: %s\n", path); pm_strcpy(dir, path); p = dir.addr() + 1; while (*p && (q = strchr(p, (int)PathSeparator)) != NULL){ *q = 0; DMSG(ctx, DDEBUG, "mkpath scanning(1): %s\n", dir.c_str()); if (stat(dir.c_str(), &statp) == 0){ *q = PathSeparator; p = q + 1; continue; } DMSG0(ctx, DDEBUG, "mkpath will create dir(1).\n"); if (mkdir(dir.c_str(), 0750) < 0){ /* error */ berrno be; DMSG2(ctx, DERROR, "Cannot create directory %s Err=%s\n", dir.c_str(), be.bstrerror()); JMSG2(ctx, isfatal ? M_FATAL : M_ERROR, "Cannot create directory %s Err=%s\n", dir.c_str(), be.bstrerror()); return bRC_Error; } *q = PathSeparator; p = q + 1; } DMSG0(ctx, DDEBUG, "mkpath will create dir(2).\n"); if (mkdir(path, 0750) < 0){ /* error */ berrno be; DMSG2(ctx, DERROR, "Cannot create directory %s Err=%s\n", path, be.bstrerror()); JMSG2(ctx, isfatal ? 
M_FATAL : M_ERROR, "Cannot create directory %s Err=%s\n", path, be.bstrerror()); return bRC_Error; } DMSG0(ctx, DDEBUG, "mkpath finish.\n"); #ifdef _OLDPREFIX #define PLUGINPREFIX _OLDPREFIX #undef _OLDPREFIX #else #undef PLUGINPREFIX #endif return bRC_OK; } bacula-15.0.3/src/plugins/dir/dirpluglib.h0000644000175000017500000001125414771010173020221 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Common definitions and utility functions for Inteos plugins. * Functions defines a common framework used in our utilities and plugins. * * Author: Radosław Korzeniewski, MMXX * radoslaw@korzeniewski.net, radekk@inteos.pl * Inteos Sp. z o.o. http://www.inteos.pl/ */ #ifndef _PLUGLIB_H_ #define _PLUGLIB_H_ #include #include #include #include "bacula.h" #include "dir_plugins.h" /* definitions */ /* size of different string or query buffers */ #define BUFLEN 4096 #define BIGBUFLEN 65536 /* debug and messages functions */ #define JMSG0(ctx,type,msg) \ if (ctx) bfuncs->JobMessage ( ctx, __FILE__, __LINE__, type, 0, PLUGINPREFIX " " msg ); #define JMSG1 JMSG #define JMSG(ctx,type,msg,var) \ if (ctx) bfuncs->JobMessage ( ctx, __FILE__, __LINE__, type, 0, PLUGINPREFIX " " msg, var ); #define JMSG2(ctx,type,msg,var1,var2) \ if (ctx) bfuncs->JobMessage ( ctx, __FILE__, __LINE__, type, 0, PLUGINPREFIX " " msg, var1, var2 ); #define JMSG3(ctx,type,msg,var1,var2,var3) \ if (ctx) bfuncs->JobMessage ( ctx, __FILE__, __LINE__, type, 0, PLUGINPREFIX " " msg, var1, var2, var3 ); #define JMSG4(ctx,type,msg,var1,var2,var3,var4) \ if (ctx) bfuncs->JobMessage ( ctx, __FILE__, __LINE__, type, 0, PLUGINPREFIX " " msg, var1, var2, var3, var4 ); #define DMSG0(ctx,level,msg) \ if (ctx) bfuncs->DebugMessage ( ctx, __FILE__, __LINE__, level, PLUGINPREFIX " " msg ); #define DMSG1 DMSG #define DMSG(ctx,level,msg,var) \ if (ctx) bfuncs->DebugMessage ( ctx, __FILE__, __LINE__, level, PLUGINPREFIX " " msg, var ); #define DMSG2(ctx,level,msg,var1,var2) \ if (ctx) bfuncs->DebugMessage ( ctx, __FILE__, __LINE__, level, PLUGINPREFIX " " msg, var1, var2 ); #define DMSG3(ctx,level,msg,var1,var2,var3) \ if (ctx) bfuncs->DebugMessage ( ctx, __FILE__, __LINE__, level, PLUGINPREFIX " " msg, var1, var2, var3 ); #define DMSG4(ctx,level,msg,var1,var2,var3,var4) \ if (ctx) bfuncs->DebugMessage ( ctx, __FILE__, __LINE__, level, PLUGINPREFIX " " msg, var1, var2, var3, var4 ); #define DMSG6(ctx,level,msg,var1,var2,var3,var4,var5,var6) \ if (ctx) bfuncs->DebugMessage ( ctx, __FILE__, __LINE__, level, PLUGINPREFIX " " msg, var1, var2, var3, var4, var5, var6 ); /* fixed debug level definitions */ #define D1 1 /* debug for every error */ #define DERROR D1 #define D2 10 /* debug only important stuff */ #define DINFO D2 #define D3 200 /* debug for information only */ #define DDEBUG D3 #define D4 800 /* debug for detailed information only */ #define DVDEBUG D4 #define getBaculaVar(bvar,val) bfuncs->getBaculaValue(ctx, bvar, val); /* used for sanity check in plugin functions */ #define 
ASSERT_CTX \ if (!ctx || !ctx->pContext || !bfuncs) \ { \ return bRC_Error; \ } /* defines for handleEvent */ #define DMSG_EVENT_STR(event,value) DMSG2(ctx, DINFO, "%s value=%s\n", eventtype2str(event), NPRT((char *)value)); #define DMSG_EVENT_CHAR(event,value) DMSG2(ctx, DINFO, "%s value='%c'\n", eventtype2str(event), (char)value); #define DMSG_EVENT_LONG(event,value) DMSG2(ctx, DINFO, "%s value=%ld\n", eventtype2str(event), (intptr_t)value); #define DMSG_EVENT_PTR(event,value) DMSG2(ctx, DINFO, "%s value=%p\n", eventtype2str(event), value); const char *eventtype2str(bDirEvent *event); uint64_t pluglib_size_suffix(int disksize, char suff); uint64_t pluglib_size_suffix(double disksize, char suff); bRC pluglib_mkpath(bpContext* ctx, char* path, bool isfatal); /* * Checks if plugin command points to our Plugin * * in: * command - the plugin command used for backup/restore * out: * True - if it is our plugin command * False - the other plugin command */ inline bool isourplugincommand(const char *pluginprefix, const char *command) { /* check if it is our Plugin command */ if (strncmp(pluginprefix, command, strlen(pluginprefix)) == 0){ /* it is not our plugin prefix */ return true; } return false; } #endif /* _PLUGLIB_H_ */ bacula-15.0.3/src/plugins/dir/test-authentication-api-dir.c0000644000175000017500000001732514771010173023403 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * Sample Plugin program * * Kern Sibbald, October 2007 */ #include "bacula.h" #include "dir_plugins.h" #include "dir_authplugin.h" #include "dirpluglib.h" #ifdef __cplusplus extern "C" { #endif #define PLUGIN_LICENSE "AGPLv3" #define PLUGIN_AUTHOR "Radosław Korzeniewski" #define PLUGIN_DATE "September 2020" #define PLUGIN_VERSION "1" #define PLUGIN_DESCRIPTION "Test Director Auth API Plugin" /* Forward referenced functions */ static bRC newPlugin(bpContext *ctx); static bRC freePlugin(bpContext *ctx); static bRC getPluginValue(bpContext *ctx, pDirVariable var, void *value); static bRC setPluginValue(bpContext *ctx, pDirVariable var, void *value); static bRC handlePluginEvent(bpContext *ctx, bDirEvent *event, void *value); static bRC getAuthenticationData(bpContext *ctx, const char *console, const char *param, void **data); static bRC getAuthorizationData(bpContext *ctx, const char *console, const char *param, void **data); /* Plugin compile time variables */ #define PLUGINPREFIX "authapi:" #define PLUGIN_NAME "test-authentication-api" /* Pointers to Bacula functions */ bDirFuncs *bfuncs = NULL; bDirInfo *binfo = NULL; static pDirInfo pluginInfo = { sizeof(pluginInfo), DIR_PLUGIN_INTERFACE_VERSION, DIR_PLUGIN_MAGIC, PLUGIN_LICENSE, PLUGIN_AUTHOR, PLUGIN_DATE, PLUGIN_VERSION, PLUGIN_DESCRIPTION }; static pDirFuncs pluginFuncs = { sizeof(pluginFuncs), DIR_PLUGIN_INTERFACE_VERSION, /* Entry points into plugin */ newPlugin, /* new plugin instance */ freePlugin, /* free plugin instance */ getPluginValue, setPluginValue, handlePluginEvent, getAuthenticationData, getAuthorizationData, }; static bDirAuthenticationData testquestions0[] = { // operation; question; data; {bDirAuthenticationOperationLogin, "Username:", 0}, {bDirAuthenticationOperationPassword, "Password:", 1}, }; static bDirAuthenticationRegister testregister0 = { .name = PLUGIN_NAME, .welcome = "This is a test authplugin API Plugin. Use root/root to login.", .num = 2, .data = testquestions0, .nsTTL = 0, }; static bDirAuthenticationData testquestions1[] = { // operation; question; data; {bDirAuthenticationOperationLogin, "Username:", 0}, {bDirAuthenticationOperationPlugin, NULL, 1}, {bDirAuthenticationOperationPlain, "Response:", 2}, }; static bDirAuthenticationData testquestions1_msg = { bDirAuthenticationOperationMessage, NULL, 0 }; static bDirAuthenticationRegister testregister1 = { .name = PLUGIN_NAME, .welcome = "This is a test authplugin API Plugin. 
Use bacula username to login.", .num = 3, .data = testquestions1, .nsTTL = 0, }; struct test_api : public SMARTALLOC { #if __cplusplus >= 201103L POOL_MEM username{PM_NAME}; POOL_MEM password{PM_NAME}; POOL_MEM challenge{PM_NAME}; POOL_MEM challenge_str{PM_NAME}; int mode{0}; #else POOL_MEM username; POOL_MEM password; POOL_MEM challenge; POOL_MEM challenge_str; int mode; test_api() : username(PM_NAME), password(PM_NAME), challenge(PM_NAME), challenge_str(PM_NAME), mode(0) {} #endif }; bRC loadPlugin(bDirInfo *lbinfo, bDirFuncs *lbfuncs, pDirInfo **pinfo, pDirFuncs **pfuncs) { bfuncs = lbfuncs; /* set Bacula funct pointers */ binfo = lbinfo; Dmsg2(DINFO, "Loaded: size=%d version=%d\n", bfuncs->size, bfuncs->version); *pinfo = &pluginInfo; /* return pointer to our info */ *pfuncs = &pluginFuncs; /* return pointer to our functions */ return bRC_OK; } bRC unloadPlugin() { Dmsg0(DINFO, "plugin: Unloaded\n"); return bRC_OK; } static bRC newPlugin(bpContext *ctx) { test_api *self = New (test_api); DMSG0(ctx, DINFO, "newPlugin\n"); ctx->pContext = self; return bRC_OK; } static bRC freePlugin(bpContext *ctx) { DMSG0(ctx, DINFO, "freePlugin\n"); if (ctx->pContext) { test_api *self = (test_api *)ctx->pContext; delete self; } return bRC_OK; } static bRC getPluginValue(bpContext *ctx, pDirVariable var, void *value) { DMSG1(ctx, DINFO, "plugin: getPluginValue var=%d\n", var); return bRC_OK; } static bRC setPluginValue(bpContext *ctx, pDirVariable var, void *value) { DMSG1(ctx, DINFO, "plugin: setPluginValue var=%d\n", var); return bRC_OK; } static bRC handlePluginEvent(bpContext *ctx, bDirEvent *event, void *value) { bDirAuthValue *pvalue; test_api *self = (test_api *)ctx->pContext; int r; switch (event->eventType) { case bDirEventAuthenticationQuestion: pvalue = (bDirAuthValue *)value; switch (self->mode) { case 1: r = rand(); Mmsg(self->challenge, "%d", r); Mmsg(self->challenge_str, "You should use %s as a response.", self->challenge.c_str()); testquestions1_msg.question = self->challenge_str.c_str(); pvalue->authdata = &testquestions1_msg; break; default: break; } break; case bDirEventAuthenticationResponse: pvalue = (bDirAuthValue *)value; DMSG_EVENT_STR(event, pvalue->response); switch (self->mode) { case 0: switch (pvalue->seqdata) { case 0: pm_strcpy(self->username, pvalue->response); break; case 1: pm_strcpy(self->password, pvalue->response); break; default: break; } case 1: switch (pvalue->seqdata) { case 0: pm_strcpy(self->username, pvalue->response); break; case 2: pm_strcpy(self->password, pvalue->response); break; default: break; } } break; case bDirEventAuthenticate: DMSG_EVENT_PTR(event, value); switch (self->mode) { case 0: if (!bstrcmp(self->username.c_str(), "root") || !bstrcmp(self->password.c_str(), "root")) { return bRC_Error; } break; case 1: if (!bstrcmp(self->username.c_str(), "bacula") || !bstrcmp(self->password.c_str(), self->challenge.c_str())) { return bRC_Error; } break; default: return bRC_Error; } break; default: break; } return bRC_OK; } static bRC getAuthenticationData(bpContext *ctx, const char *console, const char *param, void **data) { test_api *self = (test_api *)ctx->pContext; bDirAuthenticationRegister **padata = (bDirAuthenticationRegister **)data; self->mode = 0; // this is a default DMSG2(ctx, DINFO, "registering with: console=%s %s\n", console, NPRT(param)); sscanf(param, PLUGIN_NAME ":%d", &self->mode); switch (self->mode) { case 1: DMSG0(ctx, DINFO, "testregister1\n"); *padata = &testregister1; break; default: DMSG0(ctx, DINFO, "testregister0\n"); *padata = 
&testregister0; break; } return bRC_OK; } static bRC getAuthorizationData(bpContext *ctx, const char *console, const char *param, void **data) { return bRC_OK; } #ifdef __cplusplus } #endif bacula-15.0.3/src/plugins/dir/example-plugin-dir.c0000644000175000017500000001171714771010173021566 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Sample Plugin program * * Kern Sibbald, October 2007 */ #include "bacula.h" #include "dir_plugins.h" #ifdef __cplusplus extern "C" { #endif #define PLUGIN_LICENSE "AGPLv3" #define PLUGIN_AUTHOR "Kern Sibbald" #define PLUGIN_DATE "January 2008" #define PLUGIN_VERSION "1" #define PLUGIN_DESCRIPTION "Test Director Daemon Plugin" /* Forward referenced functions */ static bRC newPlugin(bpContext *ctx); static bRC freePlugin(bpContext *ctx); static bRC getPluginValue(bpContext *ctx, pDirVariable var, void *value); static bRC setPluginValue(bpContext *ctx, pDirVariable var, void *value); static bRC handlePluginEvent(bpContext *ctx, bDirEvent *event, void *value); /* Pointers to Bacula functions */ static bDirFuncs *bfuncs = NULL; static bDirInfo *binfo = NULL; static pDirInfo pluginInfo = { sizeof(pluginInfo), DIR_PLUGIN_INTERFACE_VERSION, DIR_PLUGIN_MAGIC, PLUGIN_LICENSE, PLUGIN_AUTHOR, PLUGIN_DATE, PLUGIN_VERSION, PLUGIN_DESCRIPTION }; static pDirFuncs pluginFuncs = { sizeof(pluginFuncs), DIR_PLUGIN_INTERFACE_VERSION, /* Entry points into plugin */ newPlugin, /* new plugin instance */ freePlugin, /* free plugin instance */ getPluginValue, setPluginValue, handlePluginEvent }; bRC loadPlugin(bDirInfo *lbinfo, bDirFuncs *lbfuncs, pDirInfo **pinfo, pDirFuncs **pfuncs) { bfuncs = lbfuncs; /* set Bacula funct pointers */ binfo = lbinfo; printf("plugin: Loaded: size=%d version=%d\n", bfuncs->size, bfuncs->version); *pinfo = &pluginInfo; /* return pointer to our info */ *pfuncs = &pluginFuncs; /* return pointer to our functions */ return bRC_OK; } bRC unloadPlugin() { printf("plugin: Unloaded\n"); return bRC_OK; } static bRC newPlugin(bpContext *ctx) { int JobId = 0; bfuncs->getBaculaValue(ctx, bDirVarJobId, (void *)&JobId); printf("plugin: newPlugin JobId=%d\n", JobId); bfuncs->registerBaculaEvents(ctx, 1, 2, 0); return bRC_OK; } static bRC freePlugin(bpContext *ctx) { int JobId = 0; bfuncs->getBaculaValue(ctx, bDirVarJobId, (void *)&JobId); printf("plugin: freePlugin JobId=%d\n", JobId); return bRC_OK; } static bRC getPluginValue(bpContext *ctx, pDirVariable var, void *value) { printf("plugin: getPluginValue var=%d\n", var); return bRC_OK; } static bRC setPluginValue(bpContext *ctx, pDirVariable var, void *value) { printf("plugin: setPluginValue var=%d\n", var); return bRC_OK; } static bRC handlePluginEvent(bpContext *ctx, bDirEvent *event, void *value) { char *name; int val; switch (event->eventType) { case bDirEventJobStart: printf("plugin: HandleEvent JobStart\n"); break; case bDirEventJobEnd: printf("plugin: HandleEvent JobEnd\n"); bfuncs->getBaculaValue(ctx, bDirVarJob, (void *)&name); 
printf("plugin: bDirVarJob=%s\n", name); bfuncs->getBaculaValue(ctx, bDirVarJobId, (void *)&val); printf("plugin: bDirVarJobId=%d\n", val); bfuncs->getBaculaValue(ctx, bDirVarType, (void *)&val); printf("plugin: bDirVarType=%c\n", val); bfuncs->getBaculaValue(ctx, bDirVarLevel, (void *)&val); printf("plugin: bDirVarLevel=%c\n", val); bfuncs->getBaculaValue(ctx, bDirVarClient, (void *)&name); printf("plugin: bDirVarClient=%s\n", name); bfuncs->getBaculaValue(ctx, bDirVarCatalog, (void *)&name); printf("plugin: bDirVarCatalog=%s\n", name); bfuncs->getBaculaValue(ctx, bDirVarPool, (void *)&name); printf("plugin: bDirVarPool=%s\n", name); bfuncs->getBaculaValue(ctx, bDirVarStorage, (void *)&name); printf("plugin: bDirVarStorage=%s\n", name); bfuncs->getBaculaValue(ctx, bDirVarJobErrors, (void *)&val); printf("plugin: bDirVarJobErrors=%d\n", val); bfuncs->getBaculaValue(ctx, bDirVarJobFiles, (void *)&val); printf("plugin: bDirVarJobFiles=%d\n", val); bfuncs->getBaculaValue(ctx, bDirVarNumVols, (void *)&val); printf("plugin: bDirVarNumVols=%d\n", val); break; } bfuncs->getBaculaValue(ctx, bDirVarJobName, (void *)&name); printf("Job Name=%s\n", name); bfuncs->JobMessage(ctx, __FILE__, __LINE__, M_INFO, 0, "JobMesssage message"); bfuncs->DebugMessage(ctx, __FILE__, __LINE__, 1, "DebugMesssage message"); return bRC_OK; } #ifdef __cplusplus } #endif bacula-15.0.3/src/plugins/dir/Makefile0000644000175000017500000000411514771010173017351 0ustar bsbuildbsbuild# # Simple Makefile for building test FD plugins for Bacula # # Copyright (C) 2000-2023 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # include Makefile.inc .c.lo: @echo "Compiling $< ..." $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(CPPFLAGS) $(CFLAGS) -I${SRCDIR} -I${DIRDIR} -I${LIBDIR} -DWORKDIR=\"$(DESTDIR)$(working_dir)\" -c $< %.lo: %.c %.h @echo "Pattern compiling $< ..." $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(CPPFLAGS) $(CFLAGS) -I${SRCDIR} -I${DIRDIR} -I${LIBDIR} -DWORKDIR=\"$(DESTDIR)$(working_dir)\" -c $(@:.lo=.c) all: dirpluglib.lo example-plugin-dir.la test-authentication-api-dir.la install-totp: $(MAKE) -C totp/ install # example-plugin-dir.lo: example-plugin-dir.c ${DIRDIR}/dir_plugins.h # $(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(CFLAGS) -I../.. -I${DIRDIR} -c example-plugin-dir.c example-plugin-dir.la: example-plugin-dir.lo @echo "Linking $(@:.la=.so) ..." $(NO_ECHO)$(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -shared example-plugin-dir.lo -o $@ -rpath $(plugindir) -module -export-dynamic -avoid-version # test-authentication-api-dir.lo: test-authentication-api-dir.c ${DIRDIR}/dir_plugins.h # $(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(CFLAGS) -I../.. -I${DIRDIR} -c test-authentication-api-dir.c test-authentication-api-dir.la: test-authentication-api-dir.lo dirpluglib.lo @echo "Linking $(@:.la=.so) ..." $(NO_ECHO)$(LIBTOOL_LINK) --silent $(CXX) $(LDFLAGS) -shared $^ -o $@ -rpath $(plugindir) -module -export-dynamic -avoid-version install: all $(DIRPLUG_INSTALL_TARGET) $(NO_ECHO)$(MKDIR) $(DESTDIR)$(plugindir) install-bpam-ldap: $(MAKE) -C ldap install $(LIBTOOL_CLEAN_TARGET): $(NO_ECHO)find . 
-name '*.lo' -print | xargs $(LIBTOOL_CLEAN) $(RMF) $(NO_ECHO)$(RMF) *.la $(NO_ECHO)$(RMF) -r .libs _libs clean: $(LIBTOOL_CLEAN_TARGET) $(NO_ECHO)rm -f main *.so *.o 1 2 3 $(MAKE) -C totp clean $(MAKE) -C ldap clean distclean: clean $(NO_ECHO)rm -f Makefile $(LIBTOOL_UNINSTALL_TARGET): $(NO_ECHO)$(LIBTOOL_UNINSTALL) $(RMF) $(DESTDIR)$(plugindir)/example-plugin-dir.la uninstall: $(LIBTOOL_UNINSTALL_TARGET) depend: bacula-15.0.3/src/plugins/README0000644000175000017500000000351714771010173016020 0ustar bsbuildbsbuild This is the plugins implementation for Bacula. The current plugins for the DIR and SD are untested. For the File daemon, there are three plugins. The first is purely experimental. The second is quite functional and useful -- bpipe, and the third is the Exchange plugin For more details on plugins please see the New Features chapter of the Concepts document. What is implemented: - load a plugin - get main entry points in plugin (two) - pass plugin Bacula entry points - get plugin's entry points - keep list of plugins that are loaded - The plugin currently only needs config.h and bc_types.h in the Bacula src directory to be build. Otherwise, it is totally independent of Bacula. - The main test program is intended to be integrated into Bacula (at least a number of the functions therein). - Search for all plugins in the plugin directory - Implement Bacula context for plugin instances - Implement plugin context for plugin instances - Integrate the code into Bacula - Figure out a way to deal with 3 types of plugins (Director, File daemon, Storage daemon). - Pass Version information in plugin Initialize - Document the interface - Test Win32 plugins - Print all plugins loaded when Bacula starts - Print all plugins loaded during traceback - Make libbac.a into a shared object and allow plugin to call functions in directly so that plugins can use the same "safe" system utilities that Bacula uses. - Document libbac.so (when implemented) - Write more plugins that really do something. - Error handling must be much improved and brought into Bacula programming style. It currently just printf()s a message then exits (partially done). What is not yet implemented: - Implement plugin license/version checking - Some better method to pass variables - Define more functionality (Bacula entry points, ...) especially for DIR and SD plugins bacula-15.0.3/src/plugins/fd/0000755000175000017500000000000014771010173015523 5ustar bsbuildbsbuildbacula-15.0.3/src/plugins/fd/test-handlexacl-plugin-fd.c0000644000175000017500000003156214771010173022641 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * A simple test plugin for the Bacula File Daemon derived from * the bpipe plugin, but used for testing new features. 
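 *
 * As a rough illustration of the first steps listed in the plugins README
 * earlier in this archive ("load a plugin", "get main entry points in
 * plugin (two)"), a host program could resolve the two exported entry
 * points of a plugin shared object with plain POSIX dlopen()/dlsym().
 * This is only a sketch, not Bacula's actual loader code, and the .so
 * path used here is made up:
 *
 *    #include <dlfcn.h>
 *
 *    typedef bRC (*load_fn)(bInfo *, bFuncs *, pInfo **, pFuncs **);
 *    typedef bRC (*unload_fn)(void);
 *
 *    void *h = dlopen("/opt/bacula/plugins/test-plugin-fd.so", RTLD_NOW);
 *    load_fn   load   = (load_fn)dlsym(h, "loadPlugin");
 *    unload_fn unload = (unload_fn)dlsym(h, "unloadPlugin");
 *    // load(binfo, bfuncs, &pinfo, &pfuncs) hands the daemon's entry
 *    // points to the plugin and receives the plugin's pInfo/pFuncs
 *    // tables back; unload() is called before the .so is closed.
 *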
* * Kern Sibbald, October 2007 * */ #include "bacula.h" #include "fd_plugins.h" #undef malloc #undef free #undef strdup #define fi __FILE__ #define li __LINE__ #ifdef __cplusplus extern "C" { #endif static const int dbglvl = 50; #define PLUGIN_LICENSE "AGPLv3" #define PLUGIN_AUTHOR "Radosław Korzeniewski" #define PLUGIN_DATE "Nov 2017" #define PLUGIN_VERSION "1" #define PLUGIN_DESCRIPTION "Bacula Test handleXACLdata File Daemon Plugin" /* Forward referenced functions */ static bRC newPlugin(bpContext *ctx); static bRC freePlugin(bpContext *ctx); static bRC getPluginValue(bpContext *ctx, pVariable var, void *value); static bRC setPluginValue(bpContext *ctx, pVariable var, void *value); static bRC handlePluginEvent(bpContext *ctx, bEvent *event, void *value); static bRC startBackupFile(bpContext *ctx, struct save_pkt *sp); static bRC endBackupFile(bpContext *ctx); static bRC pluginIO(bpContext *ctx, struct io_pkt *io); static bRC startRestoreFile(bpContext *ctx, const char *cmd); static bRC endRestoreFile(bpContext *ctx); static bRC createFile(bpContext *ctx, struct restore_pkt *rp); static bRC setFileAttributes(bpContext *ctx, struct restore_pkt *rp); static bRC checkFile(bpContext *ctx, char *fname); static bRC handleXACLdata(bpContext *ctx, struct xacl_pkt *xacl); /* Pointers to Bacula functions */ static bFuncs *bfuncs = NULL; static bInfo *binfo = NULL; /* Plugin Information block */ static pInfo pluginInfo = { sizeof(pluginInfo), FD_PLUGIN_INTERFACE_VERSION, FD_PLUGIN_MAGIC, PLUGIN_LICENSE, PLUGIN_AUTHOR, PLUGIN_DATE, PLUGIN_VERSION, PLUGIN_DESCRIPTION }; /* Plugin entry points for Bacula */ static pFuncs pluginFuncs = { sizeof(pluginFuncs), FD_PLUGIN_INTERFACE_VERSION, /* Entry points into plugin */ newPlugin, /* new plugin instance */ freePlugin, /* free plugin instance */ getPluginValue, setPluginValue, handlePluginEvent, startBackupFile, endBackupFile, startRestoreFile, endRestoreFile, pluginIO, createFile, setFileAttributes, checkFile, handleXACLdata, NULL, /* No RestoreFileList */ NULL /* No checkStream */ }; /* * Plugin private context */ struct plugin_ctx { boffset_t offset; char *cmd; /* plugin command line */ char *fname; /* filename to "backup/restore" */ bool acl; /* if acl was saved */ bool xattr; /* if xattr was saved */ POOLMEM *buf; /* store ConfigFile */ }; /* * loadPlugin() and unloadPlugin() are entry points that are * exported, so Bacula can directly call these two entry points * they are common to all Bacula plugins. */ /* * External entry point called by Bacula to "load the plugin */ bRC loadPlugin(bInfo *lbinfo, bFuncs *lbfuncs, pInfo **pinfo, pFuncs **pfuncs) { bfuncs = lbfuncs; /* set Bacula funct pointers */ binfo = lbinfo; *pinfo = &pluginInfo; /* return pointer to our info */ *pfuncs = &pluginFuncs; /* return pointer to our functions */ return bRC_OK; } /* * External entry point to unload the plugin */ bRC unloadPlugin() { return bRC_OK; } /* * The following entry points are accessed through the function * pointers we supplied to Bacula. Each plugin type (dir, fd, sd) * has its own set of entry points that the plugin must define. */ /* * Create a new instance of the plugin i.e. 
allocate our private storage */ static bRC newPlugin(bpContext *ctx) { struct plugin_ctx *pctx; bfuncs->DebugMessage(ctx, fi, li, dbglvl, "test-handlxacl-plugin-fd: newPlugin\n"); pctx = (struct plugin_ctx *)malloc(sizeof(struct plugin_ctx)); if (!pctx) { return bRC_Error; } memset(pctx, 0, sizeof(struct plugin_ctx)); ctx->pContext = (void *)pctx; /* set our context pointer */ return bRC_OK; } /* * Free a plugin instance, i.e. release our private storage */ static bRC freePlugin(bpContext *ctx) { struct plugin_ctx *pctx = (struct plugin_ctx *)ctx->pContext; if (!pctx) { return bRC_Error; } if (pctx->buf) { free_pool_memory(pctx->buf); } if (pctx->cmd) { free(pctx->cmd); /* free any allocated command string */ } free(pctx); /* free our private context */ ctx->pContext = NULL; return bRC_OK; } /* * Return some plugin value (none defined) */ static bRC getPluginValue(bpContext *ctx, pVariable var, void *value) { return bRC_OK; } /* * Set a plugin value (none defined) */ static bRC setPluginValue(bpContext *ctx, pVariable var, void *value) { return bRC_OK; } /* * Handle an event that was generated in Bacula */ static bRC handlePluginEvent(bpContext *ctx, bEvent *event, void *value) { struct plugin_ctx *pctx = (struct plugin_ctx *)ctx->pContext; if (!pctx) { return bRC_Error; } /* * Most events don't interest us so we ignore them. * the debug messages are so that plugin writers can enable them to see * what is really going on. */ switch (event->eventType) { case bEventJobStart: case bEventJobEnd: case bEventEndBackupJob: case bEventLevel: case bEventSince: case bEventStartRestoreJob: case bEventEndRestoreJob: case bEventEndFileSet: case bEventStartBackupJob: case bEventRestoreObject: case bEventPluginCommand: case bEventVssBeforeCloseRestore: case bEventComponentInfo: bfuncs->DebugMessage(ctx, fi, li, dbglvl, "handlexacl: bEvent=%i\n", event->eventType); break; /* Plugin command e.g. 
plugin = ::read command:write command */ case bEventRestoreCommand: /* Fall-through wanted */ case bEventEstimateCommand: /* Fall-through wanted */ case bEventBackupCommand: { char *p; bfuncs->DebugMessage(ctx, fi, li, dbglvl, "handlexacl: plugincommand cmd=%s\n", (char *)value); /* check if already handled, if so it is a new command */ if (pctx->cmd){ free(pctx->cmd); pctx->cmd = NULL; pctx->acl = pctx->xattr = false; } pctx->cmd = strdup((char *)value); p = strchr(pctx->cmd, ':'); if (!p) { bfuncs->JobMessage(ctx, fi, li, M_FATAL, 0, "Plugin terminator not found: %s\n", (char *)value); return bRC_Error; } *p++ = 0; /* terminate plugin */ pctx->fname = p; break; } default: bfuncs->DebugMessage(ctx, fi, li, dbglvl, "handlexacl: unknown event=%i\n", event->eventType); break; } return bRC_OK; } /* * Start the backup of a specific file */ static bRC startBackupFile(bpContext *ctx, struct save_pkt *sp) { struct plugin_ctx *pctx = (struct plugin_ctx *)ctx->pContext; bfuncs->DebugMessage(ctx, fi, li, dbglvl, "handlexacl: startBackupFile\n"); if (!pctx) { return bRC_Error; } sp->fname = pctx->fname; time_t now = time(NULL); sp->statp.st_mode = 0700 | S_IFREG; sp->statp.st_ctime = now; sp->statp.st_mtime = now; sp->statp.st_atime = now; sp->statp.st_size = 0; sp->statp.st_blksize = 4096; sp->statp.st_blocks = 1; sp->statp.st_nlink = 1; sp->statp.st_uid = 0; sp->statp.st_gid = 0; sp->type = FT_REGE; return bRC_OK; } /* * Done with backup of this file */ static bRC endBackupFile(bpContext *ctx) { /* * We would return bRC_More if we wanted startBackupFile to be * called again to backup another file */ bfuncs->DebugMessage(ctx, fi, li, dbglvl, "handlexacl: endBackupFile\n"); return bRC_OK; } /* * Bacula is calling us to do the actual I/O */ static bRC pluginIO(bpContext *ctx, struct io_pkt *io) { struct plugin_ctx *pctx = (struct plugin_ctx *)ctx->pContext; bfuncs->DebugMessage(ctx, fi, li, dbglvl, "handlexacl: pluginIO: %i\n", io->func); if (!pctx) { return bRC_Error; } io->status = 0; io->io_errno = 0; return bRC_OK; } /* * Bacula is notifying us that a plugin name string was found, and * passing us the plugin command, so we can prepare for a restore. */ static bRC startRestoreFile(bpContext *ctx, const char *cmd) { bfuncs->DebugMessage(ctx, fi, li, dbglvl, "handlexacl: startRestoreFile cmd=%s\n", cmd); return bRC_OK; } /* * Bacula is notifying us that the plugin data has terminated, so * the restore for this particular file is done. */ static bRC endRestoreFile(bpContext *ctx) { bfuncs->DebugMessage(ctx, fi, li, dbglvl, "handlexacl: endRestoreFile\n"); return bRC_OK; } /* * This is called during restore to create the file (if necessary) * We must return in rp->create_status: * * CF_ERROR -- error * CF_SKIP -- skip processing this file * CF_EXTRACT -- extract the file (i.e.call i/o routines) * CF_CREATED -- created, but no content to extract (typically directories) * */ static bRC createFile(bpContext *ctx, struct restore_pkt *rp) { bfuncs->DebugMessage(ctx, fi, li, dbglvl, "handlexacl: createFile\n"); rp->create_status = CF_EXTRACT; return bRC_OK; } /* * We will get here if the File is a directory after everything * is written in the directory. 
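 *
 * A short worked example of the plugin command handling in
 * handlePluginEvent() above (the plugin and file names are made up; only
 * the split at the first ':' is taken from the code): given a FileSet
 * entry such as
 *
 *    Plugin = "test-handlexacl-plugin:/@test/virtual-file"
 *
 * the whole string arrives in 'value' with bEventBackupCommand, is copied
 * with strdup(), and strchr(pctx->cmd, ':') splits it so that
 *
 *    pctx->cmd   -> "test-handlexacl-plugin"
 *    pctx->fname -> "/@test/virtual-file"
 *
 * If no ':' is present the handler issues a fatal job message and returns
 * bRC_Error.
 *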
*/ static bRC setFileAttributes(bpContext *ctx, struct restore_pkt *rp) { bfuncs->DebugMessage(ctx, fi, li, dbglvl, "handlexacl: setFileAttributes\n"); return bRC_OK; } /* When using Incremental dump, all previous dumps are necessary */ static bRC checkFile(bpContext *ctx, char *fname) { struct plugin_ctx *pctx = (struct plugin_ctx *)ctx->pContext; bfuncs->DebugMessage(ctx, fi, li, dbglvl, "handlexacl: checkFile\n"); /* it is always better to response with Seen not OK :) */ if (pctx->fname && strcmp(pctx->fname, fname) == 0) { return bRC_Seen; } else { return bRC_OK; } } /* * New Bacula Plugin API require this */ static bRC handleXACLdata(bpContext *ctx, struct xacl_pkt *xacl) { struct plugin_ctx *pctx = (struct plugin_ctx *)ctx->pContext; const char * aclstr = "user:bacula:rwx------\n"; const char * xattrstr = "Bacula:Rulez\n"; bfuncs->DebugMessage(ctx, fi, li, dbglvl, "handlexacl: handleXACLdata: %p\n", xacl); if (!pctx) { return bRC_Error; } switch (xacl->func){ case BACL_BACKUP: bfuncs->DebugMessage(ctx, fi, li, dbglvl, "handlexacl: ACL backup\n"); if (!pctx->acl){ pctx->acl = true; xacl->content = (char*)aclstr; xacl->count = strlen(xacl->content) + 1; } else { xacl->count = 0; } break; case BACL_RESTORE: if (xacl->count > 0){ bfuncs->DebugMessage(ctx, fi, li, dbglvl, "handlexacl: ACL restore\n"); if (xacl->count != (int)(strlen(aclstr) + 1) || strcmp(xacl->content, aclstr) != 0){ bfuncs->DebugMessage(ctx, fi, li, dbglvl, "handlexacl: ACL restore ERROR\n"); bfuncs->JobMessage(ctx,fi, li, M_FATAL, 0, "handlexacl: ACL restore ERROR\n"); return bRC_Error; } } break; case BXATTR_BACKUP: bfuncs->DebugMessage(ctx, fi, li, dbglvl, "handlexacl: XATTR backup\n"); if (!pctx->xattr){ pctx->xattr = true; xacl->content = (char*)xattrstr; xacl->count = strlen(xacl->content) + 1; } else { xacl->count = 0; } break; case BXATTR_RESTORE: if (xacl->count > 0){ bfuncs->DebugMessage(ctx, fi, li, dbglvl, "handlexacl: XATTR restore\n"); if (xacl->count != (int)(strlen(xattrstr) + 1) || strcmp(xacl->content, xattrstr) != 0){ bfuncs->DebugMessage(ctx, fi, li, dbglvl, "handlexacl: XATTR restore ERROR\n"); bfuncs->JobMessage(ctx,fi, li, M_FATAL, 0, "handlexacl: XATTR restore ERROR\n"); return bRC_Error; } } break; } return bRC_OK; } #ifdef __cplusplus } #endif bacula-15.0.3/src/plugins/fd/file-record.h0000644000175000017500000000607414771010173020076 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
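
   A note on the handleXACLdata() contract implemented at the end of the
   preceding test plugin (a sketch inferred from that implementation, not a
   definitive description of the core-side code): during backup the entry
   point is called repeatedly with func set to BACL_BACKUP (or BXATTR_BACKUP)
   until it reports xacl->count == 0, e.g.

      struct xacl_pkt xp;
      memset(&xp, 0, sizeof(xp));
      xp.func = BACL_BACKUP;
      while (handleXACLdata(ctx, &xp) == bRC_OK && xp.count > 0) {
         // stream xp.count bytes from xp.content, then ask again
         xp.func = BACL_BACKUP;
      }

   On restore the same entry point receives the saved bytes back in
   xacl->content / xacl->count with func set to BACL_RESTORE or
   BXATTR_RESTORE, which the test plugin verifies against its fixed strings.
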
*/ #ifndef journalfilerecord_H #define journalfilerecord_H #include "bacula.h" #include /** * Implementation is part of Bacula's findlib module, and is on: * "/src/findlib/attribs.c" */ extern int decode_stat(char *buf, struct stat *statp, int stat_size, int32_t *LinkFI); extern void encode_stat(char *buf, struct stat *statp, int stat_size, int32_t LinkFI, int data_stream); /** * @brief Data that is saved and retrieved by using the @class Journal */ class FileRecord { public: char *name; char *sname; char *fattrs; int64_t mtime; FileRecord(): name(NULL), sname(NULL), fattrs(NULL), mtime(0) {} bool encode_attrs() { struct stat statbuf; #ifndef HAVE_WIN32 if(lstat(this->name, &statbuf) != 0) { return false; } this->mtime = (int64_t) statbuf.st_mtime; this->fattrs = (char *) malloc(500 * sizeof(char)); encode_stat(this->fattrs, &statbuf, sizeof(statbuf), 0, 0); #else FILE *fp = fopen(this->name, "r"); if (!fp) { Dmsg1(10, "Could not open file %s\n", this->name); return false; } int fd = _fileno(fp); if(fstat(fd, &statbuf) != 0) { fclose(fp); Dmsg1(10, "Could not encode attributes of file %s\n", this->name); return false; } this->mtime = (int64_t) statbuf.st_mtime; this->fattrs = (char *) malloc(500 * sizeof(char)); encode_stat(this->fattrs, &statbuf, sizeof(statbuf), 0, 0); fclose(fp); #endif return true; } void decode_attrs(struct stat &sbuf) { int32_t lfi; decode_stat(this->fattrs, &sbuf, sizeof(sbuf), &lfi); } bool equals(const FileRecord *rec) { return strcmp(this->name, rec->name) == 0 && strcmp(this->fattrs, rec->fattrs) == 0 && this->mtime == rec->mtime; } void getBaculaName(POOLMEM *target) { char mtime_date[200]; time_t t = (time_t) mtime; struct tm *timeinfo = localtime(&t); strftime(mtime_date, 200, "%Y%m%d_%H%M%S", timeinfo); Mmsg(target, "%s.%s", name, mtime_date); } ~FileRecord() { if (name != NULL) { free(name); } if (sname != NULL) { free(sname); } if (fattrs != NULL) { free(fattrs); } } }; #endif bacula-15.0.3/src/plugins/fd/cdp-fd.c0000644000175000017500000003636514771010173017041 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
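
   Illustrative use of the FileRecord class defined just above (the path and
   the timestamp are made up; the calls themselves are the ones the class
   provides):

      FileRecord rec;
      rec.name = bstrdup("/home/user/docs/report.odt");
      if (rec.encode_attrs()) {            // lstat()/fstat() + encode_stat()
         POOLMEM *bname = get_pool_memory(PM_FNAME);
         rec.getBaculaName(bname);         // "/home/user/docs/report.odt.20250101_120000"
         // bname is the name under which this snapshot of the file is backed up
         free_and_null_pool_memory(bname);
      }

   The destructor frees name, sname and fattrs, so they must point to
   malloc()ed memory (bstrdup() and the journal's extract_val() both satisfy
   that).
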
*/ #include "bacula.h" #include "fd_plugins.h" #include "fd_common.h" #include "lib/cmd_parser.h" #include "lib/mem_pool.h" #include "findlib/bfile.h" #include "journal.h" #ifdef __cplusplus extern "C" { #endif #define PLUGIN_LICENSE "AGPLv3" #define PLUGIN_AUTHOR "Henrique Faria" #define PLUGIN_DATE "February 2019" #define PLUGIN_VERSION "0.1" #define PLUGIN_DESCRIPTION "CDP Plugin" #ifdef HAVE_WIN32 #define CONCAT_PATH "%s\\%s" #define WORKING_JOURNAL_TEMPLATE "%s\\%s_%d.journal" #else #define CONCAT_PATH "%s/%s" #define WORKING_JOURNAL_TEMPLATE "%s/%s_%d.journal" #endif /* Forward referenced functions */ static bRC newPlugin(bpContext *ctx); static bRC freePlugin(bpContext *ctx); static bRC handlePluginEvent(bpContext *ctx, bEvent *event, void *value); static bRC startBackupFile(bpContext *ctx, struct save_pkt *sp); static bRC endBackupFile(bpContext *ctx); static bRC pluginIO(bpContext *ctx, struct io_pkt *io); static bRC startRestoreFile(bpContext *ctx, const char *cmd); static bRC createFile(bpContext *ctx, struct restore_pkt *rp); static bRC endRestoreFile(bpContext *ctx); static bRC checkFile(bpContext *ctx, char *fname); /* Pointers to Bacula functions */ static bFuncs *bfuncs = NULL; static bInfo *binfo = NULL; /* Backup Variables */ static char *working = NULL; static pInfo pluginInfo = { sizeof(pluginInfo), FD_PLUGIN_INTERFACE_VERSION, FD_PLUGIN_MAGIC, PLUGIN_LICENSE, PLUGIN_AUTHOR, PLUGIN_DATE, PLUGIN_VERSION, PLUGIN_DESCRIPTION }; static pFuncs pluginFuncs = { sizeof(pluginFuncs), FD_PLUGIN_INTERFACE_VERSION, /* Entry points into plugin */ newPlugin, /* new plugin instance */ freePlugin, /* free plugin instance */ NULL, NULL, handlePluginEvent, startBackupFile, endBackupFile, startRestoreFile, endRestoreFile, pluginIO, createFile, NULL, checkFile, NULL, /* No ACL/XATTR */ NULL, /* No Restore file list */ NULL /* No checkStream */ }; static int DBGLVL = 50; class CdpContext: public SMARTALLOC { public: bpContext *ctx; /** Used by both Backup and Restore Cycles **/ BFILE fd; POOLMEM *fname; bool is_in_use; /** Used only by the Backup Cycle **/ POOLMEM *clientJPath; POOLMEM *jobJPath; POOLMEM *drivesList; // Windows only char *jobName; bool accurate_warning; bool started_backup; bool canceled; alist userHomes; alist journals; int jIndex; cmd_parser parser; Journal *journal; CdpContext(bpContext *actx): ctx(actx), fname(NULL), is_in_use(false), clientJPath(NULL), jobJPath(NULL), drivesList(NULL), jobName(NULL), accurate_warning(false), started_backup(false), canceled(false), userHomes(100, owned_by_alist), journals(100, not_owned_by_alist), jIndex(0) { fname = get_pool_memory(PM_FNAME); clientJPath = get_pool_memory(PM_FNAME); jobJPath = get_pool_memory(PM_FNAME); #ifdef HAVE_WIN32 drivesList = get_pool_memory(PM_FNAME); *drivesList = 0; #endif *fname = *clientJPath = *jobJPath = 0; }; /** Methods called during Backup */ void migrateJournal() { char *uh; int i = 0; foreach_alist(uh, &userHomes) { Journal *j = new Journal(); Mmsg(clientJPath, CONCAT_PATH, uh, JOURNAL_CLI_FNAME); j->setJournalPath(clientJPath); Mmsg(jobJPath, WORKING_JOURNAL_TEMPLATE, working, jobName, i); j->migrateTo(jobJPath); journals.append(j); i++; } }; bool handleBackupCommand(bpContext *ctx, char *cmd) { int i; POOLMEM *userHome; parser.parse_cmd(cmd); for (i = 1; i < parser.argc ; i++) { if (strcasecmp(parser.argk[i], "userhome") == 0 && parser.argv[i]) { userHome = get_pool_memory(PM_FNAME); pm_strcpy(userHome, parser.argv[i]); struct stat sp; if (stat(userHome, &sp) != 0) { Jmsg(ctx, M_ERROR, 
_("Parameter userhome not found: %s\n"), userHome); return false; } if (!S_ISDIR(sp.st_mode)) { Jmsg(ctx, M_ERROR, _("Paramater userhome is not a directory: %s\n"), userHome); return false; } Dmsg(ctx, DBGLVL, "User Home: %s\n", userHome); userHomes.append(bstrdup(userHome)); free_and_null_pool_memory(userHome); } else if (strcasecmp(parser.argk[i], "user") == 0 && parser.argv[i]) { userHome = get_pool_memory(PM_FNAME); int rc = get_user_home_directory(parser.argv[i], userHome); if (rc != 0) { Jmsg(ctx, M_ERROR, _("User not found in the system: %s\n"), parser.argv[i]); return false; } userHomes.append(bstrdup(userHome)); Dmsg(ctx, DBGLVL, "User Home: %s\n", userHome); free_and_null_pool_memory(userHome); return true; } else if (strcasecmp(parser.argk[i], "group") == 0 && parser.argv[i]) { int rc = get_home_directories(parser.argv[i], &userHomes); if (rc != 0) { return false; } return true; } else { Jmsg(ctx, M_ERROR, _("Can't analyse plugin command line %s\n"), cmd); return false; } } return true; }; FileRecord *nextRecord() { if (canceled) { if (journal) { journal->endTransaction(); } return NULL; } if (!started_backup) { if (jIndex >= journals.size()) { return NULL; } journal = (Journal *) journals[jIndex]; if (!journal->beginTransaction("r")) { return NULL; } started_backup = true; } FileRecord *fc = journal->readFileRecord(); if (fc == NULL) { journal->endTransaction(); started_backup = false; unlink(journal->_jPath); Dmsg(ctx, DBGLVL, "No more files to backup. Deleting journal: %s\n", journal->_jPath); delete(journal); jIndex++; } return fc; }; ~CdpContext() { if (journal) { // Clear any possible pending transaction (e.g canceled job) journal->endTransaction(); canceled = true; } free_and_null_pool_memory(clientJPath); free_and_null_pool_memory(jobJPath); free_and_null_pool_memory(fname); #ifdef HAVE_WIN32 free_and_null_pool_memory(drivesList); #endif }; /* Adjust the current fileset depending on what we find in the Journal */ void adapt(Journal *j) { SettingsRecord *settings = j->readSettings(); /* We should not backup the Spool Directory */ if (settings != NULL) { char *sdir = bstrdup(settings->getSpoolDir()); bfuncs->AddExclude(ctx, sdir); Dmsg(ctx, DBGLVL, "Excluded Spool Directory from FileSet %s\n", sdir); delete settings; } /* Foreach folder watched, we add the folder to the backup */ if (!j->beginTransaction("r")) { return; } FolderRecord *rec; #ifdef HAVE_WIN32 int i = 0; for(;;) { rec = j->readFolderRecord(); if (rec == NULL) { drivesList[i] = '\0'; break; } /*On Windows, we must also add the folder drives to create the VSS Snapshot */ if (!strchr(drivesList, rec->path[0])) { drivesList[i++] = toupper(rec->path[0]); Dmsg(ctx, DBGLVL, "Included Drive %c\n", rec->path[0]); } bfuncs->AddInclude(ctx, rec->path); Dmsg(ctx, DBGLVL, "Included Directory into the FileSet %s\n", rec->path); delete rec; } #else for(;;) { rec = j->readFolderRecord(); if (rec == NULL) { break; } bfuncs->AddInclude(ctx, rec->path); Dmsg(ctx, DBGLVL, "Included Directory %s\n", rec->path); delete rec; } #endif j->endTransaction(); }; void adaptFileSet() { for (int i = 0; i < journals.size(); i++) { Journal *j = (Journal *) journals[i]; adapt(j); } } }; /* * Plugin called here when it is first loaded */ bRC DLL_IMP_EXP loadPlugin(bInfo *lbinfo, bFuncs *lbfuncs, pInfo **pinfo, pFuncs **pfuncs) { bfuncs = lbfuncs; /* set Bacula funct pointers */ binfo = lbinfo; *pinfo = &pluginInfo; /* return pointer to our info */ *pfuncs = &pluginFuncs; /* return pointer to our functions */ bfuncs->getBaculaValue(NULL, 
bVarWorkingDir, (void *)&working); return bRC_OK; } /* * Plugin called here when it is unloaded, normally when * Bacula is going to exit. */ bRC DLL_IMP_EXP unloadPlugin() { return bRC_OK; } /* * Called here to make a new instance of the plugin -- i.e. when * a new Job is started. There can be multiple instances of * each plugin that are running at the same time. Your * plugin instance must be thread safe and keep its own * local data. */ static bRC newPlugin(bpContext *ctx) { CdpContext *pCtx = New(CdpContext(ctx)); ctx->pContext = (void *) pCtx; /* set our context pointer */ Dmsg(ctx, DBGLVL, "Working Directory: %s\n", working); return bRC_OK; } /* * Release everything concerning a particular instance of a * plugin. Normally called when the Job terminates. */ static bRC freePlugin(bpContext *ctx) { CdpContext *pCtx = (CdpContext *) ctx->pContext; delete(pCtx); return bRC_OK; } static bRC handlePluginEvent(bpContext *ctx, bEvent *event, void *value) { CdpContext *pCtx = (CdpContext *) ctx->pContext; switch (event->eventType) { case bEventPluginCommand: if (!pCtx->handleBackupCommand(ctx, (char *) value)) { return bRC_Error; }; pCtx->is_in_use = true; pCtx->migrateJournal(); pCtx->adaptFileSet(); break; case bEventEstimateCommand: Jmsg(ctx, M_FATAL, _("The CDP plugin doesn't support estimate\n")); return bRC_Error; case bEventJobStart: bfuncs->getBaculaValue(NULL, bVarJobName, (void *) &(pCtx->jobName)); if (pCtx->jobName == NULL) { pCtx->jobName = (char *) "backup_job"; } Dmsg(ctx, DBGLVL, "Job Name: %s\n", pCtx->jobName); break; case bEventCancelCommand: pCtx->canceled = true; Dmsg(ctx, DBGLVL, "Job canceled\n"); break; #ifdef HAVE_WIN32 case bEventVssPrepareSnapshot: strcpy((char *) value, pCtx->drivesList); Dmsg(ctx, DBGLVL, "VSS Drives list: %s\n", pCtx->drivesList); break; #endif default: break; } return bRC_OK; } /* * Called when starting to backup a file. Here the plugin must * return the "stat" packet for the directory/file and provide * certain information so that Bacula knows what the file is. * The plugin can create "Virtual" files by giving them a * name that is not normally found on the file system. */ static bRC startBackupFile(bpContext *ctx, struct save_pkt *sp) { CdpContext *pCtx = (CdpContext *) ctx->pContext; FileRecord *rec = pCtx->nextRecord(); if(rec != NULL) { //Fill save_pkt struct POOLMEM *bacula_fname = get_pool_memory(PM_FNAME); rec->getBaculaName(bacula_fname); sp->fname = bstrdup(bacula_fname); sp->type = FT_REG; rec->decode_attrs(sp->statp); //Save the name of the file that's inside the Spool Dir //That's the file that will be backed up pm_strcpy(pCtx->fname, rec->sname); delete(rec); free_and_null_pool_memory(bacula_fname); Dmsg(ctx, DBGLVL, "Starting backup of file: %s\n", sp->fname); return bRC_OK; } else { return bRC_Stop; } } /* * Done backing up a file. */ static bRC endBackupFile(bpContext *ctx) { return bRC_More; } /* * Do actual I/O. Bacula calls this after startBackupFile * or after startRestoreFile to do the actual file * input or output. 
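 *
 * For this plugin the resulting backup loop looks roughly like this
 * (a sketch of the call sequence derived from the return codes used
 * above, not a trace of the actual daemon):
 *
 *    startBackupFile()  -> next journal FileRecord, fills save_pkt
 *    pluginIO(IO_OPEN) / pluginIO(IO_READ)... / pluginIO(IO_CLOSE)
 *    endBackupFile()    -> bRC_More, so Bacula asks for another file
 *    ...repeated...
 *    startBackupFile()  -> nextRecord() == NULL, returns bRC_Stop and
 *                          the loop ends
 *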
*/ static bRC pluginIO(bpContext *ctx, struct io_pkt *io) { CdpContext *pCtx = (CdpContext *) ctx->pContext; io->status = -1; io->io_errno = 0; if (!pCtx) { return bRC_Error; } switch (io->func) { case IO_OPEN: if (bopen(&pCtx->fd, pCtx->fname, io->flags, io->mode) < 0) { io->io_errno = errno; io->status = -1; Jmsg(ctx, M_ERROR, "Open file %s failed: ERR=%s\n", pCtx->fname, strerror(errno)); return bRC_Error; } io->status = 1; break; case IO_READ: if (!is_bopen(&pCtx->fd)) { Jmsg(ctx, M_FATAL, "Logic error: NULL read FD\n"); return bRC_Error; } /* Read data from file */ io->status = bread(&pCtx->fd, io->buf, io->count); break; case IO_WRITE: if (!is_bopen(&pCtx->fd)) { Jmsg(ctx, M_FATAL, "Logic error: NULL write FD\n"); return bRC_Error; } io->status = bwrite(&pCtx->fd, io->buf, io->count); break; case IO_SEEK: if (!is_bopen(&pCtx->fd)) { Jmsg(ctx, M_FATAL, "Logic error: NULL FD on delta seek\n"); return bRC_Error; } /* Seek not needed for this plugin, we don't use real sparse file */ io->status = blseek(&pCtx->fd, io->offset, io->whence); break; /* Cleanup things during close */ case IO_CLOSE: io->status = bclose(&pCtx->fd); break; } return bRC_OK; } static bRC startRestoreFile(bpContext *ctx, const char *cmd) { Dmsg(ctx, DBGLVL, "Started file restoration\n"); return bRC_Core; } /* * Called here to give the plugin the information needed to * re-create the file on a restore. It basically gets the * stat packet that was created during the backup phase. * This data is what is needed to create the file, but does * not contain actual file data. */ static bRC createFile(bpContext *ctx, struct restore_pkt *rp) { CdpContext *pCtx = (CdpContext *) ctx->pContext; pm_strcpy(pCtx->fname, rp->ofname); rp->create_status = CF_CORE; Dmsg(ctx, DBGLVL, "Creating file %s\n", rp->ofname); return bRC_OK; } static bRC endRestoreFile(bpContext *ctx) { Dmsg(ctx, DBGLVL, "Finished file restoration\n"); return bRC_OK; } /* When using Incremental dump, all previous dumps are necessary */ static bRC checkFile(bpContext *ctx, char *fname) { CdpContext *pCtx = (CdpContext *) ctx->pContext; if (pCtx->is_in_use) { if (!pCtx->accurate_warning) { pCtx->accurate_warning = true; Jmsg(ctx, M_WARNING, "Accurate mode is not supported. Please disable Accurate mode for this job.\n"); } return bRC_Seen; } else { return bRC_OK; } } #ifdef __cplusplus } #endif bacula-15.0.3/src/plugins/fd/folder-record.h0000644000175000017500000000175014771010173020426 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
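
   A note on the restore path of the preceding cdp-fd.c: createFile() sets
   rp->create_status = CF_CORE and startRestoreFile() returns bRC_Core, i.e.
   the plugin appears to hand creation of the restored file back to the
   Bacula core code rather than doing it itself (in contrast with the
   CF_EXTRACT path used by the test-handlexacl plugin earlier in this
   directory). This reading is based on the CF_*/bRC_* conventions used
   elsewhere in the plugin sources, not on a statement in this file.
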
*/ #ifndef folder_record_H #define folder_record_H #include "bacula.h" #include /** * @brief Data that is saved and retrieved by using the @class Journal */ class FolderRecord { public: char *path; FolderRecord(): path(NULL) {} ~FolderRecord() { if (path != NULL) { free(path); } } }; #endif bacula-15.0.3/src/plugins/fd/journal.c0000644000175000017500000003701414771010173017346 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #include "journal.h" static int DBGLVL = 90; bool Journal::beginTransaction(const char *mode) { if (hasTransaction) { return true; } bool hasLock = false; int timeout = 1800; for (int time = 0; time < timeout; time++) { _fp = bfopen(_jPath, mode); if (!_fp) { Dmsg0(10, "Tried to start transaction but Journal File was not found.\n"); return false; } _fd = fileno(_fp); int rc = flock(_fd, LOCK_EX | LOCK_NB); //Success. Lock acquired. if (rc == 0) { hasLock = true; break; } fclose(_fp); sleep(1); } if (hasLock) { hasTransaction = true; return true; } else { Dmsg0(10, "Tried to start transaction but could not lock Journal File.\n"); return false; } } void Journal::endTransaction() { if (!hasTransaction) { return; } if (_fp != NULL) { int rc = flock(_fd, LOCK_UN); if (rc != 0) { Dmsg0(10, "could not release flock\n"); } fclose(_fp); _fp = NULL; } _fd = -1; hasTransaction = false; } /** * Given a string formatted as 'key=val\n', * this function tries to return 'val' */ char *Journal::extract_val(const char *key_val) { const int SANITY_CHECK = 10000; int max_idx = cstrlen(key_val) - 1; char *val = (char *) malloc(SANITY_CHECK * sizeof(char)); int idx_keyend = 0; while(key_val[idx_keyend] != '=') { idx_keyend++; if(idx_keyend > max_idx) { free(val); return NULL; } } int i; int j = 0; for(i = idx_keyend + 1; key_val[i] != '\n' ; i++) { val[j] = key_val[i]; j++; if(i > max_idx) { free(val); return NULL; } } val[j] = '\0'; return val; } bool Journal::setJournalPath(const char *path) { _jPath = bstrdup(path); FILE *jfile = bfopen(_jPath, "r"); if (!jfile) { if (this->beginTransaction("w")) { SettingsRecord rec; rec.journalVersion = JOURNAL_VERSION; this->writeSettings(rec); } else { Dmsg1(10, "(ERROR) Could not create Journal File: %s\n", path); return false; } } else { fclose(jfile); } return true; } bool Journal::setJournalPath(const char *path, const char *spoolDir) { _jPath = bstrdup(path); FILE *jfile = fopen(_jPath, "r"); if (!jfile) { if (this->beginTransaction("w")) { SettingsRecord rec; rec.journalVersion = JOURNAL_VERSION; rec.setSpoolDir(spoolDir); this->writeSettings(rec); } else { Dmsg1(10, "(ERROR) Could not create Journal File: %s\n", path); return false; } } else { fclose(jfile); } return true; } bool Journal::writeSettings(SettingsRecord &rec) { int rc; bool success = true; const char *spoolDir; char jversion[50]; char heartbeat[50]; if(!this->beginTransaction("r+")) { Dmsg0(50, "Could not start transaction for writeSettings()\n"); success = false; goto bail_out; } spoolDir = rec.getSpoolDir(); if 
(spoolDir == NULL) { spoolDir = ""; } edit_int64(rec.heartbeat, heartbeat); edit_int64(rec.journalVersion, jversion); rc = fprintf(_fp, "Settings {\n" "spooldir=%s\n" "heartbeat=%s\n" "jversion=%s\n" "}\n", spoolDir, heartbeat, jversion); if(rc < 0) { success = false; Dmsg1(50, "(ERROR) Could not write SettingsRecord. RC=%d\n", rc); goto bail_out; } Dmsg3(DBGLVL, "WROTE RECORD:\n" " Settings {\n" " spooldir=%s\n" " heartbeat=%s\n" " jversion=%s\n" " }\n", spoolDir, heartbeat, jversion); bail_out: this->endTransaction(); return success; } SettingsRecord *Journal::readSettings() { const int SANITY_CHECK = 10000; bool corrupted = false; char tmp[SANITY_CHECK]; char jversion[SANITY_CHECK]; char heartbeat[SANITY_CHECK]; char spoolpath[SANITY_CHECK]; char *jvstr = NULL; char *hbstr = NULL; SettingsRecord *rec = NULL; if(!this->beginTransaction("r+")) { Dmsg0(10, "Could not start transaction for readSettings()\n"); goto bail_out; } //reads line "Settings {\n" if(!bfgets(tmp, SANITY_CHECK, _fp)) { corrupted = true; goto bail_out; } rec = new SettingsRecord(); //reads Spool Dir if(!bfgets(spoolpath, SANITY_CHECK, _fp)) { corrupted = true; goto bail_out; } rec->setSpoolDir(extract_val(spoolpath)); if(rec->getSpoolDir() == NULL) { corrupted = true; goto bail_out; } //reads Heartbeat if(!bfgets(heartbeat, SANITY_CHECK, _fp)) { corrupted = true; goto bail_out; } hbstr = extract_val(heartbeat); if(hbstr == NULL) { corrupted = true; goto bail_out; } rec->heartbeat = atoi(hbstr); //reads Journal Version if(!bfgets(jversion, SANITY_CHECK, _fp)) { corrupted = true; goto bail_out; } jvstr = extract_val(jversion); if(jvstr == NULL) { corrupted = true; goto bail_out; } rec->journalVersion = atoi(jvstr); //reads line "}\n" if(!bfgets(tmp, SANITY_CHECK, _fp)) { corrupted = true; goto bail_out; } Dmsg3(DBGLVL, "READ RECORD:\n" " Settings {\n" " spooldir=%s\n" " heartbeat=%s\n" " jversion=%s\n" " }\n", rec->getSpoolDir(), hbstr, jvstr); bail_out: if(jvstr != NULL) { free(jvstr); } if(hbstr != NULL) { free(hbstr); } if(rec != NULL && rec->getSpoolDir() != NULL && strcmp(rec->getSpoolDir(), "") == 0) { free(rec->getSpoolDir()); rec->setSpoolDir(NULL); } if(corrupted) { Dmsg0(10, "Could not read Settings Record. Journal is Corrupted.\n"); if(rec != NULL) { delete rec; rec = NULL; } } this->endTransaction(); return rec; } bool Journal::writeFileRecord(const FileRecord &record) { int rc; bool success = true; char mtime_str[50]; if(!this->beginTransaction("a")) { success = false; Dmsg0(10, "Could not start transaction for writeFileRecord()\n"); goto bail_out; } edit_int64(record.mtime, mtime_str); rc = fprintf(_fp, "File {\n" "name=%s\n" "sname=%s\n" "mtime=%s\n" "attrs=%s\n" "}\n", record.name, record.sname, mtime_str, record.fattrs); if(rc < 0) { success = false; Dmsg1(50, "(ERROR) Could not write FileRecord. 
RC=%d\n", rc); goto bail_out; } Dmsg4(DBGLVL, "NEW RECORD:\n" " File {\n" " name=%s\n" " sname=%s\n" " mtime=%s" " attrs=%s\n" " }\n", record.name, record.sname, mtime_str, record.fattrs); bail_out: this->endTransaction(); return success; } FileRecord *Journal::readFileRecord() { const int SANITY_CHECK = 10000; bool corrupted = false; char tmp[SANITY_CHECK]; char fname[SANITY_CHECK]; char sname[SANITY_CHECK]; char fattrs[SANITY_CHECK]; char mtime[SANITY_CHECK]; char *mstr = NULL; FileRecord *rec = NULL; if(!hasTransaction) { Dmsg0(10, "(ERROR) Journal::readFileRecord() called without any transaction\n"); goto bail_out; } //Reads lines until it finds a FileRecord, or until EOF for(;;) { if(!bfgets(tmp, SANITY_CHECK, _fp)) { goto bail_out; } if(strstr(tmp, "File {\n") != NULL) { break; } } rec = new FileRecord(); //reads filename if(!bfgets(fname, SANITY_CHECK, _fp)) { corrupted = true; goto bail_out; } rec->name = extract_val(fname); if(rec->name == NULL) { corrupted = true; goto bail_out; } //reads spoolname if(!bfgets(sname, SANITY_CHECK, _fp)) { corrupted = true; goto bail_out; } rec->sname = extract_val(sname); if(rec->sname == NULL) { corrupted = true; goto bail_out; } //reads mtime if(!bfgets(mtime, SANITY_CHECK, _fp)) { corrupted = true; goto bail_out; } mstr = extract_val(mtime); if(mstr == NULL) { corrupted = true; goto bail_out; } rec->mtime = atoi(mstr); //reads encoded attributes if(!bfgets(fattrs, SANITY_CHECK, _fp)) { corrupted = true; goto bail_out; } rec->fattrs = extract_val(fattrs); if(rec->fattrs == NULL) { corrupted = true; goto bail_out; } Dmsg4(DBGLVL, "READ RECORD:\n" " File {\n" " name=%s\n" " sname=%s\n" " mtime=%s\n" " attrs=%s\n" " }\n", rec->name, rec->sname, mstr, rec->fattrs); //reads line "}\n" if(!bfgets(tmp, SANITY_CHECK, _fp)) { corrupted = true; goto bail_out; } bail_out: if(mstr != NULL) { free(mstr); } if(corrupted) { Dmsg0(10, "Could not read File Record. Journal is Corrupted.\n"); if(rec != NULL) { delete rec; rec = NULL; } } return rec; } bool Journal::writeFolderRecord(const FolderRecord &record) { int rc; bool success = true; if(!this->beginTransaction("a")) { success = false; Dmsg0(10, "Could not start transaction for writeFileRecord()\n"); goto bail_out; } rc = fprintf(_fp, "Folder {\n" "path=%s\n" "}\n", record.path); if(rc < 0) { success = false; Dmsg1(10, "(ERROR) Could not write FolderRecord. RC=%d\n", rc); goto bail_out; } Dmsg1(DBGLVL, "NEW RECORD:\n" " Folder {\n" " path=%s\n" " }\n", record.path); bail_out: this->endTransaction(); return success; } FolderRecord *Journal::readFolderRecord() { const int SANITY_CHECK = 10000; bool corrupted = false; char tmp[SANITY_CHECK]; char path[SANITY_CHECK]; FolderRecord *rec = NULL; if(!hasTransaction) { Dmsg0(10, "(ERROR) Journal::readFolderRecord() called without any transaction\n"); goto bail_out; } //Reads lines until it finds a FolderRecord, or until EOF for(;;) { if(!bfgets(tmp, SANITY_CHECK, _fp)) { //No need to set 'corrupted = true' here goto bail_out; } if(strstr(tmp, "Folder {\n") != NULL) { break; } } rec = new FolderRecord(); //reads folder path if(!bfgets(path, SANITY_CHECK, _fp)) { corrupted = true; goto bail_out; } rec->path = extract_val(path); if(rec->path == NULL) { corrupted = true; goto bail_out; } Dmsg1(DBGLVL, "READ RECORD:\n" " Folder {\n" " path=%s\n" " }\n", rec->path); //reads line "}\n" if(!bfgets(tmp, SANITY_CHECK, _fp)) { corrupted = true; goto bail_out; } bail_out: if(corrupted) { Dmsg0(10, "Could not read FolderRecord. 
Journal is Corrupted.\n"); if(rec != NULL) { delete rec; rec = NULL; } } return rec; } bool Journal::removeFolderRecord(const char* folder) { bool success = false; const int SANITY_CHECK = 10000; char path[SANITY_CHECK]; char tmp[SANITY_CHECK]; char *recPath; FILE *tmpFp = NULL; int rc; POOL_MEM tmp_jPath; Mmsg(tmp_jPath, "%s.temp", _jPath); if(!this->beginTransaction("r")) { goto bail_out; } tmpFp = bfopen(tmp_jPath.c_str(), "w"); if(tmpFp == NULL) { goto bail_out; } for(;;) { if(!bfgets(tmp, SANITY_CHECK, _fp)) { goto bail_out; } if(strstr(tmp, "Folder {\n") != NULL) { //reads folder path if(!bfgets(path, SANITY_CHECK, _fp)) { goto bail_out; } recPath = extract_val(path); if(recPath == NULL) { goto bail_out; } //reads line "}\n" if(!bfgets(tmp, SANITY_CHECK, _fp)) { goto bail_out; } if(bstrcmp(folder, recPath) == 0) { //Didn't found the Record that needs to be removed rc = fprintf(tmpFp, "Folder {\n" "path=%s\n" "}\n", recPath); if(rc < 0) { goto bail_out; } } else { //Found the Folder Record that needs to be removed success = true; } } else { fprintf(tmpFp, "%s", tmp); } } bail_out: if(tmpFp != NULL) { fclose(tmpFp); } if(success) { fclose(_fp); _fp = NULL; unlink(_jPath); rc = rename(tmp_jPath.c_str(), _jPath); if(rc != 0) { Dmsg0(10, "Could not rename TMP Journal\n"); } } this->endTransaction(); return success; } bool Journal::migrateTo(const char *newPath) { bool success = true; const int SANITY_CHECK = 10000; char tmp[SANITY_CHECK]; FILE *tmpFp = NULL; FILE *newFp = NULL; int rc; POOLMEM *tmp_jPath = get_pool_memory(PM_FNAME); Mmsg(tmp_jPath, "%s.temp", newPath); if(!this->beginTransaction("r")) { success = false; goto bail_out; } Dmsg2(DBGLVL, "Migrating Journal %s to %s...\n", _jPath, newPath); tmpFp = bfopen(tmp_jPath, "w"); newFp = bfopen(newPath, "w"); if (tmpFp == NULL) { Dmsg1(10, "Could not bfopen %s. Aborting migration.\n", tmp_jPath); success = false; goto bail_out; } if (newFp == NULL) { Dmsg1(10, "Could not bfopen %s. Aborting migration.\n", newPath); success = false; goto bail_out; } // Migrate everything to the new Journal ('newFp') // Remove FileRecords from the old Journal // by not saving them in 'tmpFp' for(;;) { if(!bfgets(tmp, SANITY_CHECK, _fp)) { break; } if(strstr(tmp, "File {") != NULL) { //Found a FileRecord //Write it only in the New Jornal fprintf(newFp, "%s", tmp); for(int i = 0; i < 5; i++) { if(!bfgets(tmp, SANITY_CHECK, _fp)) { //Found a corrupted FileRecord Dmsg0(10, "Found a corrupt FileRecord. Canceling Migration"); success = false; goto bail_out; } fprintf(newFp, "%s", tmp); } } else { fprintf(newFp, "%s", tmp); fprintf(tmpFp, "%s", tmp); } } bail_out: if(newFp != NULL) { fclose(newFp); } if(tmpFp != NULL) { fclose(tmpFp); } if(success) { fclose(_fp); _fp = NULL; unlink(_jPath); rc = rename(tmp_jPath, _jPath); if(rc != 0) { Dmsg0(10, "Could not rename TMP Journal\n"); } free(_jPath); _jPath = bstrdup(newPath); Dmsg0(DBGLVL, "Journal migration completed\n"); } free_and_null_pool_memory(tmp_jPath); this->endTransaction(); return success; } bacula-15.0.3/src/plugins/fd/settings-record.h0000644000175000017500000000234114771010173021010 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. 
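
   For reference, the plain-text layout that the Journal read/write methods
   above produce and parse looks like the following (all values are made up;
   the keys and the record framing are taken from the fprintf() format
   strings in journal.c):

      Settings {
      spooldir=/path/to/spool
      heartbeat=0
      jversion=1
      }
      Folder {
      path=/home/user/Documents
      }
      File {
      name=/home/user/Documents/report.txt
      sname=/path/to/spool/report.txt.20250101_120000
      mtime=1735732800
      attrs=<output of encode_stat()>
      }

   Records are appended under an exclusive flock() held for the duration of
   a transaction, and readers simply scan forward for the next "File {" or
   "Folder {" line.
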
You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #ifndef settingsrecord_H #define settingsrecord_H #include "bacula.h" #include /** * @brief Data that is saved and retrieved by using the @class Journal */ class SettingsRecord { private: char *spoolDir; public: int64_t heartbeat; int64_t journalVersion; const char *getSpoolDir() { return spoolDir; } void setSpoolDir(const char *sdir) { if (sdir == NULL) { return; } spoolDir = bstrdup(sdir); } SettingsRecord(): spoolDir(NULL), heartbeat(-1), journalVersion(-1) {} ~SettingsRecord() {} }; #endif bacula-15.0.3/src/plugins/fd/Makefile.inc.in0000644000175000017500000000672014771010173020345 0ustar bsbuildbsbuild# # Makefile for building FD plugins PluginLibrary for Bacula # # Copyright (C) 2000-2023 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # # Author: Radoslaw Korzeniewski, radoslaw@korzeniewski.net # @MCOMMON@ .PHONY: all first_rule: all topdir = @BUILD_DIR@ working_dir = @working_dir@ LIBTOOL_CLEAN_TARGET = @LIBTOOL_CLEAN_TARGET@ LIBTOOL_UNINSTALL_TARGET = @LIBTOOL_UNINSTALL_TARGET@ SYSCONFDIR=@sysconfdir@ _GIT := $(shell eval $(topdir)/scripts/getgitcommit) VERSIONGIT = "/git-$(_GIT)" SYBASE_TMPDIR = @sybase_tmpdir@ SYBASE_CONFIG = @sysconfdir@/sbt.conf SBTCONFIG = @sysconfdir@/sbt.conf ORACLE_TMPDIR = @oracle_tmpdir@ SAP_TMPDIR = @sap_tmpdir@ RMAN_SCRIPT_DIR = @sysconfdir@ RSYNC_INC = @RSYNC_INC@ RSYNC_LIBS = @RSYNC_LIBS@ LDAP_LIBS = @LDAP_LIBS@ LDAP_LDFLAGS = @LDAP_LDFLAGS@ LDAP_INC = @LDAP_INC@ FD_LDAP_TARGET = @FD_LDAP_TARGET@ FD_LDAP_TARGET_INSTALL = @FD_LDAP_TARGET_INSTALL@ SRCDIR = $(topdir)/src FDDIR = $(SRCDIR)/filed LIBDIR = $(SRCDIR)/lib FINDLIBDIR = $(SRCDIR)/findlib FDPLUGDIR = $(SRCDIR)/plugins/fd PLUGINLIBDIR = $(FDPLUGDIR)/pluginlib UNITTESTSOBJ = $(LIBDIR)/unittests.lo INIOBJ = $(LIBDIR)/ini.lo LIBBACOBJ = $(LIBDIR)/libbac.la LIBBACCFGOBJ = $(LIBDIR)/libbaccfg.la INIOBJ = $(LIBDIR)/ini.lo METAPLUGINOBJ = $(PLUGINLIBOBJ) $(PLUGINLIBDIR)/ptcomm.lo $(PLUGINLIBDIR)/metaplugin.lo $(PLUGINLIBDIR)/metaplugin_attributes.lo $(PLUGINLIBDIR)/metaplugin_accurate.lo $(PLUGINLIBDIR)/metaplugin_metadata.lo PLUGINLIBOBJ = $(PLUGINLIBDIR)/pluginlib.lo PLUGINBASE = $(PLUGINLIBOBJ) $(PLUGINLIBDIR)/pluginclass.lo $(PLUGINLIBDIR)/pluginbase.lo $(PLUGINLIBDIR)/pluginctx.lo EXTRA_INSTALL_TARGET = @FD_PLUGIN_INSTALL@ .SUFFIXES: .c .cpp .lo $(UNITTESTSOBJ): $(MAKE) -C $(LIBDIR) unittests.lo $(INIOBJ): $(MAKE) -C $(LIBDIR) ini.lo $(LIBBACOBJ): $(MAKE) -C $(LIBDIR) libbac.la $(LIBBACCFGOBJ): $(MAKE) -C $(LIBDIR) libbaccfg.la $(PLUGINLIBDIR)/test_metaplugin_backend.lo: $(MAKE) -C $(PLUGINLIBDIR) test_metaplugin_backend.lo $(PLUGINLIBDIR)/pluginlib.lo: $(PLUGINLIBDIR)/pluginlib.cpp $(PLUGINLIBDIR)/pluginlib.h $(MAKE) -C $(PLUGINLIBDIR) pluginlib.lo $(PLUGINLIBDIR)/ptcomm.lo: $(PLUGINLIBDIR)/ptcomm.cpp $(PLUGINLIBDIR)/ptcomm.h $(MAKE) -C $(PLUGINLIBDIR) ptcomm.lo $(PLUGINLIBDIR)/metaplugin.lo: $(PLUGINLIBDIR)/metaplugin.cpp $(PLUGINLIBDIR)/metaplugin.h $(MAKE) -C $(PLUGINLIBDIR) metaplugin.lo $(PLUGINLIBDIR)/metaplugin_attributes.lo: $(PLUGINLIBDIR)/metaplugin_attributes.cpp $(PLUGINLIBDIR)/metaplugin_attributes.h $(MAKE) -C $(PLUGINLIBDIR) metaplugin_attributes.lo $(PLUGINLIBDIR)/metaplugin_accurate.lo: 
$(PLUGINLIBDIR)/metaplugin_accurate.cpp $(PLUGINLIBDIR)/metaplugin_accurate.h $(MAKE) -C $(PLUGINLIBDIR) metaplugin_accurate.lo $(PLUGINLIBDIR)/metaplugin_metadata.lo: $(PLUGINLIBDIR)/metaplugin_metadata.cpp $(PLUGINLIBDIR)/metaplugin_metadata.h $(MAKE) -C $(PLUGINLIBDIR) metaplugin_metadata.lo $(PLUGINLIBDIR)/iso8601.lo: $(PLUGINLIBDIR)/iso8601.cpp $(PLUGINLIBDIR)/iso8601.h $(MAKE) -C $(PLUGINLIBDIR) iso8601.lo $(PLUGINLIBDIR)/execprog.lo: $(PLUGINLIBDIR)/execprog.cpp $(PLUGINLIBDIR)/execprog.h $(MAKE) -C $(PLUGINLIBDIR) execprog.lo $(PLUGINLIBDIR)/pluginclass.lo: $(PLUGINLIBDIR)/pluginclass.cpp $(PLUGINLIBDIR)/pluginclass.h $(MAKE) -C $(PLUGINLIBDIR) pluginclass.lo $(PLUGINLIBDIR)/pluginbase.lo: $(PLUGINLIBDIR)/pluginbase.cpp $(PLUGINLIBDIR)/pluginbase.h $(MAKE) -C $(PLUGINLIBDIR) pluginbase.lo $(PLUGINLIBDIR)/pluginctx.lo: $(PLUGINLIBDIR)/pluginctx.cpp $(PLUGINLIBDIR)/pluginctx.h $(MAKE) -C $(PLUGINLIBDIR) pluginctx.lo bacula-15.0.3/src/plugins/fd/antivirus-fd.c0000644000175000017500000003101714771010173020304 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * A Verify Data plugin to check antivirus signature */ #define BUILD_PLUGIN #include "bacula.h" #include "fd_plugins.h" #define USE_CMD_PARSER #include "fd_common.h" #include "findlib/bfile.h" /* for bopen/bclose/bread/bwrite */ #include "lib/smartall.h" #ifdef __cplusplus extern "C" { #endif static const int dbglvl = 100; static char *working; #define PLUGIN_LICENSE "AGPLv3" #define PLUGIN_AUTHOR "Eric Bollengier" #define PLUGIN_DATE "September 2021" #define PLUGIN_VERSION "1" #define PLUGIN_DESCRIPTION "Bacula Antivirus Plugin" /* Forward referenced functions */ static bRC newPlugin(bpContext *ctx); static bRC freePlugin(bpContext *ctx); static bRC getPluginValue(bpContext *ctx, pVariable var, void *value); static bRC setPluginValue(bpContext *ctx, pVariable var, void *value); static bRC handlePluginEvent(bpContext *ctx, bEvent *event, void *value); // static bRC startBackupFile(bpContext *ctx, struct save_pkt *sp); // static bRC endBackupFile(bpContext *ctx); static bRC pluginIO(bpContext *ctx, struct io_pkt *io); // static bRC startRestoreFile(bpContext *ctx, const char *cmd); // static bRC endRestoreFile(bpContext *ctx); static bRC startVerifyFile(bpContext *ctx, struct restore_pkt *rp); static bRC endVerifyFile(bpContext *ctx); static bRC setFileAttributes(bpContext *ctx, struct restore_pkt *rp); /* Pointers to Bacula functions */ static bFuncs *bfuncs = NULL; static bInfo *binfo = NULL; /* Plugin Information block */ static pInfo pluginInfo = { sizeof(pluginInfo), FD_PLUGIN_INTERFACE_VERSION, FD_PLUGIN_MAGIC, PLUGIN_LICENSE, PLUGIN_AUTHOR, PLUGIN_DATE, PLUGIN_VERSION, PLUGIN_DESCRIPTION }; /* Plugin entry points for Bacula */ static pFuncs pluginFuncs = { sizeof(pluginFuncs), FD_PLUGIN_INTERFACE_VERSION, /* Entry points into plugin */ newPlugin, /* new plugin instance */ freePlugin, /* free plugin instance */ getPluginValue, setPluginValue, handlePluginEvent, NULL, 
// startBackupFile NULL, // endBackupFile NULL, // startRestoreFile NULL, // endRestoreFile pluginIO, NULL, // createFile setFileAttributes, NULL, // checkFile NULL, // handleXACLdata NULL, // restoreFileList NULL, // checkStream NULL, // queryParameter NULL, // metadataRestore startVerifyFile, endVerifyFile }; #define get_self(x) (((x)==NULL)?NULL:(antivirus*)((x)->pContext)) /* Plugin instance class */ class antivirus: public cmd_parser, public SMARTALLOC { private: bpContext *ctx; public: BSOCKCORE *bs; POOLMEM *fname; char *hostname; int port; antivirus(bpContext *bpc): cmd_parser(), ctx(bpc), bs(NULL), fname(NULL), hostname(NULL), port(3310) { }; virtual ~antivirus() { free_and_null_pool_memory(fname); }; /* Wait to be called to allocate memory */ void init_mem() { fname = get_pool_memory(PM_FNAME); }; void report_virus(const char *fname, const char *msg); }; void antivirus::report_virus(const char *name, const char *msg) { fileevent_pkt event; Jmsg(ctx, M_ERROR, "%s Virus detected \"%s\"\n", name, msg); bfuncs->getBaculaValue(ctx, bVarFileIndex, &event.FileIndex); event.Severity = 100; event.Type = FILEEVENT_TYPE_ANTIVIRUS; bstrncpy(event.Source, "Clamav", sizeof(event.Source)); bstrncpy(event.Description, msg, sizeof(event.Description)); bfuncs->AddFileEvent(ctx, &event); } /* * loadPlugin() and unloadPlugin() are entry points that are * exported, so Bacula can directly call these two entry points * they are common to all Bacula plugins. */ /* * External entry point called by Bacula to "load the plugin */ bRC DLL_IMP_EXP loadPlugin(bInfo *lbinfo, bFuncs *lbfuncs, pInfo **pinfo, pFuncs **pfuncs) { bfuncs = lbfuncs; /* set Bacula funct pointers */ binfo = lbinfo; *pinfo = &pluginInfo; /* return pointer to our info */ *pfuncs = &pluginFuncs; /* return pointer to our functions */ bfuncs->getBaculaValue(NULL, bVarWorkingDir, (void *)&working); return bRC_OK; } /* * External entry point to unload the plugin */ bRC DLL_IMP_EXP unloadPlugin() { // Dmsg(NULL, dbglvl, "Unloaded\n"); return bRC_OK; } /* * The following entry points are accessed through the function * pointers we supplied to Bacula. Each plugin type (dir, fd, sd) * has its own set of entry points that the plugin must define. */ /* * Create a new instance of the plugin i.e. allocate our private storage */ static bRC newPlugin(bpContext *ctx) { antivirus *self = New(antivirus(ctx)); if (!self) { return bRC_Error; } ctx->pContext = (void *)self; /* set our context pointer */ return bRC_OK; } /* * Free a plugin instance, i.e. release our private storage */ static bRC freePlugin(bpContext *ctx) { if (!ctx) { return bRC_Error; } antivirus *self = get_self(ctx); if (!self) { return bRC_Error; } delete self; return bRC_OK; } /* * Return some plugin value (none defined) */ static bRC getPluginValue(bpContext *ctx, pVariable var, void *value) { return bRC_OK; } /* * Set a plugin value (none defined) */ static bRC setPluginValue(bpContext *ctx, pVariable var, void *value) { return bRC_OK; } /* * Handle an event that was generated in Bacula */ static bRC handlePluginEvent(bpContext *ctx, bEvent *event, void *value) { int i=0; if (!ctx) { return bRC_Error; } antivirus *self = get_self(ctx); if (!self) { return bRC_Error; } /* * Most events don't interest us so we ignore them. * the printfs are so that plugin writers can enable them to see * what is really going on. 
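 *
 * For this antivirus plugin the useful events are bEventPluginOptions,
 * where the plugin command is parsed for "hostname=..." and "port=..."
 * (defaults: localhost, 3310 - the usual clamd port) and the verify-stream
 * event is registered, and bEventStartVerifyJob, which allocates the
 * filename buffer.  Backup/restore events return bRC_Error because the
 * plugin is only meant for Verify Data jobs.  The actual scanning happens
 * in pluginIO() below: the file content is streamed to the daemon with the
 * ClamAV INSTREAM framing built by pack_chunk() (4-byte network-order
 * length prefix per chunk, a zero-length chunk to finish), and the reply
 * read at IO_CLOSE decides whether report_virus() is called.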
*/ switch (event->eventType) { case bEventPluginOptions: Jmsg(ctx, M_INFO, "Got plugin command = %s\n", (char *)value); self->parse_cmd((char *)value); bfuncs->registerBaculaEvents(ctx, bEventVerifyStream); if ((i =self->find_arg_with_value("hostname")) >= 1) { self->hostname = self->argv[i]; } else { self->hostname = (char *)"localhost"; } if ((i =self->find_arg_with_value("port")) >= 1) { self->port = atoi(self->argv[i]); } else { self->port = 3310; } Dmsg0(50, "Register event bEventVerifyStream\n"); break; case bEventVssPrepareSnapshot: break; case bEventJobStart: // Dmsg(ctx, dbglvl, "JobStart=%s\n", (char *)value); break; case bEventJobEnd: // Dmsg(ctx, dbglvl, "JobEnd\n"); break; case bEventLevel: Dmsg0(50, "Register event bEventVerifyStream\n"); break; case bEventSince: // Dmsg(ctx, dbglvl, "since=%d\n", (int)value); break; case bEventStartVerifyJob: self->init_mem(); break; case bEventHandleBackupFile: /* Fall-through wanted */ case bEventStartBackupJob: /* Fall-through wanted */ case bEventEndBackupJob: /* Fall-through wanted */ case bEventStartRestoreJob: /* Fall-through wanted */ case bEventEndRestoreJob: /* Fall-through wanted */ case bEventRestoreCommand: /* Fall-through wanted */ case bEventEstimateCommand: /* Fall-through wanted */ case bEventBackupCommand: /* Plugin = "antivirus" */ return bRC_Error; default: // Dmsg(ctx, dbglvl, "unknown event=%d\n", event->eventType); break; } return bRC_OK; } static bRC startVerifyFile(bpContext *ctx, struct restore_pkt *rp) { antivirus *self = get_self(ctx); pm_strcpy(self->fname, rp->ofname); return bRC_OK; } /* * Done with verify of this file */ static bRC endVerifyFile(bpContext *ctx) { return bRC_OK; } #define retry_interval 1 #define max_retry_time 3 #define heart_beat 100 #define socket_name "bacula_antivirus-fd" #define service NULL #define max_read_size 4096 POOLMEM *pack_chunk(POOLMEM *p, uint32_t count) { typedef struct { uint32_t size; char *buf; } chunkstruct; if (p) { chunkstruct *s = (chunkstruct*)(p - sizeof(uint32_t)); s->size = htonl(count); return (POOLMEM *)s; } return NULL; } /* * Bacula is calling us to do the actual I/O */ static bRC pluginIO(bpContext *ctx, struct io_pkt *io) { if (!ctx) { return bRC_Error; } antivirus *self = get_self(ctx); if (!self) { return bRC_Error; } io->status = 0; io->io_errno = 0; switch(io->func) { case IO_OPEN: Dmsg(ctx, dbglvl, "antivirus: Initialize antivirus\n"); self->bs = New(BSOCKCORE); if (!self->bs->connect(get_jcr_from_tsd(), retry_interval, max_retry_time, heart_beat, socket_name, const_cast(self->hostname), service, self->port, 0)) { /* connect failed*/ io->io_errno = errno; io->status = -1; Jmsg(ctx, M_ERROR, "Connection to %s:%d failed. fname=%s ERR=%s\n", self->hostname, self->port, self->fname, strerror(errno)); delete (self->bs); self->bs = NULL; return bRC_Error; } if (self->bs && self->bs->is_open()) { memset((void*)self->bs->msg, 0, strlen("zINSTREAM")+1); self->bs->msglen = pm_strcpy(self->bs->msg, "zINSTREAM"); self->bs->msglen++; if (! 
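                   /* send the NUL-terminated "zINSTREAM" command (msglen
                    * includes the trailing 0) before any data chunk; the
                    * chunks themselves are framed by pack_chunk() above */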
self->bs->send()) { Jmsg(ctx, M_ERROR, "INSTREAM failed: fname=%s ERR=%s\n", self->fname, strerror(errno)); delete (self->bs); self->bs = NULL; return bRC_Error; } } break; case IO_READ: return bRC_Error; case IO_WRITE: Dmsg(ctx, dbglvl, "antivirus: writing\n"); if (self->bs && self->bs->is_open()) { //Dmsg(ctx, dbglvl, "verifyplug: writing\n"); POOLMEM *p = pack_chunk(io->buf, io->count); //Dmsg(ctx, dbglvl, "verifyplug: pack_chunk write %d %s\nOuput :%s",io->count, io->buf, p); if (!self->bs->send2(p, io->count+sizeof(uint32_t))) { Jmsg(ctx, M_ERROR, "INSTREAM WRITE failed: fname=%s ERR=%s\n", self->fname, strerror(errno)); delete (self->bs); self->bs = NULL; return bRC_Error; } } break; /* Cleanup things during close */ case IO_CLOSE: io->status = 0; if (self->bs && self->bs->is_open()) { //Dmsg(ctx, dbglvl, "verifyplug: close\n"); memset(self->bs->msg, 0, sizeof(uint32_t)); self->bs->msglen = 0; POOLMEM *p = pack_chunk(self->bs->msg, self->bs->msglen); //Dmsg(ctx, dbglvl, "verifyplug: pack_chunk close %d %s\nOuput :%s",io->count, io->buf, p); if (!self->bs->send2(p, self->bs->msglen+sizeof(uint32_t))) { Jmsg(ctx, M_ERROR, "INSTREAM CLOSE failed: fname=%s ERR=%s\n", self->fname, strerror(errno)); delete (self->bs); self->bs = NULL; return bRC_Error; } /* read the reply */ self->bs->msg[0] = 0; self->bs->msglen = self->bs->socketRead(self->bs->m_fd, self->bs->msg, max_read_size); if (self->bs->msglen > 0) { self->bs->msg[self->bs->msglen] = 0; if (strstr(self->bs->msg, "OK")) { Dmsg(ctx, dbglvl, "%s %s\n", self->fname, self->bs->msg); } else { self->report_virus(self->fname, self->bs->msg); } } self->bs->close(); delete (self->bs); } break; case IO_SEEK: /* Seek not needed for this plugin, we don't use real sparse file */ io->status = 0; break; } return bRC_OK; } /* * We will get here if the File is a directory after everything * is written in the directory. */ static bRC setFileAttributes(bpContext *ctx, struct restore_pkt *rp) { // Dmsg(ctx, dbglvl, "setFileAttributes\n"); return bRC_Error; } #ifdef __cplusplus } #endif bacula-15.0.3/src/plugins/fd/kubernetes-fd.c0000644000175000017500000000440214771010173020425 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /** * @file kubernetes-fd.c * @author Radosław Korzeniewski (radoslaw@korzeniewski.net) * Modified by: Francisco Manuel Garcia Botella (francisco.garcia@baculasystems.com) * @brief This is a Bacula Kubernetes Plugin with metaplugin interface. * @version 2.2.0 * @date 2024-02-20 * * @copyright Copyright (c) 2021 All rights reserved. * IP transferred to Bacula Systems according to agreement. 
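 *
 * Note: this translation unit mostly provides compile-time constants
 * (plugin prefix and name, BACKEND_CMD, timeouts, metadata map, ...) that
 * are consumed by the generic metaplugin engine in pluginlib/metaplugin.*;
 * the Kubernetes-specific work is delegated to the external "k8s_backend"
 * program.  In a FileSet the plugin is selected roughly like this
 * (illustration only; see valid_params in kubernetes-fd.h for the options):
 *
 *   Plugin = "kubernetes: namespace=mynamespace"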
*/ #include "kubernetes-fd.h" /* Plugin Info definitions */ const char *PLUGIN_LICENSE = "Bacula AGPLv3"; const char *PLUGIN_AUTHOR = "Radoslaw Korzeniewski, Francisco Manuel Garcia Botella"; const char *PLUGIN_DATE = "February 2024"; const char *PLUGIN_VERSION = "2.2.0"; // TODO: should synchronize with kubernetes-fd.json const char *PLUGIN_DESCRIPTION = "Bacula Kubernetes Plugin"; /* Plugin compile time variables */ const char *PLUGINPREFIX = "kubernetes:"; const char *PLUGINNAME = "kubernetes"; const char *PLUGINNAMESPACE = "@kubernetes"; const bool CUSTOMNAMESPACE = true; const bool CUSTOMPREVJOBNAME = false; const char *PLUGINAPI = "3"; const char *BACKEND_CMD = "k8s_backend"; const int32_t CUSTOMCANCELSLEEP = 0; checkFile_t checkFile = NULL; const bool CORELOCALRESTORE = false; const bool ACCURATEPLUGINPARAMETER = true; const int ADDINCLUDESTRIPOPTION = 0; const bool DONOTSAVE_FT_PLUGIN_CONFIG = false; const uint32_t BACKEND_TIMEOUT = 0; // use default #ifdef DEVELOPER const metadataTypeMap plugin_metadata_map[] = {{"METADATA_STREAM", plugin_meta_blob}}; #else const metadataTypeMap plugin_metadata_map[] = {{NULL, plugin_meta_invalid}}; #endif bacula-15.0.3/src/plugins/fd/example-plugin-fd.c0000644000175000017500000002030214771010173021202 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
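 *
 * example-plugin-fd.c: a minimal skeleton File Daemon plugin.  Every entry
 * point is implemented as a stub that traces its arguments with printf(),
 * which makes the file a convenient starting point for new FD plugins.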
*/ #define BUILD_PLUGIN #define BUILDING_DLL /* required for Windows plugin */ #include "bacula.h" #include "fd_plugins.h" #ifdef __cplusplus extern "C" { #endif #define PLUGIN_LICENSE "AGPLv3" #define PLUGIN_AUTHOR "Your name" #define PLUGIN_DATE "January 2010" #define PLUGIN_VERSION "1" #define PLUGIN_DESCRIPTION "Test File Daemon Plugin" /* Forward referenced functions */ static bRC newPlugin(bpContext *ctx); static bRC freePlugin(bpContext *ctx); static bRC getPluginValue(bpContext *ctx, pVariable var, void *value); static bRC setPluginValue(bpContext *ctx, pVariable var, void *value); static bRC handlePluginEvent(bpContext *ctx, bEvent *event, void *value); static bRC startBackupFile(bpContext *ctx, struct save_pkt *sp); static bRC endBackupFile(bpContext *ctx); static bRC pluginIO(bpContext *ctx, struct io_pkt *io); static bRC startRestoreFile(bpContext *ctx, const char *cmd); static bRC endRestoreFile(bpContext *ctx); static bRC createFile(bpContext *ctx, struct restore_pkt *rp); static bRC setFileAttributes(bpContext *ctx, struct restore_pkt *rp); static bRC checkFile(bpContext *ctx, char *fname); static bRC handleXACLdata(bpContext *ctx, struct xacl_pkt *xacl); /* Pointers to Bacula functions */ static bFuncs *bfuncs = NULL; static bInfo *binfo = NULL; static pInfo pluginInfo = { sizeof(pluginInfo), FD_PLUGIN_INTERFACE_VERSION, FD_PLUGIN_MAGIC, PLUGIN_LICENSE, PLUGIN_AUTHOR, PLUGIN_DATE, PLUGIN_VERSION, PLUGIN_DESCRIPTION, }; static pFuncs pluginFuncs = { sizeof(pluginFuncs), FD_PLUGIN_INTERFACE_VERSION, /* Entry points into plugin */ newPlugin, /* new plugin instance */ freePlugin, /* free plugin instance */ getPluginValue, setPluginValue, handlePluginEvent, startBackupFile, endBackupFile, startRestoreFile, endRestoreFile, pluginIO, createFile, setFileAttributes, checkFile, handleXACLdata }; /* * Plugin called here when it is first loaded */ bRC DLL_IMP_EXP loadPlugin(bInfo *lbinfo, bFuncs *lbfuncs, pInfo **pinfo, pFuncs **pfuncs) { bfuncs = lbfuncs; /* set Bacula funct pointers */ binfo = lbinfo; printf("plugin: Loaded: size=%d version=%d\n", bfuncs->size, bfuncs->version); *pinfo = &pluginInfo; /* return pointer to our info */ *pfuncs = &pluginFuncs; /* return pointer to our functions */ return bRC_OK; } /* * Plugin called here when it is unloaded, normally when * Bacula is going to exit. */ bRC DLL_IMP_EXP unloadPlugin() { printf("plugin: Unloaded\n"); return bRC_OK; } /* * Called here to make a new instance of the plugin -- i.e. when * a new Job is started. There can be multiple instances of * each plugin that are running at the same time. Your * plugin instance must be thread safe and keep its own * local data. */ static bRC newPlugin(bpContext *ctx) { int JobId = 0; bfuncs->getBaculaValue(ctx, bVarJobId, (void *)&JobId); // printf("plugin: newPlugin JobId=%d\n", JobId); bfuncs->registerBaculaEvents(ctx, 1, 2, 0); return bRC_OK; } /* * Release everything concerning a particular instance of a * plugin. Normally called when the Job terminates. */ static bRC freePlugin(bpContext *ctx) { int JobId = 0; bfuncs->getBaculaValue(ctx, bVarJobId, (void *)&JobId); // printf("plugin: freePlugin JobId=%d\n", JobId); return bRC_OK; } /* * Called by core code to get a variable from the plugin. * Not currently used. */ static bRC getPluginValue(bpContext *ctx, pVariable var, void *value) { // printf("plugin: getPluginValue var=%d\n", var); return bRC_OK; } /* * Called by core code to set a plugin variable. * Not currently used. 
*/ static bRC setPluginValue(bpContext *ctx, pVariable var, void *value) { // printf("plugin: setPluginValue var=%d\n", var); return bRC_OK; } /* * Called by Bacula when there are certain events that the * plugin might want to know. The value depends on the * event. */ static bRC handlePluginEvent(bpContext *ctx, bEvent *event, void *value) { char *name; switch (event->eventType) { case bEventJobStart: printf("plugin: JobStart=%s\n", NPRT((char *)value)); break; case bEventJobEnd: printf("plugin: JobEnd\n"); break; case bEventStartBackupJob: printf("plugin: BackupStart\n"); break; case bEventEndBackupJob: printf("plugin: BackupEnd\n"); break; case bEventLevel: printf("plugin: JobLevel=%c %d\n", (int64_t)value, (int64_t)value); break; case bEventSince: printf("plugin: since=%d\n", (int64_t)value); break; case bEventStartRestoreJob: printf("plugin: StartRestoreJob\n"); break; case bEventEndRestoreJob: printf("plugin: EndRestoreJob\n"); break; /* Plugin command e.g. plugin = ::command */ case bEventRestoreCommand: printf("plugin: backup command=%s\n", NPRT((char *)value)); break; case bEventBackupCommand: printf("plugin: backup command=%s\n", NPRT((char *)value)); break; case bEventComponentInfo: printf("plugin: Component=%s\n", NPRT((char *)value)); break; default: printf("plugin: unknown event=%d\n", event->eventType); } bfuncs->getBaculaValue(ctx, bVarFDName, (void *)&name); // printf("FD Name=%s\n", name); // bfuncs->JobMessage(ctx, __FILE__, __LINE__, 1, 0, "JobMesssage message"); // bfuncs->DebugMessage(ctx, __FILE__, __LINE__, 1, "DebugMesssage message"); return bRC_OK; } /* * Called when starting to backup a file. Here the plugin must * return the "stat" packet for the directory/file and provide * certain information so that Bacula knows what the file is. * The plugin can create "Virtual" files by giving them a * name that is not normally found on the file system. */ static bRC startBackupFile(bpContext *ctx, struct save_pkt *sp) { return bRC_OK; } /* * Done backing up a file. */ static bRC endBackupFile(bpContext *ctx) { return bRC_OK; } /* * Do actual I/O. Bacula calls this after startBackupFile * or after startRestoreFile to do the actual file * input or output. */ static bRC pluginIO(bpContext *ctx, struct io_pkt *io) { io->status = 0; io->io_errno = 0; switch(io->func) { case IO_OPEN: printf("plugin: IO_OPEN\n"); break; case IO_READ: printf("plugin: IO_READ buf=%p len=%d\n", io->buf, io->count); break; case IO_WRITE: printf("plugin: IO_WRITE buf=%p len=%d\n", io->buf, io->count); break; case IO_CLOSE: printf("plugin: IO_CLOSE\n"); break; } return bRC_OK; } static bRC startRestoreFile(bpContext *ctx, const char *cmd) { return bRC_OK; } static bRC endRestoreFile(bpContext *ctx) { return bRC_OK; } /* * Called here to give the plugin the information needed to * re-create the file on a restore. It basically gets the * stat packet that was created during the backup phase. * This data is what is needed to create the file, but does * not contain actual file data. */ static bRC createFile(bpContext *ctx, struct restore_pkt *rp) { return bRC_OK; } /* * Called after the file has been restored. This can be used to * set directory permissions, ... 
*/ static bRC setFileAttributes(bpContext *ctx, struct restore_pkt *rp) { return bRC_OK; } /* When using Incremental dump, all previous dumps are necessary */ static bRC checkFile(bpContext *ctx, char *fname) { return bRC_OK; } /* * New Bacula Plugin API require this */ static bRC handleXACLdata(bpContext *ctx, struct xacl_pkt *xacl) { return bRC_OK; } #ifdef __cplusplus } #endif bacula-15.0.3/src/plugins/fd/test-plugin-fd.c0000644000175000017500000013413514771010173020540 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * A simple test plugin for the Bacula File Daemon derived from * the bpipe plugin, but used for testing new features. * * Kern Sibbald, October 2007 * */ #include "bacula.h" #include "fd_plugins.h" #include "lib/ini.h" #include "fileopts.h" #include #define USE_CMD_PARSER #include "fd_common.h" #define fi __FILE__ #define li __LINE__ #ifdef __cplusplus extern "C" { #endif static const int dbglvl = 000; #define PLUGIN_LICENSE "AGPLv3" #define PLUGIN_AUTHOR "Kern Sibbald" #define PLUGIN_DATE "September 2017" #define PLUGIN_VERSION "4" #define PLUGIN_DESCRIPTION "Bacula Test File Daemon Plugin" /* Forward referenced functions */ static bRC newPlugin(bpContext *ctx); static bRC freePlugin(bpContext *ctx); static bRC getPluginValue(bpContext *ctx, pVariable var, void *value); static bRC setPluginValue(bpContext *ctx, pVariable var, void *value); static bRC handlePluginEvent(bpContext *ctx, bEvent *event, void *value); static bRC startBackupFile(bpContext *ctx, struct save_pkt *sp); static bRC metadataRestore(bpContext *ctx, meta_pkt *mp); static bRC endBackupFile(bpContext *ctx); static bRC pluginIO(bpContext *ctx, struct io_pkt *io); static bRC startRestoreFile(bpContext *ctx, const char *cmd); static bRC endRestoreFile(bpContext *ctx); static bRC createFile(bpContext *ctx, struct restore_pkt *rp); static bRC setFileAttributes(bpContext *ctx, struct restore_pkt *rp); static bRC checkFile(bpContext *ctx, char *fname); static bRC restoreFileList(bpContext *ctx, struct restore_filelist_pkt *rp); static bRC handleXACLdata(bpContext *ctx, struct xacl_pkt *xacl); static bRC queryParameter(bpContext *ctx, struct query_pkt *qp); /* Pointers to Bacula functions */ static bFuncs *bfuncs = NULL; static bInfo *binfo = NULL; /* Plugin Information block */ static pInfo pluginInfo = { sizeof(pluginInfo), FD_PLUGIN_INTERFACE_VERSION, FD_PLUGIN_MAGIC, PLUGIN_LICENSE, PLUGIN_AUTHOR, PLUGIN_DATE, PLUGIN_VERSION, PLUGIN_DESCRIPTION }; /* Plugin entry points for Bacula */ static pFuncs pluginFuncs = { sizeof(pluginFuncs), FD_PLUGIN_INTERFACE_VERSION, /* Entry points into plugin */ newPlugin, /* new plugin instance */ freePlugin, /* free plugin instance */ getPluginValue, setPluginValue, handlePluginEvent, startBackupFile, endBackupFile, startRestoreFile, endRestoreFile, pluginIO, createFile, setFileAttributes, checkFile, handleXACLdata, restoreFileList, NULL, /* No checkStream */ queryParameter, metadataRestore }; static struct 
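/* Restore-options table used to exercise the FT_PLUGIN_CONFIG mechanism:
 * at backup time the table is serialized with ConfigFile::serialize() into
 * a restore object named INI_RESTORE_OBJECT_NAME, and at restore time the
 * bEventRestoreObject handler below parses it back and prints the values. */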
ini_items test_items[] = { // name handler comment required { "string1", ini_store_str, "Special String", 1}, { "string2", ini_store_str, "2nd String", 0}, { "ok", ini_store_bool, "boolean", 0}, { "req", ini_store_bool, "boolean", 1, "yes"}, { "list", ini_store_alist_str, "list", 0}, // We can also use the ITEMS_DEFAULT // { "ok", ini_store_bool, "boolean", 0, ITEMS_DEFAULT}, { NULL, NULL, NULL, 0} }; /* * Plugin private context */ struct plugin_ctx { boffset_t offset; FILE *fd; /* pipe file descriptor */ char *cmd; /* plugin command line */ char *fname; /* filename to "backup/restore" */ char *reader; /* reader program for backup */ char *writer; /* writer program for backup */ char where[512]; int replace; int nb_obj; /* Number of objects created */ int nb; /* used in queryParameter */ char *query_buf; /* buffer used to not loose memory */ plugin_metadata *meta_mgr; /* Metadata manager */ int job_level; /* current Job level */ POOLMEM *buf; /* store ConfigFile */ }; /* * loadPlugin() and unloadPlugin() are entry points that are * exported, so Bacula can directly call these two entry points * they are common to all Bacula plugins. */ /* * External entry point called by Bacula to "load the plugin */ bRC loadPlugin(bInfo *lbinfo, bFuncs *lbfuncs, pInfo **pinfo, pFuncs **pfuncs) { bfuncs = lbfuncs; /* set Bacula funct pointers */ binfo = lbinfo; *pinfo = &pluginInfo; /* return pointer to our info */ *pfuncs = &pluginFuncs; /* return pointer to our functions */ return bRC_OK; } /* * External entry point to unload the plugin */ bRC unloadPlugin() { // printf("test-plugin-fd: Unloaded\n"); return bRC_OK; } /* * The following entry points are accessed through the function * pointers we supplied to Bacula. Each plugin type (dir, fd, sd) * has its own set of entry points that the plugin must define. */ /* * Create a new instance of the plugin i.e. allocate our private storage */ static bRC newPlugin(bpContext *ctx) { struct plugin_ctx *p_ctx = (struct plugin_ctx *)malloc(sizeof(struct plugin_ctx)); if (!p_ctx) { return bRC_Error; } memset(p_ctx, 0, sizeof(struct plugin_ctx)); ctx->pContext = (void *)p_ctx; /* set our context pointer */ /* Create metadata manager class */ p_ctx->meta_mgr = New(plugin_metadata); return bRC_OK; } /* * Free a plugin instance, i.e. release our private storage */ static bRC freePlugin(bpContext *ctx) { struct plugin_ctx *p_ctx = (struct plugin_ctx *)ctx->pContext; if (!p_ctx) { return bRC_Error; } if (p_ctx->buf) { free_pool_memory(p_ctx->buf); } if (p_ctx->query_buf) { free(p_ctx->query_buf); } if (p_ctx->cmd) { free(p_ctx->cmd); /* free any allocated command string */ } if (p_ctx->meta_mgr) { delete p_ctx->meta_mgr; /* delete metadata manager object */ } free(p_ctx); /* free our private context */ ctx->pContext = NULL; return bRC_OK; } /* * Return some plugin value (none defined) */ static bRC getPluginValue(bpContext *ctx, pVariable var, void *value) { return bRC_OK; } /* * Set a plugin value (none defined) */ static bRC setPluginValue(bpContext *ctx, pVariable var, void *value) { return bRC_OK; } /* * Handle an event that was generated in Bacula */ static bRC handlePluginEvent(bpContext *ctx, bEvent *event, void *value) { struct plugin_ctx *p_ctx = (struct plugin_ctx *)ctx->pContext; restore_object_pkt *rop; if (!p_ctx) { return bRC_Error; } // char *name; /* * Most events don't interest us so we ignore them. * the printfs are so that plugin writers can enable them to see * what is really going on. 
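 *
 * Noteworthy cases below: bEventEndFileSet adds wild-card and exclude
 * options on top of the Director's FileSet, bEventRestoreObject writes
 * every received object to <working>/restore.N and re-parses the plugin
 * configuration object, and bEventBackupCommand splits the
 * "plugin:fname:reader:writer" command string into its four parts.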
*/ switch (event->eventType) { case bEventJobStart: bfuncs->DebugMessage(ctx, fi, li, dbglvl, "test-plugin-fd: JobStart=%s\n", (char *)value); break; case bEventLevel: p_ctx->job_level = (intptr_t)value; break; case bEventJobEnd: case bEventEndBackupJob: case bEventSince: case bEventStartRestoreJob: case bEventEndRestoreJob: break; /* End of Dir FileSet commands, now we can add excludes */ case bEventEndFileSet: bfuncs->NewOptions(ctx); bfuncs->AddWild(ctx, "*.c", ' '); bfuncs->AddWild(ctx, "*.cpp", ' '); bfuncs->AddOptions(ctx, "ei"); /* exclude, ignore case */ bfuncs->AddExclude(ctx, "/home/kern/bacula/regress/README"); break; case bEventStartBackupJob: break; case bEventRestoreObject: { printf("Plugin RestoreObject\n"); if (!value) { bfuncs->DebugMessage(ctx, fi, li, dbglvl, "test-plugin-fd: End restore objects\n"); break; } rop = (restore_object_pkt *)value; bfuncs->DebugMessage(ctx, fi, li, dbglvl, "Get RestoreObject len=%d JobId=%d oname=%s type=%d data=%.256s\n", rop->object_len, rop->JobId, rop->object_name, rop->object_type, rop->object); FILE *fp; POOLMEM *q; char *working; static int _nb=0; q = get_pool_memory(PM_FNAME); bfuncs->getBaculaValue(ctx, bVarWorkingDir, &working); Mmsg(q, "%s/restore.%d", working, _nb++); if ((fp = bfopen(q, "w")) != NULL) { fwrite(rop->object, rop->object_len, 1, fp); fclose(fp); } free_pool_memory(q); if (!strcmp(rop->object_name, INI_RESTORE_OBJECT_NAME)) { ConfigFile ini; if (!ini.dump_string(rop->object, rop->object_len)) { break; } ini.register_items(test_items, sizeof(struct ini_items)); if (ini.parse(ini.out_fname)) { bfuncs->JobMessage(ctx, fi, li, M_INFO, 0, "string1 = %s\n", ini.items[0].val.strval); if (ini.items[4].found) { POOL_MEM tmp; char *elt; foreach_alist(elt, ini.items[4].val.alistval) { pm_strcat(tmp, "["); pm_strcat(tmp, elt); pm_strcat(tmp, "]"); } bfuncs->JobMessage(ctx, fi, li, M_INFO, 0, "list = %s\n", tmp.c_str()); } } else { bfuncs->JobMessage(ctx, fi, li, M_ERROR, 0, "Can't parse config\n"); } } break; } /* Plugin command e.g. 
plugin = ::read command:write command */ case bEventRestoreCommand: /* Fall-through wanted */ case bEventEstimateCommand: /* Fall-through wanted */ case bEventBackupCommand: { char *p; bfuncs->DebugMessage(ctx, fi, li, dbglvl, "test-plugin-fd: pluginEvent cmd=%s\n", (char *)value); p_ctx->cmd = bstrdup((char *)value); p = strchr(p_ctx->cmd, ':'); if (!p) { bfuncs->JobMessage(ctx, fi, li, M_FATAL, 0, "Plugin terminator not found: %s\n", (char *)value); return bRC_Error; } *p++ = 0; /* terminate plugin */ p_ctx->fname = p; p = strchr(p, ':'); if (!p) { bfuncs->JobMessage(ctx, fi, li, M_FATAL, 0, "File terminator not found: %s\n", (char *)value); return bRC_Error; } *p++ = 0; /* terminate file */ p_ctx->reader = p; p = strchr(p, ':'); if (!p) { bfuncs->JobMessage(ctx, fi, li, M_FATAL, 0, "Reader terminator not found: %s\n", (char *)value); return bRC_Error; } *p++ = 0; /* terminate reader string */ p_ctx->writer = p; p_ctx->nb_obj = MIN(p_ctx->nb_obj, 3); printf("test-plugin-fd: plugin=%s fname=%s reader=%s writer=%s\n", p_ctx->cmd, p_ctx->fname, p_ctx->reader, p_ctx->writer); break; } case bEventPluginCommand: break; case bEventVssBeforeCloseRestore: break; case bEventComponentInfo: printf("plugin: Component=%s\n", NPRT((char *)value)); break; case bEventFeatures: *((const char **)value) = PLUGIN_FEATURE_RESTORELISTFILES; printf("test-plugin-fd: Send plugin feature\n"); break; default: printf("test-plugin-fd: unknown event=%d\n", event->eventType); break; } return bRC_OK; } static bRC metadataRestore(bpContext *ctx, struct meta_pkt *mp) { //TODO add proper handling instead of printing (i. e. add data buffer comparison) Dmsg1(0, "Restoring metadata of index: %d\n", mp->index); /*switch (mp->type) {*/ /*case plugin_meta_blob:*/ /*Dmsg0(0, "Restoring metadata of 'blob' type!\n");*/ /*Dmsg1(0, _("---- [pluginrestore] len: %lld\n"), mp->buf_len);*/ /*Dmsg2(0, _("---- [pluginrestore] buf: %.*s\n"), mp->buf_len, mp->buf);*/ /*break;*/ /*case plugin_meta_catalog_email:*/ /*Dmsg0(0, "Restoring metadata of 'email catalog' type!\n");*/ /*break;*/ /*default:*/ /*Dmsg1(0, "Invalid metadata type: %d!\n", mp->type);*/ /*break;*/ /*}*/ return bRC_OK; } /* * Start the backup of a specific file */ static bRC startBackupFile(bpContext *ctx, struct save_pkt *sp) { struct plugin_ctx *p_ctx = (struct plugin_ctx *)ctx->pContext; static int obj_uuid = 1; int jobid; bfuncs->getBaculaValue(ctx, bVarJobId, &jobid); if (!p_ctx) { return bRC_Error; } Dmsg1(0, "nb_obj = %d\n", p_ctx->nb_obj); if (p_ctx->job_level != 'F') { switch(p_ctx->nb_obj++) { case 0: sp->restore_obj.object_name = (char *)"james.xml"; sp->restore_obj.object = (char *)"This is test data for the restore object incr."; sp->restore_obj.object_len = strlen(sp->restore_obj.object)+1; sp->type = FT_RESTORE_FIRST; break; case 1: // The file is needed to reference the plugin object sp->type = FT_REG; sp->link = sp->fname = (char *)NT_("/@testplugin/test.zero"); // Use the filename in argument stat(p_ctx->reader, &sp->statp); break; case 2: sp->plugin_obj.path = (char *)NT_("/@testplugin/test.zero"); sp->plugin_obj.plugin_name = (char *)NT_("Test Plugin"); sp->plugin_obj.object_category = (char *)NT_("Virtual Machine"); sp->plugin_obj.object_type = (char *)NT_("VMWare"); sp->plugin_obj.object_name = (char *)NT_("test vm"); sp->plugin_obj.object_source = (char *)NT_("test plugin source"); sp->plugin_obj.object_uuid = (char *)NT_("1234-abc-testplugin"); sp->plugin_obj.status = PLUG_OBJ_STATUS_TERMINATED; sp->plugin_obj.count = 1; sp->plugin_obj.object_size = 
jobid; sp->type = FT_PLUGIN_OBJECT; bfuncs->JobMessage(ctx, fi, li, M_INFO, 0, "Generating Plugin Object size=%lu\n", sp->plugin_obj.object_size); break; case 3: // New version of a file present in full job { time_t now = time(NULL); p_ctx->nb_obj++; sp->type = FT_REG; sp->fname = (char *)"/@size_update_file@"; sp->statp.st_mode = 0640; sp->statp.st_ctime = now; sp->statp.st_mtime = now; sp->statp.st_atime = now; sp->statp.st_size = -1; /* Size is unknown at the beginning, should be updated in the next step */ sp->statp.st_blksize = 4096; Dmsg0(0, "@size_update_file@ initial step\n"); } break; default: return bRC_Stop; } return bRC_OK; } if (p_ctx->nb_obj == 0) { sp->fname = (char *)"takeme.h"; bfuncs->DebugMessage(ctx, fi, li, dbglvl, "AcceptFile=%s = %d\n", sp->fname, bfuncs->AcceptFile(ctx, sp)); sp->fname = (char *)"/path/to/excludeme.o"; bfuncs->DebugMessage(ctx, fi, li, dbglvl, "AcceptFile=%s = %d\n", sp->fname, bfuncs->AcceptFile(ctx, sp)); sp->fname = (char *)"/path/to/excludeme.c"; bfuncs->DebugMessage(ctx, fi, li, dbglvl, "AcceptFile=%s = %d\n", sp->fname, bfuncs->AcceptFile(ctx, sp)); } if (p_ctx->nb_obj == 0) { sp->restore_obj.object_name = (char *)"james.xml"; sp->restore_obj.object = (char *)"This is test data for the restore object. " "garbage=aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" 
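            /* this block of a/b/c filler only serves to make the restore
             * object large; the "\0secret" suffix at the very end of the
             * literal is counted into object_len by hand (str + 0 + secret
             * + 0), apparently so the test can verify that bytes after the
             * first NUL survive the round trip */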
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" 
"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" 
"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" 
"cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" "\0secret"; sp->restore_obj.object_len = strlen(sp->restore_obj.object)+1+6+1; /* str + 0 + secret + 0 */ sp->type = FT_RESTORE_FIRST; static int _nb=0; POOLMEM *q = get_pool_memory(PM_FNAME); char *working; FILE *fp; bfuncs->getBaculaValue(ctx, bVarWorkingDir, &working); Mmsg(q, "%s/torestore.%d", working, _nb++); if ((fp = bfopen(q, "w")) != NULL) { fwrite(sp->restore_obj.object, sp->restore_obj.object_len, 1, fp); fclose(fp); } free_pool_memory(q); } else if (p_ctx->nb_obj == 1) { ConfigFile ini; p_ctx->buf = get_pool_memory(PM_BSOCK); ini.register_items(test_items, sizeof(struct ini_items)); sp->restore_obj.object_name = (char*)INI_RESTORE_OBJECT_NAME; sp->restore_obj.object_len = ini.serialize(&p_ctx->buf); sp->restore_obj.object = p_ctx->buf; sp->type = FT_PLUGIN_CONFIG; Dmsg1(0, "RestoreOptions=<%s>\n", p_ctx->buf); } else if (p_ctx->nb_obj == 2) { sp->flags |= FO_OFFSETS; sp->type = FT_REG; sp->link = sp->fname = (char *)"/@testplugin/test.zero"; Dmsg1(0, "Create virtual file with index information flags=%llx\n", sp->flags); } else if (p_ctx->nb_obj == 3) { sp->flags |= FO_OFFSETS; sp->type = FT_REG; sp->link = sp->fname = p_ctx->fname; } else if (p_ctx->nb_obj == 4) { sp->plugin_obj.path = (char *)NT_("/@testplugin/test.zero"); sp->plugin_obj.plugin_name = (char *)NT_("Test Plugin"); sp->plugin_obj.object_category = (char *)NT_("Virtual Machine"); sp->plugin_obj.object_type = (char *)NT_("VMWare"); sp->plugin_obj.object_name = (char *)NT_("test vm"); sp->plugin_obj.object_source = (char *)NT_("test plugin source"); sp->plugin_obj.object_uuid = (char *)NT_("1234-abc-testplugin"); sp->plugin_obj.status = PLUG_OBJ_STATUS_TERMINATED; sp->plugin_obj.count = 2; sp->plugin_obj.object_size = obj_uuid++; sp->type = FT_PLUGIN_OBJECT; p_ctx->nb_obj++; return bRC_OK; } else if (p_ctx->nb_obj == 5) { sp->plugin_obj.path = (char *)NT_("/@testplugin/test.zero"); sp->plugin_obj.plugin_name = (char *)NT_("Test Plugin"); sp->plugin_obj.object_category = (char *)NT_("Database"); sp->plugin_obj.object_type = (char *)NT_("PostgreSQL"); sp->plugin_obj.object_name = (char *)NT_("test db"); sp->plugin_obj.object_source = (char *)NT_("test plugin source"); sp->plugin_obj.object_uuid = (char *)NT_("5678-abc-testplugin"); sp->plugin_obj.status = PLUG_OBJ_STATUS_ERROR; sp->plugin_obj.object_size = obj_uuid++; sp->type = FT_PLUGIN_OBJECT; p_ctx->nb_obj++; return bRC_OK; } else if (p_ctx->nb_obj == 6) { sp->plugin_obj.path = (char *)NT_("/@testplugin/"); sp->plugin_obj.plugin_name = (char *)NT_("Test Plugin"); sp->plugin_obj.object_category = (char *)NT_("Database"); sp->plugin_obj.object_type = (char *)NT_("PostgreSQL"); sp->plugin_obj.object_name = (char *)NT_("everything"); sp->plugin_obj.object_source = (char *)NT_("test plugin source"); sp->plugin_obj.object_uuid = (char *)NT_("5678-abc-testplugin"); sp->plugin_obj.status = PLUG_OBJ_STATUS_TERMINATED; sp->plugin_obj.object_size = obj_uuid++; sp->type = FT_PLUGIN_OBJECT; p_ctx->nb_obj++; return bRC_OK; } else if (p_ctx->nb_obj == 7) { p_ctx->nb_obj++; if (p_ctx->job_level == 'F') { sp->type = FT_REG; sp->link = sp->fname = (char *)"/@testplugin/test1.zero"; 
stat("/etc/passwd", &sp->statp); /* Assign some metadata for the fake file */ p_ctx->meta_mgr->reset(); const char* m1 = "{\ \"key1\": \"val1\", \ \"key2\": \"val2\", \ \"key3\": \"val3\" \ }"; /*TODO change payload to catalog packet when it's defined*/ const char *m2 = "{\n \"EmailBodyPreview\" : \"pretium sollicitudin vocent vulputate te semper partiendo mi cu consectetur antiopam commodo nominavi facilis iaculis montes veniam congue vitae eam ponderum idque doctus condimentum patrioque epicurei amet nominavi possit ancillae quo bibendum definition\",\n \"EmailCc\" : \"\",\n \"EmailConversationId\" : \"AAQkAGZmZjBlMjI0LTMxMmEtNDFkMi1hM2YxLWEzNjI5MjY4M2JkMQAQAE9i5o-JJZ5HvgNdGUT4uEA=\",\n \"EmailFolderName\" : \"jorgegea/users/jonis@jorgegea.onmicrosoft.com/email/REGRESS_20210915123756\",\n \"EmailFrom\" : \"eric@bacula\",\n \"EmailHasAttachment\" : 0,\n \"EmailId\" : \"AAMkAGZmZjBlMjI0LTMxMmEtNDFkMi1hM2YxLWEzNjI5MjY4M2JkMQBGAAAAAAChUr1sDFmcSYm7PK3nvLVxBwB-S4yOymgVRpR5CA4-eilAAABABKyCAAB-S4yOymgVRpR5CA4-eilAAABABPgVAAA=\",\n \"EmailImportance\" : \"NORMAL\",\n \"EmailInternetMessageId\" : \"\",\n \"EmailIsDraft\" : 0,\n \"EmailIsRead\" : 1,\n \"EmailTime\" : \"2021-09-15 12:39:19\",\n \"EmailOwner\" : \"xxxx\",\n \"EmailSize\" : 4096,\n \"EmailTenant\": \"xxxx\",\n \"EmailSubject\" : \"Elaboraret Tellus - t\",\n \"EmailTags\" : \"\",\n \"EmailTo\" : \"jorge@bacula\",\n \"Plugin\" : \"m365\",\n \"Type\" : \"EMAIL\",\n \"Version\" : 1\n }"; const char *m3 = "{\n \"AttachmentOwner\" : \"xxxx\", \"AttachmentTenant\" : \"xxxx\", \"AttachmentContentType\" : \"application/octet-stream\",\n \"AttachmentEmailId\" : \"AAMkAGZmZjBlMjI0LTMxMmEtNDFkMi1hM2YxLWEzNjI5MjY4M2JkMQBGAAAAAAChUr1sDFmcSYm7PK3nvLVxBwB-S4yOymgVRpR5CA4-eilAAABABKybAAB-S4yOymgVRpR5CA4-eilAAABABUnfAAA=\",\n \"AttachmentId\" : \"AAMkAGZmZjBlMjI0LTMxMmEtNDFkMi1hM2YxLWEzNjI5MjY4M2JkMQBGAAAAAAChUr1sDFmcSYm7PK3nvLVxBwB-S4yOymgVRpR5CA4-eilAAABABKybAAB-S4yOymgVRpR5CA4-eilAAABABUnfAAABEgAQAKT86cEi1S9PgA8I5xS0vKA=\",\n \"AttachmentIsInline\" : 0,\n \"AttachmentName\" : \"Ancillae.gen\",\n \"AttachmentSize\" : 81920,\n \"Plugin\" : \"m365\",\n \"Type\" : \"ATTACHMENT\",\n \"Version\" : 1\n}"; /* Send some huge packet to test the limits. * Currently max packet size is 3MB (it's size of buf and all of the meta_pkt fields being sent), * here we are sending 2,8MB just to be a bit below */ uint32_t m4_size = 2800000; void *m4 = malloc(m4_size); p_ctx->meta_mgr->add_packet(plugin_meta_blob, strlen(m1)+1, (void *)m1); p_ctx->meta_mgr->add_packet(plugin_meta_catalog_email, strlen(m2)+1, (void *)m2); p_ctx->meta_mgr->add_packet(plugin_meta_catalog_email, strlen(m3)+1, (void *)m3); p_ctx->meta_mgr->add_packet(plugin_meta_blob, m4_size, m4); sp->plug_meta = p_ctx->meta_mgr; Dmsg0(0, "Insert metadata!!!!!!\n"); return bRC_OK; } } else if (p_ctx->nb_obj == 8) { p_ctx->nb_obj++; sp->type = FT_REG; sp->link = sp->fname = (char *)"/@testplugin/test2.zero"; stat("/etc/passwd", &sp->statp); /* Assign some metadata for the fake file */ p_ctx->meta_mgr->reset(); /*TODO change payload to catalog packet when it's defined*/ const char *m2 = "{\n \"EmailBodyPreview\" : \"Hello John, say hello to Veronica. 
Best Regards\",\n \"EmailCc\" : \"veronica@gmail.com\",\n \"EmailConversationId\" : \"AAQkAGZmZjBlMjI0LTMxMmEtNDFkMi1hM2YxLWEzNjI5MjY4M2JkMQAQAE9i5o-JJZ5HvgNdGUTddEA=\",\n \"EmailFolderName\" : \"jorgegea/users/jonis@jorgegea.onmicrosoft.com/email/REGRESS_20210915123756\",\n \"EmailFrom\" : \"eric@bacula\",\n \"EmailHasAttachment\" : 1,\n \"EmailId\" : \"AAMkAGZmZjBlMjI0LTMxMmEtNDFkMi1hM2Yx\",\n \"EmailImportance\" : \"IMPORTANT\",\n \"EmailInternetMessageId\" : \"\",\n \"EmailIsDraft\" : 1,\n \"EmailIsRead\" : 0,\n \"EmailTime\" : \"2021-09-15 12:40:19\",\n \"EmailOwner\" : \"xxxx\",\n \"EmailSize\" : 4096,\n \"EmailTenant\": \"xxxx\",\n \"EmailSubject\" : \"Hello From regress\",\n \"EmailTags\" : \"draft,important,work\",\n \"EmailTo\" : \"john@bacula\",\n \"Plugin\" : \"m365\",\n \"Type\" : \"EMAIL\",\n \"Version\" : 1\n }"; const char *m3 = "{\n \"AttachmentOwner\" : \"xxxx\", \"AttachmentTenant\" : \"xxxx\", \"AttachmentContentType\" : \"application/octet-stream\",\n \"AttachmentEmailId\" : \"AAMkAGZmZjBlMjI0LTMxMmEtNDFkMi1hM2Yx\",\n \"AttachmentId\" : \"AAMkAGZmZjBlMjI0LTMxMmEtNDFkMi1hM2YxLWEzNjI5MjY4M2JkMQBGAAAAAAChUr1sDFmcSYm7PK3nvLVxBwB-S4yOymgVRpR5CA4-eilAAABABKybAAB-S4yOymgVRpR5CA4-eilAAABABUnfAAABEgAQAKT86cEi1S9PgA8I5xS0vKA=\",\n \"AttachmentIsInline\" : 0,\n \"AttachmentName\" : \"CV.pdf\",\n \"AttachmentSize\" : 81920,\n \"Plugin\" : \"m365\",\n \"Type\" : \"ATTACHMENT\",\n \"Version\" : 1\n}"; p_ctx->meta_mgr->add_packet(plugin_meta_catalog_email, strlen(m2)+1, (void *)m2); p_ctx->meta_mgr->add_packet(plugin_meta_catalog_email, strlen(m3)+1, (void *)m3); sp->plug_meta = p_ctx->meta_mgr; Dmsg0(0, "Insert metadata!!!!!! CV.pdf\n"); return bRC_OK; } else if (p_ctx->nb_obj == 9) { time_t now = time(NULL); p_ctx->nb_obj++; sp->type = FT_REG; sp->fname = (char *)"/@size_update_file@"; sp->statp.st_mode = 0640; sp->statp.st_ctime = now; sp->statp.st_mtime = now; sp->statp.st_atime = now; sp->statp.st_size = -1; /* Size is unknown at the beginning, should be updated in the next step */ sp->statp.st_blksize = 4096; Dmsg0(0, "@size_update_file@ initial step\n"); return bRC_OK; } else if (p_ctx->nb_obj == 10) { p_ctx->nb_obj++; sp->type = FT_REG; sp->fname = (char *)"/@size_update_file@"; sp->stat_update = true; /* File attributes should be updated */ sp->statp.st_size = 666; /* Update size */ sp->statp.st_mode = 0777; /* Update perissions */ Dmsg0(0, "@size_update_file@ update size\n"); return bRC_OK; } else { return bRC_Stop; } if (p_ctx->nb_obj < 2) { time_t now = time(NULL); sp->statp.st_mode = 0700 | S_IFREG; sp->statp.st_ctime = now; sp->statp.st_mtime = now; sp->statp.st_atime = now; sp->statp.st_size = sp->restore_obj.object_len; sp->statp.st_blksize = 4096; sp->statp.st_blocks = 1; } else { stat(p_ctx->reader, &sp->statp); } sp->restore_obj.index = ++p_ctx->nb_obj; if (sp->type != FT_REG) { bfuncs->DebugMessage(ctx, fi, li, dbglvl, "Creating RestoreObject len=%d oname=%s data=%.256s\n", sp->restore_obj.object_len, sp->restore_obj.object_name, sp->restore_obj.object); } printf("test-plugin-fd: startBackupFile\n"); return bRC_OK; } /* * Done with backup of this file */ static bRC endBackupFile(bpContext *ctx) { struct plugin_ctx *p_ctx = (struct plugin_ctx *)ctx->pContext; if (!p_ctx) { return bRC_Error; } /* * We would return bRC_More if we wanted startBackupFile to be * called again to backup another file */ return bRC_More; } /* * Bacula is calling us to do the actual I/O */ static bRC pluginIO(bpContext *ctx, struct io_pkt *io) { struct plugin_ctx *p_ctx = (struct plugin_ctx 
*)ctx->pContext; if (!p_ctx) { return bRC_Error; } io->status = 0; io->io_errno = 0; switch(io->func) { case IO_OPEN: if (p_ctx->nb_obj == 4 && p_ctx->reader) { p_ctx->fd = bfopen(p_ctx->reader, "r"); if (p_ctx->fd) { io->status = fileno(p_ctx->fd); } else { io->status = -1; } } else { io->status = 1; /* dummy file */ } p_ctx->offset = 0; break; case IO_READ: if (p_ctx->fd) { io->offset = ftell(p_ctx->fd); io->status = fread(io->buf, 1, io->count, p_ctx->fd); } else { if (p_ctx->offset > 3000000) { io->status = 0; /* EOF */ } else { memset(io->buf, 0x55, io->count); io->offset = p_ctx->offset; io->status = io->count; p_ctx->offset += io->count; } } break; case IO_WRITE: if (p_ctx->fd) { io->status = fwrite(io->buf, 1, io->count, p_ctx->fd); } else { io->status = -1; } break; /* Cleanup things during close */ case IO_CLOSE: if (p_ctx->fd) { fclose(p_ctx->fd); p_ctx->fd = NULL; } io->status = 0; { struct fileevent_pkt event; bstrncpy(event.Description, "A good description", sizeof(event.Description)); bstrncpy(event.Source, "A very good source", sizeof(event.Source)); event.Type = 'c'; event.Severity = 100; bfuncs->AddFileEvent(ctx, &event); bfuncs->JobMessage(ctx, fi, li, M_INFO, 0, "Adding FileEvent\n"); } break; case IO_SEEK: if (p_ctx->nb_obj == 4) { io->status = fwrite(io->buf, 1, io->count, p_ctx->fd); } else { io->status = -1; } io->status = io->offset; break; } return bRC_OK; } /* * Bacula is notifying us that a plugin name string was found, and * passing us the plugin command, so we can prepare for a restore. */ static bRC startRestoreFile(bpContext *ctx, const char *cmd) { printf("test-plugin-fd: startRestoreFile cmd=%s\n", cmd); return bRC_OK; } /* * Bacula is notifying us that the plugin data has terminated, so * the restore for this particular file is done. */ static bRC endRestoreFile(bpContext *ctx) { printf("test-plugin-fd: endRestoreFile\n"); return bRC_OK; } /* * This is called during restore to create the file (if necessary) * We must return in rp->create_status: * * CF_ERROR -- error * CF_SKIP -- skip processing this file * CF_EXTRACT -- extract the file (i.e.call i/o routines) * CF_CREATED -- created, but no content to extract (typically directories) * */ static bRC createFile(bpContext *ctx, struct restore_pkt *rp) { struct plugin_ctx *pctx=(struct plugin_ctx *)ctx->pContext; printf("test-plugin-fd: createFile\n"); if (strlen(rp->where) > 512) { printf("Restore target dir too long. Restricting to first 512 bytes.\n"); } bstrncpy(pctx->where, rp->where, sizeof(pctx->where)); pctx->replace = rp->replace; rp->create_status = CF_CORE; return bRC_OK; } /* * We will get here if the File is a directory after everything * is written in the directory. 
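 * In this test plugin the call is only traced; the restore_pkt passed in
 * carries the attributes that were recorded at backup time.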
*/ static bRC setFileAttributes(bpContext *ctx, struct restore_pkt *rp) { printf("test-plugin-fd: setFileAttributes\n"); return bRC_OK; } /* When using Incremental dump, all previous dumps are necessary */ static bRC checkFile(bpContext *ctx, char *fname) { if (strncmp(fname, "/@testplugin/", strlen("/@testplugin/")) == 0) { return bRC_Seen; } else { return bRC_OK; } } /* Called for each file with the RestoreFileList feature at the start of a * restore */ static bRC restoreFileList(bpContext *ctx, struct restore_filelist_pkt *rp) { if (rp) { Dmsg1(0, "Will restore fname=%s\n", rp->ofname); } return bRC_OK; } /* * New Bacula Plugin API require this */ static bRC handleXACLdata(bpContext *ctx, struct xacl_pkt *xacl) { return bRC_OK; } /* QueryParameter interface */ static bRC queryParameter(bpContext *ctx, struct query_pkt *qp) { OutputWriter ow(qp->api_opts); cmd_parser a; bRC ret = bRC_OK; int nb = 3; int sleepval = 3; int i; struct plugin_ctx *p_ctx = (struct plugin_ctx *)ctx->pContext; if (!p_ctx) { return bRC_Error; } a.parse_cmd(qp->command); if ((i = a.find_arg_with_value("nb")) > 0) { nb = str_to_int64(a.argv[i]); } if ((i = a.find_arg_with_value("sleep")) > 0) { sleepval = str_to_int64(a.argv[i]); } if (strcasecmp(qp->parameter, "int") == 0) { ow.get_output(OT_INT32, "int", 1, OT_INT32, "int", 2, OT_INT32, "int", 3, OT_INT32, "int", 4, OT_INT32, "int", 5, OT_INT32, "int", 6, OT_INT32, "int", 7, OT_INT32, "int", 8, OT_INT32, "int", 9, OT_INT32, "int", 10, OT_END); goto bail_out; } if (strcasecmp(qp->parameter, "string") == 0) { ow.get_output(OT_STRING, "string", "string1", OT_STRING, "string", "string2", OT_STRING, "string", "string3", OT_STRING, "string", "string4", OT_STRING, "string", "string5", OT_STRING, "string", "string6", OT_STRING, "string", "string7", OT_STRING, "string", "string8", OT_STRING, "string", "string9", OT_STRING, "string", "string10", OT_END); goto bail_out; } if (strcasecmp(qp->parameter, "withsleep") == 0) { if (p_ctx->nb++ < nb) { ow.get_output(OT_INT32, "withsleep", p_ctx->nb, OT_END); bmicrosleep(sleepval, 0); } if (p_ctx->nb < nb) { ret = bRC_More; } goto bail_out; } bail_out: if (p_ctx->query_buf) { free(p_ctx->query_buf); } qp->result = p_ctx->query_buf = bstrdup(ow.get_output(OT_END)); return ret; } #ifdef __cplusplus } #endif bacula-15.0.3/src/plugins/fd/kubernetes-fd.h0000644000175000017500000000651014771010173020434 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /** * @file kubernetes-fd.h * @author Radosław Korzeniewski (radoslaw@korzeniewski.net) * @brief This is a Bacula Kubernetes Plugin with metaplugin interface. * @version 1.3.0 * @date 2021-01-05 * * @copyright Copyright (c) 2021 All rights reserved. * IP transferred to Bacula Systems according to agreement. */ #ifndef KUBERNETES_PLUGIN_FD_H #define KUBERNETES_PLUGIN_FD_H #include "pluginlib/pluginlib.h" #include "pluginlib/metaplugin.h" /* * The list of restore options saved to the RestoreObject. 
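* Each entry follows the ini_items layout shown in the column header below: name, handler, comment,
* required and default; the handler (ini_store_str, ini_store_bool, ini_store_int32) selects how the
* stored value is parsed when the RestoreObject is read back.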
*/ struct ini_items plugin_items_dump[] = { // name handler comment required default {"config", ini_store_str, "K8S config file", 0, "*None*"}, {"host", ini_store_str, "K8S API server URL/Host", 0, "*None*"}, {"token", ini_store_str, "K8S Bearertoken", 0, "*None*"}, // {"username", ini_store_str, "HTTP Auth username for API", 0, "*None*"}, // {"password", ini_store_str, "HTTP Auth password for API", 0, "*None*"}, {"verify_ssl", ini_store_bool, "K8S API server cert verification", 0, "True"}, {"ssl_ca_cert", ini_store_str, "Custom CA Certs file to use", 0, "*None*"}, {"outputformat", ini_store_str, "Output format when saving to file (JSON, YAML)", 0, "RAW"}, {"fdaddress", ini_store_str, "The address for listen to incoming backup pod data", 0, "*FDAddress*"}, {"fdport", ini_store_int32, "The port for opening socket for listen", 0, "9104"}, {"pluginhost", ini_store_str, "The endpoint address for backup pod to connect", 0, "*FDAddress*"}, {"pluginport", ini_store_int32, "The endpoint port to connect", 0, "9104"}, {NULL, NULL, NULL, 0, NULL} }; // the list of valid plugin options const char * valid_params[] = { "listing", "query", "abort_on_error", "config", "incluster", "host", "token", "verify_ssl", "ssl_ca_cert", "timeout", "debug", "backup_mode", "namespace", "ns", "persistentvolume", "pv", "pvconfig", "scconfig", "pvcdata", "fdaddress", "fdport", "pluginhost", "pluginport", "fdcertfile", "fdkeyfile", "baculaimage", "imagepullpolicy", "imagepullsecret", "outputformat", "labels", NULL, }; #endif // KUBERNETES_PLUGIN_FD_H bacula-15.0.3/src/plugins/fd/test-verify-fd.c0000644000175000017500000002257614771010173020553 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * A Verify Data plugin to check test-verify signature */ #define BUILD_PLUGIN #include "bacula.h" #include "fd_plugins.h" #define USE_CMD_PARSER #include "fd_common.h" #include "findlib/bfile.h" /* for bopen/bclose/bread/bwrite */ #ifdef __cplusplus extern "C" { #endif static const int dbglvl = 100; static char *working; #define PLUGIN_LICENSE "Bacula Systems(R) SA" #define PLUGIN_AUTHOR "Eric Bollengier" #define PLUGIN_DATE "September 2021" #define PLUGIN_VERSION "1" #define PLUGIN_DESCRIPTION "Bacula Test-Verify Plugin" /* Forward referenced functions */ static bRC newPlugin(bpContext *ctx); static bRC freePlugin(bpContext *ctx); static bRC getPluginValue(bpContext *ctx, pVariable var, void *value); static bRC setPluginValue(bpContext *ctx, pVariable var, void *value); static bRC handlePluginEvent(bpContext *ctx, bEvent *event, void *value); static bRC pluginIO(bpContext *ctx, struct io_pkt *io); static bRC startVerifyFile(bpContext *ctx, struct restore_pkt *rp); static bRC endVerifyFile(bpContext *ctx); static bRC setFileAttributes(bpContext *ctx, struct restore_pkt *rp); /* Pointers to Bacula functions */ static bFuncs *bfuncs = NULL; static bInfo *binfo = NULL; /* Plugin Information block */ static pInfo pluginInfo = { sizeof(pluginInfo), FD_PLUGIN_INTERFACE_VERSION, FD_PLUGIN_MAGIC, PLUGIN_LICENSE, PLUGIN_AUTHOR, PLUGIN_DATE, PLUGIN_VERSION, PLUGIN_DESCRIPTION }; /* Plugin entry points for Bacula * * A verify plugin do not have to implement specific backup or restore * functions. The plugin cannot be set in the FileSet, it is loaded at * startup and will register to the bEventVerifyStream event. */ static pFuncs pluginFuncs = { sizeof(pluginFuncs), FD_PLUGIN_INTERFACE_VERSION, /* Entry points into plugin */ newPlugin, /* new plugin instance */ freePlugin, /* free plugin instance */ getPluginValue, setPluginValue, handlePluginEvent, NULL, /* startBackupFile */ NULL, /* endBackupFile */ NULL, /* startRestoreFile */ NULL, /* endRestoreFile */ pluginIO, NULL, /* createFile */ setFileAttributes, NULL, // checkFile NULL, // handleXACLdata NULL, // restoreFileList NULL, // checkStream NULL, // queryParameter NULL, // metadataRestore startVerifyFile, endVerifyFile }; #define get_self(x) (((x)==NULL)?NULL:(verifyplug*)((x)->pContext)) /* Plugin instance class */ class verifyplug: public cmd_parser, public SMARTALLOC { private: bpContext *ctx; public: BPIPE *pfd; /* Main file descriptor read/write */ POOLMEM *fname; char *pattern; verifyplug(bpContext *bpc): cmd_parser(), ctx(bpc), pfd(NULL), fname(NULL), pattern(NULL) { }; virtual ~verifyplug() { free_and_null_pool_memory(fname); }; /* Wait to be called to allocate memory */ void init_mem() { fname = get_pool_memory(PM_FNAME); }; }; /* * loadPlugin() and unloadPlugin() are entry points that are * exported, so Bacula can directly call these two entry points * they are common to all Bacula plugins. 
*/ /* * External entry point called by Bacula to "load the plugin */ bRC DLL_IMP_EXP loadPlugin(bInfo *lbinfo, bFuncs *lbfuncs, pInfo **pinfo, pFuncs **pfuncs) { bfuncs = lbfuncs; /* set Bacula funct pointers */ binfo = lbinfo; *pinfo = &pluginInfo; /* return pointer to our info */ *pfuncs = &pluginFuncs; /* return pointer to our functions */ bfuncs->getBaculaValue(NULL, bVarWorkingDir, (void *)&working); return bRC_OK; } /* * External entry point to unload the plugin */ bRC DLL_IMP_EXP unloadPlugin() { // Dmsg(NULL, dbglvl, "Unloaded\n"); return bRC_OK; } /* * The following entry points are accessed through the function * pointers we supplied to Bacula. Each plugin type (dir, fd, sd) * has its own set of entry points that the plugin must define. */ /* * Create a new instance of the plugin i.e. allocate our private storage */ static bRC newPlugin(bpContext *ctx) { verifyplug *self = New(verifyplug(ctx)); if (!self) { return bRC_Error; } ctx->pContext = (void *)self; /* set our context pointer */ return bRC_OK; } /* * Free a plugin instance, i.e. release our private storage */ static bRC freePlugin(bpContext *ctx) { verifyplug *self = get_self(ctx); if (!self) { return bRC_Error; } delete self; return bRC_OK; } /* * Return some plugin value (none defined) */ static bRC getPluginValue(bpContext *ctx, pVariable var, void *value) { return bRC_OK; } /* * Set a plugin value (none defined) */ static bRC setPluginValue(bpContext *ctx, pVariable var, void *value) { return bRC_OK; } /* * Handle an event that was generated in Bacula */ static bRC handlePluginEvent(bpContext *ctx, bEvent *event, void *value) { int i; verifyplug *self = get_self(ctx); if (!self) { return bRC_Error; } /* * Most events don't interest us so we ignore them. * the printfs are so that plugin writers can enable them to see * what is really going on. 
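* The key case below is bEventPluginOptions: it parses the plugin command, picks up the optional
* "pattern" argument and registers for bEventVerifyStream; bEventStartVerifyJob then allocates the
* filename buffer via init_mem().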
*/ switch (event->eventType) { case bEventPluginOptions: Jmsg(ctx, M_INFO, "Got plugin command = %s\n", (char *)value); self->parse_cmd((char *)value); bfuncs->registerBaculaEvents(ctx, bEventVerifyStream); if ((i = self->find_arg_with_value("pattern")) >= 1) { self->pattern = self->argv[i]; } else { self->pattern = (char *)"notfound"; } Dmsg0(0, "Register event bEventVerifyStream\n"); break; case bEventVssPrepareSnapshot: break; case bEventJobStart: // Dmsg(ctx, dbglvl, "JobStart=%s\n", (char *)value); break; case bEventJobEnd: // Dmsg(ctx, dbglvl, "JobEnd\n"); break; case bEventLevel: break; case bEventSince: // Dmsg(ctx, dbglvl, "since=%d\n", (int)value); break; case bEventStartVerifyJob: self->init_mem(); break; case bEventHandleBackupFile: /* Fall-through wanted */ case bEventStartBackupJob: /* Fall-through wanted */ case bEventEndBackupJob: /* Fall-through wanted */ case bEventStartRestoreJob: /* Fall-through wanted */ case bEventEndRestoreJob: /* Fall-through wanted */ case bEventRestoreCommand: /* Fall-through wanted */ case bEventEstimateCommand: /* Fall-through wanted */ case bEventBackupCommand: /* Plugin = "verifyplug" */ return bRC_Error; default: // Dmsg(ctx, dbglvl, "unknown event=%d\n", event->eventType); break; } return bRC_OK; } static bRC startVerifyFile(bpContext *ctx, struct restore_pkt *rp) { verifyplug *self = get_self(ctx); pm_strcpy(self->fname, rp->ofname); return bRC_OK; } /* * Done with verify of this file */ static bRC endVerifyFile(bpContext *ctx) { return bRC_OK; } /* * Bacula is calling us to do the actual I/O */ static bRC pluginIO(bpContext *ctx, struct io_pkt *io) { char buf[512]; verifyplug *self = get_self(ctx); ssize_t nb; if (!self) { return bRC_Error; } io->status = 0; io->io_errno = 0; switch(io->func) { case IO_OPEN: Dmsg(ctx, dbglvl, "verifyplug: Initialize verifyplug\n"); // Works only on GNU bsnprintf(buf, sizeof(buf)-1, "grep -q '%s'", self->pattern); if ((self->pfd = open_bpipe(buf, 0, "rw", NULL)) == NULL) { io->io_errno = errno; io->status = -1; Jmsg(ctx, M_ERROR, "Open failed: fname=%s ERR=%s\n", io->fname, strerror(errno)); return bRC_Error; } io->status = fileno(self->pfd->wfd); break; case IO_READ: return bRC_Error; case IO_WRITE: Dmsg(ctx, dbglvl, "verifyplug: writing\n"); nb = write(fileno(self->pfd->wfd), io->buf, io->count); if (nb != io->count) { Jmsg(ctx, M_ERROR, "Write error\n"); return bRC_Error; } break; /* Cleanup things during close */ case IO_CLOSE: if (close_bpipe(self->pfd) == 0) { Jmsg(ctx, M_INFO, _("File %s matching pattern\n"), self->fname); } break; case IO_SEEK: /* Seek not needed for this plugin, we don't use real sparse file */ break; } return bRC_OK; } /* * We will get here if the File is a directory after everything * is written in the directory. */ static bRC setFileAttributes(bpContext *ctx, struct restore_pkt *rp) { // Dmsg(ctx, dbglvl, "setFileAttributes\n"); return bRC_Error; } #ifdef __cplusplus } #endif bacula-15.0.3/src/plugins/fd/bpipe-fd.c0000644000175000017500000006216014771010173017362 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. 
This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * A simple pipe plugin for the Bacula File Daemon * * Kern Sibbald, October 2007 * */ #include "bacula.h" #define USE_FULL_WRITE #include "fd_common.h" #include "fd_plugins.h" #include "lib/ini.h" #include "fileopts.h" #undef malloc #undef free #undef strdup #define fi __FILE__ #define li __LINE__ #ifdef __cplusplus extern "C" { #endif static const int dbglvl = 150; #define PLUGIN_LICENSE "AGPLv3" #define PLUGIN_AUTHOR "Kern Sibbald" #define PLUGIN_DATE "January 2008" #define PLUGIN_VERSION "2" #define PLUGIN_DESCRIPTION "Bacula Pipe File Daemon Plugin" /* Forward referenced functions */ static bRC newPlugin(bpContext *ctx); static bRC freePlugin(bpContext *ctx); static bRC getPluginValue(bpContext *ctx, pVariable var, void *value); static bRC setPluginValue(bpContext *ctx, pVariable var, void *value); static bRC handlePluginEvent(bpContext *ctx, bEvent *event, void *value); static bRC startBackupFile(bpContext *ctx, struct save_pkt *sp); static bRC endBackupFile(bpContext *ctx); static bRC pluginIO(bpContext *ctx, struct io_pkt *io); static bRC startRestoreFile(bpContext *ctx, const char *cmd); static bRC endRestoreFile(bpContext *ctx); static bRC createFile(bpContext *ctx, struct restore_pkt *rp); static bRC setFileAttributes(bpContext *ctx, struct restore_pkt *rp); static bRC checkFile(bpContext *ctx, char *fname); static bRC handleXACLdata(bpContext *ctx, struct xacl_pkt *xacl); static char *apply_rp_codes(struct plugin_ctx * p_ctx); static bool dump_restoreobject(bpContext *ctx, restore_object_pkt *rop); /* Pointers to Bacula functions */ static bFuncs *bfuncs = NULL; static bInfo *binfo = NULL; /* Plugin Information block */ static pInfo pluginInfo = { sizeof(pluginInfo), FD_PLUGIN_INTERFACE_VERSION, FD_PLUGIN_MAGIC, PLUGIN_LICENSE, PLUGIN_AUTHOR, PLUGIN_DATE, PLUGIN_VERSION, PLUGIN_DESCRIPTION }; /* Plugin entry points for Bacula */ static pFuncs pluginFuncs = { sizeof(pluginFuncs), FD_PLUGIN_INTERFACE_VERSION, /* Entry points into plugin */ newPlugin, /* new plugin instance */ freePlugin, /* free plugin instance */ getPluginValue, setPluginValue, handlePluginEvent, startBackupFile, endBackupFile, startRestoreFile, endRestoreFile, pluginIO, createFile, setFileAttributes, checkFile, handleXACLdata, NULL /* No checkStream */ }; /* * Plugin private context */ struct plugin_ctx { boffset_t offset; BPIPE *pfd; /* bpipe file descriptor */ int efd; /* stderr */ int rfd; /* stdout */ int wfd; /* stdin */ int maxfd; /* max(stderr, stdout) */ bool backup; /* set when the backup is done */ bool restore_object_sent; bool canceled; char *cmd; /* plugin command line */ char *fname; /* filename to "backup/restore" */ char *reader; /* reader program for backup */ char *writer; /* writer program for backup */ alist *rop_writer; /* writer command set via restore object */ char where[512]; int replace; int job_level; int estimate_mode; int64_t total_bytes; /* number of bytes read/write */ POOLMEM *restore_obj_buf; }; /* * loadPlugin() and unloadPlugin() are entry points that are * exported, so Bacula can directly call these two entry points * they are common to all Bacula plugins. 
*/ /* * External entry point called by Bacula to "load the plugin */ bRC loadPlugin(bInfo *lbinfo, bFuncs *lbfuncs, pInfo **pinfo, pFuncs **pfuncs) { bfuncs = lbfuncs; /* set Bacula funct pointers */ binfo = lbinfo; *pinfo = &pluginInfo; /* return pointer to our info */ *pfuncs = &pluginFuncs; /* return pointer to our functions */ return bRC_OK; } /* * External entry point to unload the plugin */ bRC unloadPlugin() { // printf("bpipe-fd: Unloaded\n"); return bRC_OK; } class restoreobj: public SMARTALLOC { public: char *name; char *writer; restoreobj(char *n, char *w) { name = bstrdup(n); writer = bstrdup(w); }; ~restoreobj() { bfree(writer); bfree(name); }; }; /* * The following entry points are accessed through the function * pointers we supplied to Bacula. Each plugin type (dir, fd, sd) * has its own set of entry points that the plugin must define. */ /* * Create a new instance of the plugin i.e. allocate our private storage */ static bRC newPlugin(bpContext *ctx) { struct plugin_ctx *p_ctx = (struct plugin_ctx *)malloc(sizeof(struct plugin_ctx)); if (!p_ctx) { return bRC_Error; } memset(p_ctx, 0, sizeof(struct plugin_ctx)); ctx->pContext = (void *)p_ctx; /* set our context pointer */ return bRC_OK; } /* * Free a plugin instance, i.e. release our private storage */ static bRC freePlugin(bpContext *ctx) { struct plugin_ctx *p_ctx = (struct plugin_ctx *)ctx->pContext; if (!p_ctx) { return bRC_Error; } if (p_ctx->cmd) { free(p_ctx->cmd); /* free any allocated command string */ } free_and_null_pool_memory(p_ctx->restore_obj_buf); if (p_ctx->rop_writer) { restoreobj *rop; foreach_alist(rop, p_ctx->rop_writer) { delete rop; } delete p_ctx->rop_writer; } free(p_ctx); /* free our private context */ p_ctx = NULL; return bRC_OK; } /* * Return some plugin value (none defined) */ static bRC getPluginValue(bpContext *ctx, pVariable var, void *value) { return bRC_OK; } /* * Set a plugin value (none defined) */ static bRC setPluginValue(bpContext *ctx, pVariable var, void *value) { return bRC_OK; } /* * Handle an event that was generated in Bacula */ static bRC handlePluginEvent(bpContext *ctx, bEvent *event, void *value) { struct plugin_ctx *p_ctx = (struct plugin_ctx *)ctx->pContext; restore_object_pkt *rop; if (!p_ctx) { return bRC_Error; } // char *name; /* * Most events don't interest us so we ignore them. * the printfs are so that plugin writers can enable them to see * what is really going on. */ switch (event->eventType) { case bEventLevel: p_ctx->job_level = ((intptr_t)value); break; case bEventCancelCommand: p_ctx->canceled = true; break; case bEventPluginCommand: bfuncs->DebugMessage(ctx, fi, li, dbglvl, "bpipe-fd: PluginCommand=%s\n", (char *)value); break; case bEventJobStart: bfuncs->DebugMessage(ctx, fi, li, dbglvl, "bpipe-fd: JobStart=%s\n", (char *)value); break; case bEventJobEnd: // printf("bpipe-fd: JobEnd\n"); break; case bEventStartBackupJob: // printf("bpipe-fd: StartBackupJob\n"); break; case bEventEndBackupJob: // printf("bpipe-fd: EndBackupJob\n"); break; case bEventSince: // printf("bpipe-fd: since=%d\n", (int)value); break; case bEventRestoreObject: if (!value) { break; } rop = (restore_object_pkt *)value; if (!dump_restoreobject(ctx, rop)) { return bRC_Error; } break; case bEventStartRestoreJob: // printf("bpipe-fd: StartRestoreJob\n"); break; case bEventEndRestoreJob: // printf("bpipe-fd: EndRestoreJob\n"); break; /* Plugin command e.g. 
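a value like Plugin = "bpipe:/MYSQL/dump.sql:mysqldump mydb:mysql mydb" (an illustrative value, not taken from this source); the parsing below splits the string at each ':' into fname, reader and writer, i.e.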
plugin = ::read command:write command */ case bEventEstimateCommand: p_ctx->estimate_mode = true; /* Fall-through wanted */ case bEventRestoreCommand: // printf("bpipe-fd: EventRestoreCommand cmd=%s\n", (char *)value); /* Fall-through wanted */ case bEventBackupCommand: char *p; bfuncs->DebugMessage(ctx, fi, li, dbglvl, "bpipe-fd: pluginEvent cmd=%s\n", (char *)value); p_ctx->restore_object_sent = false; /* Foreach command, we might have to send a specific restore object */ p_ctx->backup = false; p_ctx->cmd = strdup((char *)value); p = strchr(p_ctx->cmd, ':'); if (!p) { bfuncs->JobMessage(ctx, fi, li, M_FATAL, 0, "Plugin terminator not found: %s\n", (char *)value); return bRC_Error; } *p++ = 0; /* terminate plugin */ p_ctx->fname = p; p = strchr(p, ':'); if (!p) { bfuncs->JobMessage(ctx, fi, li, M_FATAL, 0, "File terminator not found: %s\n", (char *)value); return bRC_Error; } *p++ = 0; /* terminate file */ p_ctx->reader = p; p = strchr(p, ':'); if (!p) { bfuncs->JobMessage(ctx, fi, li, M_FATAL, 0, "Reader terminator not found: %s\n", (char *)value); return bRC_Error; } *p++ = 0; /* terminate reader string */ p_ctx->writer = p; /* We may already have a writer command set via restore object */ if (p_ctx->rop_writer) { restoreobj *rop; foreach_alist(rop, p_ctx->rop_writer) { if (!strcmp(rop->name, (char *)value)) { p_ctx->writer = rop->writer; } } } // printf("bpipe-fd: plugin=%s fname=%s reader=%s writer=%s\n", // p_ctx->cmd, p_ctx->fname, p_ctx->reader, p_ctx->writer); break; default: // printf("bpipe-fd: unknown event=%d\n", event->eventType); break; } return bRC_OK; } /* Configuration parameters for restore */ static struct ini_items my_items[] = { // name handler comment required default { "restore_command", ini_store_str, "Restore command to use", 0, NULL}, { NULL, NULL, NULL, 0, NULL} }; static bool dump_restoreobject(bpContext *ctx, restore_object_pkt *rop) { struct plugin_ctx *p_ctx = (struct plugin_ctx *)ctx->pContext; if (!p_ctx) { return false; } bfuncs->DebugMessage(ctx, fi, li, dbglvl, "Trying to dump restore object\n"); if (!strcmp(rop->object_name, INI_RESTORE_OBJECT_NAME)) { ConfigFile ini; if (!ini.dump_string(rop->object, rop->object_len)) { bfuncs->JobMessage(ctx, fi, li, M_FATAL, 0, "Unable to parse the User supplied restore options\n"); bfuncs->DebugMessage(ctx, fi, li, 0, "Can't parse configuration file\n"); return false; } ini.register_items(my_items, sizeof(struct ini_items)); if (ini.parse(ini.out_fname)) { /* TODO: check parameters */ if (ini.items[0].found) { if (!p_ctx->rop_writer) { p_ctx->rop_writer = New(alist(5, not_owned_by_alist)); } p_ctx->rop_writer->append(New(restoreobj(rop->plugin_name, ini.items[0].val.strval))); bfuncs->JobMessage(ctx, fi, li, M_INFO, 0, _("Using user supplied restore command: \"%s\"\n"), ini.items[0].val.strval); } else { bfuncs->DebugMessage(ctx, fi, li, 0, "Options not set\n"); } } else { bfuncs->DebugMessage(ctx, fi, li, 0, "Can't parse configuration file\n"); bfuncs->JobMessage(ctx, fi, li, M_FATAL, 0, "User supplied restore options are not valid\n"); return false; } } return true; } /* * Start the backup of a specific file */ static bRC startBackupFile(bpContext *ctx, struct save_pkt *sp) { struct plugin_ctx *p_ctx = (struct plugin_ctx *)ctx->pContext; if (!p_ctx) { return bRC_Error; } /* First step is to send config restoreobject if needed */ if (!p_ctx->restore_object_sent && p_ctx->job_level == 'F' && !p_ctx->estimate_mode) { ConfigFile ini; POOLMEM *buf = get_pool_memory(PM_MESSAGE); p_ctx->restore_object_sent = true; 
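/* On a Full backup the first call emits an FT_PLUGIN_CONFIG restore object serialized from my_items;
 * the actual data file is sent on the following call (endBackupFile returns bRC_More until
 * p_ctx->backup is set). */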
ini.register_items(my_items, sizeof(struct ini_items)); sp->restore_obj.object_name = (char*)INI_RESTORE_OBJECT_NAME; sp->restore_obj.object_len = ini.serialize(&buf); sp->restore_obj.object = buf; sp->type = FT_PLUGIN_CONFIG; p_ctx->restore_obj_buf = buf; return bRC_OK; } time_t now = time(NULL); sp->fname = p_ctx->fname; sp->type = FT_REG; sp->statp.st_mode = 0700 | S_IFREG; sp->statp.st_ctime = now; sp->statp.st_mtime = now; sp->statp.st_atime = now; sp->statp.st_size = -1; sp->statp.st_blksize = 4096; sp->statp.st_blocks = 1; #ifdef TEST_BPIPE_OFFSET sp->flags |= FO_OFFSETS; #endif p_ctx->backup = true; return bRC_OK; } /* * Done with backup of this file */ static bRC endBackupFile(bpContext *ctx) { struct plugin_ctx *p_ctx = (struct plugin_ctx *)ctx->pContext; if (!p_ctx) { return bRC_Error; } /* * We would return bRC_More if we wanted startBackupFile to be * called again to backup another file */ if (!p_ctx->backup) { /* We sent the RestoreObject, we need to send the data */ return bRC_More; } return bRC_OK; } static void send_log(bpContext *ctx, char *buf) { struct plugin_ctx *p_ctx = (struct plugin_ctx *)ctx->pContext; strip_trailing_newline(buf); bfuncs->JobMessage(ctx, fi, li, M_INFO, 0, "%s: %s\n", p_ctx->fname, buf); } /* * Bacula is calling us to do the actual I/O */ static bRC pluginIO(bpContext *ctx, struct io_pkt *io) { fd_set rfds; fd_set wfds; bool ok=false; char buf[1024]; struct plugin_ctx *p_ctx = (struct plugin_ctx *)ctx->pContext; if (!p_ctx) { return bRC_Error; } io->status = -1; io->io_errno = 0; switch(io->func) { case IO_OPEN: p_ctx->total_bytes = 0; p_ctx->wfd = p_ctx->efd = p_ctx->rfd = -1; bfuncs->DebugMessage(ctx, fi, li, dbglvl, "bpipe-fd: IO_OPEN\n"); if (io->flags & (O_CREAT | O_WRONLY)) { char *writer_codes = apply_rp_codes(p_ctx); p_ctx->pfd = open_bpipe(writer_codes, 0, "rws"); bfuncs->DebugMessage(ctx, fi, li, dbglvl, "bpipe-fd: IO_OPEN fd=%p writer=%s\n", p_ctx->pfd, writer_codes); if (!p_ctx->pfd) { io->io_errno = errno; bfuncs->JobMessage(ctx, fi, li, M_FATAL, 0, "Open pipe writer=%s failed: ERR=%s\n", writer_codes, strerror(errno)); if (writer_codes) { free(writer_codes); } return bRC_Error; } if (writer_codes) { free(writer_codes); } /* We need to read from stdout/stderr for messages to display to the user */ p_ctx->rfd = fileno(p_ctx->pfd->rfd); p_ctx->wfd = fileno(p_ctx->pfd->wfd); p_ctx->maxfd = MAX(p_ctx->wfd, p_ctx->rfd); io->status = p_ctx->wfd; } else { /* Use shell mode and split stderr/stdout */ p_ctx->pfd = open_bpipe(p_ctx->reader, 0, "rse"); bfuncs->DebugMessage(ctx, fi, li, dbglvl, "bpipe-fd: IO_OPEN fd=%p reader=%s\n", p_ctx->pfd, p_ctx->reader); if (!p_ctx->pfd) { io->io_errno = errno; bfuncs->JobMessage(ctx, fi, li, M_FATAL, 0, "Open pipe reader=%s failed: ERR=%s\n", p_ctx->reader, strerror(errno)); return bRC_Error; } /* We need to read from stderr for job log and stdout for the data */ p_ctx->efd = fileno(p_ctx->pfd->efd); p_ctx->rfd = fileno(p_ctx->pfd->rfd); p_ctx->maxfd = MAX(p_ctx->efd, p_ctx->rfd); io->status = p_ctx->rfd; } sleep(1); /* let pipe connect */ break; case IO_READ: if (!p_ctx->pfd) { bfuncs->JobMessage(ctx, fi, li, M_FATAL, 0, "Logic error: NULL read FD\n"); return bRC_Error; } /* We first try to read stderr, but keep monitoring for data on stdout (when stderr is empty) */ while (!p_ctx->canceled) { FD_ZERO(&rfds); FD_SET(p_ctx->rfd, &rfds); FD_SET(p_ctx->efd, &rfds); select(p_ctx->maxfd+1, &rfds, NULL, NULL, NULL); if (!FD_ISSET(p_ctx->efd, &rfds)) { /* nothing in stderr, then we should have something in stdout 
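(so we leave the loop and fall through to the blocking read() on stdout just below)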
*/ break; } int ret = read(p_ctx->efd, buf, sizeof(buf)); if (ret <= 0) { /* stderr is closed or in error, stdout should be in the same state */ /* let handle it at the stdout level */ break; } /* TODO: buffer and split lines */ buf[ret]=0; send_log(ctx, buf); } io->status = read(p_ctx->rfd, io->buf, io->count); // bfuncs->DebugMessage(ctx, fi, li, dbglvl, "bpipe-fd: IO_READ buf=%p len=%d\n", io->buf, io->status); if (io->status < 0) { berrno be; bfuncs->JobMessage(ctx, fi, li, M_FATAL, 0, "Pipe read error: ERR=%s\n", be.bstrerror()); bfuncs->DebugMessage(ctx, fi, li, dbglvl, "Pipe read error: count=%lld errno=%d ERR=%s\n", p_ctx->total_bytes, (int)errno, be.bstrerror()); return bRC_Error; } io->offset = p_ctx->total_bytes; p_ctx->total_bytes += io->status; break; case IO_WRITE: if (!p_ctx->pfd) { bfuncs->JobMessage(ctx, fi, li, M_FATAL, 0, "Logic error: NULL write FD\n"); return bRC_Error; } /* When we write, we must check for the error channel (stdout+stderr) as well */ while (!ok && !p_ctx->canceled) { FD_ZERO(&wfds); FD_SET(p_ctx->wfd, &wfds); FD_ZERO(&rfds); FD_SET(p_ctx->rfd, &rfds); select(p_ctx->maxfd+1, &rfds, &wfds, NULL, NULL); if (FD_ISSET(p_ctx->rfd, &rfds)) { int ret = read(p_ctx->rfd, buf, sizeof(buf)); /* TODO: simulate fgets() */ if (ret > 0) { buf[ret]=0; send_log(ctx, buf); } else { ok = true; /* nothing to read */ } } if (FD_ISSET(p_ctx->wfd, &wfds)) { ok = true; } } // printf("bpipe-fd: IO_WRITE fd=%p buf=%p len=%d\n", p_ctx->fd, io->buf, io->count); io->status = full_write(p_ctx->wfd, io->buf, io->count, &p_ctx->canceled); // printf("bpipe-fd: IO_WRITE buf=%p len=%d\n", io->buf, io->status); if (io->status <= 0) { berrno be; bfuncs->JobMessage(ctx, fi, li, M_FATAL, 0, "Pipe write error: ERR=%s\n", be.bstrerror()); bfuncs->DebugMessage(ctx, fi, li, dbglvl, "Pipe write error: count=%lld errno=%d ERR=%s\n", p_ctx->total_bytes, (int)errno, be.bstrerror()); return bRC_Error; } p_ctx->total_bytes += io->status; break; case IO_CLOSE: if (!p_ctx->pfd) { bfuncs->JobMessage(ctx, fi, li, M_FATAL, 0, "Logic error: NULL FD on bpipe close\n"); return bRC_Error; } /* We inform the other side that we have nothing more to send */ if (p_ctx->wfd >= 0) { int ret = close_wpipe(p_ctx->pfd); if (ret == 0) { bfuncs->JobMessage(ctx, fi, li, M_ERROR, 0, "bpipe-fd: Error closing for file %s: %d\n", p_ctx->fname, ret); } } /* We flush what the other program has to say */ while (!ok && !p_ctx->canceled) { struct timeval tv = {10, 0}; // sleep for 10secs FD_ZERO(&rfds); p_ctx->maxfd = -1; if (p_ctx->rfd >= 0) { FD_SET(p_ctx->rfd, &rfds); p_ctx->maxfd = MAX(p_ctx->maxfd, p_ctx->rfd); } if (p_ctx->efd >= 0) { FD_SET(p_ctx->efd, &rfds); p_ctx->maxfd = MAX(p_ctx->maxfd, p_ctx->efd); } if (p_ctx->maxfd == -1) { ok = true; /* exit the loop */ } else { select(p_ctx->maxfd+1, &rfds, NULL, NULL, &tv); } if (p_ctx->rfd >= 0 && FD_ISSET(p_ctx->rfd, &rfds)) { int ret = read(p_ctx->rfd, buf, sizeof(buf)); if (ret > 0) { buf[ret]=0; send_log(ctx, buf); } else { p_ctx->rfd = -1; /* closed, keep the reference in bpipe */ } } /* The stderr can be melted with stdout or not */ if (p_ctx->efd >= 0 && FD_ISSET(p_ctx->efd, &rfds)) { int ret = read(p_ctx->efd, buf, sizeof(buf)); if (ret > 0) { buf[ret]=0; send_log(ctx, buf); } else { p_ctx->efd = -1; /* closed, keep the reference in bpipe */ } } } io->status = close_bpipe(p_ctx->pfd); if (io->status != 0) { bfuncs->JobMessage(ctx, fi, li, M_ERROR, 0, "bpipe-fd: Error closing for file %s: %d\n", p_ctx->fname, io->status); } break; case IO_SEEK: io->offset = 
p_ctx->offset; io->status = 0; break; } return bRC_OK; } /* * Bacula is notifying us that a plugin name string was found, and * passing us the plugin command, so we can prepare for a restore. */ static bRC startRestoreFile(bpContext *ctx, const char *cmd) { // printf("bpipe-fd: startRestoreFile cmd=%s\n", cmd); return bRC_OK; } /* * Bacula is notifying us that the plugin data has terminated, so * the restore for this particular file is done. */ static bRC endRestoreFile(bpContext *ctx) { // printf("bpipe-fd: endRestoreFile\n"); return bRC_OK; } /* * This is called during restore to create the file (if necessary) * We must return in rp->create_status: * * CF_ERROR -- error * CF_SKIP -- skip processing this file * CF_EXTRACT -- extract the file (i.e.call i/o routines) * CF_CREATED -- created, but no content to extract (typically directories) * */ static bRC createFile(bpContext *ctx, struct restore_pkt *rp) { // printf("bpipe-fd: createFile\n"); uint sz = sizeof(((struct plugin_ctx *)ctx->pContext)->where); if (strlen(rp->where) > sz) { printf("Restore target dir too long. Restricting to first %u bytes.\n", sz); } bstrncpy(((struct plugin_ctx *)ctx->pContext)->where, rp->where, sz); ((struct plugin_ctx *)ctx->pContext)->replace = rp->replace; rp->create_status = CF_EXTRACT; return bRC_OK; } /* * We will get here if the File is a directory after everything * is written in the directory. */ static bRC setFileAttributes(bpContext *ctx, struct restore_pkt *rp) { // printf("bpipe-fd: setFileAttributes\n"); return bRC_OK; } /* When using Incremental dump, all previous dumps are necessary */ static bRC checkFile(bpContext *ctx, char *fname) { return bRC_OK; } /* * New Bacula Plugin API require this */ static bRC handleXACLdata(bpContext *ctx, struct xacl_pkt *xacl) { return bRC_OK; } /************************************************************************* * Apply codes in writer command: * %w -> "where" * %r -> "replace" * * Replace: * 'always' => 'a', chr(97) * 'ifnewer' => 'w', chr(119) * 'ifolder' => 'o', chr(111) * 'never' => 'n', chr(110) * * This function will allocate the required amount of memory with malloc. * Need to be free()d manually. 
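* Illustrative example (hypothetical values): with where="/tmp/bacula-restores" and replace='a',
* a writer of "restore.sh %w %r" expands to "restore.sh /tmp/bacula-restores a".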
* Inspired by edit_job_codes in lib/util.c */ static char *apply_rp_codes(struct plugin_ctx * p_ctx) { char *p, *q; const char *str; char add[10]; int w_count = 0, r_count = 0; char *omsg; char *imsg = p_ctx->writer; if (!imsg) { return NULL; } if ((p = imsg)) { while ((q = strstr(p, "%w"))) { w_count++; p=q+1; } p = imsg; while ((q = strstr(p, "%r"))) { r_count++; p=q+1; } } /* Required mem: * len(imsg) * + number of "where" codes * (len(where)-2) * - number of "replace" codes */ omsg = (char*)malloc(strlen(imsg) + (w_count * (strlen(p_ctx->where)-2)) - r_count + 1); if (!omsg) { fprintf(stderr, "Out of memory."); return NULL; } *omsg = 0; //printf("apply_rp_codes: %s\n", imsg); for (p=imsg; *p; p++) { if (*p == '%') { switch (*++p) { case '%': str = "%"; break; case 'w': str = p_ctx->where; break; case 'r': snprintf(add, 2, "%c", p_ctx->replace); str = add; break; default: add[0] = '%'; add[1] = *p; add[2] = 0; str = add; break; } } else { add[0] = *p; add[1] = 0; str = add; } //printf("add_str %s\n", str); strcat(omsg, str); //printf("omsg=%s\n", omsg); } return omsg; } #ifdef __cplusplus } #endif bacula-15.0.3/src/plugins/fd/kubernetes-backend/0000755000175000017500000000000014771010173021257 5ustar bsbuildbsbuildbacula-15.0.3/src/plugins/fd/kubernetes-backend/TODO0000644000175000017500000000012114771010173021741 0ustar bsbuildbsbuild-- TO BE DONE -- - Bacula Regression Test -- TO BE REVIEWED -- -- OTHER -- bacula-15.0.3/src/plugins/fd/kubernetes-backend/make_bin0000755000175000017500000000067514771010173022762 0ustar bsbuildbsbuild#!/bin/bash # Copyright (C) 2000-2020 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # We need to specify all .so file in the pyinstaller command OPT=`find build/lib.* -name *.so | while read a; do echo -n " -r $a"; done` PYTHONPATH=`./get_python PYTHONPATH` PYTHON_PREFIX=`./get_python PYTHON_PREFIX` export PYTHONPATH export PATH=$PATH:$PYTHON_PREFIX/bin echo $PYTHONPATH set -x pyinstaller -F $OPT build/scripts-*/k8s_backend bacula-15.0.3/src/plugins/fd/kubernetes-backend/get_python0000755000175000017500000000222114771010173023362 0ustar bsbuildbsbuild#!/bin/bash # # Copyright (C) 2000-2020 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # # Get some python environment variables if [ $# != 1 ]; then echo "Usage: $0 [PYTHONPATH | PIP | PYTHON | PYTHON_PREFIX ]" exit 1 fi VERSION= for i in 14 13 12 11 10 9 8 7 6 5 4 do if which python3.$i &> /dev/null ; then VERSION=3.$i break fi done if [ "$VERSION" = "" ]; then echo "Unable to find python" exit 1 fi BASEDIR=$HOME/.local if [ $1 = "PYTHONPATH" ]; then RES="$PYTHONPATH:${BASEDIR}/lib64/python${VERSION}/site-packages:${BASEDIR}/lib/python${VERSION}/site-packages:/usr/local/lib64/python${VERSION}/site-packages/:/usr/local/lib/python${VERSION}/site-packages/"; if [ -d $PWD/build/lib.linux-x86_64-${VERSION} ]; then RES="$RES:$PWD/build/lib.linux-x86_64-${VERSION}" fi echo $RES elif [ $1 = "PIP" ]; then if which pip$VERSION &> /dev/null then echo pip$VERSION else echo pip3 fi elif [ $1 = "PYTHON" ]; then echo python$VERSION elif [ $1 = "PYTHON_PREFIX" ]; then echo $BASEDIR else echo "Invalid parameter $1" exit 1 fi bacula-15.0.3/src/plugins/fd/kubernetes-backend/requirements.txt0000644000175000017500000000033614771010173024545 0ustar bsbuildbsbuild# -*- coding: UTF-8 -*- # # Copyright (C) 2000-2020 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # # Author: Radoslaw Korzeniewski # pyyaml >= 6.0 kubernetes >= 26.1.0 urllib3 >= 1.26.18 requests >= 2.27.1 
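# A typical way to install these requirements (assumption: pip3 is available, see ./get_python PIP):
#   pip3 install -r requirements.txt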
bacula-15.0.3/src/plugins/fd/kubernetes-backend/bin/0000755000175000017500000000000014771010173022027 5ustar bsbuildbsbuildbacula-15.0.3/src/plugins/fd/kubernetes-backend/bin/k8s_backend0000644000175000017500000000143214771010173024126 0ustar bsbuildbsbuild#!/usr/bin/env python3 # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. import sys # sys.path.append('@pythondir@') from baculak8s.main import main if __name__ == "__main__": sys.exit(main()) bacula-15.0.3/src/plugins/fd/kubernetes-backend/mkExt.pl0000755000175000017500000000105514771010173022710 0ustar bsbuildbsbuild#!/usr/bin/perl -w use strict; my $file; print " from distutils.core import setup from distutils.extension import Extension from Cython.Build import cythonize extensions = ["; while (my $l = <>) { chomp($l); next if ($l =~ /__init__.py/); $l =~ s:^./::; $file = $l; $l =~ s:/:.:g; $l =~ s:\.py$::; print "Extension('$l', ['$file']),\n"; } print "] setup( name='k8s_backend', scripts=['bin/k8s_backend'], setup_requires=['cython'], ext_modules=cythonize(extensions), ) # find . -name '*.py' | ./mkExt.pl "; bacula-15.0.3/src/plugins/fd/kubernetes-backend/.gitignore0000644000175000017500000000220114771010173023242 0ustar bsbuildbsbuild# Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] *$py.class # C extensions *.so # Distribution / packaging .Python env/ build/ develop-eggs/ dist/ downloads/ eggs/ .eggs/ lib/ lib64/ parts/ sdist/ var/ *.egg-info/ .installed.cfg *.egg # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. *.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .coverage .coverage.* .cache nosetests.xml coverage.xml *,cover .hypothesis/ # Translations *.mo *.pot # Django stuff: *.log local_settings.py # Flask stuff: instance/ .webassets-cache # Scrapy stuff: .scrapy # Sphinx documentation docs/_build/ # PyBuilder target/ # IPython Notebook .ipynb_checkpoints # pyenv .python-version # celery beat schedule file celerybeat-schedule # dotenv .env # virtualenv venv/ ENV/ # Spyder project settings .spyderproject # Rope project settings .ropeproject .idea/ .AppleDouble # dynamically created image version baculak8s/plugins/k8sbackend/baculabackupimage.py bacula-15.0.3/src/plugins/fd/kubernetes-backend/tests/0000755000175000017500000000000014771010173022421 5ustar bsbuildbsbuildbacula-15.0.3/src/plugins/fd/kubernetes-backend/tests/base_tests.py0000644000175000017500000000336114771010173025132 0ustar bsbuildbsbuild# -*- coding: UTF-8 -*- # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. 
# # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. import unittest from tests import TMP_TEST_FILES_ROOT from tests.util.io_test_util import IOTestUtil from tests.util.os_test_util import OsTestUtil class BaseTest(unittest.TestCase, OsTestUtil, IOTestUtil): """ Base Class for all tests """ def setUp(self): self._create_test_folders() def _create_test_folders(self): self.test_files_root = self.create_test_dir(TMP_TEST_FILES_ROOT) def tearDown(self): self.delete_if_dir(self.test_files_root) def assertItemsContainsValueType(self, items, key, val_type): for item in items: self.assertDictContainsKey(item, key, val_type) def assertDictContainsKey(self, item, key, val_type): self.assertIn(key, item) self.assertIsInstance(item[key], val_type) def assertItemsContainsValue(self, items, key, val): for item in items: self.assertEqual(item[key], val) def assertIsSubsetOf(self, subset, dictionary): isSubset = set(subset.items()).issubset(set(dictionary.items())) self.assertTrue(isSubset, "Wasn't a subset!") bacula-15.0.3/src/plugins/fd/kubernetes-backend/tests/util/0000755000175000017500000000000014771010173023376 5ustar bsbuildbsbuildbacula-15.0.3/src/plugins/fd/kubernetes-backend/tests/util/packet_test_util.py0000644000175000017500000000233314771010173027314 0ustar bsbuildbsbuild# Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. from baculaswift.io.packet_definitions import EOD_PACKET class PacketTestUtil(object): def data_packet(self, data): packet = ("D{}\n".format(str(len(data)).zfill(6)).encode()) packet += data return packet def invalid_command_packet(self): return self.command_packet("invalid_command") def command_packet(self, packet_content): packet_content += "\n" packet_header = "C{}\n".format(str(len(packet_content)).zfill(6)) return (packet_header + packet_content).encode() def eod_packet(self): return EOD_PACKET + b"\n" bacula-15.0.3/src/plugins/fd/kubernetes-backend/tests/util/__init__.py0000644000175000017500000000120314771010173025503 0ustar bsbuildbsbuild# Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. 
bacula-15.0.3/src/plugins/fd/kubernetes-backend/tests/util/os_test_util.py0000644000175000017500000000272314771010173026471 0ustar bsbuildbsbuild# Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. import os import shutil class OsTestUtil: """ Class with utility methods for dealing with System Calls """ def create_test_dir(self, dir_name): self.delete_if_dir(dir_name) os.mkdir(dir_name) return dir_name def create_test_file(self, dir, filename, bytes_content): path = os.path.join(dir, filename) file = open(path, 'w+b') file.write(bytes_content) file.close() return path def delete_if_dir(self, path): if os.path.isdir(path): shutil.rmtree(path) def gen_random_bytes(self, bytes_size): return os.urandom(bytes_size) def gen_random_bytes(bytes_size): return os.urandom(bytes_size) def create_byte_chunks(chunk_count, chunk_size): chunk = gen_random_bytes(chunk_size) return [chunk] * chunk_count bacula-15.0.3/src/plugins/fd/kubernetes-backend/tests/util/packet_builders.py0000644000175000017500000003403014771010173027110 0ustar bsbuildbsbuild# Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. 
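# The builders below assemble the byte stream that the tests feed to the backend.
# As a rough sketch of the framing implemented by PacketTestUtil (packet_test_util.py),
# assuming the same zero-padded 6-digit length header:
#   command_packet("hello")  ->  b"C000006\nhello\n"
#   data_packet(b"abc")      ->  b"D000003\nabc"
#   eod_packet()             ->  EOD_PACKET + b"\n"
# BackendCommandBuilder chains these blocks into a full handshake/job-info/params/job-end sequence.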
import json from baculaswift.io.jobs.backup_io import BACKUP_START_PACKET from baculaswift.io.jobs.restore_io import RESTORE_START_PACKET, RESTORE_END_PACKET, TRANSFER_START_PACKET from baculaswift.io.packet_definitions import TERMINATION_PACKET, XATTR_DATA_START, ACL_DATA_START, \ ESTIMATION_START_PACKET from baculaswift.io.services.job_end_io import END_JOB_START_PACKET from baculaswift.io.services.job_info_io import JOB_START_PACKET from baculaswift.io.services.plugin_params_io import PLUGIN_PARAMETERS_START from baculaswift.plugins.plugin import DEFAULT_FILE_MODE, DEFAULT_DIR_MODE from baculaswift.services.job_info_service import TYPE_BACKUP, TYPE_ESTIMATION, TYPE_RESTORE from tests import BACKEND_PLUGIN_USER, BACKEND_PLUGIN_PWD, BACKEND_PLUGIN_URL, BACKEND_PLUGIN_TYPE from tests.util.packet_test_util import PacketTestUtil class HandshakePacketBuilder(PacketTestUtil): def build(self, packet_content="Hello {} 1".format(BACKEND_PLUGIN_TYPE)): return self.command_packet(packet_content) class JobInfoStartPacketBuilder(PacketTestUtil): def build_invalid(self): return self.build("InvalidStartPacket") def build(self, content=JOB_START_PACKET): packet = self.command_packet(content) return packet class JobInfoBlockBuilder(PacketTestUtil): def with_invalid_job_type(self): return self.build("InvalidJobType") def build(self, job_type, where=None, since=None, replace=None): packet = JobInfoStartPacketBuilder().build() packet += self.command_packet("Name=az_14125_bcjn") packet += self.command_packet("JobID=334") packet += self.command_packet("Type={}".format(job_type.upper())) if where is not None: packet += self.command_packet("Where={}".format(where)) if since is not None: packet += self.command_packet("Since={}".format(since)) if replace is not None: packet += self.command_packet("Replace={}".format(replace)) packet += self.eod_packet() return packet def without_job_name(self): packet = JobInfoStartPacketBuilder().build() packet += self.command_packet("JobID=334") packet += self.command_packet("Type=E") packet += self.eod_packet() return packet def without_job_id(self): packet = JobInfoStartPacketBuilder().build() packet += self.command_packet("Name=az_14125_bcjn") packet += self.command_packet("Type=E") packet += self.eod_packet() return packet def without_job_type(self): packet = JobInfoStartPacketBuilder().build() packet += self.command_packet("Name=az_14125_bcjn") packet += self.command_packet("JobID=334") packet += self.eod_packet() return packet class PluginParamsStartPacketBuilder(PacketTestUtil): def build_invalid(self): return self.build(content="InvalidStartPacket") def build(self, content=PLUGIN_PARAMETERS_START): packet = self.command_packet(content) return packet class PluginParamsBlockBuilder(PacketTestUtil): def build(self, includes=None, regex_includes=None, excludes=None, regex_excludes=None, segment_size=None, restore_local_path=None, password=True, passfile=None): packet = PluginParamsStartPacketBuilder().build() packet += self.command_packet("User={}".format(BACKEND_PLUGIN_USER)) packet += self.command_packet("URL={}".format(BACKEND_PLUGIN_URL)) if password: packet += self.command_packet("Password={}".format(BACKEND_PLUGIN_PWD)) if includes is not None: for include in includes: packet += self.command_packet("include={}".format(include)) if regex_includes is not None: for regex_include in regex_includes: packet += self.command_packet("regex_include={}".format(regex_include)) if excludes is not None: for exclude in excludes: packet += 
self.command_packet("exclude={}".format(exclude)) if regex_excludes is not None: for regex_exclude in regex_excludes: packet += self.command_packet("regex_exclude={}".format(regex_exclude)) if segment_size is not None: packet += self.command_packet("be_object_segment_size={}".format(segment_size)) if restore_local_path is not None: packet += self.command_packet("restore_local_path={}".format(restore_local_path)) if passfile is not None: packet += self.command_packet("passfile={}".format(passfile)) packet += self.command_packet("debug=1") packet += self.eod_packet() return packet def without_url(self): packet = PluginParamsStartPacketBuilder().build() packet += self.command_packet("User={}".format(BACKEND_PLUGIN_USER)) packet += self.command_packet("Password={}".format(BACKEND_PLUGIN_PWD)) packet += self.eod_packet() return packet def without_user(self): packet = PluginParamsStartPacketBuilder().build() packet += self.command_packet("Password={}".format(BACKEND_PLUGIN_PWD)) packet += self.command_packet("URL={}".format(BACKEND_PLUGIN_URL)) packet += self.eod_packet() return packet def without_pwd(self): packet = PluginParamsStartPacketBuilder().build() packet += self.command_packet("User={}".format(BACKEND_PLUGIN_USER)) packet += self.command_packet("URL={}".format(BACKEND_PLUGIN_URL)) packet += self.eod_packet() return packet class JobEndPacketBuilder(PacketTestUtil): def build(self): packet = self.command_packet(END_JOB_START_PACKET) packet += TERMINATION_PACKET return packet def build_invalid(self): packet = self.invalid_command_packet() packet += TERMINATION_PACKET return packet class BackupCommandBuilder(PacketTestUtil): def build(self): packet = self.command_packet(BACKUP_START_PACKET) return packet class FilesEstimationCommandBuilder(PacketTestUtil): def build(self): packet = self.command_packet(ESTIMATION_START_PACKET) return packet class RestoreFileCommandBuilder(PacketTestUtil): def with_invalid_file_transfer_start(self, buckets): return self.build(buckets, invalid_file_transfer_start=True) def with_invalid_xattrs_transfer_start(self, buckets): return self.build(buckets, invalid_xattrs_transfer_start=True) def with_invalid_acl_transfer_start(self, buckets): return self.build(buckets, invalid_acl_transfer_start=True) def build(self, buckets, with_start_packet=True, invalid_file_transfer_start=False, invalid_xattrs_transfer_start=False, invalid_acl_transfer_start=False, fname_without_fsource=False, where=None, ): packet = b'' if with_start_packet: packet += self.command_packet(RESTORE_START_PACKET) for bucket in buckets: packet += self.__create_files_packets(bucket, invalid_file_transfer_start, invalid_xattrs_transfer_start, where=where, fname_without_fsource=fname_without_fsource) packet += self.__create_bucket_packets(bucket, invalid_acl_transfer_start) packet += self.command_packet(RESTORE_END_PACKET) return packet def __create_files_packets(self, bucket, invalid_file_transfer_start=False, invalid_xattrs_transfer_start=False, where=False, fname_without_fsource=False): packet = b'' for file in bucket['files']: packet += self.file_info(bucket['name'], file, where=where, fname_without_fsource=fname_without_fsource) if "data_packets" not in file: file["data_packets"] = True if file["data_packets"]: if file['size'] > 0: if invalid_file_transfer_start: packet += self.invalid_command_packet() else: packet += self.command_packet(TRANSFER_START_PACKET) if 'content' in file: packet += self.file_contents(file['content']) packet += self.file_acl(False) packet += 
self.file_xattrs(invalid_xattrs_transfer_start) return packet def file_info(self, bucket, file, where=None, fname_without_fsource=False): if fname_without_fsource: file_source = "" else: file_source = "@{}".format(BACKEND_PLUGIN_TYPE) if where: packet = self.command_packet('FNAME:{}/{}/{}/{}'.format(where, file_source, bucket, file['name'])) else: packet = self.command_packet('FNAME:{}/{}/{}'.format(file_source, bucket, file['name'])) packet += self.command_packet('STAT:F {} 0 0 {} 1 473'.format(file['size'], DEFAULT_FILE_MODE)) if "modified-at" not in file: file["modified-at"] = 2222222222 packet += self.command_packet('TSTAMP:1111111111 {} 3333333333'.format(file["modified-at"])) packet += self.eod_packet() return packet def file_contents(self, contents): packet = b'' for chunk in contents: packet += self.data_packet(chunk) packet += self.eod_packet() return packet def file_xattrs(self, invalid_xattrs_transfer_start): if invalid_xattrs_transfer_start: packet = self.invalid_command_packet() else: packet = self.command_packet(XATTR_DATA_START) x_attrs = { 'content-type': "app/pdf", 'content-encoding': "ascii", 'content-disposition': "download", 'x-delete-at': "1999919048", 'x-object-meta-custom1': "custom_meta1", 'x-object-meta-custom2': "custom_meta2", } x_attrs_bytes = json.dumps(x_attrs).encode() packet += self.data_packet(x_attrs_bytes) packet += self.eod_packet() return packet def file_acl(self, invalid_acl_transfer_start): # Swift does not have File Acl if BACKEND_PLUGIN_TYPE == "swift": return b'' if invalid_acl_transfer_start: packet = self.invalid_command_packet() else: packet = self.command_packet(ACL_DATA_START) acl = { 'read': "user1, user2", 'write': "user3" } acl_bytes = json.dumps(acl).encode() packet += self.data_packet(acl_bytes) packet += self.eod_packet() return packet def __create_bucket_packets(self, bucket, invalid_acl_transfer_start=False): packet = self.bucket_info(bucket) if "data_packets" not in bucket: bucket["data_packets"] = True if bucket["data_packets"]: packet += self.bucket_acl(invalid_acl_transfer_start) packet += self.bucket_xattrs() return packet def bucket_info(self, bucket): packet = self.command_packet('FNAME:@{}/{}/'.format(BACKEND_PLUGIN_TYPE, bucket['name'])) if "modified-at" not in bucket: bucket["modified-at"] = 2222222222 packet += self.command_packet('STAT:D 12345 0 0 {} 1 473'.format(DEFAULT_DIR_MODE)) packet += self.command_packet('TSTAMP:1111111111 {} 3333333333'.format(bucket["modified-at"])) packet += self.eod_packet() return packet def bucket_xattrs(self): packet = self.command_packet(XATTR_DATA_START) xattrs = { 'x-container-meta-quota-bytes': "1000000", 'x-container-meta-quota-count': "100", 'x-container-meta-web-directory-type': "text/directory", 'x-container-meta-custom1': "custom_meta1", 'x-container-meta-custom2': "custom_meta2", } x_attrs_bytes = json.dumps(xattrs).encode() packet += self.data_packet(x_attrs_bytes) packet += self.eod_packet() return packet def bucket_acl(self, invalid_acl_transfer_start): if invalid_acl_transfer_start: packet = self.invalid_command_packet() else: packet = self.command_packet(ACL_DATA_START) acl = { 'read': "user1, user2", 'write': "user3" } acl_bytes = json.dumps(acl).encode() packet += self.data_packet(acl_bytes) packet += self.eod_packet() return packet class BackendCommandBuilder(PacketTestUtil): def build(self, job_type, buckets=None, where=None, includes=None, segment_size=None, restore_local_path=None): packet = HandshakePacketBuilder().build() packet += JobInfoBlockBuilder().build(job_type, 
where=where) packet += PluginParamsBlockBuilder().build(includes=includes, segment_size=segment_size, restore_local_path=restore_local_path) if job_type == TYPE_BACKUP: packet += BackupCommandBuilder().build() elif job_type == TYPE_ESTIMATION: packet += FilesEstimationCommandBuilder().build() elif job_type == TYPE_RESTORE: packet += RestoreFileCommandBuilder().build(buckets) else: raise ValueError("Invalid job_type!") packet += JobEndPacketBuilder().build() return packet bacula-15.0.3/src/plugins/fd/kubernetes-backend/tests/util/io_test_util.py0000644000175000017500000001004214771010173026450 0ustar bsbuildbsbuild# Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. import io import sys from baculaswift.io.packet_definitions import EOD_PACKET, STATUS_COMMAND, STATUS_DATA, STATUS_ABORT, STATUS_ERROR, \ STATUS_WARNING class IOTestUtil(object): """ Class with utility methods for dealing with IO """ def stub_bytestream_stdin(testcase_inst, input): stdin = sys.stdin def cleanup(): sys.stdin = stdin wrapper = io.TextIOWrapper( io.BytesIO(input), newline='\n' ) testcase_inst.addCleanup(cleanup) sys.stdin = wrapper def stub_stdout(testcase_inst): wrapper = io.TextIOWrapper( io.BytesIO(), newline='\n' ) saved_stdout = sys.stdout stub_stdout = wrapper sys.stdout = stub_stdout def cleanup(): stub_stdout.close() sys.stdout = saved_stdout testcase_inst.addCleanup(cleanup) return stub_stdout def verify_command_packet(self, output, packet): self.verify_packet_existence(output, STATUS_COMMAND, packet.encode()) def verify_not_command_packet(self, output, packet): self.verify_not_packet_existence(output, STATUS_COMMAND, packet.encode()) def verify_data_packet(self, output, packet): packet_length = str(len(packet)).zfill(6).encode() packet_header = STATUS_DATA.encode() + packet_length + b'\n' packet = packet self.assertIn(packet_header + packet, output) def verify_abort_packet(self, output, packet): self.verify_packet_existence(output, STATUS_ABORT, packet.encode()) def verify_error_packet(self, output, packet): self.verify_packet_existence(output, STATUS_ERROR, packet.encode()) def verify_warning_packet(self, output, packet): self.verify_packet_existence(output, STATUS_WARNING, packet.encode()) def verify_no_aborts(self, output): self.assertNotIn(b'A00000', output) self.assertNotIn(b'A0000', output) self.assertNotIn(b'A000', output) def verify_no_errors(self, output): self.assertNotIn(b'E00000', output) self.assertNotIn(b'E0000', output) self.assertNotIn(b'E000', output) def verify_no_warnings(self, output): self.assertNotIn(b'W00000', output) self.assertNotIn(b'W0000', output) self.assertNotIn(b'W000', output) def verify_eod_packet(self, output): self.assertIn(EOD_PACKET, output) def verify_eod_packet_count(self, output, expected_count): self.verify_packet_count(output, EOD_PACKET, expected_count) def verify_str_count(self, output, packet, expected_count): real_count = output.count(packet.encode()) self.assertEquals(expected_count, real_count) def 
verify_packet_count(self, output, packet, expected_count): real_count = output.count(packet) self.assertEquals(expected_count, real_count) def verify_packet_existence(self, output, status, packet): packet_length = str(len(packet) + 1).zfill(6).encode() packet_header = status.encode() + packet_length + b'\n' packet = packet self.assertIn(packet_header + packet, output) def verify_not_packet_existence(self, output, status, packet): packet_length = str(len(packet) + 1).zfill(6).encode() packet_header = status.encode() + packet_length + b'\n' packet = packet self.assertNotIn(packet_header + packet, output) bacula-15.0.3/src/plugins/fd/kubernetes-backend/tests/test_stress/0000755000175000017500000000000014771010173025003 5ustar bsbuildbsbuildbacula-15.0.3/src/plugins/fd/kubernetes-backend/tests/test_stress/__init__.py0000644000175000017500000000120314771010173027110 0ustar bsbuildbsbuild# Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. bacula-15.0.3/src/plugins/fd/kubernetes-backend/tests/test_stress/test_stress_restore.py0000644000175000017500000000503214771010173031502 0ustar bsbuildbsbuild# Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. 
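#
# Note on the stress-restore cases below: each test streams roughly 100MB of
# generated data to the backend as `chunks * chunk_size` bytes (1600 chunks of
# 65553 bytes, or 105 chunks of 999999 bytes) wrapped in a TYPE_RESTORE command
# packet, then asks Swift for the uploaded object's content-length and asserts
# that it equals the generated total, i.e. that no bytes were lost or duplicated
# on the way through the backend. A minimal sketch of the shared pattern (names
# taken from the test bodies that follow):
#
#   total_size = chunks * chunk_size
#   buckets = [{"name": "bucket_1",
#               "files": [{"name": "file1.txt", "size": total_size,
#                          "content": create_byte_chunks(chunks, chunk_size)}]}]
#   packet = BackendCommandBuilder().build(TYPE_RESTORE, buckets,
#                                          segment_size=8000 * MEGABYTE)
#   output = self.execute_plugin(packet)
#   self.verify_no_aborts(output)
#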
import time from baculaswift.plugins.plugin import MEGABYTE from baculaswift.services.job_info_service import TYPE_RESTORE from tests.test_baculak8s.test_system.system_test import SystemTest from tests.util.os_test_util import create_byte_chunks from tests.util.packet_builders import BackendCommandBuilder class StressRestoreTest(SystemTest): def test_restore_100M_file_regular_upload_65K_data_packets(self): total_size = 1600 * 65553 buckets = [{ "name": "bucket_1", "files": [ { "name": "file1.txt", "size": total_size, "content": create_byte_chunks(1600, 65553) }, ] }] packet = BackendCommandBuilder().build(TYPE_RESTORE, buckets, segment_size=8000 * MEGABYTE) start = time.perf_counter() output = self.execute_plugin(packet) end = time.perf_counter() elapsed = (end - start) self.verify_no_aborts(output) resp = self.swift_connection.head_object("bucket_1", "file1.txt") self.assertEqual(total_size, int(resp["content-length"])) def test_restore_100M_file_regular_upload_999K_data_packets(self): total_size = 105 * 999999 buckets = [{ "name": "bucket_1", "files": [ { "name": "file1.txt", "size": total_size, "content": create_byte_chunks(105, 999999) }, ] }] packet = BackendCommandBuilder().build(TYPE_RESTORE, buckets, segment_size=8000 * MEGABYTE) start = time.perf_counter() output = self.execute_plugin(packet) end = time.perf_counter() elapsed = (end - start) self.verify_no_aborts(output) resp = self.swift_connection.head_object("bucket_1", "file1.txt") self.assertEqual(total_size, int(resp["content-length"])) bacula-15.0.3/src/plugins/fd/kubernetes-backend/tests/test_stress/test_stress_backup.py0000644000175000017500000000535414771010173031273 0ustar bsbuildbsbuild# Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. 
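#
# The backup stress cases in this module are currently commented out and kept
# only as a reference for the pending refactor (see the TODO below). Each case
# follows the same pattern: create a container holding a single random object
# of 5M/50M/100M/500M bytes, run the backend with a backup command for that
# file, and check that the reported "file" and "content-length" parameters
# match the generated data.
#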
from tests.test_baculak8s.test_system.system_test import SystemTest class StressBackupTest(SystemTest): # TODO Refactor pass # def test_backup_5M_file(self): # container = "cats" # filename = "cats1.txt" # file_content = self.gen_random_bytes(5 * 1024 * 1024) # self.create_test_data(container, file_content, filename) # command = BackupFileCommandBuilder.build(container, filename) # output = self.execute_plugin(command) # self.verify_param_existence(output, "file", filename) # self.verify_param_existence(output, "content-length", len(file_content)) # # def test_backup_50M_file(self): # container = "cats" # filename = "cats1.txt" # file_content = self.gen_random_bytes(50 * 1024 * 1024) # self.create_test_data(container, file_content, filename) # command = BackupFileCommandBuilder.build(container, filename) # output = self.execute_plugin(command) # self.verify_param_existence(output, "file", filename) # self.verify_param_existence(output, "content-length", len(file_content)) # # def test_backup_100M_file(self): # container = "cats" # filename = "cats1.txt" # file_content = self.gen_random_bytes(100 * 1024 * 1024) # self.create_test_data(container, file_content, filename) # command = BackupFileCommandBuilder.build(container, filename) # output = self.execute_plugin(command) # self.verify_param_existence(output, "file", filename) # self.verify_param_existence(output, "content-length", len(file_content)) # # def test_backup_500M_file(self): # container = "cats" # filename = "cats1.txt" # file_content = self.gen_random_bytes(500 * 1024 * 1024) # self.create_test_data(container, file_content, filename) # command = BackupFileCommandBuilder.build(container, filename) # output = self.execute_plugin(command) # self.verify_param_existence(output, "file", filename) # self.verify_param_existence(output, "content-length", len(file_content)) bacula-15.0.3/src/plugins/fd/kubernetes-backend/tests/__init__.py0000644000175000017500000000203214771010173024527 0ustar bsbuildbsbuild# Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. import os from os import environ TESTS_ROOT = os.path.abspath(os.path.dirname(__file__)) TMP_TEST_FILES_ROOT = os.path.join(TESTS_ROOT, "tmp") BACKEND_PLUGIN_TYPE = environ.get('BE_PLUGIN_TYPE') BACKEND_PLUGIN_VERSION = environ.get('BE_PLUGIN_VERSION') BACKEND_PLUGIN_URL = environ.get('BE_PLUGIN_URL') BACKEND_PLUGIN_USER = environ.get('BE_PLUGIN_USER') BACKEND_PLUGIN_PWD = environ.get('BE_PLUGIN_PWD') bacula-15.0.3/src/plugins/fd/kubernetes-backend/tests/k8s_tests.py0000644000175000017500000000776414771010173024740 0ustar bsbuildbsbuild# -*- coding: UTF-8 -*- # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. 
# # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. from baculak8s.util.lambda_util import apply from tests.base_tests import BaseTest from tests.util.os_test_util import gen_random_bytes class K8STest(BaseTest): """ Class with utility methods for dealing with K8S """ def setUp(self): super().setUp() self.config_test_k8sclient() def tearDown(self): super().tearDown() self.delete_all_containers() def config_test_k8sclient(self): pass def delete_all_containers(self): pass def create_test_data(self, container, file_content, filename): pass def create_container_new(self, buckets): pass def upload_file(self, container_name, filepath, filename): with open(filepath + "/" + filename, 'rb') as local: self.swift_connection.put_object( container_name, filename, contents=local, content_type='text/plain' ) def upload_files(self, container, files, file_chunk_count=0, file_chunk_size=0): pass def create_containers(self, amount): containers = [] for i in range(0, amount): container_name = "container_%d" % i self.swift_connection.put_container(container_name) containers.append(container_name) return containers def put_containers_on_swift(self, containers): for container in containers: self.swift_connection.put_container(container) def upload_test_objects(self, container, path, filenames): for filename in filenames: self.upload_file(container, path, filename) def verify_containers(self, buckets, where=None): if where is not None: splitted = where.split("/") splitted = list(filter(None, splitted)) header, uploaded_files = self.swift_connection.get_container(splitted[0]) uploaded_names = apply(lambda f: f['name'], uploaded_files) uploaded_bytes = apply(lambda f: f['bytes'], uploaded_files) for bucket in buckets: expected_files = bucket['files'] for file in expected_files: expected_name = file["name"] if len(splitted) > 1: path = "/".join(splitted[1:]) expected_name = "{}/{}".format(path, expected_name) self.assertIn(expected_name, uploaded_names) self.assertIn(file['size'], uploaded_bytes) else: for bucket in buckets: expected_files = bucket['files'] header, uploaded_files = self.swift_connection.get_container(bucket['name']) uploaded_names = apply(lambda f: f['name'], uploaded_files) uploaded_bytes = apply(lambda f: f['bytes'], uploaded_files) for file in expected_files: self.assertIn(file['name'], uploaded_names) self.assertIn(file['size'], uploaded_bytes) self.assertIn('x-container-meta-custom1', header) class FileLikeObject(object): def __init__(self, file_chunks): self.file_chunks = file_chunks def read(self, size=-1): if not self.file_chunks: return None return self.file_chunks.pop() def next_chunk(self, chunk_size): pass bacula-15.0.3/src/plugins/fd/kubernetes-backend/tests/README0000644000175000017500000000435614771010173023311 0ustar bsbuildbsbuild--- Test Execution In order to properly execute all the tests, some Environment Variables must be created: BE_PLUGIN_TYPE (Specifies which Plugin should be tested) BE_PLUGIN_VERSION (Specifies which Plugin Version should be tested) BE_PLUGIN_URL (Specifies the URL where the Plugins Data Source exists) BE_PLUGIN_USER (Specifies the username that should be used by the Plugin) BE_PLUGIN_PWD (Specifies the password that 
should be used by the Plugin) Example: export BE_PLUGIN_TYPE=swift export BE_PLUGIN_VERSION=1 export BE_PLUGIN_URL=http://192.168.0.5:8080 export BE_PLUGIN_USER=test:tester export BE_PLUGIN_PWD=testing To run the tests (from the projects root folder): python3 -m unittest discover tests/test_baculaswift For more information: https://docs.openstack.org/swift/latest/development_saio.html -- Manual Testing It is possible that Swift containers and objects might be needed to be created in order to do manual testing. In this case, the python-swiftclient should be accessed directly. The python-swiftclient (called "swift") tool is a command line utility for communicating with an OpenStack Object Storage (swift) environment. It allows one to perform several types of operations. In order for properly use this tool (with legacy authenticaton), four Environment Variables must be created ST_AUTH_VERSION ST_AUTH ST_USER ST_KEY Those values could be, for example (if you are using SAIO - Swift All in One): export ST_AUTH_VERSION=1.0 export ST_AUTH=http://192.168.0.5:8080/auth/v1.0 export ST_USER=test:tester export ST_KEY=testing The complete list of operations that this tool can perform is shown in this link: https://docs.openstack.org/python-swiftclient/latest/cli/index.html#examples -- Swift ACL On Swift, only Accounts and Containers (buckets) have Access Control Lists. In order to create ACLs on Containers with the "swift" command-line tool, please check this link: https://www.swiftstack.com/docs/cookbooks/swift_usage/container_acl.html -- Swift XATTRS On Swift, both Containers (buckets) and Objects (files) may have Extended Attributes. In order to create XATTRs on both with the "swift" command-line tool, please check this link: https://docs.openstack.org/python-swiftclient/latest/cli/index.html#swift-post bacula-15.0.3/src/plugins/fd/kubernetes-backend/tests/test_exploratory/0000755000175000017500000000000014771010173026050 5ustar bsbuildbsbuildbacula-15.0.3/src/plugins/fd/kubernetes-backend/tests/test_exploratory/test_account.py0000644000175000017500000000215314771010173031116 0ustar bsbuildbsbuild# Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. import unittest from tests.base_tests import BaseTest class AccountTest(BaseTest): def test_should_retrieve_account_stats(self): stats = self.swift_service.stat() self.assertEqual(True, stats['success']) def test_should_list_account_containers_metadata(self): containers_meta = self.swift_service.list() for metadata in containers_meta: self.assertEqual(True, metadata['success']) if __name__ == '__main__': unittest.main()bacula-15.0.3/src/plugins/fd/kubernetes-backend/tests/test_exploratory/test_auth.py0000644000175000017500000000161114771010173030421 0ustar bsbuildbsbuild# Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. 
# # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. import unittest from tests.base_tests import BaseTest class AuthTest(BaseTest): def test_should_do_legacy_authentication(self): self.assertEqual(True, self.swift_service.capabilities()['success']) if __name__ == '__main__': unittest.main()bacula-15.0.3/src/plugins/fd/kubernetes-backend/tests/test_exploratory/__init__.py0000644000175000017500000000120314771010173030155 0ustar bsbuildbsbuild# Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. bacula-15.0.3/src/plugins/fd/kubernetes-backend/tests/test_exploratory/test_object.py0000644000175000017500000000332314771010173030730 0ustar bsbuildbsbuild# Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. import unittest from io import RawIOBase from swiftclient.service import SwiftUploadObject from tests.base_tests import BaseTest class ObjectTest(BaseTest): def tearDown(self): self.delete_all_containers() def test_should_upload_object(self): container = "cats" filename = "cats1.txt" file_like = FileLikeObject([self.gen_random_bytes(65553) for i in range(50)]) obj = SwiftUploadObject(file_like, filename) self.swift_connection.put_container(container) r_gen = self.swift_service.upload(container, [obj]) for r in r_gen: pass c_data = self.swift_connection.get_container(container) print(c_data) class FileLikeObject(RawIOBase): def __init__(self, file_chunks, *args, **kwargs): super().__init__(*args, **kwargs) self.file_chunks = file_chunks def read(self, size=-1): if not self.file_chunks: return None return self.file_chunks.pop() def next_chunk(self, chunk_size): pass if __name__ == '__main__': unittest.main() bacula-15.0.3/src/plugins/fd/kubernetes-backend/tests/test_exploratory/test_container.py0000644000175000017500000000604014771010173031443 0ustar bsbuildbsbuild# Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. 
# # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. import unittest from time import sleep from swiftclient.service import SwiftUploadObject from tests.base_tests import BaseTest class ContainerTest(BaseTest): def test_should_create_container(self): container_name = "cats" self.swift_connection.put_container(container_name) containers = self.swift_connection.get_account()[1] self.assertEqual(len(containers), 1) def test_should_list_containers(self): self.__create_containers(5) response = self.swift_service.list() for page in response: print(len(page["listing"])) print(page["listing"]) def __create_containers(self, amount): for i in range(0, amount): self.swift_connection.put_container("container={}".format(i)) def test_should_list_container_metadata(self): container_name = "cats" self.swift_connection.put_container(container_name) stats = self.swift_service.stat(container=container_name) print(stats['headers']) sleep(4) self.swift_connection.put_object(container_name, "cat1.txt", "") stats = self.swift_service.stat(container=container_name) print(stats['headers']) def test_should_list_container_objects(self): container_name = "WORLD" files = ["AMERICA/NORTH/EUA/eua.txt", "AMERICA/NORTH/CANADA/canada.txt", "AMERICA/SOUTH/BRAZIL/brazil.txt"] fc1 = FileLikeObject([self.gen_random_bytes(65553) for i in range(1)]) obj1 = SwiftUploadObject(fc1, files[0]) fc2 = FileLikeObject([self.gen_random_bytes(65553) for i in range(1)]) obj2 = SwiftUploadObject(fc2, files[1]) fc3 = FileLikeObject([self.gen_random_bytes(65553) for i in range(1)]) obj3 = SwiftUploadObject(fc2, files[2]) r_gen = self.swift_service.upload(container_name, [obj1, obj2, obj3]) for r in r_gen: pass self.swift_connection.put_container(container_name) list_gen = self.swift_connection.get_container(container=container_name) print(list_gen) class FileLikeObject(object): def __init__(self, file_chunks): self.file_chunks = file_chunks def read(self, size=-1): if not self.file_chunks: return None return self.file_chunks.pop() def next_chunk(self, chunk_size): pass if __name__ == '__main__': unittest.main() bacula-15.0.3/src/plugins/fd/kubernetes-backend/setup.py0000644000175000017500000000240514771010173022772 0ustar bsbuildbsbuild#!/usr/bin/env python3 # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. 
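#
# Installation sketch (assuming a standard setuptools/pip environment and write
# access to /opt/bacula/bin for the data_files entry declared below; adjust to
# your packaging workflow):
#
#   python3 -m pip install .
#
# This installs the baculak8s package and copies bin/k8s_backend into
# /opt/bacula/bin.
#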
import sys from setuptools import setup, find_packages if sys.version_info < (3, 0): sys.exit('This version of the Backend supports only Python 3 or above') setup( name='baculak8s', version='2.3.0', author='Radoslaw Korzeniewski, Francisco Manuel Garcia Botella', author_email='radekk@korzeniewski.net, francisco.garcia@baculasystems.com', packages=find_packages(exclude=('tests', 'tests.*', 'docker')), # packages=packages, license="Bacula® - The Network Backup Solution", data_files=[ ('/opt/bacula/bin', ['bin/k8s_backend']) ], # scripts=['bin/k8s_backend'] ) bacula-15.0.3/src/plugins/fd/kubernetes-backend/Project.txt0000644000175000017500000001131614771010173023430 0ustar bsbuildbsbuildRD-Project-0001 Check List 1.0 ------------------------------- This checklist must be completed and stored in the SVN directory of the project. When an item is not relevant, N/A can be specified. When an item is not done, the line can stay empty. A percentage can also be specified when the item is not completely done. Yes/No/Done can be used for Boolean answers. A copy should be sent to the Project and the R&D manager. Project Name....: Swift Plugin Version.........: 1.0 Authors.........: Henrique Medrado de Faria Completion%.....: 100% ---------------------------------------------------------------- --- Project Description Short Description: This project intents to create a Plugin for Backup and Restore of OpenStack Swift Storage objects, which is accessible via a REST API. It is used by Bacula C++ Plugin, and both comunicate through an ASCII protocol. Beta Testers....: Alpha Date......: Beta Date.......: Target Date.....: Release Date....: ---------------------------------------------------------------- --- Code -- What is the associated intranet web page name? -- Where is the associated SVN project directory? -- Where is the code (git, path, ...)? bsweb:swift -- What is the git branch name? master -- How to compile the code? The code is interpreted -- What are the command line options? At the moment, there are no command line options -- What are the tested platforms? -- What are the supported platforms? -- Who did the code review? Alain Spineux ---------------------------------------------------------------- --- Dependencies What is needed to run the code (dependencies)? What is the procedure to install dependencies? Is the dependency installation procedure implemented in a depkgs-xxx Makefile? Are the dependencies stored in bsweb:/home/src/depkgs as a depkgs-xxx file ? How to configure dependencies? What is the license for each dependency? Is it compatible with BEE license? Was an email sent to all developers with the documentation to install new dependencies? Can Bacula compile without the new dependencies? Yes. Should we update the configure.in for new libraries? ---------------------------------------------------------------- -- Coding Style Are all structures properly documented? Yes. Are all functions properly documented? Yes. Are all advanced algorithms documented? Yes. Is the copyright correct in all files? Yes. ---------------------------------------------------------------- -- Regression Testing -- What are the names of the regress tests that can be used? 
All tests are store inside the "tests" folder Unit Tests: tests/bacula_swift/test_io tests/bacula_swift/test_jobs tests/bacula_swift/test_services Integration Tests: tests/bacula_swift/test_plugins (Tests the integration with the Plugin Data Source, such as Swift Storage) System Tests: tests/bacula_swift/test_system (Tests the code as if it were used by the Bacula C++ Plugin) Exploratory Tests: Auxiliary Tests for developers to study and understand Data Sources Stress Tests: Tests to verify performance requirements -- What are the options or variables that can be used to configure the tests? In order to properly configure the tests, some Environment Variables must be created: BE_PLUGIN_TYPE (Specifies which Plugin should be tested) BE_PLUGIN_VERSION (Specifies which Plugin Version should be tested) BE_PLUGIN_URL (Specifies the URL where the Plugins Data Source exists) BE_PLUGIN_USER (Specifies the username that should be used by the Plugin) BE_PLUGIN_PWD (Specifies the password that should be used by the Plugin) Example: export BE_PLUGIN_TYPE=swift export BE_PLUGIN_VERSION=1 export BE_PLUGIN_URL=http://192.168.0.5:8080 export BE_PLUGIN_USER=test:tester export BE_PLUGIN_PWD=testing -- Have we some unit test procedures? How to run them? To run the tests (from the projects root folder): $ python3 -m unittest discover tests/test_baculaswift -- Who ran the regression tests? ---------------------------------------------------------------- --- Documentation Where is the documentation? Are the following subjects described in the documentation: 1- General overview 2- Installation of the program 3- Configuration of the program 4- Limitations ---------------------------------------------------------------- --- Packaging Do we have a RPM spec file? No Do we have Debian debhelper files? No Do we have a Windows installer? No Will the spec or the debhelper scripts abort if dependencies are not found? ---------------------------------------------------------------- --- Support What is the Mantis category for bug reports? Who is the Support expert for the project? Henrique Medrado (hfaria020@gmail.com) bacula-15.0.3/src/plugins/fd/kubernetes-backend/bacula-backup.yaml0000644000175000017500000000145114771010173024636 0ustar bsbuildbsbuildapiVersion: v1 kind: Pod metadata: name: {podname} namespace: {namespace} labels: app: baculabackup spec: hostname: {podname} {nodenameparam} containers: - name: {podname} resources: limits: cpu: "1" memory: "64Mi" requests: cpu: "100m" memory: "16Mi" image: {image} env: - name: PLUGINMODE value: "{mode}" - name: PLUGINHOST value: "{host}" - name: PLUGINPORT value: "{port}" - name: PLUGINTOKEN value: "{token}" - name: PLUGINJOB value: "{job}" imagePullPolicy: {imagepullpolicy} volumeMounts: - name: {podname}-storage mountPath: /{mode} restartPolicy: Never volumes: - name: {podname}-storage persistentVolumeClaim: claimName: {pvcname} bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/0000755000175000017500000000000014771010173023134 5ustar bsbuildbsbuildbacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/jobs/0000755000175000017500000000000014771010173024071 5ustar bsbuildbsbuildbacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/jobs/job_pod_bacula.py0000644000175000017500000005030014771010173027364 0ustar bsbuildbsbuild# # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. 
# # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. # # Copyright (c) 2019 by Inteos sp. z o.o. # All rights reserved. IP transfered to Bacula Systems according to agreement. # Author: Radosław Korzeniewski, radekk@inteos.pl, Inteos Sp. z o.o. # import logging import time from abc import ABCMeta import yaml from baculak8s.jobs.job import Job from baculak8s.plugins.k8sbackend.baculabackup import (BACULABACKUPIMAGE, ImagePullPolicy, prepare_backup_pod_yaml, get_backup_pod_name) from baculak8s.plugins.k8sbackend.pvcclone import prepare_backup_clone_yaml, find_bacula_pvc_clones_from_old_job from baculak8s.plugins.k8sbackend.baculaannotations import BaculaBackupMode from baculak8s.util.respbody import parse_json_descr from baculak8s.util.sslserver import DEFAULTTIMEOUT, ConnectionServer from baculak8s.util.token import generate_token DEFAULTRECVBUFFERSIZE = 64 * 1048 PLUGINHOST_NONE_ERR = "PLUGINHOST parameter is missing and cannot be autodetected. " \ "Cannot continue with pvcdata backup!" POD_EXECUTION_ERR = "Cannot successfully start bacula-backup pod in expected time!" POD_REMOVE_ERR = "Unable to remove proxy Pod {podname}! Other operations with proxy Pod will fail!" POD_EXIST_ERR = "Job already running in '{namespace}' namespace. Check logs or delete {podname} Pod manually." TAR_STDERR_UNKNOWN = "Unknown error. You should check Pod logs for possible explanation." PLUGINPORT_VALUE_ERR = "Cannot use provided pluginport={port} option. Used default!" FDPORT_VALUE_ERR = "Cannot use provided fdport={port} option. Used default!" POD_YAML_PREPARED_INFO = "Prepare bacula-backup Pod with: {image} <{pullpolicy}> {pluginhost}:{pluginport}" POD_YAML_PREPARED_INFO_NODE = "Prepare Bacula Pod on: {nodename} with: {image} <{pullpolicy}> {pluginhost}:{pluginport}" CANNOT_CREATE_BACKUP_POD_ERR = "Cannot create backup pod. Err={}" CANNOT_REMOVE_BACKUP_POD_ERR = "Cannot remove backup pod. Err={}" PVCDATA_GET_ERROR = "Cannot get PVC Data object Err={}" PVCCLONE_YAML_PREPARED_INFO = "Prepare clone: {namespace}/{snapname} storage: {storage} capacity: {capacity}" CANNOT_CREATE_PVC_CLONE_ERR = "Cannot create PVC snapshot. Err={}" CANNOT_CREATE_VSNAPSHOT_ERR = "Cannot create Volume Snapshot. Err={}" CANNOT_CREATE_PVC_SNAPSHOT_ERR = "Cannot create PVC from Volume Snapshot. Err={}" CANNOT_REMOVE_PVC_CLONE_ERR = "Cannot remove PVC snapshot. Err={}" CANNOT_REMOVE_VSNAPSHOT_ERR = "Unable to remove volume snapshot {vsnapshot}! Please you must remove it manually." CANNOT_START_CONNECTIONSERVER = "Cannot start ConnectionServer. Err={}" WARNING_CLONED_PVC_WAS_NOT_WORKED = "As clone backup is empty. It will retry again to do a backup with standard mode." VSNAPSHOT_BACKUP_COMPATIBLE_INFO = "The pvc `{}` is compatible with volume snapshot backup. Doing backup with this technology." PVC_FROM_SNAPSHOT_CREATED = "The pvc `{}` was created from volume snapshot from pvc `{}`." CREATING_PVC_FROM_VSNAPSHOT = "Creating pvc from volume snapshot of pvc `{}`." FINISHED_PVC_FROM_VSNAPSHOT = "Finished pvc `{}` volume snapshot from pvc `{}`." class JobPodBacula(Job, metaclass=ABCMeta): """ This is a common class for all job which handles pvcdata backups using bacula-backup Pod. 
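    The usual flow, as implemented below: prepare_bacula_pod() generates a
    one-time token, renders the bacula-backup Pod YAML (prepare_pod_yaml) and
    starts a ConnectionServer on fdaddress/fdport (prepare_connection_server),
    then execute_pod() creates the Pod and waits until it reports ready. PVC
    data is streamed through handle_pod_data_recv()/handle_pod_data_send();
    handle_pod_logs() collects the tar exit code and stderr, and also flags an
    (almost) empty stream from a cloned PVC so the backup can be retried in
    standard mode; delete_pod() finally removes the proxy Pod.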
""" def __init__(self, plugin, io, params): super().__init__(plugin, io, params) self.connsrv = None self.fdaddr = params.get("fdaddress") self.fdport = params.get("fdport", 9104) self.pluginhost = params.get("pluginhost", self.fdaddr) self.pluginport = params.get("pluginport", self.fdport) self.certfile = params.get('fdcertfile') _keyfile = params.get('fdkeyfile') self.keyfile = _keyfile if _keyfile is not None else self.certfile self._prepare_err = False self.token = None self.jobname = '{name}:{jobid}'.format(name=params.get('name', 'undefined'), jobid=params.get('jobid', '0')) self.timeout = params.get('timeout', DEFAULTTIMEOUT) try: self.timeout = int(self.timeout) except ValueError: self.timeout = DEFAULTTIMEOUT self.timeout = max(1, self.timeout) self.tarstderr = '' self.tarexitcode = None self.backupimage = params.get('baculaimage', BACULABACKUPIMAGE) self.imagepullpolicy = ImagePullPolicy.process_param(params.get('imagepullpolicy')) self.imagepullsecret = params.get('imagepullsecret', None) self.backup_clone_compatibility = True self.debug = params.get('debug', 0) self.current_backup_mode = BaculaBackupMode.Standard def handle_pod_logs(self, connstream): logmode = '' self.tarstderr = '' file_count = 0 bytes_count = 0 with connstream.makefile(mode='r') as fd: self.tarexitcode = fd.readline().strip() logging.debug('handle_pod_logs:tarexitcode:{}'.format(self.tarexitcode)) while True: data = fd.readline() if not data: break logging.debug('LOGS:{}'.format(data.strip())) if data.startswith('---- stderr ----'): logmode = 'stderr' continue elif data.startswith('---- list ----'): logmode = 'list' continue elif data.startswith('---- end ----'): break if logmode == 'stderr': self.tarstderr += data continue elif logmode == 'list': # no listing feature yet file_count += 1 try: file_props = data.split() logging.debug('Data Split:{}'.format(','.join(file_props))) bytes_count += int(file_props[2]) except Exception as e: logging.exception(e) continue logging.debug('Bytes/files in backup: {}/{}'.format(bytes_count, file_count)) logging.debug('Type of job:' + str(self._params.get('type'))) # If the backupMode is standard, we ignore if the backup contains 0 bytes. 
if self._params.get('type') == 'b' and self.current_backup_mode != BaculaBackupMode.Standard and bytes_count == 0 and file_count < 3: self._io.send_non_fatal_error(WARNING_CLONED_PVC_WAS_NOT_WORKED) self.backup_clone_compatibility = False def handle_pod_data_recv(self, connstream): while True: data = self.connsrv.streamrecv(DEFAULTRECVBUFFERSIZE) if not data: logging.debug('handle_pod_data_recv:EOT') break if self.debug == '3': logging.debug('handle_pod_data_recv:D' + str(len(data))) self._io.send_data(data) def handle_pod_data_send(self, connstream): while True: data = self._io.read_data() if not data: logging.debug('handle_pod_data_send:EOT') break self.connsrv.streamsend(data) if self.debug == '3': logging.debug('handle_pod_data_send:D{}'.format(len(data))) def prepare_pod_yaml(self, namespace, pvcdata, mode='backup'): logging.debug('pvcdata: {}'.format(pvcdata)) if self.pluginhost is None: self._handle_error(PLUGINHOST_NONE_ERR) self._prepare_err = True return None pport = self.pluginport try: self.pluginport = int(self.pluginport) except ValueError: self.pluginport = 9104 logging.warning(PLUGINPORT_VALUE_ERR.format(port=pport)) self._io.send_warning(PLUGINPORT_VALUE_ERR.format(port=pport)) pvcname = pvcdata.get('name') node_name = pvcdata.get('node_name') podyaml = prepare_backup_pod_yaml(mode=mode, nodename=node_name, host=self.pluginhost, port=self.pluginport, token=self.token, namespace=namespace, pvcname=pvcname, image=self.backupimage, imagepullpolicy=self.imagepullpolicy, imagepullsecret=self.imagepullsecret, job=self.jobname) if node_name is None: self._io.send_info(POD_YAML_PREPARED_INFO.format( image=self.backupimage, pullpolicy=self.imagepullpolicy, pluginhost=self.pluginhost, pluginport=self.pluginport )) else: self._io.send_info(POD_YAML_PREPARED_INFO_NODE.format( nodename=node_name, image=self.backupimage, pullpolicy=self.imagepullpolicy, pluginhost=self.pluginhost, pluginport=self.pluginport )) return podyaml def prepare_clone_yaml(self, namespace, pvcname, capacity, storage_class): logging.debug('prepare_clone_yaml: {} {} {} {}'.format(namespace, pvcname, capacity, storage_class)) if namespace is None or pvcname is None or capacity is None or storage_class is None: logging.error("Invalid params to pvc clone!") return None, None pvcyaml, snapname = prepare_backup_clone_yaml(namespace, pvcname, capacity, storage_class, self.jobname) self._io.send_info(PVCCLONE_YAML_PREPARED_INFO.format( namespace=namespace, snapname=snapname, storage=storage_class, capacity=capacity )) return pvcyaml, snapname def prepare_connection_server(self): if self.connsrv is None: if self.fdaddr is None: self.fdaddr = '0.0.0.0' fport = self.fdport try: self.fdport = int(self.fdport) except ValueError: self.fdport = 9104 logging.warning(FDPORT_VALUE_ERR.format(port=fport)) self._handle_error(FDPORT_VALUE_ERR.format(port=fport)) logging.debug("prepare_connection_server:New ConnectionServer: {}:{}".format( str(self.fdaddr), str(self.fdport))) self.connsrv = ConnectionServer(self.fdaddr, self.fdport, token=self.token, certfile=self.certfile, keyfile=self.keyfile, timeout=self.timeout) response = self.connsrv.listen() logging.debug("response:{}".format(response)) if isinstance(response, dict) and 'error' in response: self._handle_error(CANNOT_START_CONNECTIONSERVER.format(parse_json_descr(response))) return False else: logging.debug("prepare_connection_server:Reusing ConnectionServer!") self.connsrv.token = self.token return True def execute_pod(self, namespace, podyaml): prev_bacula_pod_name = 
self._plugin.check_bacula_pod(namespace, self.jobname) if prev_bacula_pod_name: logging.debug('Exist previous bacula-backup pod! Name: {}'.format(prev_bacula_pod_name)) self._io.send_info('Exist a previous bacula-backup pod: {}'.format(prev_bacula_pod_name)) response = self._plugin.remove_backup_pod(namespace, prev_bacula_pod_name) if isinstance(response, dict) and 'error' in response: self._handle_error(CANNOT_REMOVE_BACKUP_POD_ERR.format(parse_json_descr(response))) return False self._io.send_info('Removed previous bacula-backup pod: {}'.format(prev_bacula_pod_name)) poddata = yaml.safe_load(podyaml) response = self._plugin.create_backup_pod(namespace, poddata) if isinstance(response, dict) and 'error' in response: self._handle_error(CANNOT_CREATE_BACKUP_POD_ERR.format(parse_json_descr(response))) else: for seq in range(self.timeout): time.sleep(1) isready = self._plugin.backup_pod_isready(namespace, podname=get_backup_pod_name(self.jobname), seq=seq) if isinstance(isready, dict) and 'error' in isready: self._handle_error(CANNOT_CREATE_BACKUP_POD_ERR.format(parse_json_descr(isready))) break elif isready: return True return False def execute_pvcclone(self, namespace, clonename, cloneyaml): pass def delete_pod(self, namespace, force=False): for a in range(self.timeout): time.sleep(1) response = self._plugin.check_gone_backup_pod(namespace, get_backup_pod_name(self.jobname), force=force) if isinstance(response, dict) and 'error' in response: self._handle_error(CANNOT_REMOVE_BACKUP_POD_ERR.format(parse_json_descr(response))) else: logging.debug('delete_pod:isgone:{}'.format(response)) if response: return True return False def delete_pvcclone(self, namespace, clonename, force=False): for a in range(self.timeout): time.sleep(1) response = self._plugin.check_gone_pvcclone(namespace, clonename, force=force) if isinstance(response, dict) and 'error' in response: self._handle_error(CANNOT_REMOVE_PVC_CLONE_ERR.format(parse_json_descr(response))) else: logging.debug('delete_pvcclone:isgone:{}'.format(response)) if response: return True return False def handle_delete_pod(self, namespace): if not self.delete_pod(namespace=namespace): self._handle_error(POD_REMOVE_ERR.format(podname=get_backup_pod_name(self.jobname))) def handle_tarstderr(self): if self.tarexitcode != '0' or len(self.tarstderr) > 0: # format or prepare error message if not len(self.tarstderr): self.tarstderr = TAR_STDERR_UNKNOWN else: self.tarstderr = self.tarstderr.rstrip('\n') # classify it as error or warning if self.tarexitcode != '0': self._handle_non_fatal_error(self.tarstderr) else: self._io.send_warning(self.tarstderr) def prepare_bacula_pod(self, pvcdata, namespace=None, mode='backup'): if self._prepare_err: # first prepare yaml was unsuccessful, we can't recover from this error return False self.token = generate_token() if namespace is None: namespace = pvcdata.get('fi').namespace logging.debug('prepare_bacula_pod:token={} namespace={}'.format(self.token, namespace)) podyaml = self.prepare_pod_yaml(namespace, pvcdata, mode=mode) if podyaml is None: # error preparing yaml self._prepare_err = True return False if not self.prepare_connection_server(): self._prepare_err = True return False logging.debug('prepare_bacula_pod:start pod') if not self.execute_pod(namespace, podyaml): self._handle_error(POD_EXECUTION_ERR) return False return True def cleanup_old_cloned_pvc(self, namespace): logging.debug("Starting cleanup the old pvc from other previous job") # List all pvc names from namespace pvc_names = 
self._plugin.get_pvc_names(namespace) # Search old cloned pvcs old_clone_pvcs = find_bacula_pvc_clones_from_old_job(pvc_names, self.jobname) # Remove old pvcs for old_clone_pvc in old_clone_pvcs: self._io.send_info(f"Exist a previous bacula-backup pvc. Name: {old_clone_pvc}") response = self._plugin.delete_persistent_volume_claim(namespace, old_clone_pvc) logging.debug(f"Response from pvc `{old_clone_pvc}` delete request: {response}") if isinstance(response, dict) and "error" in response: return response self._io.send_info(f"Removed the previous bacula-backup pvc: {old_clone_pvc}") logging.debug("Deleted request launched successfully") logging.debug("Ended cleanup the old pvcs from other previous job") def create_pvcclone(self, namespace, pvcname): clonename = None logging.debug("pvcclone for:{}/{}".format(namespace, pvcname)) self.cleanup_old_cloned_pvc(namespace) pvcdata = self._plugin.get_pvcdata_namespaced(namespace, pvcname) if isinstance(pvcdata, dict) and 'exception' in pvcdata: self._handle_error(PVCDATA_GET_ERROR.format(parse_json_descr(pvcdata))) else: logging.debug('PVCDATA_ORIG:{}:{}'.format(pvcname, pvcdata)) cloneyaml, clonename = self.prepare_clone_yaml(namespace, pvcname, pvcdata.get('capacity'), pvcdata.get('storage_class_name')) if cloneyaml is None or clonename is None: # error preparing yaml self._prepare_err = True return None clonedata = yaml.safe_load(cloneyaml) response = self._plugin.create_pvc_clone(namespace, clonedata) if isinstance(response, dict) and 'error' in response: self._handle_error(CANNOT_CREATE_PVC_CLONE_ERR.format(parse_json_descr(response))) return None return clonename def handle_create_vsnapshot_backup(self, namespace, pvcname): """ Manage operations to create snapshot and new pvc from this snapshot to do backup. """ pvc = self._plugin.get_pvcdata_namespaced(namespace, pvcname) # Check if pvc is compatible with vsnapshot if not self._plugin.check_pvc_compatiblity_with_vsnapshot(namespace, pvc.get('name')): logging.debug('The pvc is not compatible with vsnapshots. 
Name:{}'.format(pvc.get('name'))) return None, pvc logging.debug('Origin pvcdata FileInfo:\n{}'.format(pvc.get('fi'))) self._io.send_info(VSNAPSHOT_BACKUP_COMPATIBLE_INFO.format(pvc.get('name'))) vsnapshot = self._plugin.create_vsnapshot(namespace, pvc) if isinstance(vsnapshot, dict) and 'error' in vsnapshot: self._handle_error(CANNOT_CREATE_VSNAPSHOT_ERR.format(parse_json_descr(vsnapshot))) return None, None # Create pvc from volume snapshot self._io.send_info(CREATING_PVC_FROM_VSNAPSHOT.format(pvc.get('name'))) new_pvc = self._plugin.create_pvc_from_vsnapshot(namespace, pvc) if isinstance(new_pvc, dict) and 'error' in new_pvc: self._handle_error(CANNOT_CREATE_PVC_SNAPSHOT_ERR.format(parse_json_descr(new_pvc))) return None, None self._io.send_info(FINISHED_PVC_FROM_VSNAPSHOT.format(new_pvc.get('name'), pvc.get('name'))) return vsnapshot, new_pvc def handle_delete_vsnapshot_backup(self, namespace, vsnapshot, pvcdata): logging.debug('handle_delete_vsnapshot: {}/{}/{}'.format(namespace, vsnapshot, pvcdata)) if vsnapshot is None: return None pvc_name = pvcdata if type(pvcdata) == dict: pvc_name = pvcdata.get('name') response = self._plugin.remove_pvcclone(namespace, pvc_name) if isinstance(response, dict) and "error" in response: return self._handle_error(CANNOT_REMOVE_PVC_CLONE_ERR.format(pvc_name)) response = self._plugin.remove_vsnapshot(namespace, vsnapshot.get('name')) if isinstance(response, dict) and "error" in response: return self._handle_error(CANNOT_REMOVE_VSNAPSHOT_ERR.format(vsnapshot=vsnapshot.get('name'))) return True bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/jobs/listing_job.py0000644000175000017500000000410414771010173026745 0ustar bsbuildbsbuild# -*- coding: UTF-8 -*- # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. import logging from baculak8s.io.default_io import DefaultIO from baculak8s.jobs.job import Job from baculak8s.util.respbody import parse_json_descr LISTING_START = "ListingStart" LISTING_EMPTY_RESULT = "Listing returned an empty result" LISTING_ERROR_RESPONSE = "Listing returned error response: {}" class ListingJob(Job): """ Job that contains the business logic related to the listing mode of the Backend. 
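    For every entry returned by the plugin's list_in_path() the job sends the
    corresponding FileInfo packet back to the File Daemon and finishes with an
    end-of-data marker; an error response or an empty listing is reported as a
    warning rather than a fatal error.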
It depends upon a Plugin Class implementation that retrieves listing data from the Plugins Data Source """ def __init__(self, plugin, params): super().__init__(plugin, DefaultIO(), params) def execute(self): self._start(LISTING_START) self.execution_loop() self._io.send_eod() def execution_loop(self): self.__listing_loop() def __listing_loop(self): found_any = False file_info_list = self._plugin.list_in_path(self._params["listing"]) if isinstance(file_info_list, dict) and 'error' in file_info_list: logging.warning(file_info_list) self._io.send_warning(LISTING_ERROR_RESPONSE.format(parse_json_descr(file_info_list))) else: for file_info in file_info_list: found_any = True self._io.send_file_info(file_info_list.get(file_info).get('fi')) if not found_any: self._io.send_warning(LISTING_EMPTY_RESULT) bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/jobs/backup_job.py0000644000175000017500000004712414771010173026552 0ustar bsbuildbsbuild# Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. import logging import time from baculak8s.entities.file_info import DIRECTORY from baculak8s.entities.plugin_object import PluginObject from baculak8s.io.packet_definitions import FILE_DATA_START from baculak8s.jobs.estimation_job import PVCDATA_GET_ERROR, EstimationJob from baculak8s.jobs.job_pod_bacula import DEFAULTRECVBUFFERSIZE from baculak8s.plugins.k8sbackend.baculaannotations import ( BaculaAnnotationsClass, BaculaBackupMode, annotated_pvc_backup_mode) from baculak8s.plugins.k8sbackend.baculabackup import BACULABACKUPPODNAME from baculak8s.plugins.k8sbackend.podexec import ExecStatus, exec_commands from baculak8s.util.respbody import parse_json_descr from baculak8s.util.boolparam import BoolParam from baculak8s.plugins.k8sbackend.k8sfileinfo import defaultk8spath BACKUP_START_PACKET = "BackupStart" BACKUP_PARAM_LABELS = "Resource Selector: {}" FILE_BACKUP_ERROR = "Error while reading file contents from the chosen Data Source: {}" POD_DATA_RECV_ERR = "Error in receiving data from bacula-backup Pod!" BA_MODE_ERROR = "Invalid annotations for Pod: {namespace}/{podname}. Backup Mode '{mode}' not supported!" BA_EXEC_STDOUT = "{}:{}" BA_EXEC_STDERR = "{} Error:{}" BA_EXEC_ERROR = "Pod Container execution: {}" POD_BACKUP_SELECTED = "The selected backup mode to do pvc backup of the pvc `{}` is `{}`" CHANGE_BACKUP_MODE_FOR_INCOMPATIBLITY_PVC = "The pvc `{}` is not compatible with snapshot backup, changing mode to clone. Only pvc with storage that they use CSI driver are compatible." PVC_BACKUP_MODE_APPLIED_INFO = "The pvc `{}` will be backup with {} mode." RETRY_BACKUP_WITH_STANDARD_MODE = "If the clone backup is empty. It will try again to do a backup using standard mode." class BackupJob(EstimationJob): """ Job that contains the business logic related to the backup mode of the Backend. 
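    Besides regular file data it emits Plugin Objects summarising the backed-up
    PODs and PVCs, and transfers PVC data through the bacula-backup proxy Pod
    using the snapshot, clone or standard mode resolved per PVC (falling back
    from snapshot to clone when the storage is not CSI-compatible, and retrying
    an empty clone backup in standard mode).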
It depends upon a Plugin Class implementation that retrieves backup data from the Plugins Data Source """ def __init__(self, plugin, params): super().__init__(plugin, params, BACKUP_START_PACKET) _label = params.get('labels', None) self.fs_backup_mode = BaculaBackupMode.process_param(params.get("backup_mode", BaculaBackupMode.Snapshot)) # Fileset backup mode defined. if _label is not None: self._io.send_info(BACKUP_PARAM_LABELS.format(_label)) if params.get('pvcdata', None) is not None: self._io.send_info("The selected default backup mode to do pvc backup in all job is `{}`.".format(self.fs_backup_mode)) def execution_loop(self): super().processing_loop(estimate=False) self.process_plugin_objects() def process_file(self, data): return self._backup_file(data) def process_plugin_objects(self): # logging.debug("SELF: {}".format(dir(self))) """ name: plugin command line category: Container type: Kubernetes/Openshift POD source: (name on the network?) uuid: (I don't know if available) size: total size POD would be nice status: T/W/e count: Number of POD """ logging.debug("PO_PODS: {}".format(self._plugin.pods_counter)) if self._plugin.pods_counter > 0: po_pods = PluginObject("/{}/".format(defaultk8spath), "Kubernetes PODs", cat="Container", potype="POD", src=self._plugin.po_source_data, uuid=self._plugin.po_source_data, count=self._plugin.pods_counter) self._io.send_plugin_object(po_pods) logging.debug("PO_PVCS: {} {}".format(self._plugin.pvcs_counter, self._plugin.pvcs_totalsize)) if self._plugin.pvcs_counter > 0: po_pvcs = PluginObject("/{}/".format(defaultk8spath), "Kubernetes Persistent Volume Claims", cat="Container", potype="PVC", src=self._plugin.po_source_data, uuid=self._plugin.po_source_data, count=self._plugin.pvcs_counter, size=self._plugin.pvcs_totalsize) self._io.send_plugin_object(po_pvcs) def _backup_file(self, data): file_info = data.get('fi') super()._estimate_file(file_info) if file_info.type != DIRECTORY: self.__backup_data(file_info, data.get('spec')) self._io.send_eod() def __backup_data(self, info, spec_data): self._io.send_command(FILE_DATA_START) if spec_data is None: self._handle_error(FILE_BACKUP_ERROR.format(info.name)) else: for file_chunk in [spec_data[i:i+DEFAULTRECVBUFFERSIZE] for i in range(0, len(spec_data), DEFAULTRECVBUFFERSIZE)]: self._io.send_data(str.encode(file_chunk)) def __backup_pvcdata(self, namespace): logging.debug('backup_pvcdata:data recv') self._io.send_command(FILE_DATA_START) response = self.connsrv.handle_connection(self.handle_pod_data_recv) if 'error' in response: self._handle_error(response['error']) if 'should_remove_pod' in response: self.delete_pod(namespace=namespace, force=True) return False logging.debug('backup_pvcdata:logs recv') response = self.connsrv.handle_connection(self.handle_pod_logs) if 'error' in response: self._handle_error(response['error']) return False return True def process_pvcdata(self, namespace, pvcdata, backup_with_pod = False, retry_backup = False): status = None vsnapshot = None is_cloned = False cloned_pvc_name = None # For retry if clone backup is incompatible. 
orig_pvcdata = pvcdata # Detect if pvcdata is compatible with snapshots if not backup_with_pod and not retry_backup: logging.debug('Backup mode {} of pvc {} without pod:'.format(self.fs_backup_mode, pvcdata.get('name'))) pvc_raw = self._plugin.get_persistentvolumeclaim_read_namespaced(namespace, pvcdata.get('name')) pvc_backup_mode = annotated_pvc_backup_mode(pvc_raw, self.fs_backup_mode) if pvc_backup_mode == BaculaBackupMode.Snapshot: logging.debug('Snapshot is activated') vsnapshot, pvcdata = self.handle_create_vsnapshot_backup(namespace, pvcdata.get('name')) self._io.send_info(PVC_BACKUP_MODE_APPLIED_INFO.format(pvcdata.get('name'), BaculaBackupMode.Snapshot)) self.current_backup_mode = BaculaBackupMode.Snapshot if (vsnapshot is None and pvc_backup_mode != BaculaBackupMode.Standard) or pvc_backup_mode == BaculaBackupMode.Clone: if pvc_backup_mode != BaculaBackupMode.Clone: self._io.send_info(CHANGE_BACKUP_MODE_FOR_INCOMPATIBLITY_PVC.format(pvcdata.get('name'))) self._io.send_info(PVC_BACKUP_MODE_APPLIED_INFO.format(pvcdata.get('name'), BaculaBackupMode.Clone)) cloned_pvc_name = self.create_pvcclone(namespace, pvcdata.get('name')) cloned_pvc = self._plugin.get_pvcdata_namespaced(namespace, cloned_pvc_name) logging.debug('Cloned pvc fi:{}'.format(cloned_pvc.get('fi'))) cloned_pvc.get('fi').set_name(pvcdata.get('fi').name) pvcdata = cloned_pvc is_cloned = True self.current_backup_mode = BaculaBackupMode.Clone if pvc_backup_mode == BaculaBackupMode.Standard: self._io.send_info(PVC_BACKUP_MODE_APPLIED_INFO.format(pvcdata.get('name'), BaculaBackupMode.Standard)) self.current_backup_mode = BaculaBackupMode.Standard logging.debug('Process_pvcdata (Backup_job): {} --- {}'.format(vsnapshot, pvcdata)) if self.prepare_bacula_pod(pvcdata, namespace=namespace, mode='backup'): super()._estimate_file(pvcdata) # here to send info about pvcdata to plugin status = self.__backup_pvcdata(namespace=namespace) if status: self._io.send_eod() self.handle_tarstderr() self.handle_delete_pod(namespace=namespace) # Both prepare_bacula_pod fails or not, we must remove snapshot and pvc if not backup_with_pod: self.handle_delete_vsnapshot_backup(namespace, vsnapshot, pvcdata) if is_cloned: self.delete_pvcclone(namespace, cloned_pvc_name) # It retries when the backup is not with pod annotation (it is controlled in their function) if not backup_with_pod and not retry_backup and not self.backup_clone_compatibility: # It is important set to True before recall the process. 
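        # Resetting the flag before the recursive call keeps the fallback
        # bounded: the standard-mode retry below runs at most once per PVC.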
self.backup_clone_compatibility = True # We only try once self._io.send_info(RETRY_BACKUP_WITH_STANDARD_MODE) logging.debug("Sent info to joblog: " + RETRY_BACKUP_WITH_STANDARD_MODE) status = self.process_pvcdata(namespace, orig_pvcdata, backup_with_pod, True) return status def handle_pod_container_exec_command(self, corev1api, namespace, pod, runjobparam, failonerror=False): podname = pod.get('name') containers = pod.get('containers') logging.debug("[{}] pod {} containers: {}".format(runjobparam, podname, containers)) # now check if run before job container, command = BaculaAnnotationsClass.handle_run_job_container_command(pod.get(runjobparam)) if container is not None: logging.info("container: {}".format(container)) logging.info("command: {}".format(command)) if container != '*': # check if container exist if container not in containers: # error logging.error("container {} not found".format(container)) return False containers = [container] # here execute command for cname in containers: logging.info("executing command: {} on {}".format(command, cname)) outch, errch, infoch = exec_commands(corev1api, namespace, podname, cname, command) logging.info("stdout:\n{}".format(outch)) if len(outch) > 0: outch = outch.rstrip('\n') self._io.send_info(BA_EXEC_STDOUT.format(runjobparam, outch)) logging.info("stderr:\n{}".format(errch)) if len(errch) > 0: errch = errch.rstrip('\n') self._io.send_warning(BA_EXEC_STDERR.format(runjobparam, errch)) execstatus = ExecStatus.check_status(infoch) logging.info("Exec status: {}".format(execstatus)) if not execstatus: self._io.send_warning(BA_EXEC_ERROR.format(infoch.get('message'))) if failonerror: self._handle_error("Failing job on request...") return False return True def process_pod_pvcdata(self, namespace, pod, pvcnames): logging.debug("process_pod_pvcdata:{}/{} {}".format(namespace, pod, pvcnames)) status = None corev1api = self._plugin.corev1api pod_backup_mode = BaculaBackupMode.process_param(pod.get(BaculaAnnotationsClass.BackupMode, BaculaBackupMode.Snapshot)) if pod_backup_mode is None: self._handle_error(BA_MODE_ERROR.format(namespace=namespace, podname=pod.get('name'), mode=pod.get(BaculaAnnotationsClass.BackupMode))) return False self._io.send_info("The selected default backup mode to do pvc backup of the pod `{}` is `{}`".format(pod.get('name'), pod_backup_mode)) failonerror = BoolParam.handleParam(pod.get(BaculaAnnotationsClass.RunBeforeJobonError), True) # the default is to fail job on error # here we execute remote command before Pod backup if not self.handle_pod_container_exec_command(corev1api, namespace, pod, BaculaAnnotationsClass.RunBeforeJob, failonerror): logging.error("handle_pod_container_exec_command execution error!") return False requestedvolumes = [v.lstrip().rstrip() for v in pvcnames.split(',')] handledvolumes = [] # iterate on requested volumes for shapshot logging.debug("iterate over requested vols for backup: {}".format(requestedvolumes)) for pvc in requestedvolumes: pvcname = pvc original_pvc = self._plugin.get_pvcdata_namespaced(namespace, pvcname) vsnapshot = None logging.debug("handling vol before backup: {}".format(pvcname)) pvc_raw = self._plugin.get_persistentvolumeclaim_read_namespaced(namespace, pvcname) pvc_backup_mode = annotated_pvc_backup_mode(pvc_raw, pod_backup_mode) self._io.send_info(POD_BACKUP_SELECTED.format(pvcname, pvc_backup_mode)) # Check if pvc has status: 'Terminating'. Because in this state, the backup raise error. if self._plugin.pvc_is_terminating(namespace, original_pvc): logging.debug("Skip pvc. 
Cause Terminating status") self._io.send_warning("Skip pvc `{}` because it is in Terminating status.".format(pvcname)) continue if self._plugin.pvc_is_pending(namespace, original_pvc): logging.debug("Skip pvc. Cause Pending status") self._io.send_warning("Skip pvc `{}` because it is in Pending status.".format(pvcname)) continue if pvc_backup_mode == BaculaBackupMode.Snapshot: logging.debug('Snapshot mode chosen') vsnapshot, pvc_from_vsnap = self.handle_create_vsnapshot_backup(namespace, pvcname) logging.debug("The vsnapshot created from pvc {} is: {}".format(pvcname, vsnapshot)) logging.debug("The pvc create from vsnapshot {} is: {}. FI: {}".format(vsnapshot, pvc_from_vsnap, pvc_from_vsnap.get('fi'))) if vsnapshot is None: logging.debug(CHANGE_BACKUP_MODE_FOR_INCOMPATIBLITY_PVC.format(pvcname)) # backupmode = BaculaBackupMode.Clone self._io.send_info(CHANGE_BACKUP_MODE_FOR_INCOMPATIBLITY_PVC.format(pvcname)) pvc_backup_mode = BaculaBackupMode.Clone else: pvc = pvc_from_vsnap pvcname = pvc_from_vsnap.get("name") if pvc_backup_mode == BaculaBackupMode.Clone: pvcname = self.create_pvcclone(namespace, pvcname) cloned_pvc = self._plugin.get_pvcdata_namespaced(namespace, pvcname) if pvcname is None: # error logging.error("create_pvcclone failed!") return False else: logging.debug('Original_pvc: {}'.format(original_pvc.get('fi'))) logging.debug('Cloned_pvc->fi: {}'.format(cloned_pvc.get('fi'))) cloned_pvc.get('fi').set_name(original_pvc.get('fi').name) pvc = cloned_pvc logging.debug("handling vol after snapshot/clone: {}".format(pvcname)) handledvolumes.append({ 'pvcname': pvcname, 'pvc': pvc, 'vsnapshot': vsnapshot, 'backupmode': pvc_backup_mode, 'original_pvc': original_pvc }) failonerror = BoolParam.handleParam(pod.get(BaculaAnnotationsClass.RunAfterSnapshotonError), False) # the default is ignore errors # here we execute remote command after vol snapshot if not self.handle_pod_container_exec_command(corev1api, namespace, pod, BaculaAnnotationsClass.RunAfterSnapshot, failonerror): return False # iterate on requested volumes for backup logging.debug("iterate over requested vols for backup: {}".format(handledvolumes)) for volumes in handledvolumes: logging.debug('Volume in handlevolumes:\n{}'.format(volumes)) pvc = volumes['pvc'] pvcname = volumes['pvcname'] # get pvcdata for this volume """ PVCDATA:plugintest-pvc-alone:{'name': 'plugintest-pvc-alone-baculaclone-lfxrra', 'node_name': None, 'storage_class_name': 'ocs-storagecluster-cephfs', 'capacity': '1Gi', 'fi': } """ pvcdata = self._plugin.get_pvcdata_namespaced(namespace, pvcname, pvcname) if isinstance(pvcdata, dict) and 'error' in pvcdata: self._handle_error(PVCDATA_GET_ERROR.format(parse_json_descr(pvcdata))) else: # Modify the name in FileInfo because we need save the file like original name # and not the new pvc (from vsnapshot) name. if volumes.get('backupmode') == BaculaBackupMode.Snapshot or volumes.get('backupmode') == BaculaBackupMode.Clone: logging.debug('We change the name of FileInfo to adapt the original pvc name with the new pvc name') pvcdata.get('fi').set_name(pvc.get('fi').name) if len(pvcdata) > 0: status = self.process_pvcdata(namespace, pvcdata, True) # Control the compatibility of snapshot or clone. If it is not compatible, retry again with standard backup mode. logging.debug('Call again process_pvcdata from process_pod_pvcdata? 
{}'.format(self.backup_clone_compatibility)) if not self.backup_clone_compatibility: status = self.process_pvcdata(namespace, volumes['original_pvc'], True) # iterate on requested volumes for delete snap logging.debug("iterate over requested vols for delete snap: {}".format(handledvolumes)) for volumes in handledvolumes: pvcname = volumes['pvcname'] logging.debug("Should remove this pvc: {}".format(pvcname)) if volumes.get('backupmode') == BaculaBackupMode.Clone: # snapshot delete if snapshot requested status = self.delete_pvcclone(namespace, pvcname) if volumes.get('backupmode') == BaculaBackupMode.Snapshot and volumes['vsnapshot'] is not None: status = self.handle_delete_vsnapshot_backup(namespace, volumes['vsnapshot'], volumes['pvc']) logging.debug("Finish removing pvc clones and vsnapshots. Status {}".format(status)) failonerror = BoolParam.handleParam(pod.get(BaculaAnnotationsClass.RunAfterJobonError), False) # the default is ignore errors # here we execute remote command after Pod backup if not self.handle_pod_container_exec_command(corev1api, namespace, pod, BaculaAnnotationsClass.RunAfterJob, failonerror): return False return status bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/jobs/__init__.py0000644000175000017500000000120314771010173026176 0ustar bsbuildbsbuild# Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/jobs/query_job.py0000644000175000017500000000363414771010173026450 0ustar bsbuildbsbuild# -*- coding: UTF-8 -*- # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. import logging from baculak8s.io.default_io import DefaultIO from baculak8s.jobs.job import Job from baculak8s.util.respbody import parse_json_descr QUERY_START = "QueryStart" QUERY_ERROR_RESPONSE = "QueryParam returned error response: {}" class QueryJob(Job): """ Job that contains the business logic related to the queryParams mode of the Backend. 
It depends upon a Plugin Class implementation that retrieves listing data from the Plugins Data Source """ def __init__(self, plugin, params): super().__init__(plugin, DefaultIO(), params) def execute(self): self._start(QUERY_START) self.execution_loop() self._io.send_eod() def execution_loop(self): self.__query_loop() def __query_loop(self): found_any = False query_param_list = self._plugin.query_parameter(self._params["query"]) if isinstance(query_param_list, dict) and 'error' in query_param_list: logging.warning(query_param_list) self._io.send_warning(QUERY_ERROR_RESPONSE.format(parse_json_descr(query_param_list))) else: for param_data in query_param_list: found_any = True self._io.send_query_response(param_data) bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/jobs/job_factory.py0000644000175000017500000000363514771010173026753 0ustar bsbuildbsbuild# Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. from baculak8s.jobs.backup_job import BackupJob from baculak8s.jobs.estimation_job import EstimationJob from baculak8s.jobs.listing_job import ListingJob from baculak8s.jobs.query_job import QueryJob from baculak8s.jobs.restore_job import RestoreJob from baculak8s.services.job_info_service import (TYPE_BACKUP, TYPE_ESTIMATION, TYPE_RESTORE) class JobFactory(object): """ Creates a Job that will be executed by the Backend :param job_type: The type of the created Job (Backup, Restore, Estimation) :param plugin: The plugin that this Job should use :raise: ValueError, if an invalid job_type was provided """ @staticmethod def create(params, plugin): if params["type"] == TYPE_BACKUP: return BackupJob(plugin, params) elif params["type"] == TYPE_RESTORE: return RestoreJob(plugin, params) elif params["type"] == TYPE_ESTIMATION: if params.get("listing", None) is not None: return ListingJob(plugin, params) if params.get("query", None) is not None: return QueryJob(plugin, params) return EstimationJob(plugin, params) else: raise ValueError("Invalid Job Type") bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/jobs/job.py0000644000175000017500000000674414771010173025230 0ustar bsbuildbsbuild# Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. import logging import sys import time from abc import ABCMeta, abstractmethod from baculak8s.io.log import Log INVALID_START_TEMPLATE = "Invalid start packet. 
Expected packet: {}" KUBERNETES_CODE_INFO = "Connected to Kubernetes {major}.{minor} - {git_version}." class Job(metaclass=ABCMeta): """ Abstract Base Class for all the Backend Jobs """ def __init__(self, plugin, io, params): self._plugin = plugin self._io = io self._params = params @abstractmethod def execute(self): """ Executes the Job """ raise NotImplementedError @abstractmethod def execution_loop(self): """ Execution loop subroutine """ raise NotImplementedError def _start(self, expected_start_packet): self._read_start(expected_start_packet, onError=self._abort) if self._params.get('level', 'F') != 'F': logging.error("Unsupported backup level. Doing FULL backup.") self._io.send_warning("Unsupported backup level. Doing FULL backup.") self._connect() self._io.send_eod() def _read_start(self, start_packet, onError): _, packet = self._io.read_packet() if packet != start_packet: self._io.send_abort(INVALID_START_TEMPLATE.format(start_packet)) onError() def _connect(self): response = self._plugin.connect() if 'error' in response: logging.debug("response data:{}".format(response)) if 'error_code' in response: self._io.send_connection_error(response['error_code']) else: self._io.send_connection_error(0, strerror=response['error']) # Reads termination packet packet_header = self._io.read_line() Log.save_received_termination(packet_header) sys.exit(0) else: if self._params.get("type", None) == 'b': # display some info to user data = response.get('response') if data is not None: self._io.send_info(KUBERNETES_CODE_INFO.format( major=data.major, minor=data.minor, git_version=data.git_version, )) def _handle_non_fatal_error(self, error_message): if self._params.get("abort_on_error", None) == "1": self._io.send_abort(error_message) self._abort() else: self._io.send_non_fatal_error(error_message) def _handle_error(self, error_message): if self._params.get("abort_on_error", None) == "1": self._io.send_abort(error_message) self._abort() else: self._io.send_error(error_message) def _abort(self): self._plugin.disconnect() sys.exit(0) bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/jobs/restore_job.py0000644000175000017500000002637614771010173026776 0ustar bsbuildbsbuild# Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. 
import logging import re import time from baculak8s.entities.file_info import DIRECTORY, EMPTY_FILE, NOT_EMPTY_FILE from baculak8s.entities.k8sobjtype import K8SObjType from baculak8s.io.jobs.restore_io import (COMMA_SEPARATOR_NOT_SUPPORTED, FILE_ERROR_TEMPLATE, FILE_TRANSFER_START, RESTORE_LOOP_ERROR, RESTORE_START, SKIP_PACKET, SUCCESS_PACKET, FileContentReader, RestoreIO, RestorePacket) from baculak8s.jobs.job_pod_bacula import JobPodBacula from baculak8s.plugins.k8sbackend.k8sfileinfo import k8sfile2objname from baculak8s.services.job_info_service import (REPLACE_ALWAYS, REPLACE_NEVER) from baculak8s.util.respbody import parse_json_descr from baculak8s.util.sslserver import DEFAULTTIMEOUT NS_DOESNOT_EXIST_ERR = "Namespace '{namespace}' does not exist! You should restore namespace too!" PVC_DOESNOT_EXIST_ERR = "PVC: '{pvcname}' does not exist or is not mounted! " \ "You should restore PVC config or mount it in any Pod!" PVC_ISNOTREADY_ERR = "PVC: '{pvcname}' is not ready in expected time! " \ "You should check PVC config or logs!" SERVICE_ACCOUNT_TOKEN_RESTORE_INFO = "Unable to restore service-account-token {token}. Regenerated new one." RESTORE_RES_ERR = "Cannot restore resource object. Err={}" PVC_STATUS_ERR = "PVC: '{}' invalid status! Err={}" class RestoreJob(JobPodBacula): """ Job that contains the business logic related to the restore mode of the Backend. It depends upon a Plugin Class implementation that sends data to the Plugins Data Source. """ def __init__(self, plugin, params): super().__init__(plugin, RestoreIO(), params) self.ns = {} self.localrestore = "where" in self._params and self._params["where"] != "" def execution_loop(self): return self.__restore_loop() def execute(self): self._start(RESTORE_START), self.execution_loop() self._io.send_eod() def __restore_loop(self): file_info = None while True: packet_type, packet = self._io.next_loop_packet(onError=self._abort) if packet_type == RestorePacket.RESTORE_END: break elif packet_type == RestorePacket.FILE_INFO: file_info = packet self.restore_object(file_info) else: self._io.send_abort(RESTORE_LOOP_ERROR) self._abort() def restore_object(self, file_info): dorestore = self.__handle_file_info(file_info) logging.debug('restore_object:dorestore:'+str(dorestore)) if dorestore: # everything is ok, so do a restore reader = FileContentReader() self._io.send_command(SUCCESS_PACKET) self._read_start(FILE_TRANSFER_START, onError=self._abort) if self.localrestore or file_info.objtype != K8SObjType.K8SOBJ_PVCDATA: self.__restore_file(file_info, reader) else: namespace = file_info.namespace self.__restore_pvcdata(namespace) elif dorestore is not None: self._io.send_command(SKIP_PACKET) def __handle_file_info(self, file_info): logging.debug('handle_file_info:fileinfo: {}'.format(file_info)) dorestore = True if file_info.type in [EMPTY_FILE, NOT_EMPTY_FILE]: if "replace" in self._params and not self.__should_replace(file_info): dorestore = False else: if self.localrestore: file_info = self._plugin.apply_where_parameter(file_info, self._params["where"]) if "regexwhere" in self._params and self._params["regexwhere"] != '': self.__apply_regexwhere_param(file_info) else: if file_info.objtype == K8SObjType.K8SOBJ_PVCDATA: dorestore = self.__prepare_pvcdata_restore(file_info) elif file_info.type == DIRECTORY: # We just ignore it for the current implementation dorestore = False logging.debug('handle_file_info:dorestore: {}'.format(dorestore)) return dorestore def __prepare_pvcdata_restore(self, file_info): """ Successful pvcdata restore 
require the following prerequisites: - namespace has to exist so at least restored with this job - pvcs has to exist so at least restored with this job :param file_info: pvcdata FileInfo for restore :return: """ namespace = file_info.namespace nsdata = self.ns.get(namespace) if nsdata is None: # no information about namespace, get it logging.debug('prepare_pvcdata_restore:no previous ns check. do it!') nsexist = self._plugin.check_namespace(namespace) nsdata = { 'exist': nsexist, } self.ns[namespace] = nsdata if nsexist is None: logging.debug('prepare_pvcdata_restore ns:{} not exist!'.format(namespace)) self._handle_error(NS_DOESNOT_EXIST_ERR.format(namespace=namespace)) return None else: if nsdata.get('exist', None) is None: logging.debug('prepare_pvcdata_restore ns:cached:{} not exist!'.format(namespace)) return False logging.debug('prepare_pvcdata_restore ns: {} found.'.format(namespace)) pvcdatalist = nsdata.get('pvcs') if pvcdatalist is None: # grab pvcs pvcdatalist = self._plugin.list_pvcdata_for_namespace(namespace, allpvcs=True) logging.debug('prepare_pvcdata_restore pvcdatalist:{}'.format(pvcdatalist)) self.ns[namespace].update({ 'pvcs': pvcdatalist, }) pvcname = k8sfile2objname(file_info.name) pvcdata = pvcdatalist.get(pvcname) if pvcdata is None: logging.debug('prepare_pvcdata_restore pvc:{} not exist!'.format(pvcname)) self._handle_error(PVC_DOESNOT_EXIST_ERR.format(pvcname=pvcname)) return None pvcisready = False for _ in range(DEFAULTTIMEOUT): time.sleep(1) isready = self._plugin.pvc_isready(namespace, pvcname) iswaiting = self._plugin.is_pvc_waiting_first_consumer(namespace,pvcname) if isinstance(isready, dict) and 'error' in isready: # cannot check pvc status self._handle_error(PVC_STATUS_ERR.format(pvcname, parse_json_descr(isready))) elif isready or iswaiting: pvcisready = True break # well, we have to wait for pvc to be ready, so restart procedure logging.debug('Waiting for pvc to become ready...') if not pvcisready: logging.debug('prepare_pvcdata_restore pvc:{} is not ready in expected time!'.format(pvcname)) self._handle_error(PVC_ISNOTREADY_ERR.format(pvcname=pvcname)) return None self.ns[namespace].update({ 'pvcdata': pvcdata, }) logging.debug('prepare_pvcdata_restore PVCDATA:{}'.format(pvcdata)) if self.prepare_bacula_pod(pvcdata, namespace=namespace, mode='restore'): return True return False def __should_replace(self, file_info): """ Checks if restored object is already available as we have to distinguish between create and patch. 
:param file_info: :return: """ if file_info.objtype == K8SObjType.K8SOBJ_PVCDATA: # always replace file_info return True curent_file = self._plugin.check_file(file_info) # cache get object for future use if isinstance(curent_file, dict) and 'error' in curent_file: file_info.objcache = None logging.error("check_file: {}".format(curent_file['error'])) else: file_info.objcache = curent_file if self._params["replace"] == REPLACE_ALWAYS: return True if self._params["replace"] == REPLACE_NEVER: if curent_file is not None: return False # XXX: we cannot support ifnewer or ifolder because k8s does not provide modification time return True def __apply_regexwhere_param(self, file_info): if re.match(r",(.+?),(.+?),", self._params["regexwhere"]): self._io.send_abort(COMMA_SEPARATOR_NOT_SUPPORTED) self._abort() file_info.apply_regexwhere_param(self._params["regexwhere"]) def __handle_pvcdata_connections(self): logging.debug('restore_pvcdata:data send') response = self.connsrv.handle_connection(self.handle_pod_data_send) if 'error' in response: self._handle_error(response['error']) return False logging.debug('backup_pvcdata:logs recv') response = self.connsrv.handle_connection(self.handle_pod_logs) if 'error' in response: self._handle_error(response['error']) return False return True def __restore_pvcdata(self, namespace): self.__handle_pvcdata_connections() self.handle_tarstderr() self.handle_delete_pod(namespace=namespace) self._io.send_command(SUCCESS_PACKET) def __restore_file(self, file_info, reader): if file_info.size == 0 and file_info.objtype != K8SObjType.K8SOBJ_PVCDATA: logging.debug('file_info.size == 0') response = self._plugin.restore_file(file_info) else: reader.finished = False response = self._plugin.restore_file(file_info, reader) if isinstance(response, dict) and 'error' in response: if file_info.objtype == K8SObjType.K8SOBJ_SECRET: self._io.send_warning(SERVICE_ACCOUNT_TOKEN_RESTORE_INFO.format( token=file_info.name.split('/')[-1][:-5])) else: if 'exception' in response: error_msg = RESTORE_RES_ERR.format(parse_json_descr(response)) self._handle_error(error_msg) else: error_msg = FILE_ERROR_TEMPLATE.format(file_info.name, file_info.namespace, response['error']) self._handle_error(error_msg) else: if file_info.size != 0 or file_info.objtype == K8SObjType.K8SOBJ_PVCDATA: self._io.send_command(SUCCESS_PACKET) bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/jobs/estimation_job.py0000644000175000017500000004210014771010173027446 0ustar bsbuildbsbuild# Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. 
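# --- Illustrative sketch (not part of the Bacula source) --------------------
# __prepare_pvcdata_restore() in restore_job.py above waits up to
# DEFAULTTIMEOUT seconds for a restored PVC to become usable, accepting either
# a ready status or "waiting for first consumer" as good enough.  The generic
# polling pattern is restated below; `is_ready` and
# `is_waiting_first_consumer` stand in for the plugin's own pvc_isready() and
# is_pvc_waiting_first_consumer() checks and are assumptions of this example.

import time


def wait_for_pvc(is_ready, is_waiting_first_consumer, timeout_seconds=300):
    """Poll once per second until the PVC is usable or the timeout expires."""
    for _ in range(timeout_seconds):
        time.sleep(1)
        status = is_ready()
        if isinstance(status, dict) and "error" in status:
            # The status could not be determined at all - treat as fatal.
            raise RuntimeError("cannot read PVC status: {}".format(status["error"]))
        if status or is_waiting_first_consumer():
            return True
    return False
# -----------------------------------------------------------------------------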
import logging import re from baculak8s.entities.file_info import FileInfo from baculak8s.io.default_io import DefaultIO from baculak8s.io.packet_definitions import ESTIMATION_START_PACKET from baculak8s.jobs.job_pod_bacula import (PVCDATA_GET_ERROR, JobPodBacula) from baculak8s.plugins.k8sbackend.baculaannotations import BaculaAnnotationsClass, BaculaBackupMode from baculak8s.util.respbody import parse_json_descr PATTERN_NOT_FOUND = "No matches found for pattern {}" NO_PV_FOUND = "No required Persistent Volumes found at the cluster" NO_SC_FOUND = "No required Storage Classes found at the cluster" NO_NS_FOUND = "No required Namespaces found at the cluster" PV_LIST_ERROR = "Cannot list PV objects. Err={}" PODS_LIST_ERROR = "Cannot list Pods objects. Err={}" SC_LIST_ERROR = "Cannot list StorageClass objects. Err={}" NS_LIST_ERROR = "Cannot list Namespace objects. Err={}" RES_LIST_ERROR = "Cannot list resource objects. Err={}" # PVCDATA_LIST_ERROR = "Cannot list PVC Data objects. Err={}" PROCESSING_NAMESPACE_INFO = "Processing namespace: {namespace}" PROCESSING_PVCDATA_START_INFO = "Start backup volume claim: {pvc}" PROCESSING_PVCDATA_STOP_INFO = "Finish backup volume claim: {pvc}" PROCESSING_PODBACKUP_PHASE_START_INFO = "Start backup phase of pvcs that they are in pod annotations" PROCESSING_PODBACKUP_PHASE_FINISH_INFO = "Finish backup phase of pvcs that they are in pod annotations" PROCESSING_PODBACKUP_START_INFO = "Start backup Pod: {namespace}/{podname}" PROCESSING_PODBACKUP_FINISH_INFO = "Finish backup Pod: {namespace}/{podname}" SKIP_PVCDATA_SECOND_BACKUP_INFO = "Skip backup of pvc `{}` because it did previously with a pod" SECOND_TRY_PVCDATA_INFO = "Generic try to do backup of this pvc `{}`" BA_PVC_NOT_FOUND_ERROR = "Requested volume claim: {pvc} on {namespace}/{podname} not found!" BA_PVCNAME_ERROR = "Invalid annotations for Pod: {namespace}/{podname}. {bavol} Required!" PROCESS_POD_PVCDATA_ERROR = "Cannot process Pod PVC Data backup: {namespace}/{podname}!" LABEL_PARAM_INVALID_ERROR = "Label parameter ({}) is invalid!" class EstimationJob(JobPodBacula): """ Job that contains the business logic related to the estimation mode of the Backend. 
It depends upon a Plugin Class implementation that retrieves estimation data from the Plugins Data Source """ def __init__(self, plugin, params, start_packet=None): if not start_packet: start_packet = ESTIMATION_START_PACKET self.__start_packet = start_packet super().__init__(plugin, DefaultIO(), params) self.include_matches = [] self.regex_include_matches = [] self.exclude_matches = [] self.regex_exclude_matches = [] _sc = params.get('storageclass', []) self.storageclassparam = _sc if len(_sc) > 0 else None _pv = params.get('persistentvolume', []) self.persistentvolumeparam = _pv if len(_pv) > 0 else None def execute(self): self._start(self.__start_packet) self.execution_loop() self.__verify_all_matches() self._io.send_eod() def execution_loop(self): return self.processing_loop(estimate=True) def processing_loop(self, estimate=False): sc_list = self._plugin.list_all_storageclass(estimate=estimate) logging.debug("processing list_all_storageclass:{}:nrfound:{}".format(self.storageclassparam, len(sc_list))) if isinstance(sc_list, dict) and sc_list.get('exception'): self._handle_error(SC_LIST_ERROR.format(parse_json_descr(sc_list))) else: if self.storageclassparam is not None and len(sc_list) == 0: self._handle_error(NO_SC_FOUND) for sc in sc_list: logging.debug('processing sc:{}'.format(sc)) self.process_file(sc_list.get(sc)) pv_list = self._plugin.list_all_persistentvolumes(estimate=estimate) logging.debug("processing list_all_persistentvolumes:{}:nrfound:{}".format(self.persistentvolumeparam, len(pv_list))) if isinstance(pv_list, dict) and pv_list.get('exception'): self._handle_error(PV_LIST_ERROR.format(parse_json_descr(pv_list))) else: if self.persistentvolumeparam is not None and len(pv_list) == 0: self._handle_error(NO_PV_FOUND) for pv in pv_list: logging.debug("processing pv: {}".format(pv)) self.process_file(pv_list.get(pv)) ns_list = self._plugin.list_all_namespaces(estimate=estimate) logging.debug("processing list_all_namespaces:nrfound:{}".format(len(ns_list))) if isinstance(ns_list, dict) and ns_list.get('exception'): self._handle_error(NS_LIST_ERROR.format(parse_json_descr(ns_list))) else: if len(self._params.get('namespace')) != 0 and len(ns_list) == 0: self._handle_error(NO_NS_FOUND) for nsname in ns_list: ns = ns_list.get(nsname) logging.debug('processing ns: {}'.format(ns.get('name'))) if not estimate: self._io.send_info(PROCESSING_NAMESPACE_INFO.format(namespace=ns['name'])) self.process_file(ns) nsdata = self._plugin.list_namespaced_objects_before_pvcdata(nsname, estimate=estimate) logging.debug('Before PVCData NSDATA:{}'.format([ns.keys() for ns in nsdata])) # limit debug output for sub in nsdata: # sub is a list of different resource types if isinstance(sub, dict) and sub.get('exception'): self._handle_error(RES_LIST_ERROR.format(parse_json_descr(sub))) else: for res in sub: self.process_file(sub.get(res)) podsannotated = self._plugin.get_annotated_namespaced_pods_data(nsname, estimate=estimate) logging.debug("processing get_annotated_namespaced_pods_data:{}:nrfound:{}".format(nsname, len(podsannotated))) # We can define what pvc backup in two places: fileset and pod annotation. So, we save the pod annotations to we don't backup two times the same. 
pvc_backed={} # here we have a list of pods which are anotated if podsannotated is not None and self._plugin.config.get('pvcdata') is not None: if isinstance(podsannotated, dict) and podsannotated.get('exception'): self._handle_error(PODS_LIST_ERROR.format(parse_json_descr(podsannotated))) else: if len(podsannotated) > 0 and not estimate: self._io.send_info(PROCESSING_PODBACKUP_PHASE_START_INFO) for pod in podsannotated: logging.debug('PODDATA:{}'.format(pod)) # this is required parameter! pvcnames = pod.get(BaculaAnnotationsClass.BackupVolume) if pvcnames is None: self._handle_error(BA_PVCNAME_ERROR.format(namespace=nsname, podname=pod.get('name'), bavol=BaculaAnnotationsClass.BaculaPrefix + BaculaAnnotationsClass.BackupVolume)) continue podname = pod.get('name') if not estimate: self._io.send_info(PROCESSING_PODBACKUP_START_INFO.format(namespace=nsname, podname=podname)) status = self.process_pod_pvcdata(nsname, pod, pvcnames) logging.debug("Status in processing_loop: {}".format(status)) if status is None: for pvc_name in pvcnames.split(','): pvc_backed[pvc_name] = 'error' logging.error("Some unknown error at {namespace}/{podname}!".format(namespace=nsname, podname=podname)) self._handle_error(PROCESS_POD_PVCDATA_ERROR.format(namespace=nsname, podname=podname)) break if not estimate: for pvc_name in pvcnames.split(','): pvc_backed[pvc_name] = 'ok' self._io.send_info(PROCESSING_PODBACKUP_FINISH_INFO.format(namespace=nsname, podname=podname)) if len(podsannotated) > 0 and not estimate: self._io.send_info(PROCESSING_PODBACKUP_PHASE_FINISH_INFO) logging.debug("PVCs backed through pod annotations: {}".format(pvc_backed)) pvcdatalist = self._plugin.list_pvcdata_for_namespace(nsname, estimate=estimate) logging.debug("processing list_pvcdata_for_namespace:{}:nrfound:{}".format(nsname, len(pvcdatalist))) if pvcdatalist is not None: if isinstance(pvcdatalist, dict) and pvcdatalist.get('exception'): self._handle_error(PV_LIST_ERROR.format(parse_json_descr(pvcdatalist))) else: for pvc in pvcdatalist: pvcdata = pvcdatalist.get(pvc) logging.debug('PVCDATA:{}:{}'.format(pvc, pvcdata)) if not estimate: logging.debug("Previous pvc_backed: {} /{}".format(pvc_backed, pvc)) # If a pod has annotations to backup a pvc, it did it previously. So, skip if it was ok, if not second try. if pvc_backed.get(pvc) == 'ok': self._io.send_info(SKIP_PVCDATA_SECOND_BACKUP_INFO.format(pvc)) continue elif pvc_backed.get(pvc) == 'error': if self._plugin.pvc_is_terminating(nsname, pvcdata): logging.debug("Skip pvc. Cause Terminating status") self._io.send_warning("Skip pvc of second try `{}` because it is in Terminating status.".format(pvc)) continue if self._plugin.pvc_is_pending(nsname, pvcdata): logging.debug("Skip pvc. Cause Pending status") self._io.send_warning("Skip pvc `{}` because it is in Pending status.".format(pvc)) continue self._io.send_warning(SECOND_TRY_PVCDATA_INFO.format(pvc)) self._io.send_info(PROCESSING_PVCDATA_START_INFO.format(pvc=pvc)) if self._plugin.pvc_is_terminating(nsname, pvcdata): logging.debug("Skip pvc. Cause Terminating status") self._io.send_warning("Skip pvc `{}` because it is in Terminating status.".format(pvc)) continue if self._plugin.pvc_is_pending(nsname, pvcdata): logging.debug("Skip pvc. 
Cause Pending status") self._io.send_warning("Skip pvc `{}` because it is in Pending status.".format(pvc)) continue status = self.process_pvcdata(nsname, pvcdata) if status is None: # None means unable to prepare listening service during backup break if not estimate and status: self._io.send_info(PROCESSING_PVCDATA_STOP_INFO.format(pvc=pvc)) logging.debug("Finish pvcdatalist") nsdata = self._plugin.list_namespaced_objects_after_pvcdata(nsname, estimate=estimate) logging.debug('After PVCDATA NSDATA:{}'.format([ns.keys() for ns in nsdata])) # limit debug output for sub in nsdata: # sub is a list of different resource types if isinstance(sub, dict) and sub.get('exception'): self._handle_error(RES_LIST_ERROR.format(parse_json_descr(sub))) else: for res in sub: self.process_file(sub.get(res)) def _estimate_file(self, data): logging.debug('{}'.format(data)) if isinstance(data, dict): file_info = data.get('fi') elif isinstance(data, FileInfo): file_info = data else: raise ValueError('Invalid data in estimate_file') logging.debug('file_info: {}'.format(file_info)) self._io.send_file_info(file_info) def process_file(self, data): return self._estimate_file(data) def process_pvcdata(self, namespace, pvcdata): return self._estimate_file(pvcdata) def process_pod_pvcdata(self, namespace, pod, pvcnames): # iterate on requested pvc logging.debug("process_pod_pvcdata in Estimate mode") for pvc in pvcnames.split(','): if self._plugin.pvc_is_terminating(namespace, pvc): logging.debug("Skip pvc. Cause Terminating status") self._io.send_warning("Skip pvc `{}` because it is in Terminating status.".format(pvc)) continue if self._plugin.pvc_is_pending(namespace, pvc): logging.debug("Skip pvc. Cause Pending status") self._io.send_warning("Skip pvc `{}` because it is in Pending status.".format(pvc)) continue # get pvcdata for this volume pvcdata = self._plugin.get_pvcdata_namespaced(namespace, pvc) if isinstance(pvcdata, dict) and 'exception' in pvcdata: self._handle_error(PVCDATA_GET_ERROR.format(parse_json_descr(pvcdata))) else: logging.debug('PVCDATA:{}:{}'.format(pvc, pvcdata)) if len(pvcdata) > 0: self._estimate_file(pvcdata) return True def __match_includes(self, info): if len(self._params.get("includes", [])) <= 0: return True any_match = info.match_any_glob( self._params["includes"], self.include_matches ) return any_match def __match_regex_includes(self, info): if len(self._params.get("regex_includes", [])) <= 0: return True any_match = info.match_any_regex( self._params["regex_includes"], self.regex_include_matches ) return any_match def __match_excludes(self, info): if len(self._params.get("excludes", [])) <= 0: return False any_match = info.match_any_glob( self._params["excludes"], self.exclude_matches ) return any_match def __match_regex_excludes(self, info): if len(self._params.get("regex_excludes", [])) <= 0: return False any_match = info.match_any_regex( self._params["regex_excludes"], self.regex_exclude_matches ) return any_match def __verify_all_matches(self): self.__verify_matches("includes", self.include_matches) self.__verify_matches("regex_includes", self.regex_include_matches) self.__verify_matches("excludes", self.exclude_matches) self.__verify_matches("regex_excludes", self.regex_exclude_matches) def __verify_matches(self, pattern_type, matches): if len(self._params.get(pattern_type, [])) <= 0: return patterns = self._params[pattern_type] # Assures that all patterns got at least one match for pattern in patterns: if pattern not in matches: error_msg = PATTERN_NOT_FOUND.format(pattern) 
self._handle_error(error_msg) bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/services/0000755000175000017500000000000014771010173024757 5ustar bsbuildbsbuildbacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/services/service.py0000644000175000017500000000206614771010173026775 0ustar bsbuildbsbuild# Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. from abc import ABCMeta, abstractmethod class Service(metaclass=ABCMeta): """ Abstract Base Class for all the Backend Services Services represents auxiliary Business Logic supporting the main functionality implemented by the Backend Jobs Classes """ @abstractmethod def execute(self, params=None): """ Executes the Service """ raise NotImplementedError bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/services/unexpected_error_service.py0000644000175000017500000000243114771010173032426 0ustar bsbuildbsbuild# The main author of Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. import logging import traceback from baculak8s.io.default_io import DefaultIO from baculak8s.io.packet_definitions import UNEXPECTED_ERROR_PACKET from baculak8s.services.service import Service class UnexpectedErrorService(Service): """ Service that is executed whenever an unexpected exception happens during the Backend execution. """ def __init__(self): self.io = DefaultIO() def execute(self, params=None): # We log the Exception Stack Trace logging.error(UNEXPECTED_ERROR_PACKET) logging.error(traceback.format_exc()) self.io.send_abort(UNEXPECTED_ERROR_PACKET) bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/services/handshake_service.py0000644000175000017500000000520614771010173031002 0ustar bsbuildbsbuild# Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. 
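# --- Illustrative sketch (not part of the Bacula source) --------------------
# The estimation loop above filters object names through glob style
# "includes/excludes" and regex style "regex_includes/regex_excludes"
# parameters, remembers which patterns ever matched, and finally reports
# PATTERN_NOT_FOUND for patterns that never matched anything.  The sketch
# below shows that matching-and-verification idea in isolation; it is a
# simplified stand-in, not the FileInfo/EstimationJob implementation itself.

import fnmatch
import re


def select_names(names, includes=(), regex_includes=()):
    """Return the names selected by the patterns plus the unused patterns."""
    matched_patterns = set()
    selected = []
    for name in names:
        glob_hit = [g for g in includes if fnmatch.fnmatchcase(name, g)]
        regex_hit = [r for r in regex_includes if re.match(r, name)]
        if (not includes or glob_hit) and (not regex_includes or regex_hit):
            selected.append(name)
        matched_patterns.update(glob_hit)
        matched_patterns.update(regex_hit)
    unused = [p for p in list(includes) + list(regex_includes)
              if p not in matched_patterns]
    return selected, unused


# Example: the second glob never matches, so it would be reported as unused.
# select_names(["plugintest-pvc", "db-pvc"], includes=["*pvc", "web-*"])
# -> (["plugintest-pvc", "db-pvc"], ["web-*"])
# -----------------------------------------------------------------------------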
import sys from baculak8s.io.default_io import DefaultIO from baculak8s.services.service import Service INVALID_HANDSHAKE_PACKET = "Invalid Handshake Packet" INVALID_PLUGIN_NAME = "Invalid Plugin Name" INVALID_PLUGIN_API = "Invalid Plugin API Version" HANDSHAKE_OK = "Hello Bacula" class HandshakeService(Service): """ Service that contains the business logic related to the Backend Handshake. """ def __init__(self): self.io = DefaultIO() def execute(self, params=None): """ Reads and parses the Handshake Packet sent to the Backend. The Handshake Packet should have the format: "Hello $pluginname$ $pluginAPI$". :raise SystemExit, in case of a invalid HandshakePacket or in case of unsupported $pluginname$, $pluginAPI$ or if the Backend received an invalid Handshake Packet. :returns The $pluginname$. """ handshake_data = self.__read_handshake_packet() self.__verify_packet_data(handshake_data) self.io.send_command(HANDSHAKE_OK) return handshake_data[1] def __read_handshake_packet(self): _, packet = self.io.read_packet() if not packet: self.io.send_abort(INVALID_HANDSHAKE_PACKET) sys.exit(0) packet = packet.lower() packet_data = packet.split(" ") if len(packet_data) != 3 or packet_data[0] != "hello": self.io.send_abort(INVALID_HANDSHAKE_PACKET) sys.exit(0) return packet_data def __verify_packet_data(self, packet_data): valid_plugins = { "kubernetes": "3", "openshift": "3", } plugin_name = packet_data[1] plugin_api = packet_data[2] if plugin_name not in valid_plugins: self.io.send_abort(INVALID_PLUGIN_NAME) sys.exit(0) if plugin_api is not valid_plugins[plugin_name]: self.io.send_abort(INVALID_PLUGIN_API) sys.exit(0) bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/services/__init__.py0000644000175000017500000000120314771010173027064 0ustar bsbuildbsbuild# Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/services/job_end_service.py0000644000175000017500000000432514771010173030455 0ustar bsbuildbsbuild# Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. import sys from baculak8s.io.default_io import DefaultIO from baculak8s.io.log import Log from baculak8s.io.packet_definitions import TERMINATION_PACKET from baculak8s.services.service import Service END_JOB_START_PACKET = "END" INVALID_END_JOB_START_PACKET = "Invalid End Job Start Packet" INVALID_TERMINATION_PACKET = "Invalid Termination Packet." 
\ " This indicates a possible error " \ "with the reading of the input sent" \ "to the Backend." class JobEndService(Service): """ Service that contains the business logic related to ending the Backend Job """ def __init__(self, job_info, plugin): # The job_info parameter will probably be used in the future self.plugin = plugin self.job_info = job_info self.io = DefaultIO() def execute(self, params=None): if self.job_info.get("query", None) is not None: return self.__read_start() self.plugin.disconnect() self.__read_termination() def __read_start(self): _, packet = self.io.read_packet() if packet != END_JOB_START_PACKET: self.io.send_abort(INVALID_END_JOB_START_PACKET) self.__abort() return self.io.send_eod() def __read_termination(self): packet_header = self.io.read_line() if packet_header != TERMINATION_PACKET: self.__abort() Log.save_received_termination(packet_header) def __abort(self): self.plugin.disconnect() sys.exit(0) bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/services/job_info_service.py0000644000175000017500000000605214771010173030641 0ustar bsbuildbsbuild# Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. import sys import logging import baculak8s from baculak8s.io.services.job_info_io import JobInfoIO, INVALID_JOB_PARAMETER_BLOCK, INVALID_JOB_TYPE, \ JOB_NAME_NOT_FOUND, JOB_ID_NOT_FOUND, JOB_TYPE_NOT_FOUND, INVALID_REPLACE_PARAM, JOB_START_PACKET, \ INVALID_JOB_START_PACKET from baculak8s.services.service import Service TYPE_BACKUP = "b" TYPE_RESTORE = "r" TYPE_ESTIMATION = "e" REPLACE_ALWAYS = 'a' REPLACE_IFNEWER = 'w' REPLACE_NEVER = 'n' REPLACE_IFOLDER = 'o' class JobInfoService(Service): """ Service that contains the business logic related to reading and parsing the information about the Job that should be created and executed by the Backend. 
""" def __init__(self): self.io = JobInfoIO() def execute(self, params=None): self.__read_start() params_block = self.__read_params_block() self.io.send_eod() return params_block def __read_start(self): _, packet = self.io.read_packet() if packet != JOB_START_PACKET: self.io.send_abort(INVALID_JOB_START_PACKET) sys.exit(0) def __read_params_block(self): job_info = self.io.read_job_info() # Parameters validation if not job_info: self.io.send_abort(INVALID_JOB_PARAMETER_BLOCK) sys.exit(0) if "name" not in job_info: self.io.send_abort(JOB_NAME_NOT_FOUND) sys.exit(0) if "jobid" not in job_info: self.io.send_abort(JOB_ID_NOT_FOUND) sys.exit(0) if "replace" in job_info: job_info["replace"] = job_info["replace"].lower() if job_info["replace"] not in [REPLACE_ALWAYS, REPLACE_IFNEWER, REPLACE_NEVER, REPLACE_IFOLDER]: self.io.send_abort(INVALID_REPLACE_PARAM) sys.exit(0) if "namespace" in job_info: logging.debug("FILE NAMESPACE: {}".format(job_info.get("namespace"))) baculak8s.plugins.k8sbackend.k8sfileinfo.defaultk8spath = job_info.get("namespace") job_info.pop("namespace") if "type" not in job_info: self.io.send_abort(JOB_TYPE_NOT_FOUND) sys.exit(0) else: job_info["type"] = job_info["type"].lower() if job_info["type"] not in [TYPE_BACKUP, TYPE_RESTORE, TYPE_ESTIMATION]: self.io.send_abort(INVALID_JOB_TYPE) sys.exit(0) return job_info bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/services/plugin_params_service.py0000644000175000017500000000611114771010173031711 0ustar bsbuildbsbuild# Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. 
import sys import os from baculak8s.io.services.plugin_params_io import PluginParamsIO, INVALID_PLUGIN_PARAMETERS_BLOCK, URL_NOT_FOUND, \ USER_NOT_FOUND, PWD_NOT_FOUND, PASSFILE_NOT_FOUND, PWD_INSIDE_PASSFILE_NOT_FOUND, RESTORE_LOCAL_WITHOUT_WHERE, \ PLUGIN_PARAMETERS_START, INVALID_PLUGIN_PARAMETERS_START from baculak8s.services.service import Service class PluginParamsService(Service): """ Service that contains the business logic related to reading and parsing the information about the Plugin that will be used by the Job to fulfill it's purpose """ def __init__(self, job_info): self.io = PluginParamsIO() self.job_info = job_info def execute(self, params=None): self.__read_start() params_block = self.__read_params_block() self.io.send_eod() return params_block def __read_start(self): _, packet = self.io.read_packet() if packet != PLUGIN_PARAMETERS_START: self.io.send_abort(INVALID_PLUGIN_PARAMETERS_START) sys.exit(0) def __read_params_block(self): params_block = self.io.read_plugin_params() # Parameters validation if not params_block: self.io.send_abort(INVALID_PLUGIN_PARAMETERS_BLOCK) sys.exit(0) if "restore_local" in params_block and ("where" not in self.job_info or self.job_info["where"] == ""): self.io.send_abort(RESTORE_LOCAL_WITHOUT_WHERE) sys.exit(0) if 'pv' in params_block: # 'pv' is an alias for 'persistentvolume' params_block.get('persistentvolume').append(params_block.get('pv')) del params_block['pv'] if 'ns' in params_block: # 'ns' is an alias for 'namespace' params_block.get('namespace').append(params_block.get('ns')) del params_block['ns'] return params_block def __get_password(self, params_block): """ Reads the password from the passfile and puts it into the Plugin Params """ if not os.path.isfile(params_block["passfile"]): self.io.send_abort(PASSFILE_NOT_FOUND) sys.exit(0) with open(params_block["passfile"], "r") as f: params_block["password"] = f.readline() f.close() if not params_block["password"]: self.io.send_abort(PWD_INSIDE_PASSFILE_NOT_FOUND) sys.exit(0) return params_block bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/entities/0000755000175000017500000000000014771010173024760 5ustar bsbuildbsbuildbacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/entities/k8sobjtype.py0000644000175000017500000000656014771010173027443 0ustar bsbuildbsbuild# -*- coding: UTF-8 -*- # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. # # Copyright (c) 2019 by Inteos sp. z o.o. # All rights reserved. IP transfered to Bacula Systems according to agreement. # Author: Radosław Korzeniewski, radekk@inteos.pl, Inteos Sp. z o.o. 
# class K8SObjType(object): K8SOBJ_CONFIGMAP = 'cm' K8SOBJ_DAEMONSET = 'ds' K8SOBJ_DEPLOYMENT = 'dp' K8SOBJ_ENDPOINT = 'ep' K8SOBJ_INGRESS = 'ing' K8SOBJ_LIMITRANGE = 'lr' K8SOBJ_NAMESPACE = 'ns' K8SOBJ_POD = 'pod' K8SOBJ_PVOLCLAIM = 'pvc' K8SOBJ_PVOLUME = 'pv' K8SOBJ_PODTEMPLATE = 'podt' K8SOBJ_REPLICASET = 'rs' K8SOBJ_REPLICACONTR = 'rc' K8SOBJ_RESOURCEQUOTA = 'rq' K8SOBJ_SECRET = 'sec' K8SOBJ_SERVICE = 'svc' K8SOBJ_SERVICEACCOUNT = 'sva' K8SOBJ_STATEFULSET = 'ss' K8SOBJ_PVCDATA = 'pvcdata' K8SOBJ_STORAGECLASS = 'sc' K8SOBJ_NAMESPACE_Path = 'namespaces' K8SOBJ_PVOLUME_Path = 'persistentvolumes' K8SOBJ_PVCS_Path = 'persistentvolumeclaims' K8SOBJ_PVCDATA_Path = 'pvcdata' K8SOBJ_STORAGECLASS_Path = 'storageclass' pathdict = { K8SOBJ_CONFIGMAP: 'configmaps', K8SOBJ_DAEMONSET: 'daemonsets', K8SOBJ_DEPLOYMENT: 'deployments', K8SOBJ_ENDPOINT: 'endpoints', K8SOBJ_INGRESS: 'ingress', K8SOBJ_LIMITRANGE: 'limitranges', K8SOBJ_NAMESPACE: K8SOBJ_NAMESPACE_Path, K8SOBJ_POD: 'pods', K8SOBJ_PVOLCLAIM: K8SOBJ_PVCS_Path, K8SOBJ_PVCDATA: K8SOBJ_PVCS_Path, K8SOBJ_PVOLUME: K8SOBJ_PVOLUME_Path, K8SOBJ_PODTEMPLATE: 'podtemplates', K8SOBJ_REPLICASET: 'replicasets', K8SOBJ_REPLICACONTR: 'replicationcontroller', K8SOBJ_RESOURCEQUOTA: 'resourcequota', K8SOBJ_SECRET: 'secrets', K8SOBJ_SERVICE: 'services', K8SOBJ_SERVICEACCOUNT: 'serviceaccounts', K8SOBJ_STATEFULSET: 'statefulsets', K8SOBJ_STORAGECLASS: K8SOBJ_STORAGECLASS_Path, } methoddict = { K8SOBJ_CONFIGMAP: 'config_map', K8SOBJ_DAEMONSET: 'daemon_set', K8SOBJ_DEPLOYMENT: 'deployment', K8SOBJ_ENDPOINT: 'endpoint', K8SOBJ_INGRESS: 'ingress', K8SOBJ_LIMITRANGE: 'limitrange', K8SOBJ_NAMESPACE: 'namespace', K8SOBJ_POD: 'pod', K8SOBJ_PVOLCLAIM: 'persistentvolume_claim', K8SOBJ_PVCDATA: 'persistentvolume_data', K8SOBJ_PVOLUME: 'persistentvolume', K8SOBJ_PODTEMPLATE: 'pod_template', K8SOBJ_REPLICASET: 'replica_set', K8SOBJ_REPLICACONTR: 'replication_controller', K8SOBJ_RESOURCEQUOTA: 'resource_quota', K8SOBJ_SECRET: 'secret', K8SOBJ_SERVICE: 'service', K8SOBJ_SERVICEACCOUNT: 'service_account', K8SOBJ_STATEFULSET: 'stateful_set', K8SOBJ_STORAGECLASS: 'storageclass', } bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/entities/__init__.py0000644000175000017500000000120214771010173027064 0ustar bsbuildbsbuild# Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald.bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/entities/plugin_object.py0000644000175000017500000000242214771010173030156 0ustar bsbuildbsbuild# -*- coding: UTF-8 -*- # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2023 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. 
# # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. class PluginObject(object): """ Entity representing information about a Plugin Object """ def __init__(self, path, name, cat=None, potype=None, src=None, uuid=None, count=None, size=None): self.path = path self.name = name self.cat = cat self.type = potype self.src = src self.uuid = uuid self.count = count self.size = size def __str__(self): return '{{PluginObject path: {} name:{} cat:{} type:{} src:{} uuid:{} count:{} size:{}}}'\ .format(self.path, self.name, self.cat, self.type, self.src, self.uuid, self.count, self.size) bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/entities/file_info.py0000644000175000017500000000746514771010173027300 0ustar bsbuildbsbuild# -*- coding: UTF-8 -*- # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. import fnmatch import re NOT_EMPTY_FILE = "F" EMPTY_FILE = "E" DIRECTORY = "D" SYMBOLIC_LINK = "S" HARD_LINK = "L" DEFAULT_FILE_MODE = "100640" # -rw-r----- DEFAULT_DIR_MODE = "040755" # drwxr-xr-x MEGABYTE = 1024 * 1024 class FileInfo(object): """ Entity representing information about a File """ def __init__(self, name, ftype, size, uid, gid, accessed_at, modified_at, created_at, mode, nlink, index=None, namespace=None, objtype=None, fullfname=[]): self.name = name self.type = ftype self.size = size self.uid = uid self.gid = gid self.mode = mode self.nlink = nlink self.index = index self.accessed_at = accessed_at self.modified_at = modified_at self.created_at = created_at self.namespace = namespace self.objtype = objtype self.objcache = None self.fullfname = fullfname def __str__(self): return '{{FileInfo name:{} namespace:{} type:{} objtype:{} cached:{}}}'\ .format(str(self.name), str(self.namespace), str(self.type), str(self.objtype), self.objcache is not None) def set_name(self, name: str): self.name = name def is_bucket(self): return self.type == DIRECTORY and not self.name def match_any_glob(self, globs, current_matches): """ Verifies whether this File matches any glob inside $globs$. If it does, the $current_matches$ list will be updated. """ any_match = False for glob in globs: # Glob check if fnmatch.fnmatchcase(self.name, glob): any_match = True current_matches.append(glob) break return any_match def match_any_regex(self, regexes, current_matches): """ Verifies whether this File matches any regex inside $regexs$. If it does, the $current_matches$ list will be updated. """ any_match = False for regex in regexes: if re.match(regex, self.name): any_match = True current_matches.append(regex) break return any_match def apply_regexwhere_param(self, regexwhere): """ Applies a $regexwhere$ into this File, updating it's name. 
""" patterns = regexwhere.split(",") for pattern in patterns: re_flags = 0 if pattern.endswith("/i"): re_flags += re.IGNORECASE separator = pattern[0] splitted = pattern \ .strip(separator) \ .split(separator) splitted[1] = splitted[1].replace("$", "\\") # We remove empty entries splitted = list(filter(None, splitted)) new_name = re.sub(r'%s' % splitted[0], r'' + splitted[1], self.name, flags=re_flags) self.name = new_name bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/io/0000755000175000017500000000000014771010173023543 5ustar bsbuildbsbuildbacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/io/jobs/0000755000175000017500000000000014771010173024500 5ustar bsbuildbsbuildbacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/io/jobs/restore_io.py0000644000175000017500000001560014771010173027226 0ustar bsbuildbsbuild# Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. import re import sys from enum import Enum import logging from baculak8s.entities.file_info import FileInfo from baculak8s.io.default_io import DefaultIO from baculak8s.io.log import Log from baculak8s.io.packet_definitions import ACL_DATA_START, XATTR_DATA_START, EOD_PACKET, STATUS_DATA from baculak8s.plugins.k8sbackend.k8sfileinfo import k8sfileobjecttype RESTORE_START = "RestoreStart" INVALID_RESTORE_START_PACKET = "Invalid restore job start packet" RESTORE_END_PACKET = "FINISH" SUCCESS_PACKET = "OK" SKIP_PACKET = "SKIP" FILE_TRANSFER_START = "DATA" INVALID_XATTRS_TRANSFER_START_PACKET = "Invalid extended attributes transfer start packet. Aborting" INVALID_ACL_TRANSFER_START_PACKET = "Invalid access control list transfer start packet. Aborting" RESTORE_LOOP_ERROR = "Invalid packet during restore loop." FNAME_WITHOUT_FSOURCE_ERROR = "Invalid FNAME packet. It should have information about the Files Source." XATTR_ERROR_TEMPLATE = "Error while transferring files extended attributes to the chosen Data Source" \ "\nFile: {}\nBucket: {}.\n" ACL_ERROR_TEMPLATE = "Error while transferring files access control list to the chosen Data Source" \ "\nFile: {}\nBucket: {}.\n" FILE_ERROR_TEMPLATE = "Error while transferring file content to the chosen Data Source" \ "\n\tFile: {}\n\tNamespace: {}\n\tdetails: {}" BUCKET_ERROR_TEMPLATE = "Error while creating a Bucket on the chosen Data Source" \ "Bucket: {}.\n" COMMA_SEPARATOR_NOT_SUPPORTED = "Comma separator not supported yet." 
class RestorePacket(Enum): FILE_INFO = 1 ACL_START = 2 XATTR_START = 3 RESTORE_END = 4 INVALID_PACKET = 5 class RestoreIO(DefaultIO): def next_loop_packet(self, onError): _, packet = self.read_packet() logging.debug('next_loop_packet:packet:' + str(packet)) if packet is None: return RestorePacket.INVALID_PACKET, None if packet == RESTORE_END_PACKET: return RestorePacket.RESTORE_END, None if packet.startswith("FNAME:"): file_info = self.__read_file_info(packet, onError) return RestorePacket.FILE_INFO, file_info if packet == ACL_DATA_START: return RestorePacket.ACL_START, None if packet == XATTR_DATA_START: return RestorePacket.XATTR_START, None return RestorePacket.INVALID_PACKET, None def __read_file_info(self, full_fname, onError): """ Reads four packages from stdin: 1 - The files FNAME packet 2 - The files STAT packet 3 - The files TSTAMP packet 4 - An EOD packet :return: The file_info data structure """ full_fname = full_fname.replace("FNAME:", "").rstrip("/") if "@" not in full_fname: _, full_stat = self.read_packet() _, full_tstamp = self.read_packet() self.read_eod() self.send_abort(FNAME_WITHOUT_FSOURCE_ERROR) onError() return where_param = self.__read_where_parameter(full_fname) if where_param is not None: full_fname = full_fname.replace(where_param, '', 1).lstrip('/') # creates an array like: # ['@kubernetes', 'namespaces', '$namespace', '$object', '$file.yaml'] # ['@kubernetes', 'namespaces', '$namespace', '$object', '$file.tar'] # ['@kubernetes', 'namespaces', '$namespace', '$file.yaml'] # ['@kubernetes', 'persistentvolumes', '$file.yaml'] fname = full_fname.split("/", 4) _, full_stat = self.read_packet() # creates an array with [$type$, $size$, $uid$, $gid$, $mode$, $nlink$] fstat = re.sub("STAT:", '', full_stat).split(' ') _, full_tstamp = self.read_packet() # creates an array with [$atime$, $mtime$, $ctime$] ftstamp = re.sub("TSTAMP:", '', full_tstamp).split(' ') self.read_eod() # debugging logging.debug("fstat:" + str(fstat)) logging.debug("fname:" + str(fname)) logging.debug("ftstamp:" + str(ftstamp)) objtype = k8sfileobjecttype(fname) return FileInfo( # The name may be empty if we have a bucket file name=fname[-1], ftype=fstat[0], size=int(fstat[1]), uid=fstat[2], gid=fstat[3], mode=fstat[4], nlink=fstat[5], index=fstat[6], accessed_at=int(ftstamp[0]), modified_at=int(ftstamp[1]), created_at=int(ftstamp[2]), namespace=objtype['namespace'], objtype=objtype['obj'], fullfname=fname ) def __read_where_parameter(self, full_fname): if full_fname.startswith("@"): where_param = None else: # we read the where param part of the FNAME packet index_fsource = full_fname.index("/@") where_param = full_fname[0:index_fsource] where_param = where_param.rstrip("/") return where_param def read_data(self): """ Reads a data packet from the stdio: "D000123\n" (Data packet header) "chunk" (Data packet) :return: The data packet content, or None if an EOD packet (F000000) is found instead """ header = self.read_line() if not header: raise ValueError("Packet Header not found") logging.debug('io.read_data: {}'.format(header)) if header == EOD_PACKET: Log.save_received_eod(header) return None # Removes the status of the header to obtain data content length chunk_length = int(header.decode().replace(STATUS_DATA, '')) chunk = sys.stdin.buffer.read(chunk_length) Log.save_received_data(header) return chunk class FileContentReader(RestoreIO): """ Class used to read chunks of file content from standard input. 
""" def __init__(self): self.finished = False def read(self, size=None): if self.finished: return None data = self.read_data() if not data: self.finished = True return data def finished_transfer(self): return self.finished bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/io/jobs/__init__.py0000644000175000017500000000120214771010173026604 0ustar bsbuildbsbuild# Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald.bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/io/services/0000755000175000017500000000000014771010173025366 5ustar bsbuildbsbuildbacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/io/services/__init__.py0000644000175000017500000000120214771010173027472 0ustar bsbuildbsbuild# Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald.bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/io/services/job_info_io.py0000644000175000017500000000575614771010173030231 0ustar bsbuildbsbuild# Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. import datetime from baculak8s.io.default_io import DefaultIO from baculak8s.io.log import Log from baculak8s.io.packet_definitions import EOD_PACKET JOB_START_PACKET = "Job" INVALID_JOB_START_PACKET = "Invalid Job Start Packet" INVALID_JOB_PARAMETER_BLOCK = "Invalid Job Parameter Block" INVALID_JOB_TYPE = "Invalid Job Type. The supported types are:" \ "B - Backup, R - Restore or E - Estimation" INVALID_REPLACE_PARAM = "Invalid Replace Parameter. 
The supported values are:" \ "a - Replace always, w - Replace if newer, n - Never replace" \ "or o - Replace if older" JOB_NAME_NOT_FOUND = "Parameter Job Name Not Found" JOB_ID_NOT_FOUND = "Parameter Job ID Not Found" JOB_TYPE_NOT_FOUND = "Parameter Job Type Not Found" class JobInfoIO(DefaultIO): def read_job_info(self): """ Reads blocks of parameters: "C000111\n" (Command packet header) key1=value1\n (Parameter) "C000222\n" key2=value2\n "C000333\n" key3=value3\n ... until an EOD packet (F000000) is found :return: A dictionary containing the parameters """ block = {} while True: packet_header = self.read_line() if not packet_header: raise ValueError("Packet Header not found") if packet_header == EOD_PACKET: Log.save_received_eod(packet_header) return block else: packet_content = self.read_line().decode() param = packet_content.split("=", 1) # converts key to lowercase param[0] = param[0].lower() if param[0] == "since": # converts the provided timestamp to a utc timestamp parsed_param = int(param[1]) parsed_param = int(datetime.datetime \ .utcfromtimestamp(parsed_param) \ .replace(tzinfo=datetime.timezone.utc) \ .timestamp()) block[param[0]] = parsed_param else: block[param[0]] = param[1] Log.save_received_packet(packet_header, packet_content) bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/io/services/plugin_params_io.py0000644000175000017500000000667314771010173031304 0ustar bsbuildbsbuild# Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. from baculak8s.io.default_io import DefaultIO from baculak8s.io.log import Log from baculak8s.io.packet_definitions import EOD_PACKET PLUGIN_PARAMETERS_START = "Params" INVALID_PLUGIN_PARAMETERS_START = "Invalid Plugin Parameters Start Packet" INVALID_PLUGIN_PARAMETERS_BLOCK = "Invalid Plugin Parameters Block" URL_NOT_FOUND = "Parameter URL not found on Plugin Parameters" USER_NOT_FOUND = "Parameter User not found on Plugin Parameters" PWD_NOT_FOUND = "Parameter Password not found on Plugin Parameters" PASSFILE_NOT_FOUND = "Passfile not found. ERR=No such file or directory" PWD_INSIDE_PASSFILE_NOT_FOUND = "Password inside passfile not found" RESTORE_LOCAL_WITHOUT_WHERE = "Restore Local plugin parameter without Where Plugin Parameter" class PluginParamsIO(DefaultIO): def read_plugin_params(self): """ Reads blocks of parameters: "C000111\n" (Command packet header) key1=value1\n (Parameter) "C000222\n" key2=value2\n "C000333\n" key3=value3\n ... 
until an EOD packet (F000000) is found :return: A dictionary containing the parameters """ block = { "includes": [], "regex_includes": [], "excludes": [], "regex_excludes": [], "namespace": [], "persistentvolume": [], "storageclass": [], } while True: packet_header = self.read_line() if not packet_header: raise ValueError("Packet Header not found") if packet_header == EOD_PACKET: Log.save_received_eod(packet_header) return block else: packet_content = self.read_line().decode() param = packet_content.split("=", 1) # converts key to lowercase param[0] = param[0].lower() # handle single word param without equal sign if len(param) < 2: param.append(True) # Allow simple quote to identify string parameters. if isinstance(param[1], str): if param[1][0] == "'": param[1] = ''.join(param[1].split("'")) # handle array parameters automatically if param[0] in block.keys(): if isinstance(block[param[0]], list): block[param[0]].append(param[1]) else: _b = block[param[0]] block[param[0]] = [_b, param[1]] else: block[param[0]] = param[1] Log.save_received_packet(packet_header, packet_content) bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/io/packet_definitions.py0000644000175000017500000000236314771010173027763 0ustar bsbuildbsbuild# Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. STATUS_COMMAND = "C" STATUS_DATA = "D" STATUS_ABORT = "A" STATUS_ERROR = "E" STATUS_NON_FATAL_ERROR = "Q" STATUS_WARNING = "W" STATUS_INFO = "I" EOD_PACKET = b'F000000' TERMINATION_PACKET = b'T000000' UNEXPECTED_ERROR_PACKET = "Unexpected error. Please check log for details" FILE_DATA_START = "DATA" XATTR_DATA_START = "XATTR" ACL_DATA_START = "ACL" ESTIMATION_START_PACKET = "EstimateStart" QUERY_START_PACKET = "QueryStart" INVALID_ESTIMATION_START_PACKET = "Invalid estimation job start packet" OBJECT_PAGE_ERROR = "Error retrieving a page of buckets." FILE_INFO_ERROR = "Error retrieving information about a file." bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/io/__init__.py0000644000175000017500000000120214771010173025647 0ustar bsbuildbsbuild# Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. 
# # Bacula(R) is a registered trademark of Kern Sibbald.bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/io/default_io.py0000644000175000017500000001604714771010173026240 0ustar bsbuildbsbuild# -*- coding: UTF-8 -*- # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. import sys from baculak8s.io.log import Log from baculak8s.io.packet_definitions import * from baculak8s.plugins.plugin import * CONNECTION_ERROR_TEMPLATE = "Error connecting to the chosen Data Source. {error}." HOST_NOT_FOUND_CONNECTION_ERROR = "404 Not found. Name or service not known" HOST_TIMEOUT_ERROR = "Host connection timeout. Maximum retries exceeded" AUTH_FAILED_CONNECTION_ERROR = "Authentication Failed" UNEXPECTED_CONNECTION_ERROR = "Unrecognized connection error" SSL_ERROR = "SSL verification failed" CONNECTION_REFUSED_TEMPLATE = "Max retry exceeded or Connection refused" class DefaultIO(object): """ Default Class for performing IO. It contains helper methods do read and send packets. """ def send_connection_error(self, error_code, strerror=None): if strerror is None: message = self.__get_connection_error_message(error_code) else: message = strerror self.send_error(message) def send_connection_abort(self, error_code, strerror=None): if strerror is None: message = self.__get_connection_error_message(error_code) else: message = strerror self.send_abort(message) def __get_connection_error_message(self, error_code): if error_code == ERROR_HOST_NOT_FOUND: return CONNECTION_ERROR_TEMPLATE.format(error=HOST_NOT_FOUND_CONNECTION_ERROR) elif error_code == ERROR_HOST_TIMEOUT: return CONNECTION_ERROR_TEMPLATE.format(error=HOST_TIMEOUT_ERROR) elif error_code == ERROR_AUTH_FAILED: return CONNECTION_ERROR_TEMPLATE.format(error=AUTH_FAILED_CONNECTION_ERROR) elif error_code == ERROR_SSL_FAILED: return CONNECTION_ERROR_TEMPLATE.format(error=SSL_ERROR) elif error_code == ERROR_CONNECTION_REFUSED: return CONNECTION_ERROR_TEMPLATE.format(error=CONNECTION_REFUSED_TEMPLATE) else: return CONNECTION_ERROR_TEMPLATE.format(error=UNEXPECTED_CONNECTION_ERROR) def send_eod(self): packet_header = EOD_PACKET + b"\n" sys.stdout.buffer.write(packet_header) sys.stdout.flush() Log.save_sent_eod(packet_header.decode()) def send_abort(self, message): self.send_packet(STATUS_ABORT, message) def send_error(self, message): self.send_packet(STATUS_ERROR, message) def send_non_fatal_error(self, message): self.send_packet(STATUS_NON_FATAL_ERROR, message) def send_warning(self, message): self.send_packet(STATUS_WARNING, message) def send_info(self, message): self.send_packet(STATUS_INFO, message) def send_command(self, message): self.send_packet(STATUS_COMMAND, message) def send_data(self, data): self.send_packet(STATUS_DATA, data, raw=True) def send_packet(self, status, packet_content, raw=False): """ Prints a packet to stdout. 
A packet has the format $status$ + $packet_length$ + \n $packet_content$ + \n where $status$ represents the type of packet sent and $packet_length$ has 6 decimal chars. If $raw$ is True, $packet_content$ will be handled as a Byte String """ bytes_content = packet_content if not raw: packet_content += "\n" bytes_content = packet_content.encode() packet_length = str(len(bytes_content)).zfill(6) packet_header = "{}{}\n".format(status, packet_length) sys.stdout.buffer.write(packet_header.encode()) sys.stdout.buffer.write(bytes_content) sys.stdout.flush() if not raw: Log.save_sent_packet(packet_header, packet_content) else: Log.save_sent_data(packet_header) def send_file_info(self, info): """ Prints four packages into stdout: 1 - The files FNAME packet 2 - The files STAT packet 3 - The files TSTAMP packet 4 - A EOD packet """ self.send_command("FNAME:{}".format(info.name)) self.send_command("TSTAMP:{} {} {}".format(info.accessed_at, info.modified_at, info.created_at)) self.send_command("STAT:{} {} {} {} {} {}".format(info.type, info.size, info.uid, info.gid, info.mode, info.nlink)) self.send_eod() def send_plugin_object(self, podata): """ Prints PluginObject class information into stdout Args: podata (class PluginObject): Entity representing information about a Plugin Object """ self.send_command("PLUGINOBJ:{}".format(podata.path)) self.send_command("PLUGINOBJ_NAME:{}".format(podata.name)) if podata.cat is not None: self.send_command("PLUGINOBJ_CAT:{}".format(podata.cat)) if podata.type is not None: self.send_command("PLUGINOBJ_TYPE:{}".format(podata.type)) if podata.src is not None: self.send_command("PLUGINOBJ_SRC:{}".format(podata.src)) if podata.uuid is not None: self.send_command("PLUGINOBJ_UUID:{}".format(podata.uuid)) if podata.size is not None: self.send_command("PLUGINOBJ_SIZE:{}".format(podata.size)) if podata.count is not None: self.send_command("PLUGINOBJ_COUNT:{}".format(podata.count)) self.send_eod() def send_query_response(self, response): key = response[0] value = response[1] self.send_command("{}={}".format(key, value)) def read_line(self): """ Reads a line from the stdin buffer :return: The line read, as a Byte String, without the newline char """ return sys.stdin.buffer.readline().strip() def read_packet(self): """ Reads a packet from the stdin buffer :return: 1- The packet header, as a Byte String, without the newline char 2- The packet content, as a String, without the newline char """ packet_header = sys.stdin.buffer.readline().strip() packet_content = sys.stdin.buffer.readline().strip().decode() Log.save_received_packet(packet_header, packet_content) return packet_header, packet_content def read_eod(self): packet_header = self.read_line() if not packet_header or packet_header != EOD_PACKET: raise ValueError("EOD packet not found") Log.save_received_eod(packet_header) bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/io/log.py0000644000175000017500000001064314771010173024702 0ustar bsbuildbsbuild# Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. 
# # Bacula(R) is a registered trademark of Kern Sibbald. import logging import os import baculak8s PLUGIN_WORKING = os.getenv("PLUGIN_WORKING", "/opt/bacula/working/kubernetes") PRE_JOB_LOG_NAME_TEMPLATE = "pre_job_{}.log" LOG_NAME_TEMPLATE = "{pid}_{job_id}_{job_name}.log" class LogConfig(object): """ Class used in order to configure the execution Debug Log File It determines whether the Debug Log File should be created, and where it should be created """ @staticmethod def start(): # The Log File should be at the PLUGIN_WORKING path if not os.path.exists(PLUGIN_WORKING): try: os.makedirs(PLUGIN_WORKING) except: # fallback to /tmp baculak8s.io.log.PLUGIN_WORKING = '/tmp/backendplugin' if not os.path.exists(PLUGIN_WORKING): os.makedirs(PLUGIN_WORKING) # The Log File starts with a "Pre Job" name file_name = PRE_JOB_LOG_NAME_TEMPLATE.format(os.getpid()) file_name = os.path.join(PLUGIN_WORKING, file_name) logging.basicConfig(filename=file_name, level=logging.DEBUG, filemode='w+', format='%(levelname)s:[%(pathname)s:%(lineno)d in %(funcName)s] %(message)s') @staticmethod def handle_params(job_info, plugin_params): if "debug" in plugin_params and plugin_params["debug"]: LogConfig._create(job_info) Log.debug_level = int(plugin_params["debug"]) else: LogConfig._delete_pre_job_log() @staticmethod def _create(job_info): pid = os.getpid() old_name = PRE_JOB_LOG_NAME_TEMPLATE.format(pid) new_name = LOG_NAME_TEMPLATE.format(pid=pid, job_id=job_info["jobid"], job_name=job_info["name"]) old_name = os.path.join(PLUGIN_WORKING, old_name) new_name = os.path.join(PLUGIN_WORKING, new_name) if os.path.isfile(old_name): os.rename(old_name, new_name) @staticmethod def _delete_pre_job_log(): pid = os.getpid() pre_job_log_file = PRE_JOB_LOG_NAME_TEMPLATE.format(pid) pre_job_log_file = os.path.join(PLUGIN_WORKING, pre_job_log_file) if os.path.isfile(pre_job_log_file): os.remove(pre_job_log_file) class Log: """ Class with helper methods to send data to the Debug Log """ debug_level = 0 @staticmethod def save_received_termination(packet_header): Log.save_received_packet(packet_header, "(TERMINATION PACKET)") @staticmethod def save_received_eod(packet_header): Log.save_received_packet(packet_header, "(EOD PACKET)") @staticmethod def save_received_data(packet_header): Log.save_received_packet(packet_header, "(DATA PACKET)") @staticmethod def save_received_packet(packet_header, packet_content): if Log.debug_level <= 1: return if Log.debug_level == 2 and packet_header.decode()[0] == 'D': return message = "Received Packet\n{}\n{}\n".format(packet_header.decode(), packet_content) logging.debug(message) @staticmethod def save_sent_eod(packet_header): Log.save_sent_packet(packet_header, "(EOD PACKET)\n") @staticmethod def save_sent_data(packet_header): Log.save_sent_packet(packet_header, "(DATA PACKET)\n") @staticmethod def save_sent_packet(packet_header, packet_content): if Log.debug_level <= 1: return if Log.debug_level == 2 and packet_header[0] == 'D': return message = "Sent Packet\n{}{}".format(packet_header, packet_content) logging.debug(message) @staticmethod def save_exit_code(exit_code): message = "Backend finished with Exit Code: {}".format(exit_code) logging.debug(message) @staticmethod def save_exception(e): logging.exception(e) bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/util/0000755000175000017500000000000014771010173024111 5ustar bsbuildbsbuildbacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/util/iso8601.py0000644000175000017500000001041514771010173025575 0ustar bsbuildbsbuild# Copyright 
(c) 2007 Michael Twomey # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import re import time from datetime import datetime, timedelta, tzinfo __all__ = ["parse_date", "ParseError"] # Adapted from http://delete.me.uk/2005/03/iso8601.html ISO8601_REGEX_RAW = "(?P[0-9]{4})-(?P[0-9]{1,2})-(?P[0-9]{1,2})" \ "T(?P[0-9]{2}):(?P[0-9]{2})(:(?P[0-9]{2})(.(?P[0-9]+))?)?" \ "(?PZ|[-+][0-9]{2}(:?[0-9]{2})?)?" ISO8601_REGEX = re.compile(ISO8601_REGEX_RAW) TIMEZONE_REGEX = re.compile("(?P[+-])(?P[0-9]{2}):?(?P[0-9]{2})?") class ParseError(Exception): """Raised when there is a problem parsing a date string""" # Yoinked from python docs ZERO = timedelta(0) class Utc(tzinfo): """UTC """ def utcoffset(self, dt): return ZERO def tzname(self, dt): return "UTC" def dst(self, dt): return ZERO UTC = Utc() class FixedOffset(tzinfo): """Fixed offset in hours and minutes from UTC """ def __init__(self, name, offset_hours, offset_minutes, offset_seconds=0): self.__offset = timedelta(hours=offset_hours, minutes=offset_minutes, seconds=offset_seconds) self.__name = name def utcoffset(self, dt): return self.__offset def tzname(self, dt): return self.__name def dst(self, dt): return ZERO def __repr__(self): return "" % self.__name def parse_timezone(tzstring): """Parses ISO 8601 time zone specs into tzinfo offsets """ if tzstring == "Z": return UTC if tzstring is None: zone_sec = -time.timezone return FixedOffset(name=time.tzname[0], offset_hours=(zone_sec / 3600), offset_minutes=(zone_sec % 3600) / 60, offset_seconds=zone_sec % 60) m = TIMEZONE_REGEX.match(tzstring) prefix, hours, minutes = m.groups() if minutes is None: minutes = 0 else: minutes = int(minutes) hours = int(hours) if prefix == "-": hours = -hours minutes = -minutes return FixedOffset(tzstring, hours, minutes) def parse_date(datestring): """Parses ISO 8601 dates into datetime objects The timezone is parsed from the date string. However it is quite common to have dates without a timezone (not strictly correct). In this case the default timezone specified in default_timezone is used. This is UTC by default. 
""" if not isinstance(datestring, str): raise ValueError("Expecting a string %r" % datestring) m = ISO8601_REGEX.match(datestring) if not m: raise ParseError("Unable to parse date string %r" % datestring) groups = m.groupdict() tz = parse_timezone(groups["timezone"]) if groups["fraction"] is None: groups["fraction"] = 0 else: groups["fraction"] = int(float("0.{}".format(groups["fraction"])) * 1e6) try: return datetime(int(groups["year"]), int(groups["month"]), int(groups["day"]), int(groups["hour"]), int(groups["minute"]), int(groups["second"]), int(groups["fraction"]), tz) except Exception as e: raise ParseError("Failed to create a valid datetime record due to: {}".format(e)) bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/util/respbody.py0000644000175000017500000000173114771010173026314 0ustar bsbuildbsbuild# -*- coding: UTF-8 -*- # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. import json def parse_json_descr(response): message = 'No details.' descr = response.get('descr') if descr is not None and isinstance(descr, str): try: data = json.loads(descr) message = data.get('message', 'Unknown.') except: pass return message bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/util/__init__.py0000644000175000017500000000123314771010173026221 0ustar bsbuildbsbuild# -*- coding: UTF-8 -*- # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/util/date_util.py0000644000175000017500000000260414771010173026437 0ustar bsbuildbsbuild# -*- coding: UTF-8 -*- # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. 
import datetime from baculak8s.util.iso8601 import parse_date def iso8601_to_unix_timestamp(iso_string): dt = parse_date(iso_string) \ .replace(tzinfo=datetime.timezone.utc) return int(dt.timestamp()) def gmt_to_unix_timestamp(gmt_string): dt = datetime.datetime.strptime(gmt_string, "%a, %d %b %Y %H:%M:%S GMT") return int(dt.timestamp()) def get_time_now(): tstamp = datetime.datetime.now(tz=datetime.timezone.utc).timestamp() return int(tstamp) def datetime_to_unix_timestamp(dt): return int(datetime.datetime.timestamp(dt)) def k8stimestamp_to_unix_timestamp(ts): if isinstance(ts, datetime.datetime): return datetime_to_unix_timestamp(ts) return iso8601_to_unix_timestamp(ts) bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/util/token.py0000644000175000017500000000367014771010173025611 0ustar bsbuildbsbuild# # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. # # Copyright (c) 2019 by Inteos sp. z o.o. # All rights reserved. IP transfered to Bacula Systems according to agreement. # Author: Radosław Korzeniewski, radekk@inteos.pl, Inteos Sp. z o.o. # import logging import random import string TOKENSIZE = 64 def generate_token(size=TOKENSIZE, chars=tuple(string.ascii_letters) + tuple(string.digits)): """ Generates a random string of characters composed on letters and digits :param size: the number of characters to generate - the length of the token string, default is 64 :param chars: the allowed characters set, the default is ascii letters and digits :return: the token used for authorization """ return ''.join(random.choice(chars) for _ in range(size)) def create_auth_data(token): auth_data = 'Token: {token:{size}}\n'.format(token=token, size=TOKENSIZE) return auth_data def auth_data_length(): return len(create_auth_data('')) def check_auth_data(token, data): """ Verifies the authorization data received from peer :param token: the token at size :param data: :return: """ authdata = create_auth_data(token) logging.debug('AUTH_DATA:' + authdata.rstrip('\n')) ddata = data.decode() logging.debug('RECV_TOKEN_DATA:'+ddata.rstrip('\n')) return authdata == ddata bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/util/sslserver.py0000644000175000017500000001711214771010173026515 0ustar bsbuildbsbuild# # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. # # Copyright (c) 2019 by Inteos sp. z o.o. # All rights reserved. 
IP transfered to Bacula Systems according to agreement. # Author: Radosław Korzeniewski, radekk@inteos.pl, Inteos Sp. z o.o. # import os import socket import ssl import time from baculak8s.util.token import * DEFAULTTIMEOUT = 600 DEFAULTCERTFILE = "/opt/bacula/etc/bacula-backup.cert" DEFAULTKEYFILE = "/opt/bacula/etc/bacula-backup.key" CONNECTIONSERVER_AUTHTOK_ERR1 = 'ConnectionServer: Authentication token receiving timeout!' CONNECTIONSERVER_AUTHTOK_ERR2 = 'ConnectionServer: Authentication token error!' CONNECTIONSERVER_AUTHTOK_ERR3 = '{ "message": "Cert files does not exist! Cannot prepare Connection Service!" }' CONNECTIONSERVER_AUTHTOK_ERR4 = '{ "message": "ConnectionServer:Cannot bind to socket! Err={}" }' CONNECTIONSERVER_AUTHTOK_ERR5 = 'ConnectionServer: Timeout waiting...' CONNECTIONSERVER_AUTHTOK_ERR6 = 'ConnectionServer: Invalid Hello Data received!' CONNECTIONSERVER_HELLO_ERR1 = 'ConnectionServer: Hello data receiving timeout!' CONNECTIONSERVER_HELLO_ERR2 = 'ConnectionServer: Invalid Hello data packet: "{}"' CONNECTIONSERVER_HELLO_ERR3 = 'ConnectionServer: Invalid Hello header: "{}"' class ConnectionServer(object): def __init__(self, host, port=9104, token=None, certfile=None, keyfile=None, timeout=DEFAULTTIMEOUT, *args, **kwargs): super(ConnectionServer, self).__init__(*args, **kwargs) self.connstream = None self.token = token if token is not None else generate_token() self.timeout = timeout try: self.timeout = int(self.timeout) except ValueError: self.timeout = DEFAULTTIMEOUT self.timeout = max(1, self.timeout) socket.setdefaulttimeout(self.timeout) self.bindsocket = socket.socket() self.sslcontext = ssl.SSLContext(ssl.PROTOCOL_TLS) self.host = host self.port = port self.certfile = certfile if certfile is not None else DEFAULTCERTFILE self.keyfile = keyfile if keyfile is not None else DEFAULTKEYFILE def streamrecv(self, size): try: data = self.connstream.recv(size) except socket.timeout: data = None return data def streamsend(self, data): status = True try: self.connstream.send(data) except socket.timeout: status = False finally: return status def authenticate(self): hello = self.gethello() if not isinstance(hello, dict) or 'error' in hello: logging.debug(CONNECTIONSERVER_AUTHTOK_ERR6) return { 'error': CONNECTIONSERVER_AUTHTOK_ERR6, } response = hello.get('response') if response is not None and response[0] != 'Hello': logging.debug(CONNECTIONSERVER_AUTHTOK_ERR6) return { 'error': CONNECTIONSERVER_AUTHTOK_ERR6, } try: data = self.connstream.recv(auth_data_length()) except socket.timeout: logging.debug(CONNECTIONSERVER_AUTHTOK_ERR1) return { 'error': CONNECTIONSERVER_AUTHTOK_ERR1, } if check_auth_data(self.token, data): self.connstream.send(b'OK') logging.debug('ConnectionServer:Authenticated') return {} else: self.connstream.send(b'NO') logging.error(CONNECTIONSERVER_AUTHTOK_ERR2) return { 'error': CONNECTIONSERVER_AUTHTOK_ERR2, } def gethello(self) -> dict: data = "" try: data = self.connstream.recv(4) except socket.timeout: logging.debug(CONNECTIONSERVER_HELLO_ERR1) return { 'error': CONNECTIONSERVER_HELLO_ERR1, } ddata = data.decode() if ddata[3] != ':': logging.debug(CONNECTIONSERVER_HELLO_ERR3.format(ddata)) return { 'error': CONNECTIONSERVER_HELLO_ERR3.format(ddata), } ddata = ddata[:3] try: datalen = int(ddata) except ValueError: logging.debug(CONNECTIONSERVER_HELLO_ERR2.format(ddata)) return { 'error': CONNECTIONSERVER_HELLO_ERR2.format(ddata), } try: data = self.connstream.recv(datalen) except socket.timeout: logging.debug(CONNECTIONSERVER_HELLO_ERR1) return { 
'error': CONNECTIONSERVER_HELLO_ERR1, } ddata = data.decode().split(':') logging.debug(ddata) return { 'response': ddata } def close(self): self.connstream.shutdown(socket.SHUT_RDWR) self.connstream.close() def shutdown(self): self.bindsocket.close() def listen(self): if not os.path.exists(self.certfile) or not os.path.exists(self.keyfile): logging.error(CONNECTIONSERVER_AUTHTOK_ERR3) return { 'error': True, 'descr': CONNECTIONSERVER_AUTHTOK_ERR3, } self.sslcontext.load_cert_chain(certfile=self.certfile, keyfile=self.keyfile) ops = 0 lastexcept = "" for ops in range(self.timeout): try: self.bindsocket.bind((self.host, self.port)) except OSError as e: logging.error(e) lastexcept = str(e) time.sleep(5) else: break if ops == self.timeout - 1: logging.error(CONNECTIONSERVER_AUTHTOK_ERR4.format(lastexcept)) return { 'error': True, 'descr': CONNECTIONSERVER_AUTHTOK_ERR4.format(lastexcept) } logging.debug('ConnectionServer:Listening...') self.bindsocket.listen(5) return {} def handle_connection(self, process_client_data): try: newsocket, fromaddr = self.bindsocket.accept() except socket.timeout: logging.error(CONNECTIONSERVER_AUTHTOK_ERR5) return { 'error': CONNECTIONSERVER_AUTHTOK_ERR5, 'should_remove_pod': 1, } logging.debug("ConnectionServer:Connection from: {}".format(fromaddr)) self.connstream = self.sslcontext.wrap_socket(newsocket, server_side=True) try: authresp = self.authenticate() if 'error' in authresp: return authresp process_client_data(self.connstream) finally: logging.debug('ConnectionServer:Finish - disconnect.') try: self.connstream.shutdown(socket.SHUT_RDWR) except Exception as e: logging.debug('throw exception when disconnect.') logging.error(e) logging.debug('ConnectionServer:Finish - shutdown.') self.connstream.close() logging.debug('ConnectionServer:Finish - closed.') return {} bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/util/lambda_util.py0000644000175000017500000000254114771010173026742 0ustar bsbuildbsbuild# Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. import logging import time K8S_NUM_MAX_TRIES_ERROR = "Reached maximum number of tries! 
Message: {message}" NUM_MAX_TRIES = 20 def apply(condition, iterable): # Helper method to map lambdas on iterables # Created just for readability return list(map(condition, iterable)) def wait_until_resource_is_ready(action, error_msg = 'Reached num maximum tries.', sleep = 3): logging.debug('Waiting until resource is ready') tries = 0 is_ready = False while not is_ready: is_ready = action() if tries >= NUM_MAX_TRIES: logging.debug("Reached num maximum tries.") return { 'error': K8S_NUM_MAX_TRIES_ERROR.format(message=error_msg)} time.sleep(sleep) return {}bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/util/size_util.py0000644000175000017500000000314114771010173026471 0ustar bsbuildbsbuild# -*- coding: UTF-8 -*- # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. def k8s_size_to_int(res): if isinstance(res, str): if res.endswith("Ki"): return int(res[:-2]) * 1024 if res.endswith("Mi"): return int(res[:-2]) * 1024 * 1024 if res.endswith("Gi"): return int(res[:-2]) * 1024 * 1024 * 1024 if res.endswith("Pi"): return int(res[:-2]) * 1024 * 1024 * 1024 * 1024 if res.endswith("Ti"): return int(res[:-2]) * 1024 * 1024 * 1024 * 1024 * 1024 if res.endswith("K"): return int(res[:-1]) * 1000 if res.endswith("M"): return int(res[:-1]) * 1000000 if res.endswith("G"): return int(res[:-1]) * 1000000000 if res.endswith("P"): return int(res[:-1]) * 1000000000000 if res.endswith("T"): return int(res[:-1]) * 1000000000000000 if res.endswith("m"): return int(res[:-1]) / 1000.0 return 0 return res bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/util/boolparam.py0000644000175000017500000000356414771010173026447 0ustar bsbuildbsbuild# -*- coding: UTF-8 -*- # # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. # # Copyright (c) 2020 by Inteos sp. z o.o. # All rights reserved. IP transfered to Bacula Systems according to agreement. # Author: Radosław Korzeniewski, radekk@inteos.pl, Inteos Sp. z o.o. # class BoolParam(object): @staticmethod def handleParam(param, _default = False): """ Function handles configuration bool parameter expressed in any known value. It handles string values like '1', 'Yes', etc. as True and '0', 'No', etc as False. Args: param (any): parameter value to process _default (bool, optional): use this value when parameter handling fail. Defaults to False. 
Returns: bool: return processed value """ if not isinstance(_default, bool): _default = False if param is not None: if isinstance(param, str): if param.lower() in ('1', 'yes', 'true'): return True if param.lower() in ('0', 'no', 'false'): return False if isinstance(param, int) or isinstance(param, bool) or isinstance(param, float): if param: return True else: return False # finally return default return _default bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/util/dict_util.py0000644000175000017500000000134114771010173026442 0ustar bsbuildbsbuild# Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. def merge_two_dicts(d1, d2): merged = d1.copy() merged.update(d2) return merged bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/plugins/0000755000175000017500000000000014771010173024615 5ustar bsbuildbsbuildbacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/plugins/fs_plugin.py0000644000175000017500000001567014771010173027166 0ustar bsbuildbsbuild# -*- coding: UTF-8 -*- # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. 
import datetime import json import logging import os from json import JSONEncoder import yaml from baculak8s.entities.file_info import FileInfo from baculak8s.entities.k8sobjtype import K8SObjType from baculak8s.plugins.k8sbackend.k8sfileinfo import encoder_load from baculak8s.plugins.plugin import Plugin class K8SEncoder(JSONEncoder): # handles a JSON encoder for kubernetes objects def default(self, o): if isinstance(o, datetime.datetime): return o.strftime("%Y-%m-%dT%H:%M:%S%Z") else: todict = getattr(o, 'to_dict', None) if todict is not None: odict = o.to_dict() # try to remap dictionary to attributes map amap = getattr(o, 'attribute_map', None) if amap is not None: for att in amap: attval = amap.get(att) if attval != att and att in odict: odict[attval] = odict[att] del odict[att] return odict else: return json.JSONEncoder.default(self, o) class FileSystemPlugin(Plugin): """ Plugin that communicates with the Local Filesystem """ def __init__(self, confdata): logging.debug("params:" + str(confdata)) self._params = confdata def list_in_path(self, path): raise NotImplementedError def list_all_namespaces(self): raise NotImplementedError def list_all_persistentvolumes(self): raise NotImplementedError def list_namespaced_objects(self, namespace): raise NotImplementedError def query_parameter(self, parameter): raise NotImplementedError def check_file(self, file_info): file_path = os.path.join(self._params.get('where', '/'), file_info.name) try: file_stats = os.stat(file_path) except FileNotFoundError: return None return file_stats def connect(self): """ Implementation of Plugin.connect(self) No need to connect to the Local FS """ return {} def disconnect(self): """ Implementation of Plugin.disconnect(self) No need to disconnect from the Local FS """ return {} def apply_where_parameter(self, file_info, where_param): """ Implementation of Plugin.apply_where_parameter(self, file_info, where_param) """ file_info.name = "{}{}{}{}".format(os.path.sep, where_param, os.path.sep, os.path.sep.join(file_info.fullfname)) return file_info def upload_bucket_acl(self, bucket, acl): return {} def upload_bucket_xattrs(self, file_info: FileInfo): return {} def upload_file_xattrs(self, file_info: FileInfo): return {} def upload_file_acl(self, bucket, filename, acl): return {} def upload_bucket(self, bucket_info): """ Implementation of Plugin.upload_bucket(self, bucket_info) Creates the bucket as a directory on the Local Filesystem """ dir_path = os.path.join(self._params["restore_local_path"], bucket_info.bucket) # We change the path to the local OS path format dir_path = os.path.abspath(dir_path) # If the files directory doesn't exist yet, we create it os.makedirs(dir_path, exist_ok=True) os.chmod(dir_path, int(bucket_info.mode, 8)) return {"success": 'True'} def restore_file(self, file_info, file_content_source=None): """ Implementation of Plugin.restore_file(self, file_info, file_content_source=None) Creates the file on the Local Filesystem """ mode = None file_ext = file_info.name[-4:] if file_info.objtype != K8SObjType.K8SOBJ_PVCDATA: # if change output format then mode is not None mode = self._params.get('outputformat', None) logging.debug("outputformat: {}".format(mode)) if mode is not None: mode = mode.lower() if mode not in ('json', 'yaml'): mode = None else: file_ext = mode # We change the path to the local OS path format file_path = os.path.abspath(file_info.name[:-4]+file_ext) # If the files path doesn't exist yet, we create it os.makedirs(os.path.dirname(file_path), exist_ok=True) 
logging.debug("file_path: {}".format(file_path)) if not file_content_source: # We create an empty file with open(file_path, "w") as f: f.close() else: logging.debug("save mode: {}".format(mode)) if mode is not None: strdata = b'' while True: chunk = file_content_source.read() if chunk is None: break strdata = strdata + chunk # here we have an api object translated to simple dict # logging.debug("STRDATA:" + str(strdata)) el = encoder_load(strdata, file_info.name) jd = json.dumps(el, cls=K8SEncoder, sort_keys=True) data = json.loads(jd) # logging.debug("EL:" + str(el)) # logging.debug("JD:" + str(jd)) # logging.debug("DATA:" + str(data)) if 'json' == mode: with open(file_path, "w") as out_file: json.dump(data, out_file, indent=3, sort_keys=True) if 'yaml' == mode: with open(file_path, "w") as out_file: yaml.dump(data, out_file, Dumper=yaml.SafeDumper, default_flow_style=False) out_file.close() else: # We create an file with the contents from file_content_source with open(file_path, "wb") as f: while True: chunk = file_content_source.read() if chunk is None: break f.write(chunk) f.close() os.chmod(file_path, int(file_info.mode, 8)) return {"success": 'True'} bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/plugins/__init__.py0000644000175000017500000000120314771010173026722 0ustar bsbuildbsbuild# Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/plugins/k8sbackend/0000755000175000017500000000000014771010173026632 5ustar bsbuildbsbuildbacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/plugins/k8sbackend/deployment.py0000644000175000017500000000542414771010173031371 0ustar bsbuildbsbuild# -*- coding: UTF-8 -*- # # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. # # Copyright (c) 2019 by Inteos sp. z o.o. # All rights reserved. IP transfered to Bacula Systems according to agreement. # Author: Radosław Korzeniewski, radekk@inteos.pl, Inteos Sp. z o.o. 
# from baculak8s.entities.file_info import NOT_EMPTY_FILE from baculak8s.plugins.k8sbackend.k8sfileinfo import * from baculak8s.plugins.k8sbackend.k8sutils import * def deployments_read_namespaced(appsv1api, namespace, name): return appsv1api.read_namespaced_deployment(name, namespace) def deployments_list_namespaced(appsv1api, namespace, estimate=False, labels=""): dplist = {} deployments = appsv1api.list_namespaced_deployment(namespace=namespace, watch=False, label_selector=labels) for dp in deployments.items: dpdata = deployments_read_namespaced(appsv1api, namespace, dp.metadata.name) spec = encoder_dump(dpdata) dplist['dp-' + dp.metadata.name] = { 'spec': spec if not estimate else None, 'fi': k8sfileinfo(objtype=K8SObjType.K8SOBJ_DEPLOYMENT, nsname=namespace, name=dp.metadata.name, ftype=NOT_EMPTY_FILE, size=len(spec), creation_timestamp=dpdata.metadata.creation_timestamp), } return dplist def deployments_restore_namespaced(appsv1api, file_info, file_content): dp = encoder_load(file_content, file_info.name) metadata = prepare_metadata(dp.metadata) # Instantiate the daemon_set object deployment = client.V1Deployment( api_version=dp.api_version, kind="Deployment", spec=dp.spec, metadata=metadata ) if file_info.objcache is not None: # object exist so we replace it response = appsv1api.replace_namespaced_deployment(k8sfile2objname(file_info.name), file_info.namespace, deployment, pretty='true') else: # object does not exist, so create one as required response = appsv1api.create_namespaced_deployment(file_info.namespace, deployment, pretty='true') return {'response': response} bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/plugins/k8sbackend/volumesnapshotclass.py0000644000175000017500000000624114771010173033324 0ustar bsbuildbsbuild# -*- coding: UTF-8 -*- # # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2023 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. # # Author: Francisco Manuel Garcia Botella, francisco.garcia@baculasystems.com. 
# import logging import pathlib from kubernetes import client K8SOBJ_SNAPSHOT_GROUP = 'snapshot.storage.k8s.io' K8SOBJ_SNAPSHOT_VERSION = 'v1' K8SOBJ_SNAPSHOT_PLURAL = 'volumesnapshotclasses' def volumesnapshotclass_list_all(custom_api, filter_names=None): vol_snap_list = {} # volume_snapshot_classes = [] volume_snapshot_classes = custom_api.list_cluster_custom_object(K8SOBJ_SNAPSHOT_GROUP, K8SOBJ_SNAPSHOT_VERSION, K8SOBJ_SNAPSHOT_PLURAL, watch=False) logging.debug("Volume snapshot classes:{}".format(volume_snapshot_classes)) if volume_snapshot_classes: for vol_snap in volume_snapshot_classes.get('items'): if filter_names is not None and len(filter_names) > 0: logging.debug("filter_names-glob-for: {}".format(vol_snap.get('metadata').get('name'))) found = False for vol_glob in filter_names: logging.debug("checking vol_glob: {}".format(vol_glob)) if pathlib.Path(vol_snap.get('metadata').get('name')).match(vol_glob): found = True logging.debug('Found volSnap.') break if not found: continue vol_snap_list[vol_snap.get('metadata').get('name')] = vol_snap return vol_snap_list def snapshot_api_is_supported(): for api in client.ApisApi().get_api_versions().groups: if api.name == K8SOBJ_SNAPSHOT_GROUP: for v in api.versions: if v.version == K8SOBJ_SNAPSHOT_VERSION: logging.debug('Cluster support snapshots API') return True logging.debug('Cluster does not support snapshots API') return False def get_snapshot_drivers_compatible(custom_api): compatible_drivers = [] if not snapshot_api_is_supported(): return compatible_drivers volume_snapshot_classes = [] try: # raise Exception("This is a probe") volume_snapshot_classes = volumesnapshotclass_list_all(custom_api) except Exception as e: logging.error('Controlled exception. This exception is because the volumesnapshotclasses is nos available. We can confirm this fact with `kubectl api-resources`') logging.exception(e) pass for vol_snap_name in volume_snapshot_classes: compatible_drivers.append(volume_snapshot_classes[vol_snap_name].get('driver')) return compatible_drivers bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/plugins/k8sbackend/service.py0000644000175000017500000000605714771010173030654 0ustar bsbuildbsbuild# -*- coding: UTF-8 -*- # # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. # # Copyright (c) 2019 by Inteos sp. z o.o. # All rights reserved. IP transfered to Bacula Systems according to agreement. # Author: Radosław Korzeniewski, radekk@inteos.pl, Inteos Sp. z o.o. 
# from baculak8s.entities.file_info import NOT_EMPTY_FILE from baculak8s.plugins.k8sbackend.k8sfileinfo import * from baculak8s.plugins.k8sbackend.k8sutils import * def services_read_namespaced(corev1api, namespace, name): return corev1api.read_namespaced_service(name, namespace) def services_list_namespaced(corev1api, namespace, estimate=False, labels=""): srvlist = {} services = corev1api.list_namespaced_service(namespace=namespace, watch=False, label_selector=labels) for srv in services.items: srvdata = services_read_namespaced(corev1api, namespace, srv.metadata.name) spec = encoder_dump(srvdata) srvlist['srv-' + srv.metadata.name] = { 'spec': spec if not estimate else None, 'fi': k8sfileinfo(objtype=K8SObjType.K8SOBJ_SERVICE, nsname=namespace, name=srv.metadata.name, ftype=NOT_EMPTY_FILE, size=len(spec), creation_timestamp=srvdata.metadata.creation_timestamp), } return srvlist def services_restore_namespaced(corev1api, file_info, file_content): srv = encoder_load(file_content, file_info.name) metadata = prepare_metadata(srv.metadata) # Instantiate the service object service = client.V1Service( api_version=srv.api_version, kind="Service", spec=srv.spec, metadata=metadata ) # If it is headless service, we need put 'None' in cluster_ip. # In other case, we need put None to assign another IP in cluster. if service.spec.cluster_ip != 'None' and service.spec.type == 'ClusterIP': # clean some data service.spec.cluster_ip = None service.spec.cluster_i_ps = [] if file_info.objcache is not None: # object exist so we replace it response = corev1api.replace_namespaced_service(k8sfile2objname(file_info.name), file_info.namespace, service, pretty='true') else: # object does not exist, so create one as required response = corev1api.create_namespaced_service(file_info.namespace, service, pretty='true') return {'response': response} bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/plugins/k8sbackend/serviceaccounts.py0000644000175000017500000000575114771010173032414 0ustar bsbuildbsbuild# -*- coding: UTF-8 -*- # # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. # # Copyright (c) 2019 by Inteos sp. z o.o. # All rights reserved. IP transfered to Bacula Systems according to agreement. # Author: Radosław Korzeniewski, radekk@inteos.pl, Inteos Sp. z o.o. 
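#
# A minimal sketch showing that the labels argument of the list helpers above
# is passed straight through as a Kubernetes label selector; the selector
# string and the namespace are assumptions for the example.
def _example_list_services_by_label():
    from kubernetes import client, config
    config.load_kube_config()
    corev1 = client.CoreV1Api()
    srvlist = services_list_namespaced(corev1, "default",
                                       labels="app=baculabackup")
    print(list(srvlist))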
# from baculak8s.entities.file_info import NOT_EMPTY_FILE from baculak8s.plugins.k8sbackend.k8sfileinfo import * from baculak8s.plugins.k8sbackend.k8sutils import * def service_accounts_read_namespaced(corev1api, namespace, name): return corev1api.read_namespaced_service_account(name, namespace) def service_accounts_list_namespaced(corev1api, namespace, estimate=False, labels=""): salist = {} serviceaccounts = corev1api.list_namespaced_service_account(namespace=namespace, watch=False, label_selector=labels) for sa in serviceaccounts.items: sadata = service_accounts_read_namespaced(corev1api, namespace, sa.metadata.name) spec = encoder_dump(sadata) salist['sa-' + sa.metadata.name] = { 'spec': spec if not estimate else None, 'fi': k8sfileinfo(objtype=K8SObjType.K8SOBJ_SERVICEACCOUNT, nsname=namespace, name=sa.metadata.name, ftype=NOT_EMPTY_FILE, size=len(spec), creation_timestamp=sadata.metadata.creation_timestamp), } return salist def service_accounts_restore_namespaced(corev1api, file_info, file_content): sa = encoder_load(file_content, file_info.name) metadata = prepare_metadata(sa.metadata) # Instantiate the serviceaccount object serviceaccount = client.V1ServiceAccount( api_version=sa.api_version, kind="ServiceAccount", automount_service_account_token=sa.automount_service_account_token, image_pull_secrets=sa.image_pull_secrets, secrets=sa.secrets, metadata=metadata ) if file_info.objcache is not None: # object exist so we replace it response = corev1api.replace_namespaced_service_account(k8sfile2objname(file_info.name), file_info.namespace, serviceaccount, pretty='true') else: # object does not exist, so create one as required response = corev1api.create_namespaced_service_account(file_info.namespace, serviceaccount, pretty='true') return {'response': response} bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/plugins/k8sbackend/pvcdata.py0000644000175000017500000001652414771010173030636 0ustar bsbuildbsbuild# -*- coding: UTF-8 -*- # # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. # # Copyright (c) 2020 by Inteos sp. z o.o. # All rights reserved. IP transfered to Bacula Systems according to agreement. # Author: Radosław Korzeniewski, radekk@inteos.pl, Inteos Sp. z o.o. # import logging import kubernetes from baculak8s.entities.file_info import (DEFAULT_FILE_MODE, NOT_EMPTY_FILE, FileInfo) from baculak8s.entities.k8sobjtype import K8SObjType from baculak8s.plugins.k8sbackend.k8sfileinfo import NOW_TIMESTAMP, k8sfileinfo from baculak8s.plugins.k8sbackend.persistentvolumeclaims import ( persistentvolumeclaims_namespaced_names, persistentvolumeclaims_read_namespaced) from baculak8s.plugins.k8sbackend.pods import pods_namespaced_specs from baculak8s.util.size_util import k8s_size_to_int def pvcdata_list_update_node_names(corev1api, namespace, pvcdatalist): """ Updates node_name values for pvcdatalist. 
Args: corev1api (corev1api): kubernetes corev1api instance namespace (str): kubernetes namespace pvcdatalist (dict): pvc data list as dictionary Returns: dict: updated pvc data list as dictionary """ # here we collect node_names for proper backup pod deployment logging.debug('Init pvc nodes list') pods = pods_namespaced_specs(corev1api, namespace=namespace) # logging.debug('Get pods:{}'.format(pods)) for pod in pods: if pod.spec is not None and pod.spec.volumes is not None: for vol in pod.spec.volumes: if vol.persistent_volume_claim is not None: pvcname = vol.persistent_volume_claim.claim_name for pvcf in pvcdatalist: if pvcname == pvcdatalist[pvcf].get('name') and pvcdatalist[pvcf].get('node_name') is None: logging.debug('Pvcf: {} -- Node_name: {}'.format(pvcf, pod.spec.node_name)) pvcdatalist[pvcf]['node_name'] = pod.spec.node_name logging.debug('End pvc nodes list') return pvcdatalist def pvcdata_get_namespaced(corev1api, namespace, pvcname, pvcalias=None): """ Return a single pvcdata dict for requested pvcname. Args: corev1api (corev1api): kubernetes corev1api instance namespace (str): kubernetes namespace pvcname (str): requested pvc name pvcalias (str, optional): when not None then File_Info object wil use it as file name. Defaults to None. Returns: dict: pvc data dict """ pvc = persistentvolumeclaims_read_namespaced(corev1api, namespace, pvcname) logging.debug('Read PVC: {}'.format(pvc)) pvcspec = pvc.spec storageclassname = pvcspec.storage_class_name pvcsize = pvcspec.resources.requests.get('storage', '-1') pvcdata = { 'name': pvcname, 'node_name': None, 'storage_class_name': storageclassname, 'capacity': pvcsize, 'fi': k8sfileinfo(objtype=K8SObjType.K8SOBJ_PVCDATA, nsname=namespace, name=pvcname if pvcalias is None else pvcalias, ftype=NOT_EMPTY_FILE, size=pvcsize), } pvcdatalist = pvcdata_list_update_node_names(corev1api, namespace, {pvcname: pvcdata}) logging.debug('Read persistent volume claim completed') return pvcdatalist.get(pvcname) def pvcdata_list_namespaced(corev1api, namespace, estimate=False, pvcfilter=True, labels=""): """ Return a list of pvcdata dicts for selected namespace. Args: corev1api (corev1api): kubernetes corev1api instance namespace (str): kubernetes namespace estimate (bool, optional): select if we do estimate (True) or backup job (False). Defaults to False. pvcfilter (bool, optional): when not None and not empty list then select pvc which name is in this list only. Defaults to True. 
labels (str, optional): selector labels Returns: dict: pvc data list as dictionary """ pvcdata = {} if pvcfilter: logging.debug("pvcfilter: {}".format(pvcfilter)) pvcnamelist = persistentvolumeclaims_namespaced_names(corev1api, namespace, labels) for pvcn in pvcnamelist: pvcname = pvcn[1] logging.debug('found:{}'.format(pvcname)) if isinstance(pvcfilter, list) and pvcname not in pvcfilter: continue pvcsize = pvcn[3] pvcdata[pvcname] = { 'name': pvcname, 'node_name': None, 'storage_class_name': pvcn[2], 'capacity': pvcsize, 'fi': k8sfileinfo(objtype=K8SObjType.K8SOBJ_PVCDATA, nsname=namespace, name=pvcname, ftype=NOT_EMPTY_FILE, size=pvcsize), } logging.debug("add pvc: {}".format(pvcdata[pvcname])) # here we collect node_names for proper backup pod deployment pods = pods_namespaced_specs(corev1api, namespace=namespace) for pod in pods: if pod.spec.volumes is not None: for vol in pod.spec.volumes: if vol.persistent_volume_claim is not None: pvcname = vol.persistent_volume_claim.claim_name for pvcf in pvcdata: if pvcname == pvcdata[pvcf].get('name') and pvcdata[pvcf].get('node_name') is None: pvcdata[pvcf]['node_name'] = pod.spec.node_name return pvcdata def list_pvcs_namespaced(corev1api, namespace): """ Return pvclist for selected namespace with FileInfo object only. This function is useful in listing mode only. Args: corev1api (corev1api): kubernetes corev1api instance namespace (str): kubernetes namespace Returns: dict: pvc data list as dictionary """ pvcslist = {} pvcnamelist = persistentvolumeclaims_namespaced_names(corev1api, namespace) for pvcn in pvcnamelist: pvcname = pvcn[1] pvcsize = pvcn[3] logging.debug('found:{} : {}'.format(pvcname, pvcsize)) name = "/{}/{}/{}/{}".format(K8SObjType.K8SOBJ_NAMESPACE_Path, namespace, K8SObjType.K8SOBJ_PVCDATA_Path, pvcname) pvcslist[pvcname] = { 'fi': FileInfo(name=name, ftype=NOT_EMPTY_FILE, size=k8s_size_to_int(pvcsize), uid=0, gid=0, mode=DEFAULT_FILE_MODE, nlink=1, modified_at=NOW_TIMESTAMP, accessed_at=NOW_TIMESTAMP, created_at=NOW_TIMESTAMP), } return pvcslist bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/plugins/k8sbackend/podexec.py0000644000175000017500000000414614771010173030640 0ustar bsbuildbsbuild# -*- coding: UTF-8 -*- # # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. # # All rights reserved. IP transfered to Bacula Systems according to agreement. # Author: Radosław Korzeniewski, radekk@inteos.pl, Inteos Sp. z o.o. 
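#
# A minimal sketch of building the PVC work list with the helpers above,
# assuming the namespace "default" and a claim named "data-mysql-0" (both
# assumptions for the example).  Passing a list as pvcfilter restricts the
# result to those claims; node_name is filled in from the pods mounting each
# claim so the backup pod can later be scheduled on the same node.
def _example_pvc_worklist():
    from kubernetes import client, config
    config.load_kube_config()
    corev1 = client.CoreV1Api()
    worklist = pvcdata_list_namespaced(corev1, "default",
                                       pvcfilter=["data-mysql-0"])
    for name, pvc in worklist.items():
        print(name, pvc['capacity'], pvc['node_name'])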
# import logging import kubernetes import yaml from kubernetes.stream import stream from kubernetes.stream.ws_client import (ERROR_CHANNEL, STDERR_CHANNEL, STDOUT_CHANNEL) DEFAULTTIMEOUT = 600 class ExecStatus(object): Success = 'Success' Failure = 'Failure' @staticmethod def check_status(info_channel): if info_channel is not None: if info_channel.get('status', ExecStatus.Failure) == ExecStatus.Success: return True return False def exec_commands(corev1api, namespace, podname, container, command): exec_command = [ '/bin/sh', '-c', command ] client = stream(corev1api.connect_get_namespaced_pod_exec, podname, namespace, command=exec_command, container=container, stderr=True, stdin=False, stdout=True, tty=False, _preload_content=False) client.run_forever(timeout=DEFAULTTIMEOUT) out_channel = client.read_channel(STDOUT_CHANNEL) err_channel = client.read_channel(STDERR_CHANNEL) info_channel = yaml.load(client.read_channel(ERROR_CHANNEL), Loader=yaml.FullLoader) return out_channel, err_channel, info_channel bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/plugins/k8sbackend/configmaps.py0000644000175000017500000000545414771010173031342 0ustar bsbuildbsbuild# -*- coding: UTF-8 -*- # # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. # # Copyright (c) 2019 by Inteos sp. z o.o. # All rights reserved. IP transfered to Bacula Systems according to agreement. # Author: Radosław Korzeniewski, radekk@inteos.pl, Inteos Sp. z o.o. 
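#
# A minimal sketch of running a command inside a container with
# exec_commands() above and checking the result through the error channel;
# the pod, container and command values are assumptions for the example.
def _example_exec_in_pod():
    from kubernetes import client, config
    config.load_kube_config()
    corev1 = client.CoreV1Api()
    out, err, info = exec_commands(corev1, "default", "mypod", "app",
                                   "sync; echo done")
    if not ExecStatus.check_status(info):
        raise RuntimeError("exec failed: {}".format(err))
    print(out)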
# from baculak8s.entities.file_info import NOT_EMPTY_FILE from baculak8s.plugins.k8sbackend.k8sfileinfo import * from baculak8s.plugins.k8sbackend.k8sutils import * def config_map_read_namespaced(corev1api, namespace, name): return corev1api.read_namespaced_config_map(name, namespace) def config_maps_list_namespaced(corev1api, namespace, estimate=False, labels=""): cmlist = {} configmaps = corev1api.list_namespaced_config_map(namespace=namespace, watch=False, label_selector=labels) for cm in configmaps.items: cmdata = config_map_read_namespaced(corev1api, namespace, cm.metadata.name) spec = encoder_dump(cmdata) cmlist['cm-' + cm.metadata.name] = { 'spec': spec if not estimate else None, 'fi': k8sfileinfo(objtype=K8SObjType.K8SOBJ_CONFIGMAP, nsname=namespace, name=cm.metadata.name, ftype=NOT_EMPTY_FILE, size=len(spec), creation_timestamp=cmdata.metadata.creation_timestamp), } return cmlist def config_map_restore_namespaced(corev1api, file_info, file_content): cm = encoder_load(file_content, file_info.name) metadata = prepare_metadata(cm.metadata) # Instantiate the configmap object configmap = client.V1ConfigMap( api_version=cm.api_version, kind="ConfigMap", data=cm.data, binary_data=cm.binary_data, metadata=metadata ) if file_info.objcache is not None: # object exist so we replace it response = corev1api.replace_namespaced_config_map(k8sfile2objname(file_info.name), file_info.namespace, configmap, pretty='true') else: # object does not exist, so create one as required response = corev1api.create_namespaced_config_map(file_info.namespace, configmap, pretty='true') return {'response': response} bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/plugins/k8sbackend/csi_snapshot.py0000644000175000017500000001164314771010173031706 0ustar bsbuildbsbuild# -*- coding: UTF-8 -*- # # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2023 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. 
# import logging SNAPSHOT_DRIVER_COMPATIBLE='csi' K8SOBJ_SNAPSHOT_GROUP = 'snapshot.storage.k8s.io' K8SOBJ_SNAPSHOT_VERSION = 'v1' K8SOBJ_SNAPSHOTCLASS_PLURAL = 'volumesnapshotclasses' K8SOBJ_SNAPSHOT_PLURAL = 'volumesnapshots' K8SOBJ_SNAPSHOT_KIND = 'VolumeSnapshot' K8SOBJ_SNAPSHOT_NAME_TEMPLATE = 'bacula-vsnap-{pvc}-{jobid}' BACKUP_PVC_FROM_SNAPSHOT_TEMPLATE = 'bacula-pvcfs-{pvc}-{jobid}' def csi_snapshots_read_namespaced(crd_api, namespace, name): return crd_api.get_namespaced_custom_object(K8SOBJ_SNAPSHOT_GROUP, K8SOBJ_SNAPSHOT_VERSION, namespace, K8SOBJ_SNAPSHOT_PLURAL, name) def csi_snapshots_namespaced_names(crd_api, namespace, labels=""): snapdict = {} snaps = crd_api.list_namespaced_custom_object(K8SOBJ_SNAPSHOT_GROUP, K8SOBJ_SNAPSHOT_VERSION, namespace, K8SOBJ_SNAPSHOT_PLURAL, watch=False, label_selector=labels) for snap in snaps.items: snapdict[snap.metadata.name] = { 'name': snap.metadata.name, 'api_version': snap.apiVersion, 'kind': snap.kind, 'namespace': snap.metadata.namespace, 'resourceVersion': snap.metadata.resourceVersion, 'uid': snap.metadata.uid, 'pvc_source': snap.spec.source.persistentVolumeClaimName, 'class_name': snap.spec.volumeSnapshotClassName, 'creation_time': snap.status.creationTime, 'ready_to_use': snap.status.readyToUse, 'restore_size': snap.status.restoreSize } return snapdict def get_volume_snapshot_class_name(crd_api, pvc_storageclass_provisioner): volume_snapshot_classes = crd_api.list_cluster_custom_object(K8SOBJ_SNAPSHOT_GROUP, K8SOBJ_SNAPSHOT_VERSION, K8SOBJ_SNAPSHOTCLASS_PLURAL, watch=False) logging.debug("Provisioner of PVC Storage class:\n{}".format(pvc_storageclass_provisioner)) logging.debug("VolumeSnapshotClasses available:\n {}".format(volume_snapshot_classes)) for snap in volume_snapshot_classes.get('items'): if pvc_storageclass_provisioner == snap.get('driver'): logging.debug("VolumesnapshotClass selected:\n{}".format(snap.get('metadata').get('name'))) return snap.get('metadata').get('name') raise ValueError("VolumeSnapshotClass not found for this provisioner: {}. 
Please contact with Bacula support".format(pvc_storageclass_provisioner)) def prepare_create_snapshot_body(crd_api, namespace, pvc_name, jobid, pvc_storageclass_provisioner): volumeSnapshotClassName = get_volume_snapshot_class_name(crd_api, pvc_storageclass_provisioner) return { "group": K8SOBJ_SNAPSHOT_GROUP, "version": K8SOBJ_SNAPSHOT_VERSION, "namespace": namespace, "plural": K8SOBJ_SNAPSHOT_PLURAL, "body": { "apiVersion": "{}/{}".format(K8SOBJ_SNAPSHOT_GROUP, K8SOBJ_SNAPSHOT_VERSION), "kind": K8SOBJ_SNAPSHOT_KIND, "metadata": { "name": K8SOBJ_SNAPSHOT_NAME_TEMPLATE.format(pvc=pvc_name, jobid=jobid), "namespace": namespace }, "spec": { "volumeSnapshotClassName": volumeSnapshotClassName, "source": { "persistentVolumeClaimName": pvc_name } } } } def prepare_snapshot_action(namespace, vsnapshot_name): return { "group": K8SOBJ_SNAPSHOT_GROUP, "version": K8SOBJ_SNAPSHOT_VERSION, "namespace": namespace, "plural": K8SOBJ_SNAPSHOT_PLURAL, "name": vsnapshot_name } def prepare_pvc_from_vsnapshot_body(namespace, pvcdata, jobid): vsnapshot_name = K8SOBJ_SNAPSHOT_NAME_TEMPLATE.format(pvc=pvcdata.get('name'), jobid=jobid) return { 'api_version': 'v1', 'kind': 'PersistentVolumeClaim', 'metadata': { 'name': BACKUP_PVC_FROM_SNAPSHOT_TEMPLATE.format(pvc=pvcdata.get('name'), jobid=jobid), 'namespace': namespace }, 'spec': { 'storageClassName': pvcdata.get('storage_class_name'), 'dataSource': { 'name': vsnapshot_name, 'kind': K8SOBJ_SNAPSHOT_KIND, 'apiGroup': K8SOBJ_SNAPSHOT_GROUP }, 'accessModes': ['ReadWriteOnce'], 'resources': { 'requests': {'storage': pvcdata.get('capacity')}} } } ././@LongLink0000644000000000000000000000014700000000000011605 Lustar rootrootbacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/plugins/k8sbackend/persistentvolumeclaims.pybacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/plugins/k8sbackend/persistentvolumeclaims.0000644000175000017500000001121314771010173033452 0ustar bsbuildbsbuild# -*- coding: UTF-8 -*- # # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. # # Copyright (c) 2019 by Inteos sp. z o.o. # All rights reserved. IP transfered to Bacula Systems according to agreement. # Author: Radosław Korzeniewski, radekk@inteos.pl, Inteos Sp. z o.o. 
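#
# A minimal sketch of taking and removing a CSI snapshot with the body
# builders above.  The dictionaries they return use the same keyword names
# as the CustomObjectsApi calls, so they can be expanded directly; the
# namespace, PVC name, jobid and provisioner are assumptions for the example.
def _example_take_and_drop_csi_snapshot():
    from kubernetes import client, config
    config.load_kube_config()
    crd_api = client.CustomObjectsApi()
    body = prepare_create_snapshot_body(crd_api, "default", "data-mysql-0",
                                        "1234", "rbd.csi.ceph.com")
    crd_api.create_namespaced_custom_object(**body)
    # later, the snapshot is addressed (and deleted) through the same dict
    action = prepare_snapshot_action(
        "default",
        K8SOBJ_SNAPSHOT_NAME_TEMPLATE.format(pvc="data-mysql-0", jobid="1234"))
    crd_api.delete_namespaced_custom_object(**action)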
# import logging import kubernetes from baculak8s.util.size_util import k8s_size_to_int from baculak8s.entities.file_info import NOT_EMPTY_FILE from baculak8s.plugins.k8sbackend.k8sfileinfo import (K8SObjType, encoder_dump, encoder_load, k8sfile2objname, k8sfileinfo) from baculak8s.plugins.k8sbackend.k8sutils import prepare_metadata def persistentvolumeclaims_read_namespaced(corev1api, namespace, name): return corev1api.read_namespaced_persistent_volume_claim(name, namespace) def persistentvolumeclaims_namespaced_only_names(corev1api, namespace, labels=""): pvcslist = [] pvcs = corev1api.list_namespaced_persistent_volume_claim(namespace=namespace, watch=False, label_selector=labels) for pvc in pvcs.items: pvcslist.append(pvc.metadata.name) return pvcslist def persistentvolumeclaims_namespaced_names(corev1api, namespace, labels=""): pvcslist = [] pvcs = corev1api.list_namespaced_persistent_volume_claim(namespace=namespace, watch=False, label_selector=labels) for pvc in pvcs.items: pvcspec = pvc.spec pvcslist.append([ "pvcdata", pvc.metadata.name, pvcspec.storage_class_name, pvcspec.resources.requests.get('storage', '-1') ]) return pvcslist def persistentvolumeclaims_list_namespaced(corev1api, namespace, estimate=False, labels=""): pvcslist = {} pvcstotalsize = 0 pvcs = corev1api.list_namespaced_persistent_volume_claim(namespace=namespace, watch=False, label_selector=labels) for pvc in pvcs.items: pvcdata = persistentvolumeclaims_read_namespaced(corev1api, namespace, pvc.metadata.name) spec = encoder_dump(pvcdata) logging.debug("PVCDATA-OBJ:{}".format(pvcdata)) pvcsize = 0 if (pvcdata.status.capacity is not None): pvcsize = k8s_size_to_int(pvcdata.status.capacity['storage']) pvcstotalsize += pvcsize pvcslist['pvc-' + pvc.metadata.name] = { 'spec': spec if not estimate else None, 'fi': k8sfileinfo(objtype=K8SObjType.K8SOBJ_PVOLCLAIM, nsname=namespace, name=pvc.metadata.name, ftype=NOT_EMPTY_FILE, size=len(spec), creation_timestamp=pvcdata.metadata.creation_timestamp), } return pvcslist, pvcstotalsize def persistentvolumeclaims_restore_namespaced(corev1api, file_info, file_content): pvc = encoder_load(file_content, file_info.name) metadata = prepare_metadata(pvc.metadata) # Instantiate the persistentvolumeclaims object persistentvolumeclaims = kubernetes.client.V1PersistentVolumeClaim( api_version=pvc.api_version, kind="PersistentVolumeClaim", spec=pvc.spec, metadata=metadata ) # clean some data persistentvolumeclaims.spec.volume_mode = None persistentvolumeclaims.spec.volume_name = None logging.debug('PVC: ' + str(persistentvolumeclaims)) if file_info.objcache is not None: # object exist so we replace it response = corev1api.replace_namespaced_persistent_volume_claim(k8sfile2objname(file_info.name), file_info.namespace, persistentvolumeclaims, pretty='true') else: # object does not exist, so create one as required response = corev1api.create_namespaced_persistent_volume_claim(file_info.namespace, persistentvolumeclaims, pretty='true') return {'response': response} bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/plugins/k8sbackend/__init__.py0000644000175000017500000000000014771010173030731 0ustar bsbuildbsbuildbacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/plugins/k8sbackend/baculabackupimage.py.in0000644000175000017500000000006214771010173033227 0ustar bsbuildbsbuildKUBERNETES_TAR_IMAGE="@KUBERNETES_IMAGE_VERSION@" bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/plugins/k8sbackend/baculaannotations.py0000644000175000017500000001376014771010173032720 0ustar 
bsbuildbsbuild# -*- coding: UTF-8 -*- # # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. # # All rights reserved. IP transfered to Bacula Systems according to agreement. # Author: Radosław Korzeniewski, radekk@inteos.pl, Inteos Sp. z o.o. # import logging import kubernetes from baculak8s.plugins.k8sbackend.pods import pods_namespaced_specs """ * bacula/backup.mode: [snapshot|standard] - it will be the cloud way to select what pvcdata you want to backup and extend the current plugin parameter: pvcdata[=], default is snapshot if not defined. * bacula/backup.volumes: - required, multiple pvc names as comma separated list. * bacula/run.before.job.container.command: [/|*/] - a star (*) means all containers. * bacula/run.before.job.failjobonerror: [yes|no] - default is yes. * bacula/run.after.job.container.command: [/|*/] - a star (*) means all containers. * bacula/run.after.job.failjobonerror: [yes|no] - default is no. """ class BaculaBackupMode(object): """ This is a class to manage snapshot mode. """ Snapshot = 'snapshot' Clone = 'clone' Standard = 'standard' params = (Snapshot, Clone, Standard) @staticmethod def process_param(mode): """The static method validates backup mode Args: mode (str): a backup mode parameter from k8s annotation Returns: str: backup mode normalized to consts, `None` when error """ if mode is None: return None mode = mode.lower() for p in BaculaBackupMode.params: if p == mode: return p raise Exception('This backup mode `{}` is not supported. 
Only supported: snapshot, clone or standard.'.format(mode)) class BaculaAnnotationsClass(object): """ This is a class to manage Bacula annotations parameters """ BaculaPrefix = 'bacula/' BackupMode = 'backup.mode' BackupVolume = 'backup.volumes' RunBeforeJob = 'run.before.job.container.command' RunBeforeJobonError = 'run.before.job.failjobonerror' RunAfterJob = 'run.after.job.container.command' RunAfterJobonError = 'run.after.job.failjobonerror' RunAfterSnapshot = 'run.after.snapshot.container.command' RunAfterSnapshotonError = 'run.after.snapshot.failjobonerror' params = (BackupMode, BackupVolume, RunBeforeJob, RunBeforeJobonError, RunAfterJob, RunAfterJobonError, RunAfterSnapshot, RunAfterSnapshotonError) @staticmethod def process_param(param): """The static method validates Bacula annotations Args: param (str): a Bacula annotation from k8s Returns: str: Bacula annotation normalized to consts, `None` when error """ if param is not None: for p in BaculaAnnotationsClass.params: if param == BaculaAnnotationsClass.BaculaPrefix + p: return p return None @staticmethod def handle_run_job_container_command(param): """The static method handles container/command annotation parameters Args: param (str): a container command parameter from k8s annotation Returns: tuple(2): container / command split """ container, command = (None, None) if param is not None: try: container, command = param.split('/', 1) except ValueError as e: logging.error(e) return container, command def annotated_namespaced_pods_data(corev1api, namespace, estimate=False, labels=""): """Reads Pods annotations to search for Bacula annotations Args: corev1api (coreviapi): kubernetes corev1api instance namespace (str): namespace for the pod estimate (bool, optional): inform if we does backup or estimate job. Defaults to False. labels (list , optional): k8s label filter. Defaults to None. 
Returns: list: a list of pods and its annotations for selected namespace """ podsdata = [] pods = pods_namespaced_specs(corev1api, namespace, labels) for pod in pods: metadata = pod.metadata if metadata.annotations is None: continue bacula_annotations = [k for k, v in metadata.annotations.items() if k.startswith(BaculaAnnotationsClass.BaculaPrefix)] if len(bacula_annotations) > 0: containers = [c.name for c in pod.spec.containers] podobj = { 'name': metadata.name, 'containers': containers } for ba in bacula_annotations: param = metadata.annotations.get(ba) baname = BaculaAnnotationsClass.process_param(ba) # we will ignore all annotations we cannot handle if baname is not None: podobj[baname] = param podsdata.append(podobj) return podsdata def annotated_pvc_backup_mode(pvc, pod_backup_mode): """Reads PVC annotations to search for backup mode annotation Args: pvc: kubernetes PVC object pod_backup_mode: Selected backup mode in pod annotation Returns: BaculaBackupMode of pvc if it has, or pod backup mode """ metadata = pvc.metadata if metadata.annotations is None or 'bacula/backup.mode' not in metadata.annotations: return pod_backup_mode return BaculaBackupMode.process_param(metadata.annotations.get('bacula/backup.mode'))././@LongLink0000644000000000000000000000014600000000000011604 Lustar rootrootbacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/plugins/k8sbackend/replicationcontroller.pybacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/plugins/k8sbackend/replicationcontroller.p0000644000175000017500000000625614771010173033441 0ustar bsbuildbsbuild# -*- coding: UTF-8 -*- # # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. # # Copyright (c) 2019 by Inteos sp. z o.o. # All rights reserved. IP transfered to Bacula Systems according to agreement. # Author: Radosław Korzeniewski, radekk@inteos.pl, Inteos Sp. z o.o. 
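#
# A minimal sketch of pod annotations that annotated_namespaced_pods_data()
# above would pick up; every key carries the "bacula/" prefix described in
# the module docstring, and the container, volume and command values are
# assumptions for the example.  The run.*.container.command values split on
# the first "/" into container and command, with "*" meaning all containers.
EXAMPLE_BACULA_ANNOTATIONS = {
    "bacula/backup.mode": "snapshot",
    "bacula/backup.volumes": "data-mysql-0,logs-mysql-0",
    "bacula/run.before.job.container.command":
        "mysql/mysql -e 'FLUSH TABLES WITH READ LOCK'",
    "bacula/run.before.job.failjobonerror": "yes",
    "bacula/run.after.job.container.command": "*/echo backup finished",
}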
# from baculak8s.entities.file_info import NOT_EMPTY_FILE from baculak8s.plugins.k8sbackend.k8sfileinfo import * from baculak8s.plugins.k8sbackend.k8sutils import * def replication_controller_read_namespaced(corev1api, namespace, name): return corev1api.read_namespaced_replication_controller(name, namespace) def replication_controller_list_namespaced(corev1api, namespace, estimate=False, labels=""): rclist = {} replicationcontroller = corev1api.list_namespaced_replication_controller(namespace=namespace, watch=False, label_selector=labels) for rc in replicationcontroller.items: rcdata = replication_controller_read_namespaced(corev1api, namespace, rc.metadata.name) spec = encoder_dump(rcdata) rclist['rc-' + rc.metadata.name] = { 'spec': spec if not estimate else None, 'fi': k8sfileinfo(objtype=K8SObjType.K8SOBJ_REPLICACONTR, nsname=namespace, name=rc.metadata.name, ftype=NOT_EMPTY_FILE, size=len(spec), creation_timestamp=rcdata.metadata.creation_timestamp), } return rclist def replication_controller_restore_namespaced(corev1api, file_info, file_content): rc = encoder_load(file_content, file_info.name) metadata = prepare_metadata(rc.metadata) # Instantiate the replicationcontroller object replicationcontroller = client.V1ReplicationController( api_version=rc.api_version, kind="ReplicationController", spec=rc.spec, metadata=metadata ) if file_info.objcache is not None: # object exist so we replace it response = corev1api.replace_namespaced_replication_controller(k8sfile2objname(file_info.name), file_info.namespace, replicationcontroller, pretty='true') else: # object does not exist, so create one as required response = corev1api.create_namespaced_replication_controller(file_info.namespace, replicationcontroller, pretty='true') return {'response': response} bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/plugins/k8sbackend/replicaset.py0000644000175000017500000000543514771010173031346 0ustar bsbuildbsbuild# -*- coding: UTF-8 -*- # # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. # # Copyright (c) 2019 by Inteos sp. z o.o. # All rights reserved. IP transfered to Bacula Systems according to agreement. # Author: Radosław Korzeniewski, radekk@inteos.pl, Inteos Sp. z o.o. 
# from baculak8s.entities.file_info import NOT_EMPTY_FILE from baculak8s.plugins.k8sbackend.k8sfileinfo import * from baculak8s.plugins.k8sbackend.k8sutils import * def replica_sets_read_namespaced(appsv1api, namespace, name): return appsv1api.read_namespaced_replica_set(name, namespace) def replica_sets_list_namespaced(appsv1api, namespace, estimate=False, labels=""): rslist = {} replicasets = appsv1api.list_namespaced_replica_set(namespace=namespace, watch=False, label_selector=labels) for rs in replicasets.items: rsdata = replica_sets_read_namespaced(appsv1api, namespace, rs.metadata.name) spec = encoder_dump(rsdata) rslist['rs-' + rs.metadata.name] = { 'spec': spec if not estimate else None, 'fi': k8sfileinfo(objtype=K8SObjType.K8SOBJ_REPLICASET, nsname=namespace, name=rs.metadata.name, ftype=NOT_EMPTY_FILE, size=len(spec), creation_timestamp=rsdata.metadata.creation_timestamp), } return rslist def replica_sets_restore_namespaced(appsv1api, file_info, file_content): rs = encoder_load(file_content, file_info.name) metadata = prepare_metadata(rs.metadata) # Instantiate the daemon_set object replicaset = client.V1ReplicaSet( api_version=rs.api_version, kind="ReplicaSet", spec=rs.spec, metadata=metadata ) if file_info.objcache is not None: # object exist so we replace it response = appsv1api.replace_namespaced_replica_set(k8sfile2objname(file_info.name), file_info.namespace, replicaset, pretty='true') else: # object does not exist, so create one as required response = appsv1api.create_namespaced_replica_set(file_info.namespace, replicaset, pretty='true') return {'response': response} bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/plugins/k8sbackend/persistentvolumes.py0000644000175000017500000001037114771010173033021 0ustar bsbuildbsbuild# -*- coding: UTF-8 -*- # # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. # # Copyright (c) 2019 by Inteos sp. z o.o. # All rights reserved. IP transfered to Bacula Systems according to agreement. # Author: Radosław Korzeniewski, radekk@inteos.pl, Inteos Sp. z o.o. 
# import pathlib from baculak8s.entities.file_info import NOT_EMPTY_FILE from baculak8s.plugins.k8sbackend.k8sfileinfo import * from baculak8s.util.size_util import k8s_size_to_int from baculak8s.plugins.k8sbackend.k8sutils import * def persistentvolume_read(corev1api, name): return corev1api.read_persistent_volume(name) def persistentvolumes_list_all(corev1api, pvfilter=None, estimate=False): pvlist = {} persistentvolumes = corev1api.list_persistent_volume(watch=False) for pv in persistentvolumes.items: if pvfilter is not None and len(pvfilter) > 0: logging.debug("pvfilter-glob-for: {}".format(pv.metadata.name)) found = False for pvglob in pvfilter: logging.debug("checking pvglob: {}".format(pvglob)) if pathlib.Path(pv.metadata.name).match(pvglob): found = True logging.debug('Found.') break if not found: continue pvdata = persistentvolume_read(corev1api, pv.metadata.name) spec = encoder_dump(pvdata) pvlist[pv.metadata.name] = { 'spec': spec if not estimate else None, 'fi': k8sfileinfo(objtype=K8SObjType.K8SOBJ_PVOLUME, name=pv.metadata.name, ftype=NOT_EMPTY_FILE, size=len(spec), creation_timestamp=pvdata.metadata.creation_timestamp), } return pvlist def persistentvolumes_names(corev1api): pvlist = [] persistentvolumes = corev1api.list_persistent_volume(watch=False) for pv in persistentvolumes.items: pvlist.append(["persistentvolume", pv.metadata.name]) return pvlist def persistentvolumes_list_all_names(corev1api): pvlist = {} persistentvolumes = corev1api.list_persistent_volume(watch=False) for pv in persistentvolumes.items: pvname = pv.metadata.name pvsize = pv.spec.capacity['storage'] logging.debug("pvsize: {} / {}".format(type(pvsize), pvsize)) pvlist[pvname] = { 'fi': FileInfo(name="/{}/{}".format(K8SObjType.K8SOBJ_PVOLUME_Path, pvname), ftype=NOT_EMPTY_FILE, size=k8s_size_to_int(pvsize), uid=0, gid=0, mode=DEFAULT_FILE_MODE, nlink=1, modified_at=NOW_TIMESTAMP, accessed_at=NOW_TIMESTAMP, created_at=k8stimestamp_to_unix_timestamp(pv.metadata.creation_timestamp)), } return pvlist def persistentvolumes_restore(corev1api, file_info, file_content): pv = encoder_load(file_content, file_info.name) metadata = prepare_metadata(pv.metadata) # Instantiate the persistentvolume object persistentvolume = client.V1PersistentVolume( api_version=pv.api_version, kind="PersistentVolume", spec=pv.spec, metadata=metadata ) if file_info.objcache is not None: # object exist so we replace it response = corev1api.replace_persistent_volume(k8sfile2objname(file_info.name), persistentvolume, pretty='true') else: # object does not exist, so create one as required response = corev1api.create_persistent_volume(persistentvolume, pretty='true') return {'response': response} bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/plugins/k8sbackend/pvcclone.py0000644000175000017500000000716114771010173031022 0ustar bsbuildbsbuild# # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. # # All rights reserved. 
IP transfered to Bacula Systems according to agreement. # Author: Radosław Korzeniewski, radekk@inteos.pl, Inteos Sp. z o.o. # import os import json import logging from baculak8s.util.token import generate_token DEFAULTCLONEYAML = os.getenv('DEFAULTCLONEYAML', "/opt/bacula/scripts/bacula-backup-clone.yaml") PREFIX_CLONE_PVC_BACKUP_NAME = "bacula-pvcclone-{job_name}-" CLONE_PVC_BACKUP_NAME=PREFIX_CLONE_PVC_BACKUP_NAME + "{job_id}" JOB_NAME_MAX_CHARS=25 JOB_ID_MAX_DIGITS=12 CLONETEMPLATE = """ apiVersion: v1 kind: PersistentVolumeClaim metadata: name: {clonename} namespace: {namespace} labels: app: baculabackup spec: storageClassName: {storageclassname} dataSource: name: {pvcname} kind: PersistentVolumeClaim accessModes: - ReadWriteOnce resources: requests: storage: {pvcsize} """ def find_bacula_pvc_clones_from_old_job(pvc_list, job): name_for_search = PREFIX_CLONE_PVC_BACKUP_NAME.format(job_name=job.split('.')[0][:JOB_NAME_MAX_CHARS].lower()) num_hyphen=name_for_search.count('-') old_pvcs = [] for pvc_name in pvc_list: if pvc_name.startswith(name_for_search) and num_hyphen == pvc_name.count('-'): old_pvcs.append(pvc_name) logging.debug(f"Found old cloned pvcs: {old_pvcs}") return old_pvcs def get_clone_pvc_name(job): # Get job name and id, and limit to not exceed 63 characters in pvc name job_name = job.split('.')[0][:JOB_NAME_MAX_CHARS].lower() job_id = job.split(':')[1][:JOB_ID_MAX_DIGITS] return CLONE_PVC_BACKUP_NAME.format(job_name=job_name, job_id=job_id) def prepare_backup_clone_yaml(namespace, pvcname, pvcsize, scname, jobname, clonename=None): """ Handles PVC clone yaml preparation based on available templates Args: namespace (str): k8s namespace for pvc clone pvcname (str): source pvc name to clone from pvcsize (str): k8s capacity of the original pvc scname (str): storage class of the original pvc job (str): Job name to add to cloned pvc name clonename (str, optional): the cloned - destination - pvcname; if `None` then name will be assigned automatically. Defaults to None. Returns: tuple(2): return a prepared pvc clone yaml string and assigned pvc clone name, especially useful when this name was created automatically. """ cloneyaml = CLONETEMPLATE if os.path.exists(DEFAULTCLONEYAML): with open(DEFAULTCLONEYAML, 'r') as file: cloneyaml = file.read() if clonename is None: clonename = get_clone_pvc_name(jobname) return cloneyaml.format(namespace=namespace, pvcname=pvcname, pvcsize=pvcsize, clonename=clonename, storageclassname=scname), clonename def delete_pvcclone(corev1api, namespace, pvc_name, grace_period_seconds=0, propagation_policy='Foreground'): return corev1api.delete_namespaced_persistent_volume_claim( pvc_name, namespace, grace_period_seconds=grace_period_seconds, propagation_policy=propagation_policy) bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/plugins/k8sbackend/endpoints.py0000644000175000017500000000537414771010173031220 0ustar bsbuildbsbuild# -*- coding: UTF-8 -*- # # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. 
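#
# A minimal sketch of rendering the clone manifest with
# prepare_backup_clone_yaml() above.  The job string format
# ("<name>.<timestamp>:<jobid>") and all literal values are assumptions for
# the example; the clone name is truncated so the PVC name stays well inside
# the Kubernetes 63-character limit.
def _example_render_clone_manifest():
    import yaml
    manifest, clonename = prepare_backup_clone_yaml(
        namespace="default",
        pvcname="data-mysql-0",
        pvcsize="10Gi",
        scname="standard",
        jobname="BackupK8S.2024-01-01_01.01.01:1234")
    body = yaml.safe_load(manifest)
    print(clonename, body["spec"]["resources"]["requests"]["storage"])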
# # Bacula(R) is a registered trademark of Kern Sibbald. # # Copyright (c) 2019 by Inteos sp. z o.o. # All rights reserved. IP transfered to Bacula Systems according to agreement. # Author: Radosław Korzeniewski, radekk@inteos.pl, Inteos Sp. z o.o. # from baculak8s.entities.file_info import NOT_EMPTY_FILE from baculak8s.plugins.k8sbackend.k8sfileinfo import * from baculak8s.plugins.k8sbackend.k8sutils import * def endpoints_read_namespaced(corev1api, namespace, name): return corev1api.read_namespaced_endpoints(name, namespace) def endpoints_list_namespaced(corev1api, namespace, estimate=False, labels=""): eplist = {} endpoints = corev1api.list_namespaced_endpoints(namespace=namespace, watch=False, label_selector=labels) for ep in endpoints.items: epdata = endpoints_read_namespaced(corev1api, namespace, ep.metadata.name) spec = encoder_dump(epdata) eplist['ep-' + ep.metadata.name] = { 'spec': spec if not estimate else None, 'fi': k8sfileinfo(objtype=K8SObjType.K8SOBJ_ENDPOINT, nsname=namespace, name=ep.metadata.name, ftype=NOT_EMPTY_FILE, size=len(spec), creation_timestamp=epdata.metadata.creation_timestamp), } return eplist def endpoints_restore_namespaced(corev1api, file_info, file_content): ep = encoder_load(file_content, file_info.name) metadata = prepare_metadata(ep.metadata) # Instantiate the endpoint object endpoint = client.V1Endpoints( api_version=ep.api_version, kind="Endpoint", subsets=ep.subsets, metadata=metadata ) if file_info.objcache is not None: # object exist so we replace it response = corev1api.replace_namespaced_endpoints(k8sfile2objname(file_info.name), file_info.namespace, endpoint, pretty='true') else: # object does not exist, so create one as required response = corev1api.create_namespaced_endpoints(file_info.namespace, endpoint, pretty='true') return {'response': response} bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/plugins/k8sbackend/k8sfileinfo.py0000644000175000017500000001306314771010173031430 0ustar bsbuildbsbuild# -*- coding: UTF-8 -*- # # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. # # Copyright (c) 2019 by Inteos sp. z o.o. # All rights reserved. IP transfered to Bacula Systems according to agreement. # Author: Radosław Korzeniewski, radekk@inteos.pl, Inteos Sp. z o.o. 
# import json import logging import yaml from baculak8s.entities.file_info import (DEFAULT_DIR_MODE, DEFAULT_FILE_MODE, DIRECTORY, FileInfo) from baculak8s.entities.k8sobjtype import K8SObjType from baculak8s.util.date_util import (get_time_now, k8stimestamp_to_unix_timestamp) from baculak8s.util.size_util import k8s_size_to_int NOW_TIMESTAMP = get_time_now() defaultk8sext = 'yaml' defaultk8spath = '@kubernetes' defaultk8sarchext = 'tar' def encoder_dump(msg): if defaultk8sext == 'json': return json.dumps(msg, sort_keys=True, default=str) else: return yaml.dump(msg) def encoder_load(msg, filename=None): if filename.endswith('.json') or (filename is None and defaultk8sext == 'json'): return json.loads(msg) else: return yaml.load(msg, Loader=yaml.Loader) def k8sfile2objname(fname): return str(fname).replace('.'+defaultk8sext, '').replace('.json', '').replace('.yaml', '').replace('.tar', '') def k8sfilepath(objtype, nsname='', name=''): # TODO: refactor string format to new ".format()" method if objtype is not None: if objtype == K8SObjType.K8SOBJ_NAMESPACE: # namespace return '/{}/{}/{}/{}.{}'.format(defaultk8spath, K8SObjType.pathdict[K8SObjType.K8SOBJ_NAMESPACE], nsname, name, defaultk8sext) if objtype == K8SObjType.K8SOBJ_PVOLUME: # persistent volume return '/{}/{}/{}.{}'.format(defaultk8spath, K8SObjType.pathdict[K8SObjType.K8SOBJ_PVOLUME], name, defaultk8sext) if objtype == K8SObjType.K8SOBJ_STORAGECLASS: # storage class return '/{}/{}/{}.{}'.format(defaultk8spath, K8SObjType.pathdict[K8SObjType.K8SOBJ_STORAGECLASS], name, defaultk8sext) if objtype == K8SObjType.K8SOBJ_PVCDATA: # PVC Data tar archive here return '/{}/{}/{}/{}/{}.{}'.format(defaultk8spath, K8SObjType.pathdict[K8SObjType.K8SOBJ_NAMESPACE], nsname, K8SObjType.pathdict[K8SObjType.K8SOBJ_PVCDATA], name, defaultk8sarchext) # other objects return '/{}/{}/{}/{}/{}.{}'.format(defaultk8spath, K8SObjType.pathdict[K8SObjType.K8SOBJ_NAMESPACE], nsname, K8SObjType.pathdict[objtype], name, defaultk8sext) return None def k8sfileinfo(objtype, name, ftype, size, nsname=None, creation_timestamp=None): return FileInfo( name=k8sfilepath(objtype, nsname=nsname, name=name), ftype=ftype, size=k8s_size_to_int(size), objtype=objtype, uid=0, gid=0, mode=DEFAULT_DIR_MODE if ftype == DIRECTORY else DEFAULT_FILE_MODE, # TODO: Persistent volumes can have a different access modes [RWO, ROM, RWM] # TODO: which we can express as different file modes in Bacula nlink=1, modified_at=NOW_TIMESTAMP, accessed_at=NOW_TIMESTAMP, created_at=NOW_TIMESTAMP if creation_timestamp is None else k8stimestamp_to_unix_timestamp(creation_timestamp) ) def k8sfileobjecttype(fnames): if len(fnames) < 3 or fnames[0] != defaultk8spath: # the filepath variable cannot be converted to k8s fileinfo return None objtype = { 'obj': None, 'namespace': None, } if fnames[1] == K8SObjType.K8SOBJ_NAMESPACE_Path: # handle namespaced objects objtype.update({'namespace': fnames[2]}) filename = fnames[3] if filename.endswith('.{}'.format(defaultk8sext)): objtype.update({'obj': K8SObjType.K8SOBJ_NAMESPACE}) elif filename == K8SObjType.K8SOBJ_PVCS_Path: # handle pvcs both config and data filename = fnames[4] if filename.endswith('.{}'.format(defaultk8sext)): # this is a config file objtype.update({'obj': K8SObjType.K8SOBJ_PVOLCLAIM}) else: # any other are pvcdata files objtype.update({'obj': K8SObjType.K8SOBJ_PVCDATA}) else: for obj in K8SObjType.pathdict.keys(): if K8SObjType.pathdict[obj] == filename: objtype.update({'obj': obj}) break elif fnames[1] == K8SObjType.K8SOBJ_PVOLUME_Path: # 
handle persistent volumes here objtype.update({'obj': K8SObjType.K8SOBJ_PVOLUME}) elif fnames[1] == K8SObjType.K8SOBJ_STORAGECLASS_Path: # handle storage class here objtype.update({'obj': K8SObjType.K8SOBJ_STORAGECLASS}) logging.debug('objtype:' + str(objtype)) return objtype bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/plugins/k8sbackend/daemonset.py0000644000175000017500000000541414771010173031167 0ustar bsbuildbsbuild# -*- coding: UTF-8 -*- # # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. # # Copyright (c) 2019 by Inteos sp. z o.o. # All rights reserved. IP transfered to Bacula Systems according to agreement. # Author: Radosław Korzeniewski, radekk@inteos.pl, Inteos Sp. z o.o. # from baculak8s.entities.file_info import NOT_EMPTY_FILE from baculak8s.plugins.k8sbackend.k8sfileinfo import * from baculak8s.plugins.k8sbackend.k8sutils import * def daemon_sets_read_namespaced(appsv1api, namespace, name): return appsv1api.read_namespaced_daemon_set(name, namespace) def daemon_sets_list_namespaced(appsv1api, namespace, estimate=False, labels=""): dslist = {} daemonsets = appsv1api.list_namespaced_daemon_set(namespace=namespace, watch=False, label_selector=labels) for ds in daemonsets.items: dsdata = daemon_sets_read_namespaced(appsv1api, namespace, ds.metadata.name) spec = encoder_dump(dsdata) dslist['ds-' + ds.metadata.name] = { 'spec': spec if not estimate else None, 'fi': k8sfileinfo(objtype=K8SObjType.K8SOBJ_DAEMONSET, nsname=namespace, name=ds.metadata.name, ftype=NOT_EMPTY_FILE, size=len(spec), creation_timestamp=dsdata.metadata.creation_timestamp), } return dslist def daemon_sets_restore_namespaced(appsv1api, file_info, file_content): ds = encoder_load(file_content, file_info.name) metadata = prepare_metadata(ds.metadata) # Instantiate the daemon_set object daemonset = client.V1DaemonSet( api_version=ds.api_version, kind="DaemonSet", spec=ds.spec, metadata=metadata ) if file_info.objcache is not None: # object exist so we replace it response = appsv1api.replace_namespaced_daemon_set(k8sfile2objname(file_info.name), file_info.namespace, daemonset, pretty='true') else: # object does not exist, so create one as required response = appsv1api.create_namespaced_daemon_set(file_info.namespace, daemonset, pretty='true') return {'response': response} bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/plugins/k8sbackend/baculabackup.py0000644000175000017500000001110014771010173031612 0ustar bsbuildbsbuild# # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. 
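#
# A minimal sketch of the virtual file names produced by k8sfilepath() above,
# using the default "yaml" encoder and "tar" archive extension.  The
# namespace and object names are assumptions, and the directory components
# shown in the comments are placeholders taken from K8SObjType.pathdict.
def _example_virtual_paths():
    # '/@kubernetes/<namespaces dir>/default/<deployments dir>/nginx.yaml'
    print(k8sfilepath(K8SObjType.K8SOBJ_DEPLOYMENT,
                      nsname="default", name="nginx"))
    # '/@kubernetes/<namespaces dir>/default/<pvcdata dir>/data-mysql-0.tar'
    print(k8sfilepath(K8SObjType.K8SOBJ_PVCDATA,
                      nsname="default", name="data-mysql-0"))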
# # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. # # Copyright (c) 2019 by Inteos sp. z o.o. # All rights reserved. IP transfered to Bacula Systems according to agreement. # Author: Radosław Korzeniewski, radekk@inteos.pl, Inteos Sp. z o.o. # import os import logging from baculak8s.plugins.k8sbackend.baculabackupimage import KUBERNETES_TAR_IMAGE JOB_NAME_MAX_CHARS = 23 JOB_ID_MAX_DIGITS = 12 BACULABACKUPPODNAME = 'bacula-backup-{job_name}-{job_id}' # BACULABACKUPIMAGE = "hub.baculasystems.com/bacula-backup:" + KUBERNETES_TAR_IMAGE BACULABACKUPIMAGE = "bacula-backup:" + KUBERNETES_TAR_IMAGE DEFAULTPODYAML = os.getenv('DEFAULTPODYAML', "/opt/bacula/scripts/bacula-backup.yaml") PODTEMPLATE = """ apiVersion: v1 kind: Pod metadata: name: {podname} namespace: {namespace} labels: app: baculabackup spec: hostname: {podname} {nodename} containers: - name: {podname} resources: limits: cpu: "1" memory: "64Mi" requests: cpu: "100m" memory: "16Mi" image: {image} env: - name: PLUGINMODE value: "{mode}" - name: PLUGINHOST value: "{host}" - name: PLUGINPORT value: "{port}" - name: PLUGINTOKEN value: "{token}" - name: PLUGINJOB value: "{job}" imagePullPolicy: {imagepullpolicy} volumeMounts: - name: {podname}-storage mountPath: /{mode} {imagepullsecrets} restartPolicy: Never volumes: - name: {podname}-storage persistentVolumeClaim: claimName: {pvcname} """ class ImagePullPolicy(object): IfNotPresent = 'IfNotPresent' Always = 'Always' Never = 'Never' params = (IfNotPresent, Always, Never) @staticmethod def process_param(imagepullpolicy): if imagepullpolicy is not None: for p in ImagePullPolicy.params: # logging.debug("imagepullpolicy test: {} {}".format(p, self.imagepullpolicy)) if imagepullpolicy.lower() == p.lower(): return p return ImagePullPolicy.IfNotPresent def exists_bacula_pod(pod_list, job): """Get name of first backup pod belong to previous job. 
:param pod_list: list of pods in namespace :param job: Name of job, without id :return: Name of pod of previous job """ name_for_search = 'bacula-backup-' + job.split('.')[0][:JOB_NAME_MAX_CHARS].lower() + '-' num_hyphen=name_for_search.count('-') for pod_name in pod_list: if name_for_search in pod_name and num_hyphen == pod_name.count('-'): return pod_name return '' def get_backup_pod_name(job): # Get job name and id, and limit to not exceed 63 characters in pod name job_name = job.split('.')[0][:JOB_NAME_MAX_CHARS].lower() job_id = job.split(':')[1][:JOB_ID_MAX_DIGITS] return BACULABACKUPPODNAME.format(job_name=job_name, job_id=job_id) def prepare_backup_pod_yaml(mode='backup', nodename=None, host='localhost', port=9104, token='', namespace='default', pvcname='', image=BACULABACKUPIMAGE, imagepullpolicy=ImagePullPolicy.IfNotPresent, imagepullsecret=None, job=''): podyaml = PODTEMPLATE if os.path.exists(DEFAULTPODYAML): with open(DEFAULTPODYAML, 'r') as file: podyaml = file.read() nodename_param = '' imagepullsecrets_param = '' if nodename is not None: nodename_param = "nodeName: {nodename}".format(nodename=nodename) if imagepullsecret is not None: imagepullsecrets_param = "imagePullSecrets:\n - name: {imagepullsecret}".format(imagepullsecret=imagepullsecret) logging.debug('host:{} port:{} namespace:{} image:{} imagepullsecret:{} job:{}'.format(host, port, namespace, image, imagepullsecret, job)) return podyaml.format(mode=mode, nodename=nodename_param, host=host, port=port, token=token, namespace=namespace, image=image, pvcname=pvcname, podname=get_backup_pod_name(job), imagepullpolicy=imagepullpolicy, imagepullsecrets=imagepullsecrets_param, job=job) bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/plugins/k8sbackend/pods.py0000644000175000017500000001115414771010173030153 0ustar bsbuildbsbuild# -*- coding: UTF-8 -*- # # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. # # Copyright (c) 2019 by Inteos sp. z o.o. # All rights reserved. IP transfered to Bacula Systems according to agreement. # Author: Radosław Korzeniewski, radekk@inteos.pl, Inteos Sp. z o.o. 
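#
# A minimal sketch of rendering the helper backup pod manifest with
# prepare_backup_pod_yaml() above.  The host, port, token, node and job
# values are assumptions for the example; the pod name is derived from the
# job string exactly as in get_backup_pod_name().
def _example_render_backup_pod():
    import yaml
    manifest = prepare_backup_pod_yaml(
        mode="backup",
        nodename="worker-1",
        host="fd.example.com",
        port=9104,
        token="s3cr3t",
        namespace="default",
        pvcname="data-mysql-0",
        job="BackupK8S.2024-01-01_01.01.01:1234")
    pod = yaml.safe_load(manifest)
    print(pod["metadata"]["name"], pod["spec"]["nodeName"])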
# import logging import kubernetes from baculak8s.entities.file_info import NOT_EMPTY_FILE from baculak8s.entities.k8sobjtype import K8SObjType from baculak8s.plugins.k8sbackend.k8sfileinfo import (NOW_TIMESTAMP, encoder_dump, encoder_load, k8sfile2objname, k8sfileinfo) from baculak8s.plugins.k8sbackend.k8sutils import prepare_metadata def pods_read_namespaced(corev1api, namespace, name): return corev1api.read_namespaced_pod(name, namespace) def pods_namespaced_specs(corev1api, namespace, labels=""): podslist = [] pods = corev1api.list_namespaced_pod(namespace=namespace, watch=False, label_selector=labels) # logging.debug('[CUSTOM] Get items of pods: {}'.format(pods.items)) for pod in pods.items: podslist.append(pod) # logging.debug("pods_namespaced_specs:{}".format(podslist)) return podslist def pods_list_namespaced(corev1api, namespace, estimate=False, labels=""): podslist = {} pods = corev1api.list_namespaced_pod(namespace=namespace, watch=False, label_selector=labels) for pod in pods.items: poddata = pods_read_namespaced(corev1api, namespace, pod.metadata.name) spec = encoder_dump(poddata) podslist['pod-' + pod.metadata.name] = { 'spec': spec if not estimate else None, 'fi': k8sfileinfo(objtype=K8SObjType.K8SOBJ_POD, nsname=namespace, name=pod.metadata.name, ftype=NOT_EMPTY_FILE, size=len(spec), creation_timestamp=poddata.metadata.creation_timestamp), } return podslist def pods_restore_namespaced(corev1api, file_info, file_content): pod = encoder_load(file_content, file_info.name) metadata = prepare_metadata(pod.metadata) # Instantiate the pods object pods = kubernetes.client.V1Pod( api_version=pod.api_version, kind="Pod", spec=pod.spec, metadata=metadata ) # clean some data pods.spec.node_name = None secvol = [] volumes = [] # this removes dynamic secret token from volumes for v in pods.spec.volumes: if v.secret is not None and v.name.startswith('default-token-'): # TODO: we should check if the secret exist and is not type=='kubernetes.io/*' logging.debug('detectedSecVolume:'+str(v)) secvol.append(v) else: logging.debug('standardVolume:'+str(v.name)) volumes.append(v) pods.spec.volumes = volumes volume_mounts = [] # this removes above volumes from volume mounts for c in pods.spec.containers: for v in c.volume_mounts: found = False logging.debug('volMountCheck:'+str(v.name)) for s in secvol: if s.name == v.name: found = True logging.debug('secVolMountFound') break logging.debug('findResult:'+str(found)) if not found: volume_mounts.append(v) c.volume_mounts = volume_mounts logging.debug('volumeMounts after cleanup:' + str(c.volume_mounts)) if file_info.objcache is not None: # object exist so we replace it response = corev1api.replace_namespaced_pod(k8sfile2objname(file_info.name), file_info.namespace, pods, pretty='true') else: # object does not exist, so create one as required response = corev1api.create_namespaced_pod(file_info.namespace, pods, pretty='true') return {'response': response} bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/plugins/k8sbackend/resourcequota.py0000644000175000017500000000551514771010173032113 0ustar bsbuildbsbuild# -*- coding: UTF-8 -*- # # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. 
# # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. # # Copyright (c) 2019 by Inteos sp. z o.o. # All rights reserved. IP transfered to Bacula Systems according to agreement. # Author: Radosław Korzeniewski, radekk@inteos.pl, Inteos Sp. z o.o. # from baculak8s.entities.file_info import NOT_EMPTY_FILE from baculak8s.plugins.k8sbackend.k8sfileinfo import * from baculak8s.plugins.k8sbackend.k8sutils import * def resource_quota_read_namespaced(corev1api, namespace, name): return corev1api.read_namespaced_resource_quota(name, namespace) def resource_quota_list_namespaced(corev1api, namespace, estimate=False, labels=""): rqlist = {} resourcequota = corev1api.list_namespaced_resource_quota(namespace=namespace, watch=False, label_selector=labels) for rq in resourcequota.items: rqdata = resource_quota_read_namespaced(corev1api, namespace, rq.metadata.name) spec = encoder_dump(rqdata) rqlist['rq-' + rq.metadata.name] = { 'spec': spec if not estimate else None, 'fi': k8sfileinfo(objtype=K8SObjType.K8SOBJ_RESOURCEQUOTA, nsname=namespace, name=rq.metadata.name, ftype=NOT_EMPTY_FILE, size=len(spec), creation_timestamp=rqdata.metadata.creation_timestamp), } return rqlist def resource_quota_restore_namespaced(corev1api, file_info, file_content): rq = encoder_load(file_content, file_info.name) metadata = prepare_metadata(rq.metadata) # Instantiate the resourcequota object resourcequota = client.V1ResourceQuota( api_version=rq.api_version, kind="ResourceQuota", spec=rq.spec, metadata=metadata ) if file_info.objcache is not None: # object exist so we replace it response = corev1api.replace_namespaced_resource_quota(k8sfile2objname(file_info.name), file_info.namespace, resourcequota, pretty='true') else: # object does not exist, so create one as required response = corev1api.create_namespaced_resource_quota(file_info.namespace, resourcequota, pretty='true') return {'response': response} bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/plugins/k8sbackend/podtemplates.py0000644000175000017500000000551614771010173031714 0ustar bsbuildbsbuild# -*- coding: UTF-8 -*- # # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. # # Copyright (c) 2019 by Inteos sp. z o.o. # All rights reserved. IP transfered to Bacula Systems according to agreement. # Author: Radosław Korzeniewski, radekk@inteos.pl, Inteos Sp. z o.o. 
# from baculak8s.entities.file_info import NOT_EMPTY_FILE from baculak8s.plugins.k8sbackend.k8sfileinfo import * from baculak8s.plugins.k8sbackend.k8sutils import * def podtemplates_read_namespaced(corev1api, namespace, name): return corev1api.read_namespaced_pod_template(name, namespace) def podtemplates_list_namespaced(corev1api, namespace, estimate=False, labels=""): podstlist = {} podst = corev1api.list_namespaced_pod_template(namespace=namespace, watch=False, label_selector=labels) for podt in podst.items: podtdata = podtemplates_read_namespaced(corev1api, namespace, podt.metadata.name) spec = encoder_dump(podtdata) podstlist['pod-' + podt.metadata.name] = { 'spec': spec if not estimate else None, 'fi': k8sfileinfo(objtype=K8SObjType.K8SOBJ_PODTEMPLATE, nsname=namespace, name=podt.metadata.name, ftype=NOT_EMPTY_FILE, size=len(spec), creation_timestamp=podtdata.metadata.creation_timestamp), } return podstlist def podtemplates_restore_namespaced(corev1api, file_info, file_content): podst = encoder_load(file_content, file_info.name) metadata = prepare_metadata(podst.metadata) # Instantiate the podtemplates object podtemplates = client.V1PodTemplate( api_version=podst.api_version, kind="PodTemplate", template=podst.template, metadata=metadata ) if file_info.objcache is not None: # object exist so we replace it response = corev1api.replace_namespaced_pod_template(k8sfile2objname(file_info.name), file_info.namespace, podtemplates, pretty='true') else: # object does not exist, so create one as required response = corev1api.create_namespaced_pod_template(file_info.namespace, podtemplates, pretty='true') return {'response': response} bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/plugins/k8sbackend/k8sutils.py0000644000175000017500000000416214771010173030775 0ustar bsbuildbsbuild# # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. # # Copyright (c) 2019 by Inteos sp. z o.o. # All rights reserved. IP transfered to Bacula Systems according to agreement. # Author: Radosław Korzeniewski, radekk@inteos.pl, Inteos Sp. z o.o. 
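#
# Minimal usage sketch for the polling helpers defined below; the pod name,
# namespace and 120-second limit are placeholders chosen for the example only.
def _example_wait_for_backup_pod(corev1api, namespace="default", name="bacula-backup"):
    # wait_for_pod_ready() polls the pod status roughly once per second and returns
    # True only if the first container reported ready before the limit was reached.
    if not wait_for_pod_ready(corev1api, namespace, name, waits=120):
        raise TimeoutError("pod {}/{} did not become ready".format(namespace, name))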
# import time from kubernetes import client def prepare_metadata(data, annotations=False): metadata = client.V1ObjectMeta( name=data.name, namespace=data.namespace, annotations=data.annotations if annotations else {}, deletion_grace_period_seconds=data.deletion_grace_period_seconds, deletion_timestamp=data.deletion_timestamp, # finalizers=data.finalizers, # generate_name=data.generate_name, # initializers=data.initializers, labels=data.labels ) return metadata def wait_for_pod_ready(corev1api, namespace, name, waits=600): a = 0 for a in range(waits): status = corev1api.read_namespaced_pod_status(name=name, namespace=namespace) isready = status.status.container_statuses[0].ready if isready: break time.sleep(1) return a < waits - 1 def wait_for_pod_terminated(corev1api, namespace, name, waits=600): a = 0 for a in range(waits): status = corev1api.read_namespaced_pod_status(name=name, namespace=namespace) container = status.status.container_statuses[0] isterminated = not container.ready and container.state.terminated is not None if isterminated: break time.sleep(1) return a < waits - 1 bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/plugins/k8sbackend/storageclass.py0000644000175000017500000001344614771010173031706 0ustar bsbuildbsbuild# -*- coding: UTF-8 -*- # # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. # # Copyright (c) 2019 by Inteos sp. z o.o. # All rights reserved. IP transfered to Bacula Systems according to agreement. # Author: Radosław Korzeniewski, radekk@inteos.pl, Inteos Sp. z o.o. 
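#
# Minimal usage sketch for the listing helper defined below, assuming a local
# kube-config; the "standard*" glob is a placeholder pattern for the example only.
def _example_list_standard_storage_classes():
    from kubernetes import client, config
    config.load_kube_config()
    storagev1 = client.StorageV1Api()
    # scfilter entries are shell-style globs matched against storage class names
    # with pathlib.Path.match(); the result is a dict keyed by storage class name.
    return storageclass_list_all(storagev1, scfilter=["standard*"])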
# import logging import pathlib from baculak8s.entities.file_info import NOT_EMPTY_FILE from baculak8s.plugins.k8sbackend.k8sfileinfo import * from baculak8s.plugins.k8sbackend.k8sutils import * from baculak8s.util.size_util import k8s_size_to_int def storageclass_read(storagev1api, name): return storagev1api.read_storage_class(name) def storageclass_list_all(storagev1api, scfilter=None, estimate=False): sclist = {} storageclass = storagev1api.list_storage_class(watch=False) for sc in storageclass.items: if scfilter is not None and len(scfilter) > 0: logging.debug("scfilter-glob-for: {}".format(sc.metadata.name)) found = False for scglob in scfilter: logging.debug("checking scglob: {}".format(scglob)) if pathlib.Path(sc.metadata.name).match(scglob): found = True logging.debug('Found.') break if not found: continue scdata = storageclass_read(storagev1api, sc.metadata.name) spec = encoder_dump(scdata) sclist[sc.metadata.name] = { 'spec': spec if not estimate else None, 'fi': k8sfileinfo(objtype=K8SObjType.K8SOBJ_STORAGECLASS, name=sc.metadata.name, ftype=NOT_EMPTY_FILE, size=len(spec), creation_timestamp=scdata.metadata.creation_timestamp), } return sclist def storageclass_names(storagev1api): sclist = [] storageclass = storagev1api.list_storage_class(watch=False) for sc in storageclass.items: sclist.append(["storageclass", sc.metadata.name]) return sclist def storageclass_list_all_names(storagev1api): sclist = {} storageclass = storagev1api.list_storage_class(watch=False) # logging.debug(storageclass) for sc in storageclass.items: sclist[sc.metadata.name] = { 'fi': FileInfo(name="/{}/{}".format(K8SObjType.K8SOBJ_STORAGECLASS_Path, sc.metadata.name), ftype=NOT_EMPTY_FILE, size=1024, # arbitrary file size uid=0, gid=0, mode=DEFAULT_FILE_MODE, nlink=1, modified_at=NOW_TIMESTAMP, accessed_at=NOW_TIMESTAMP, created_at=k8stimestamp_to_unix_timestamp(sc.metadata.creation_timestamp)), } return sclist def get_provisioner(storagev1api, storage_name: str): storageclass = storagev1api.read_storage_class(storage_name) if storageclass is not None: return storageclass.provisioner return None """ {'allow_volume_expansion': True, 'allowed_topologies': None, 'api_version': None, 'kind': None, 'metadata': {'annotations': {'storageclass.kubernetes.io/is-default-class': 'true'}, 'cluster_name': None, 'creation_timestamp': datetime.datetime(2020, 7, 23, 12, 0, 2, tzinfo=tzlocal()), 'deletion_grace_period_seconds': None, 'deletion_timestamp': None, 'finalizers': None, 'generate_name': None, 'generation': None, 'initializers': None, 'labels': {'addonmanager.kubernetes.io/mode': 'EnsureExists'}, 'managed_fields': None, 'name': 'standard', 'namespace': None, 'owner_references': None, 'resource_version': '265', 'self_link': '/apis/storage.k8s.io/v1/storageclasses/standard', 'uid': 'b859eec1-8055-4cad-b9a0-fc13ef01a21a'}, 'mount_options': None, 'parameters': {'type': 'pd-standard'}, 'provisioner': 'kubernetes.io/gce-pd', 'reclaim_policy': 'Delete', 'volume_binding_mode': 'Immediate'} """ def storageclass_restore(storagev1api, file_info, file_content): logging.debug("storageclass_restore:fileinfo: {}".format(file_info)) sc = encoder_load(file_content, file_info.name) metadata = prepare_metadata(sc.metadata) # Instantiate the storageclass object storageclass = client.V1StorageClass( api_version=sc.api_version, kind="StorageClass", metadata=metadata, allow_volume_expansion=sc.allow_volume_expansion, allowed_topologies=sc.allowed_topologies, mount_options=sc.mount_options, parameters=sc.parameters, 
provisioner=sc.provisioner, reclaim_policy=sc.reclaim_policy, volume_binding_mode=sc.volume_binding_mode, ) if file_info.objcache is not None: # object exist so we replace it response = storagev1api.replace_storage_class(k8sfile2objname(file_info.name), body=storageclass, pretty='true') else: # object does not exist, so create one as required response = storagev1api.create_storage_class(body=storageclass, pretty='true') return {'response': response} bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/plugins/k8sbackend/statefulset.py0000644000175000017500000000545614771010173031561 0ustar bsbuildbsbuild# -*- coding: UTF-8 -*- # # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. # # Copyright (c) 2019 by Inteos sp. z o.o. # All rights reserved. IP transfered to Bacula Systems according to agreement. # Author: Radosław Korzeniewski, radekk@inteos.pl, Inteos Sp. z o.o. # from baculak8s.entities.file_info import NOT_EMPTY_FILE from baculak8s.plugins.k8sbackend.k8sfileinfo import * from baculak8s.plugins.k8sbackend.k8sutils import * def stateful_sets_read_namespaced(appsv1api, namespace, name): return appsv1api.read_namespaced_stateful_set(name, namespace) def stateful_sets_list_namespaced(appsv1api, namespace, estimate=False, labels=""): sslist = {} statefulsets = appsv1api.list_namespaced_stateful_set(namespace=namespace, watch=False, label_selector=labels) for ss in statefulsets.items: ssdata = stateful_sets_read_namespaced(appsv1api, namespace, ss.metadata.name) spec = encoder_dump(ssdata) sslist['ss-' + ss.metadata.name] = { 'spec': spec if not estimate else None, 'fi': k8sfileinfo(objtype=K8SObjType.K8SOBJ_STATEFULSET, nsname=namespace, name=ss.metadata.name, ftype=NOT_EMPTY_FILE, size=len(spec), creation_timestamp=ssdata.metadata.creation_timestamp), } return sslist def stateful_sets_restore_namespaced(appsv1api, file_info, file_content): ss = encoder_load(file_content, file_info.name) metadata = prepare_metadata(ss.metadata) # Instantiate the daemon_set object statefulset = client.V1StatefulSet( api_version=ss.api_version, kind="StatefulSet", spec=ss.spec, metadata=metadata ) if file_info.objcache is not None: # object exist so we replace it response = appsv1api.replace_namespaced_stateful_set(k8sfile2objname(file_info.name), file_info.namespace, statefulset, pretty='true') else: # object does not exist, so create one as required response = appsv1api.create_namespaced_stateful_set(file_info.namespace, statefulset, pretty='true') return {'response': response} bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/plugins/k8sbackend/namespaces.py0000644000175000017500000001000214771010173031314 0ustar bsbuildbsbuild# -*- coding: UTF-8 -*- # # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. 
# # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. # # Copyright (c) 2019 by Inteos sp. z o.o. # All rights reserved. IP transfered to Bacula Systems according to agreement. # Author: Radosław Korzeniewski, radekk@inteos.pl, Inteos Sp. z o.o. # from baculak8s.entities.file_info import NOT_EMPTY_FILE from baculak8s.plugins.k8sbackend.k8sfileinfo import * from baculak8s.plugins.k8sbackend.k8sutils import * def namespace_read(corev1api, name): return corev1api.read_namespace(name) def namespaces_list_all(corev1api, nsfilter=None, estimate=False): nslist = {} namespaces = corev1api.list_namespace(watch=False) for ns in namespaces.items: if nsfilter is not None and len(nsfilter) > 0: if ns.metadata.name not in nsfilter: continue nsdata = namespace_read(corev1api, ns.metadata.name) spec = encoder_dump(nsdata) nslist[ns.metadata.name] = { 'name': ns.metadata.name, 'spec': spec if not estimate else None, 'fi': k8sfileinfo(objtype=K8SObjType.K8SOBJ_NAMESPACE, nsname=ns.metadata.name, name=ns.metadata.name, ftype=NOT_EMPTY_FILE, size=len(spec), creation_timestamp=nsdata.metadata.creation_timestamp), } return nslist def namespace_names(corev1api): nslist = [] namespaces = corev1api.list_namespace(watch=False) for ns in namespaces.items: nslist.append(["namespace", ns.metadata.name]) return nslist def namespaces_list_all_names(corev1api): nslist = {} namespaces = corev1api.list_namespace(watch=False) for ns in namespaces.items: nslist[ns.metadata.name] = { 'fi': FileInfo(name="/{}/{}".format(K8SObjType.K8SOBJ_NAMESPACE_Path, ns.metadata.name), ftype=DIRECTORY, size=0, uid=0, gid=0, mode=DEFAULT_DIR_MODE, nlink=1, modified_at=NOW_TIMESTAMP, accessed_at=NOW_TIMESTAMP, created_at=k8stimestamp_to_unix_timestamp(ns.metadata.creation_timestamp)), } return nslist def namespaces_restore(corev1api, file_info, file_content): ns = encoder_load(file_content, file_info.name) metadata = prepare_metadata(ns.metadata) projectid = None # Populate annotations about projectId if ns.metadata.annotations is not None: projectid = ns.metadata.annotations.get('field.cattle.io/projectId', None) if projectid is not None: ann = {'field.cattle.io/projectId': projectid} if metadata.annotations is not None: metadata.annotations.update(ann) else: metadata.annotations = ann # Instantiate the namespace object namespace = client.V1Namespace( api_version=ns.api_version, kind="Namespace", spec=ns.spec, metadata=metadata ) if file_info.objcache is not None: # TODO: Do we really need to replace existing namespace? response = corev1api.replace_namespace(k8sfile2objname(file_info.name), namespace, pretty='true') else: # object does not exist, so create one as required response = corev1api.create_namespace(namespace, pretty='true') return {'response': response} bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/plugins/k8sbackend/ingress.py0000644000175000017500000000566114771010173030666 0ustar bsbuildbsbuild# -*- coding: UTF-8 -*- # # Bacula® - The Network Backup Solution # # Copyright (C) 2000-2020 Bacula Systems SA # All rights reserved. # # The main author of Bacula is Kern Sibbald, with contributions from many # others, a complete list can be found in the file AUTHORS. 
# # Licensees holding a valid Bacula Systems SA license may use this file # and others of this release in accordance with the proprietary license # agreement provided in the LICENSE file. Redistribution of any part of # this release is not permitted. # # Bacula® is a registered trademark of Kern Sibbald. # # Copyright (c) 2019 by Inteos sp. z o.o. # All rights reserved. IP transfered to Bacula Systems according to agreement. # Author: Radosław Korzeniewski, radekk@inteos.pl, Inteos Sp. z o.o. # from baculak8s.entities.file_info import NOT_EMPTY_FILE from baculak8s.plugins.k8sbackend.k8sfileinfo import * from baculak8s.plugins.k8sbackend.k8sutils import * def ingress_check(networkv1api, file_info): return networkv1api.read_namespaced_ingress(k8sfile2objname(file_info.name), file_info.namespace) def ingress_read_namespaced(networkv1api, namespace, name): return networkv1api.read_namespaced_ingress(name, namespace) def ingress_list_namespaced(networkv1api, namespace, estimate=False, labels=""): ingresslist = {} ingresses = networkv1api.list_namespaced_ingress(namespace=namespace, watch=False, label_selector=labels) for ingress in ingresses.items: ingressdata = ingress_read_namespaced(networkv1api, namespace, ingress.metadata.name) spec = encoder_dump(ingressdata) ingresslist['ingress-' + ingress.metadata.name] = { 'spec': spec if not estimate else None, 'fi': k8sfileinfo(objtype=K8SObjType.K8SOBJ_INGRESS, nsname=namespace, name=ingress.metadata.name, ftype=NOT_EMPTY_FILE, size=len(spec), creation_timestamp=ingressdata.metadata.creation_timestamp), } return ingresslist def ingress_restore_namespaced(networkv1api, file_info, file_content): ingress = encoder_load(file_content, file_info.name) metadata = prepare_metadata(ingress.metadata) # Instantiate the daemon_set object statefulset = client.V1Ingress( api_version=ingress.api_version, kind="Ingress", spec=ingress.spec, metadata=metadata ) if file_info.objcache is not None: # object exist so we replace it response = networkv1api.replace_namespaced_ingress(k8sfile2objname(file_info.name), file_info.namespace, statefulset, pretty='true') else: # object does not exist, so create one as required response = networkv1api.create_namespaced_ingress(file_info.namespace, statefulset, pretty='true') return {'response': response} bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/plugins/k8sbackend/limitrange.py0000644000175000017500000000542614771010173031346 0ustar bsbuildbsbuild# -*- coding: UTF-8 -*- # # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. # # Copyright (c) 2019 by Inteos sp. z o.o. # All rights reserved. IP transfered to Bacula Systems according to agreement. # Author: Radosław Korzeniewski, radekk@inteos.pl, Inteos Sp. z o.o. 
# from baculak8s.entities.file_info import NOT_EMPTY_FILE from baculak8s.plugins.k8sbackend.k8sfileinfo import * from baculak8s.plugins.k8sbackend.k8sutils import * def limit_range_read_namespaced(corev1api, namespace, name): return corev1api.read_namespaced_limit_range(name, namespace) def limit_range_list_namespaced(corev1api, namespace, estimate=False, labels=""): lrlist = {} limitranges = corev1api.list_namespaced_limit_range(namespace=namespace, watch=False, label_selector=labels) for lr in limitranges.items: lrdata = limit_range_read_namespaced(corev1api, namespace, lr.metadata.name) spec = encoder_dump(lrdata) lrlist['lr-' + lr.metadata.name] = { 'spec': spec if not estimate else None, 'fi': k8sfileinfo(objtype=K8SObjType.K8SOBJ_LIMITRANGE, nsname=namespace, name=lr.metadata.name, ftype=NOT_EMPTY_FILE, size=len(spec), creation_timestamp=lrdata.metadata.creation_timestamp), } return lrlist def limit_range_restore_namespaced(corev1api, file_info, file_content): lr = encoder_load(file_content, file_info.name) metadata = prepare_metadata(lr.metadata) # Instantiate the limitrange object limitrange = client.V1LimitRange( api_version=lr.api_version, kind="LimitRange", spec=lr.spec, metadata=metadata ) if file_info.objcache is not None: # object exist so we replace it response = corev1api.replace_namespaced_limit_range(k8sfile2objname(file_info.name), file_info.namespace, limitrange, pretty='true') else: # object does not exist, so create one as required response = corev1api.create_namespaced_limit_range(file_info.namespace, limitrange, pretty='true') return {'response': response} bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/plugins/k8sbackend/secret.py0000644000175000017500000000563114771010173030476 0ustar bsbuildbsbuild# -*- coding: UTF-8 -*- # # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. # # Copyright (c) 2019 by Inteos sp. z o.o. # All rights reserved. IP transfered to Bacula Systems according to agreement. # Author: Radosław Korzeniewski, radekk@inteos.pl, Inteos Sp. z o.o. 
# from baculak8s.entities.file_info import NOT_EMPTY_FILE from baculak8s.plugins.k8sbackend.k8sfileinfo import * from baculak8s.plugins.k8sbackend.k8sutils import * def secrets_read_namespaced(corev1api, namespace, name): return corev1api.read_namespaced_secret(name, namespace, pretty='true') def secrets_list_namespaced(corev1api, namespace, estimate=False, labels=""): seclist = {} secrets = corev1api.list_namespaced_secret(namespace=namespace, watch=False, label_selector=labels) for sec in secrets.items: secdata = secrets_read_namespaced(corev1api, namespace, sec.metadata.name) spec = encoder_dump(secdata) seclist['sec-' + sec.metadata.name] = { 'spec': spec if not estimate else None, 'fi': k8sfileinfo(objtype=K8SObjType.K8SOBJ_SECRET, nsname=namespace, name=sec.metadata.name, ftype=NOT_EMPTY_FILE, size=len(spec), creation_timestamp=secdata.metadata.creation_timestamp), } return seclist def secrets_restore_namespaced(corev1api, file_info, file_content): sec = encoder_load(file_content, file_info.name) metadata = prepare_metadata(sec.metadata, annotations=True if sec.type == 'kubernetes.io/service-account-token' else False) # Instantiate the secret object secret = client.V1Secret( api_version=sec.api_version, kind="Secret", data=sec.data, string_data=sec.string_data, type=sec.type, metadata=metadata ) if file_info.objcache is not None: # object exist so we replace it response = corev1api.replace_namespaced_secret(k8sfile2objname(file_info.name), file_info.namespace, secret, pretty='true') else: # object does not exist, so create one as required response = corev1api.create_namespaced_secret(file_info.namespace, secret, pretty='true') return {'response': response} bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/plugins/plugin.py0000644000175000017500000000433114771010173026466 0ustar bsbuildbsbuild# -*- coding: UTF-8 -*- # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. 
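#
# Minimal sketch of a concrete backend built on the abstract base class declared
# below; the NullPlugin name is a placeholder and not part of the shipped plugin.
def _example_null_plugin():
    # Every @abstractmethod must be overridden, otherwise instantiating the
    # subclass raises TypeError.
    class NullPlugin(Plugin):
        def connect(self): return {}
        def disconnect(self): return None
        def list_in_path(self, path): return {}
        def query_parameter(self, parameter): return []
        def check_file(self, file_info): return None
        def restore_file(self, file_info, file_content_source=None): return {}
        def list_all_namespaces(self): return {}
        def list_namespaced_objects(self, namespace): return {}
    return NullPlugin()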
from abc import ABCMeta, abstractmethod UNRECOGNIZED_CONNECTION_ERROR = -1 ERROR_SSL_FAILED = 1200 ERROR_HOST_NOT_FOUND = 1400 ERROR_HOST_TIMEOUT = 1500 ERROR_AUTH_FAILED = 401 ERROR_CONNECTION_REFUSED = 111 class Plugin(metaclass=ABCMeta): """ Abstract Base Class for all the Plugins """ @abstractmethod def connect(self): """ Connects to the Plugin Data Source """ raise NotImplementedError @abstractmethod def disconnect(self): """ Disconnects from the Plugin Data Source """ raise NotImplementedError @abstractmethod def list_in_path(self, path): """ Lists all FileInfo objects belonging to the provided $path$ :return: """ raise NotImplementedError @abstractmethod def query_parameter(self, parameter): """ Response to client parameter query :return: """ raise NotImplementedError @abstractmethod def check_file(self, file_info): """ :return: """ raise NotImplementedError @abstractmethod def restore_file(self, file_info, file_content_source=None): """ :return: """ raise NotImplementedError @abstractmethod def list_all_namespaces(self): """ :return: """ raise NotImplementedError @abstractmethod def list_namespaced_objects(self, namespace): """ :return: """ raise NotImplementedError bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/plugins/plugin_factory.py0000644000175000017500000000273014771010173030216 0ustar bsbuildbsbuild# Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. from baculak8s.plugins.fs_plugin import FileSystemPlugin from baculak8s.plugins.kubernetes_plugin import KubernetesPlugin class PluginFactory(object): @staticmethod def create(plugin_name, config): """ Creates a plugin to be used for Backup and / or Restore operations. :param plugin_name: The name of the plugin to be Created :param config: The plugin configuration :return: The created plugin :raise: ValueError, if an invalid plugin_name was provided """ if "where" in config and len(config["where"]) > 1: return FileSystemPlugin(config) if plugin_name in ("kubernetes", "openshift"): return KubernetesPlugin(config) else: raise ValueError("Invalid Plugin Type") bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/plugins/kubernetes_plugin.py0000644000175000017500000015217614771010173030730 0ustar bsbuildbsbuild# -*- coding: UTF-8 -*- # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. 
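#
# Minimal connection sketch for the plugin defined below, assuming token/API-host
# authentication; the host, token and namespace values are placeholders only.
def _example_connect(api_host, bearer_token):
    params = {
        "host": api_host,            # cluster API endpoint (placeholder)
        "token": bearer_token,       # service-account bearer token (placeholder)
        "verify_ssl": "0",           # "0" skips TLS certificate verification
        "namespace": ["default"],    # limit the backup to selected namespaces
    }
    plugin = KubernetesPlugin(params)
    result = plugin.connect()
    # connect() returns a dict; the presence of an 'error' key signals a failure.
    return result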
import logging import requests import requests.packages.urllib3 import urllib3 from kubernetes import client, config from kubernetes.client.rest import ApiException from urllib3.exceptions import MaxRetryError, SSLError, TimeoutError from baculak8s.entities.file_info import (DEFAULT_DIR_MODE, DIRECTORY, EMPTY_FILE, FileInfo) from baculak8s.entities.k8sobjtype import K8SObjType from baculak8s.io.log import Log from baculak8s.plugins import k8sbackend from baculak8s.plugins.k8sbackend.baculabackup import exists_bacula_pod from baculak8s.plugins.k8sbackend.baculaannotations import annotated_namespaced_pods_data from baculak8s.plugins.k8sbackend.configmaps import * from baculak8s.plugins.k8sbackend.csi_snapshot import * from baculak8s.plugins.k8sbackend.daemonset import * from baculak8s.plugins.k8sbackend.deployment import * from baculak8s.plugins.k8sbackend.endpoints import * from baculak8s.plugins.k8sbackend.ingress import * from baculak8s.plugins.k8sbackend.k8sfileinfo import NOW_TIMESTAMP from baculak8s.plugins.k8sbackend.limitrange import * from baculak8s.plugins.k8sbackend.namespaces import * from baculak8s.plugins.k8sbackend.persistentvolumeclaims import * from baculak8s.plugins.k8sbackend.persistentvolumes import * from baculak8s.plugins.k8sbackend.pods import * from baculak8s.plugins.k8sbackend.podtemplates import * from baculak8s.plugins.k8sbackend.pvcdata import * from baculak8s.plugins.k8sbackend.pvcclone import delete_pvcclone from baculak8s.plugins.k8sbackend.replicaset import * from baculak8s.plugins.k8sbackend.replicationcontroller import * from baculak8s.plugins.k8sbackend.resourcequota import * from baculak8s.plugins.k8sbackend.secret import * from baculak8s.plugins.k8sbackend.service import * from baculak8s.plugins.k8sbackend.serviceaccounts import * from baculak8s.plugins.k8sbackend.statefulset import * from baculak8s.plugins.k8sbackend.storageclass import * from baculak8s.plugins.k8sbackend.volumesnapshotclass import get_snapshot_drivers_compatible from baculak8s.plugins.plugin import * from baculak8s.util.date_util import gmt_to_unix_timestamp from baculak8s.util.lambda_util import wait_until_resource_is_ready HTTP_NOT_FOUND = 404 K8S_POD_CONTAINER_STATUS_ERROR = "Pod status error! Reason: {}, Message: {}" K8S_VOLUME_SNAPSHOT_STATUS_ERROR = "Volume status error! 
Reason: {}, Message: {}" class KubernetesPlugin(Plugin): """ Plugin that communicates with Kubernetes API """ def __init__(self, params): logging.debug("params:" + str(params)) self._params = params _ns = params.get("namespace", []) _pv = params.get("persistentvolume", []) _sc = params.get("storageclass", []) _vssl = params.get("verify_ssl", None) _pvconfig = params.get("pvconfig", True) _scconfig = params.get("scconfig", True) _pvcdata = params.get("pvcdata", None) if isinstance(_pvcdata, str): # single param if _pvcdata == '1': # pvcdata without value _pvcdata = True else: _pvcdata = _pvcdata.split(',') self.config = { 'config_file': params.get("config", None), 'host': params.get("host", None), 'token': params.get("token", None), 'username': params.get("username", None), # for future usage 'password': params.get("password", None), # for future usage 'incluster': params.get("incluster", None), 'verify_ssl': True if _vssl is None or _vssl == "1" or _vssl == '' else False, 'ssl_ca_cert': params.get("ssl_ca_cert", None), 'namespace': _ns if len(_ns) > 0 else None, 'nsconfig': True, 'pv': _pv if len(_pv) > 0 else None, 'pvconfig': False if _pvconfig == '0' else True, 'storageclass': _sc if len(_sc) > 0 else None, 'scconfig': False if _scconfig == '0' else True, 'pvcdata': _pvcdata, 'labels': params.get("labels", ""), } # disable namespace backup when pv param selected if self.config.get('pv') is not None and self.config.get('namespace') is None: self.config['nsconfig'] = False # as pvcs are namespaced objects then it is required to add namespace=... parameter if self.config.get('namespace') is None and self.config.get('pvcdata') is not None: self.config['pvcdata'] = None logging.debug("pluginconfig:{}".format(self.config)) self.coreapi = None self.corev1api = None self.appsv1api = None self.storagev1api = None self.clientConfiguration = None self.clientAPI = None self.networkv1api = None self.k8s = { K8SObjType.K8SOBJ_PVOLUME: {}, K8SObjType.K8SOBJ_NAMESPACE: {}, K8SObjType.K8SOBJ_STORAGECLASS: {}, K8SObjType.K8SOBJ_PVCDATA: {}, } self._connected = False self.pods_counter = 0 self.pvcs_counter = 0 self.pvcs_totalsize = 0 self.po_source_data = "" def connect(self): """ Implementation of Plugin.connect(self) """ if self.config.get("incluster") is not None: try: config.load_incluster_config() except config.config_exception.ConfigException as e: strerror = ' '.join(e.args) logging.debug("ERROR at load_incluster_config: " + strerror) return { 'error': "incluster error: " + strerror, 'exception': True, } except Exception as e: logging.debug("ERROR at load_incluster_config: " + str(e)) return { 'error': str(e), 'exception': True, } elif self.config.get("token") is not None and self.config.get('host') is not None: # handle connection with bearertoken and apihost params self.clientConfiguration = client.Configuration() self.clientConfiguration.host = self.config.get('host') self.clientConfiguration.verify_ssl = self.config.get('verify_ssl') ssl_ca_cert = self.config.get("ssl_ca_cert") if ssl_ca_cert is not None: self.clientConfiguration.ssl_ca_cert = ssl_ca_cert self.clientConfiguration.api_key = {"authorization": "Bearer " + self.config.get("token")} self.clientAPI = client.ApiClient(configuration=self.clientConfiguration) else: configfile = self.config.get("config_file", None) if configfile is None: configfile = config.kube_config.KUBE_CONFIG_DEFAULT_LOCATION logging.debug("load_kube_config(config_file={})".format(configfile)) try: config.load_kube_config(config_file=configfile) except OSError as e: 
logging.debug("ERROR OSError at load_kube_config: " + str(e.strerror)) return { 'error': e.strerror + " config=" + str(configfile), 'exception': True, } except Exception as e: logging.debug("ERROR Exception at load_kube_config: " + str(e)) return { 'error': str(e) + " config=" + str(configfile), 'exception': True, } # Tests the connection with K8S self.coreapi = client.CoreApi(api_client=self.clientAPI) self.corev1api = client.CoreV1Api(api_client=self.clientAPI) self.appsv1api = client.AppsV1Api(api_client=self.clientAPI) self.storagev1api = client.StorageV1Api(api_client=self.clientAPI) self.crd_api = client.CustomObjectsApi(api_client=self.clientAPI) # To manage csi snapshots self.networkv1api = client.NetworkingV1Api(api_client=self.clientAPI) # To manage ingress logging.getLogger(requests.packages.urllib3.__package__).setLevel(logging.ERROR) logging.getLogger(client.rest.__package__).setLevel(logging.ERROR) urllib3.disable_warnings() logging.captureWarnings(True) response = self.__execute(lambda: self.coreapi.get_api_versions(), check_connection=False) if isinstance(response, dict) and "error" in response: logging.debug("ERROR response:{}".format(response)) self.coreapi = None self.corev1api = None self.appsv1api = None self.storagev1api = None return response else: self._connected = True # grab some info about a cluster and forward to job vapi = client.VersionApi() response = self.__execute(lambda: vapi.get_code(), check_connection=False) if isinstance(response, dict) and "error" in response: data = {} else: data = {'response': response} response = self.__execute(lambda: self.coreapi.get_api_versions(), check_connection=False) if not (isinstance(response, dict) and "error" in response): self.po_source_data = response.server_address_by_client_cid_rs[0].server_address return data def disconnect(self): """ Implementation of Plugin.disconnect(self) No need to disconnect from K8S """ pass def list_in_path(self, path): """ Implementation of Plugin.list_in_path(self, path) returns FileInfo objects associated with the $path$ """ top_level_dirs = [ K8SObjType.K8SOBJ_NAMESPACE_Path, K8SObjType.K8SOBJ_PVOLUME_Path, K8SObjType.K8SOBJ_STORAGECLASS_Path, ] path = path.lstrip("/") if path.startswith(K8SObjType.K8SOBJ_NAMESPACE_Path): lpath = path.split("/") if len(lpath) > 1 and len(lpath[1]) > 0: if len(lpath) == 2 or len(lpath[2]) == 0: data = { 'pvcdata': { 'fi': FileInfo(name='/namespaces/' + lpath[1] + '/' + K8SObjType.K8SOBJ_PVCDATA_Path, ftype=DIRECTORY, size=0, uid=0, gid=0, nlink=1, mode=DEFAULT_DIR_MODE, modified_at=NOW_TIMESTAMP, accessed_at=NOW_TIMESTAMP, created_at=NOW_TIMESTAMP), } } return data elif lpath[2] == K8SObjType.K8SOBJ_PVCDATA_Path: return list_pvcs_namespaced(self.corev1api, lpath[1]) else: return self.list_all_namespaces_names() if path == K8SObjType.K8SOBJ_STORAGECLASS_Path: return self.list_all_storageclass_names() if path == K8SObjType.K8SOBJ_PVOLUME_Path: return self.list_all_persistentvolumes_names() self.k8s = {} for dirname in top_level_dirs: self.k8s[dirname] = { 'fi': FileInfo(name='/' + dirname, ftype=DIRECTORY, size=0, uid=0, gid=0, nlink=1, mode=DEFAULT_DIR_MODE, modified_at=NOW_TIMESTAMP, accessed_at=NOW_TIMESTAMP, created_at=NOW_TIMESTAMP), } return self.k8s def query_parameter(self, parameter): """ Implementation of Plugin.query_parameter(self, parameter) returns array of available parameters """ if parameter == 'namespace': return self.__execute(lambda: namespace_names(self.corev1api)) if parameter == 'persistentvolume': return self.__execute(lambda: 
persistentvolumes_names(self.corev1api)) if parameter == 'storageclass': return self.__execute(lambda: storageclass_names(self.storagev1api)) ns = self.config.get("namespace") if ns is not None: ns = ns[0] if parameter == 'pvcdata': return self.__execute(lambda: persistentvolumeclaims_namespaced_names(self.corev1api, ns)) return [] def list_all_persistentvolumes(self, estimate=False): if self.config.get('pvconfig'): self.k8s[K8SObjType.K8SOBJ_PVOLUME] = \ self.__execute(lambda: persistentvolumes_list_all(self.corev1api, pvfilter=self.config['pv'], estimate=estimate)) return self.k8s[K8SObjType.K8SOBJ_PVOLUME] def list_all_namespaces(self, estimate=False): if self.config.get('nsconfig'): self.k8s[K8SObjType.K8SOBJ_NAMESPACE] = \ self.__execute(lambda: namespaces_list_all(self.corev1api, nsfilter=self.config['namespace'], estimate=estimate)) return self.k8s[K8SObjType.K8SOBJ_NAMESPACE] def list_all_storageclass(self, estimate=False): if self.config.get('scconfig'): self.k8s[K8SObjType.K8SOBJ_STORAGECLASS] = \ self.__execute(lambda: storageclass_list_all(self.storagev1api, scfilter=self.config['storageclass'], estimate=estimate)) return self.k8s[K8SObjType.K8SOBJ_STORAGECLASS] def get_persistentvolumeclaim_read_namespaced(self, namespace, name): return self.__execute(lambda: persistentvolumeclaims_read_namespaced(self.corev1api, namespace, name)) def get_pvcdata_namespaced(self, namespace, pvcname, pvcalias=None, estimate=False): logging.debug("pvcdata namespaced: {}/{} pvcalias={}".format(namespace, pvcname, pvcalias)) return self.__execute(lambda: pvcdata_get_namespaced(self.corev1api, namespace, pvcname, pvcalias)) def list_pvcdata_for_namespace(self, namespace, estimate=False, allpvcs=False): pvcfilter = self.config.get('pvcdata', allpvcs) if not allpvcs else allpvcs logging.debug("list pvcdata for namespace:{} pvcfilter={} estimate={}".format(namespace, pvcfilter, estimate)) return self.__execute(lambda: pvcdata_list_namespaced(self.corev1api, namespace, estimate, pvcfilter=pvcfilter)) def get_config_maps(self, namespace, estimate=False): return self.__execute(lambda: config_maps_list_namespaced(self.corev1api, namespace, estimate, self.config['labels'])) def get_endpoints(self, namespace, estimate=False): return self.__execute(lambda: endpoints_list_namespaced(self.corev1api, namespace, estimate, self.config['labels'])) def get_pods(self, namespace, estimate=False): # the __execute() wrapper intercept exceptions and return and error dict() # instead of the expected response response = self.__execute(lambda: pods_list_namespaced(self.corev1api, namespace, estimate, self.config['labels'])) if not isinstance(response, dict) or not "error" in response: pods = response nrpods = len(pods) logging.debug("get_pods[{}]:pods:{}".format(namespace, nrpods)) self.pods_counter += nrpods return pods else: return response # it is a dictionary with an error def get_pvcs(self, namespace, estimate=False): # the __execute() wrapper intercept exceptions and return and error dict() # instead of the expected response response = self.__execute(lambda: persistentvolumeclaims_list_namespaced(self.corev1api, namespace, estimate, self.config['labels'])) if isinstance(response, tuple) and len(response) == 2: pvcs, totalsize = response nrpvcs = len(pvcs) self.pvcs_counter += nrpvcs self.pvcs_totalsize += totalsize self.k8s['pvcs'] = pvcs logging.debug("get_pvcs[{}]:pvcs:{}".format(namespace, nrpvcs)) return pvcs else: return response # it should be a dictionary with an error def get_pvc_names(self, namespace): 
return self.__execute(lambda: persistentvolumeclaims_namespaced_only_names(self.corev1api, namespace)) def get_podtemplates(self, namespace, estimate=False): return self.__execute(lambda: podtemplates_list_namespaced(self.corev1api, namespace, estimate, self.config['labels'])) def get_limit_ranges(self, namespace, estimate=False): return self.__execute(lambda: limit_range_list_namespaced(self.corev1api, namespace, estimate, self.config['labels'])) def get_replication_controller(self, namespace, estimate=False): return self.__execute(lambda: replication_controller_list_namespaced(self.corev1api, namespace, estimate, self.config['labels'])) def get_resource_quota(self, namespace, estimate=False): return self.__execute(lambda: resource_quota_list_namespaced(self.corev1api, namespace, estimate, self.config['labels'])) def get_secrets(self, namespace, estimate=False): return self.__execute(lambda: secrets_list_namespaced(self.corev1api, namespace, estimate, self.config['labels'])) def get_services(self, namespace, estimate=False): return self.__execute(lambda: services_list_namespaced(self.corev1api, namespace, estimate, self.config['labels'])) def get_service_accounts(self, namespace, estimate=False): return self.__execute(lambda: service_accounts_list_namespaced(self.corev1api, namespace, estimate, self.config['labels'])) def get_daemon_sets(self, namespace, estimate=False): return self.__execute(lambda: daemon_sets_list_namespaced(self.appsv1api, namespace, estimate, self.config['labels'])) def get_deployments(self, namespace, estimate=False): return self.__execute(lambda: deployments_list_namespaced(self.appsv1api, namespace, estimate, self.config['labels'])) def get_replica_sets(self, namespace, estimate=False): return self.__execute(lambda: replica_sets_list_namespaced(self.appsv1api, namespace, estimate, self.config['labels'])) def get_stateful_sets(self, namespace, estimate=False): return self.__execute(lambda: stateful_sets_list_namespaced(self.appsv1api, namespace, estimate, self.config['labels'])) def get_ingresses(self, namespace, estimate=False): return self.__execute(lambda: ingress_list_namespaced(self.networkv1api, namespace, estimate, self.config['labels'])) def list_all_persistentvolumes_names(self): self.k8s[K8SObjType.K8SOBJ_PVOLUME] = self.__execute(lambda: persistentvolumes_list_all_names(self.corev1api)) return self.k8s[K8SObjType.K8SOBJ_PVOLUME] def get_annotated_namespaced_pods_data(self, namespace, estimate=False): return self.__execute(lambda: annotated_namespaced_pods_data(self.corev1api, namespace, estimate, self.config['labels'])) def list_all_storageclass_names(self): self.k8s[K8SObjType.K8SOBJ_STORAGECLASS] = self.__execute(lambda: storageclass_list_all_names(self.storagev1api)) return self.k8s[K8SObjType.K8SOBJ_STORAGECLASS] def list_all_namespaces_names(self): self.k8s[K8SObjType.K8SOBJ_NAMESPACE] = self.__execute(lambda: namespaces_list_all_names(self.corev1api)) return self.k8s[K8SObjType.K8SOBJ_NAMESPACE] def list_namespaced_objects(self, namespace, estimate=False): # We should maintain the following resources backup order # logging.debug("list_namespaced_objects_label:[{}]".format(self.config['labels'])) # self.k8s[K8SObjType.K8SOBJ_PVCDATA] = {} # data = [ # self.get_config_maps(namespace, estimate), # self.get_service_accounts(namespace, estimate), # self.get_secrets(namespace, estimate), # # self.get_endpoints(namespace, estimate), # self.get_pvcs(namespace, estimate), # self.get_limit_ranges(namespace, estimate), # self.get_resource_quota(namespace, 
estimate), # self.get_services(namespace, estimate), # self.get_pods(namespace, estimate), # self.get_daemon_sets(namespace, estimate), # self.get_replica_sets(namespace, estimate), # self.get_stateful_sets(namespace, estimate), # self.get_deployments(namespace, estimate), # self.get_replication_controller(namespace, estimate), # self.get_ingresses(namespace,estimate), # ] # return data raise NotImplementedError("Not implemented list_namespaced_objects") def list_namespaced_objects_before_pvcdata(self, namespace, estimate=False): # We should maintain the following resources backup order logging.debug("list_namespaced_objects_label:[{}]".format(self.config['labels'])) self.k8s[K8SObjType.K8SOBJ_PVCDATA] = {} data = [ self.get_config_maps(namespace, estimate), self.get_service_accounts(namespace, estimate), self.get_secrets(namespace, estimate), # self.get_endpoints(namespace, estimate), self.get_pvcs(namespace, estimate), self.get_limit_ranges(namespace, estimate), self.get_resource_quota(namespace, estimate), self.get_services(namespace, estimate), # self.get_pods(namespace, estimate), # self.get_daemon_sets(namespace, estimate), # self.get_replica_sets(namespace, estimate), # self.get_stateful_sets(namespace, estimate), # self.get_deployments(namespace, estimate), # self.get_replication_controller(namespace, estimate), # self.get_ingresses(namespace,estimate), ] return data def list_namespaced_objects_after_pvcdata(self, namespace, estimate=False): # We should maintain the following resources backup order logging.debug("list_namespaced_objects_label:[{}]".format(self.config['labels'])) self.k8s[K8SObjType.K8SOBJ_PVCDATA] = {} data = [ # self.get_config_maps(namespace, estimate), # self.get_service_accounts(namespace, estimate), # self.get_secrets(namespace, estimate), # # self.get_endpoints(namespace, estimate), # self.get_pvcs(namespace, estimate), # self.get_limit_ranges(namespace, estimate), # self.get_resource_quota(namespace, estimate), # self.get_services(namespace, estimate), self.get_pods(namespace, estimate), self.get_daemon_sets(namespace, estimate), self.get_replica_sets(namespace, estimate), self.get_stateful_sets(namespace, estimate), self.get_deployments(namespace, estimate), self.get_replication_controller(namespace, estimate), self.get_ingresses(namespace,estimate), ] return data def upload_config_map(self, file_info, file_content): return self.__execute(lambda: config_map_restore_namespaced(self.corev1api, file_info, file_content)) def upload_daemon_set(self, file_info, file_content): return self.__execute(lambda: daemon_sets_restore_namespaced(self.appsv1api, file_info, file_content)) def upload_deployment(self, file_info, file_content): return self.__execute(lambda: deployments_restore_namespaced(self.appsv1api, file_info, file_content)) def upload_endpoint(self, file_info, file_content): return self.__execute(lambda: endpoints_restore_namespaced(self.corev1api, file_info, file_content)) def upload_limitrange(self, file_info, file_content): return self.__execute(lambda: limit_range_restore_namespaced(self.corev1api, file_info, file_content)) def upload_namespace(self, file_info, file_content): return self.__execute(lambda: namespaces_restore(self.corev1api, file_info, file_content)) def upload_pod(self, file_info, file_content): return self.__execute(lambda: pods_restore_namespaced(self.corev1api, file_info, file_content)) def upload_persistentvolume_claim(self, file_info, file_content): return self.__execute(lambda: 
persistentvolumeclaims_restore_namespaced(self.corev1api, file_info, file_content)) def upload_persistentvolume(self, file_info, file_content): return self.__execute(lambda: persistentvolumes_restore(self.corev1api, file_info, file_content)) def upload_storageclass(self, file_info, file_content): return self.__execute(lambda: storageclass_restore(self.storagev1api, file_info, file_content)) def upload_pod_template(self, file_info, file_content): return self.__execute(lambda: podtemplates_restore_namespaced(self.corev1api, file_info, file_content)) def upload_replica_set(self, file_info, file_content): return self.__execute(lambda: replica_sets_restore_namespaced(self.appsv1api, file_info, file_content)) def upload_replication_controller(self, file_info, file_content): return self.__execute(lambda: replication_controller_restore_namespaced(self.corev1api, file_info, file_content)) def upload_resource_quota(self, file_info, file_content): return self.__execute(lambda: resource_quota_restore_namespaced(self.corev1api, file_info, file_content)) def upload_secret(self, file_info, file_content): return self.__execute(lambda: secrets_restore_namespaced(self.corev1api, file_info, file_content)) def upload_service(self, file_info, file_content): return self.__execute(lambda: services_restore_namespaced(self.corev1api, file_info, file_content)) def upload_service_account(self, file_info, file_content): return self.__execute(lambda: service_accounts_restore_namespaced(self.corev1api, file_info, file_content)) def upload_stateful_set(self, file_info, file_content): return self.__execute(lambda: stateful_sets_restore_namespaced(self.appsv1api, file_info, file_content)) def upload_ingress(self, file_info, file_content): return self.__execute(lambda: ingress_restore_namespaced(self.networkv1api, file_info, file_content)) def __restore_k8s_object(self, file_info, file_content_source=None): file_content = b'' if file_info.size != 0 and file_content_source is not None: while True: data = file_content_source.read() if data is None: break else: file_content += data method_name = 'upload_' + str(K8SObjType.methoddict.get(file_info.objtype)) method = getattr(self, method_name, None) if method is None: return {'error': 'Invalid object type: {}'.format(file_info.objtype)} if file_info.objcache is None: current_file = self.check_file(file_info) if isinstance(current_file, dict) and 'error' in current_file: file_info.objcache = None logging.error("check_file: {}".format(current_file['error'])) else: file_info.objcache = current_file return method(file_info, file_content) def restore_file(self, file_info, file_content_source=None): """ Implementation of Plugin.restore_file(self, file_info, file_content_source=None) """ if file_info.objtype != K8SObjType.K8SOBJ_PVCDATA: return self.__restore_k8s_object(file_info, file_content_source) # TODO: export/move all checks into k8sbackend def check_storage_compatibility_with_vsnapshot(self, storage_class_name): logging.debug("Check Storage class compatibility.{}".format(storage_class_name)) storage_provisioner = get_provisioner(self.storagev1api, storage_class_name) logging.debug("Provisioner {}".format(storage_provisioner)) logging.debug('Compatible Drivers: {} '.format(get_snapshot_drivers_compatible(self.crd_api))) if storage_provisioner in get_snapshot_drivers_compatible(self.crd_api): return True return False def check_pvc_compatiblity_with_vsnapshot(self, namespace, pvc_name): pvc = self.get_pvcdata_namespaced(namespace, pvc_name) logging.debug('Check Compatibility with 
Snapshots. Name: {}'.format(pvc_name)) logging.debug('PVC: {}'.format(pvc)) return self.check_storage_compatibility_with_vsnapshot(pvc.get('storage_class_name')) def _check_config_map(self, file_info): return self.__exec_check_object( lambda: self.corev1api.read_namespaced_config_map(k8sfile2objname(file_info.name), file_info.namespace)) def _check_daemon_set(self, file_info): return self.__exec_check_object( lambda: self.appsv1api.read_namespaced_daemon_set(k8sfile2objname(file_info.name), file_info.namespace)) def _check_deployment(self, file_info): return self.__exec_check_object( lambda: self.appsv1api.read_namespaced_deployment(k8sfile2objname(file_info.name), file_info.namespace)) def _check_endpoint(self, file_info): return self.__exec_check_object( lambda: self.corev1api.read_namespaced_endpoints(k8sfile2objname(file_info.name), file_info.namespace)) def _check_limitrange(self, file_info): return self.__exec_check_object( lambda: self.corev1api.read_namespaced_limit_range(k8sfile2objname(file_info.name), file_info.namespace)) def _check_namespace(self, file_info): return self.__exec_check_object(lambda: self.corev1api.read_namespace(k8sfile2objname(file_info.name))) def check_namespace(self, name): return self.__exec_check_object(lambda: self.corev1api.read_namespace(name)) def _check_pod(self, file_info): return self.__exec_check_object( lambda: self.corev1api.read_namespaced_pod(k8sfile2objname(file_info.name), file_info.namespace)) def check_pod(self, namespace, name): return self.__exec_check_object(lambda: self.corev1api.read_namespaced_pod(name, namespace)) def check_bacula_pod(self, namespace, job): pod_list = self.get_pods(namespace) return exists_bacula_pod(pod_list, job) def _check_persistentvolume_claim(self, file_info): return self.__exec_check_object( lambda: self.corev1api.read_namespaced_persistent_volume_claim(k8sfile2objname(file_info.name), file_info.namespace)) def _check_persistentvolume_claim_status(self, namespace, pvc_name): return self.__exec_check_object( lambda: self.corev1api.read_namespaced_persistent_volume_claim_status(pvc_name, namespace) ) def _check_persistentvolume(self, file_info): return self.__exec_check_object( lambda: self.corev1api.read_persistent_volume(k8sfile2objname(file_info.name))) def _check_storageclass(self, file_info): return self.__exec_check_object( lambda: self.storagev1api.read_storage_class(k8sfile2objname(file_info.name))) def _check_pod_template(self, file_info): return self.__exec_check_object( lambda: self.corev1api.read_namespaced_pod_template(k8sfile2objname(file_info.name), file_info.namespace)) def _check_replica_set(self, file_info): return self.__exec_check_object( lambda: self.appsv1api.read_namespaced_replica_set(k8sfile2objname(file_info.name), file_info.namespace)) def _check_stateful_set(self, file_info): return self.__exec_check_object( lambda: self.appsv1api.read_namespaced_stateful_set(k8sfile2objname(file_info.name), file_info.namespace)) def _check_ingress(self, file_info): return self.__exec_check_object(lambda: ingress_check(self.networkv1api, file_info)) def _check_replication_controller(self, file_info): return self.__exec_check_object( lambda: self.corev1api.read_namespaced_replication_controller(k8sfile2objname(file_info.name), file_info.namespace)) def _check_resource_quota(self, file_info): return self.__exec_check_object( lambda: self.corev1api.read_namespaced_resource_quota(k8sfile2objname(file_info.name), file_info.namespace)) def _check_secret(self, file_info): return self.__exec_check_object( 
lambda: self.corev1api.read_namespaced_secret(k8sfile2objname(file_info.name), file_info.namespace)) def _check_service(self, file_info): return self.__exec_check_object( lambda: self.corev1api.read_namespaced_service(k8sfile2objname(file_info.name), file_info.namespace)) def _check_service_account(self, file_info): return self.__exec_check_object( lambda: self.corev1api.read_namespaced_service_account(k8sfile2objname(file_info.name), file_info.namespace)) def delete_persistent_volume_claim(self, namespace, pvc_name, grace_period_seconds=0, propagation_policy="Foreground"): return self.__execute(lambda: delete_pvcclone(self.corev1api, namespace, pvc_name, grace_period_seconds=grace_period_seconds, propagation_policy=propagation_policy)) def check_file(self, file_info): """ :param file_info: :return: """ method_name = '_check_' + str(K8SObjType.methoddict.get(file_info.objtype)) method = getattr(self, method_name, None) if method is None: return {'error': 'Invalid object type: {}'.format(file_info.objtype)} return method(file_info) def __exec_check_object(self, action, check_connection=True): """ Executes an action and verifies if it return HTTP404 :param action: :param check_connection: :return: """ if check_connection: # Verifies if connect() was called if not self._connected: return None try: return action() except ApiException as e: if e.status == HTTP_NOT_FOUND: return None return False def __execute(self, action, check_connection=True): """ Executes an action that uses one of the python-swiftclient APIs (Connection API or Service API) :param action: Action to be executed :return: The action result in case of success, or an error dictionary in case of failure """ if check_connection: # Verifies if connect() was called if not self._connected: return { 'exception': True, 'error': "To use this, a connection must be established first", 'error_code': 0 } try: return action() except ApiException as e: Log.save_exception(e) return { 'exception': True, 'error': e.reason, 'error_code': e.status, 'descr': e.body, } except MaxRetryError as e: Log.save_exception(e) return { 'exception': True, 'error': e.reason, 'error_code': ERROR_CONNECTION_REFUSED, } except TimeoutError as e: Log.save_exception(e) return { 'exception': True, 'error': "A socket timeout error occurs.", 'error_code': ERROR_HOST_TIMEOUT } except SSLError as e: Log.save_exception(e) return { 'exception': True, 'error': "SSL certificate fails in an HTTPS connection.", 'error_code': ERROR_SSL_FAILED } except OSError as e: Log.save_exception(e) if e.__class__.__name__ == "ConnectionError": error_code = ERROR_HOST_NOT_FOUND error_message = "Host is Down" elif e.__class__.__name__ == "ConnectTimeout": error_code = ERROR_HOST_TIMEOUT error_message = "Host Timeout" else: error_code = UNRECOGNIZED_CONNECTION_ERROR error_message = "Unrecognized Error" return { 'exception': True, 'error': error_message, 'error_code': error_code } except Exception as e: Log.save_exception(e) return { 'exception': True, 'error': "Unrecognized Error", 'error_code': UNRECOGNIZED_CONNECTION_ERROR } def __extract_file_info(self, bucket, file_name, headers): """ Extracts the file information from the swift object Stat data structure :param bucket: The objects bucket :param file_name: The objects name :param headers: The objects headers :return: the file information data structure """ # TODO: unused? 
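# Note: this helper appears to be a leftover from the swift/object-storage backend (its docstring still
# refers to swift); the body below converts the HTTP date headers into unix timestamps and packs them
# into a Bacula-style file-info dictionary.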
accessed_at_gmt = headers["date"] accessed_at_ts = gmt_to_unix_timestamp(accessed_at_gmt) modified_at_gmt = headers["last-modified"] modified_at_ts = gmt_to_unix_timestamp(modified_at_gmt) created_at_ts = int(float(headers["x-timestamp"])) file_size = int(headers["content-length"]) file_type = NOT_EMPTY_FILE if file_size > 0 else EMPTY_FILE return { "name": file_name, "type": file_type, "size": file_size, "uid": 0, "gid": 0, "mode": DEFAULT_FILE_MODE, "nlink": 1, "modified-at": modified_at_ts, "accessed-at": accessed_at_ts, "created-at": created_at_ts, } def create_backup_pod(self, namespace, poddata): pod = client.V1Pod( api_version=poddata.get('apiVersion'), kind=poddata.get('kind'), metadata=poddata.get('metadata'), spec=poddata.get('spec') ) response = self.__execute(lambda: self.corev1api.create_namespaced_pod(namespace=namespace, body=pod, pretty=True)) if isinstance(response, dict) and "error" in response: return response return {} def create_pvc_clone(self, namespace, pvcclonedata): pvcclone = client.V1PersistentVolumeClaim( api_version=pvcclonedata.get('apiVersion'), kind=pvcclonedata.get('kind'), metadata=pvcclonedata.get('metadata'), spec=pvcclonedata.get('spec') ) response = self.__execute(lambda: self.corev1api.create_namespaced_persistent_volume_claim(namespace=namespace, body=pvcclone, pretty=True)) if isinstance(response, dict) and "error" in response: return response return {} def vsnapshot_isready(self, namespace, snapshot_name): response = self._vsnapshot_status(namespace, snapshot_name) if isinstance(response, dict) and "error" in response: return response status = response.get('status') logging.debug("vsnapshot_isready:ReadyToUse:{}".format(status)) if status is None: return False return status.get('readyToUse') def create_vsnapshot(self, namespace, pvc): pvcname = pvc.get('name') logging.info("Creating vsnapshot of pvc `{}`".format(pvc)) storage_provisioner = get_provisioner(self.storagev1api, pvc.get('storage_class_name')) snapshot = prepare_create_snapshot_body(self.crd_api, namespace, pvcname, self._params.get('jobid'), storage_provisioner) logging.debug("Body to create vsnapshot:\n`{}`\n".format(snapshot)) response = self.__execute(lambda: self.crd_api.create_namespaced_custom_object(**snapshot, pretty=True)) if isinstance(response, dict) and "error" in response: return response snapshot_name = snapshot.get("body").get("metadata").get("name") logging.debug('Request create snapshot `{}` sent correctly'.format(snapshot_name)) wait_until_resource_is_ready( lambda: self.vsnapshot_isready(namespace, snapshot_name), "Waiting create volume snapshot `{}` to be ready.".format(snapshot_name) ) logging.info("Created successfully vsnapshot of pvc `{}`".format(pvcname)) return { 'name': snapshot_name, 'kind': snapshot.get('body').get('kind'), 'source': pvcname, 'namespace': namespace, 'volume_snapshot_class_name': snapshot.get('body').get('spec').get('volumeSnapshotClassName') } def create_pvc_from_vsnapshot(self, namespace, pvcdata): new_pvc = prepare_pvc_from_vsnapshot_body(namespace, pvcdata, self._params.get('jobid')) logging.debug("Body to create pvc from vsnapshot: `{}`".format(new_pvc)) response = self.create_pvc_clone(namespace, new_pvc) if isinstance(response, dict) and 'error' in response: return response response = self.get_pvcdata_namespaced(namespace, new_pvc.get('metadata').get('name')) if isinstance(response, dict) and 'error' in response: return response logging.debug('PVC from vsnapshot Response: {}'.format(response)) new_pvc_name = response.get('name') 
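# Block here until the freshly cloned PVC is reported ready; only then return its details to the caller.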
wait_until_resource_is_ready( lambda: self.pvc_status(namespace, new_pvc_name), 'Waiting create pvc from snapshot `{}` to be ready'.format(new_pvc_name) ) return { 'name': new_pvc_name, 'node_name': response.get('node_name'), 'storage_class_name': response.get('storage_class_name'), 'capacity': response.get('capacity'), 'fi': pvcdata.get('fi') } def backup_pod_status(self, namespace, bacula_pod_name): return self.corev1api.read_namespaced_pod_status(name=bacula_pod_name, namespace=namespace) def pvc_status(self, namespace, pvcname): return self.__execute(lambda: self.corev1api.read_namespaced_persistent_volume_claim_status(name=pvcname, namespace=namespace)) def is_pvc_waiting_first_consumer(self, namespace, pvcname): field_selector = 'involvedObject.kind='+'PersistentVolumeClaim'+',involvedObject.name='+pvcname response = self.__execute(lambda: self.corev1api.list_namespaced_event(namespace, field_selector=field_selector)) logging.debug('Response in pvc_waiting_fisrt_consumer:') logging.debug(response) if response is None or (not isinstance(response, dict) and not hasattr(response, "items")): return False for event in response.items: if event.reason == 'WaitForFirstConsumer': return True return False def _vsnapshot_status(self, namespace, snapshot_name): return self.__execute(lambda: self.crd_api.get_namespaced_custom_object_status(**prepare_snapshot_action(namespace, snapshot_name))) def backup_pod_isready(self, namespace, podname, seq=None): pod = self.backup_pod_status(namespace, podname) status = pod.status # logging.debug("backup_pod_isready:status:{} {}".format(type(status), status)) if status.container_statuses is None: if status.reason is None: # the Pod constainer status is not available yet return False err = K8S_POD_CONTAINER_STATUS_ERROR.format(status.reason, status.message) logging.error(err) return {'error': err} isready = status.container_statuses[0].ready logging.info("backup_pod_status:isReady: {} / {}".format(isready, seq)) return isready def pvc_isready(self, namespace, pvcname): response = self.pvc_status(namespace, pvcname) if isinstance(response, dict) and "error" in response: return response status = response.status logging.debug("pvc_isready:status:{}".format(status)) return status.phase == 'Bound' # Check if the pvc is terminating status. # This can be checked if the property 'metadata'>'deletion_timestamp' is not None def pvc_is_terminating(self, namespace, pvc): if not isinstance(pvc, dict): raise Exception('Error when try to get pvc status. PVC must be a `dict`') logging.debug('PVC is terminating status?. Namespace:{}.\nPVC Name:{}'.format(namespace,pvc.get('name'))) pvc_status = self._check_persistentvolume_claim_status(namespace, pvc.get('name')) logging.debug('Pvc status:{}'.format(pvc_status)) try: deletion_timestamp = pvc_status.metadata.deletion_timestamp logging.debug('Deletion_Timestamp:{}'.format(deletion_timestamp)) if deletion_timestamp is not None: return True return False except Exception as ex: logging.debug('Exception ocurrs:{}'.format(ex)) logging.exception(ex) logging.error('Had a error when try to get deletion_timestamp of pvc status') return True # Check if the pvc is pending status. # This can be checked if the property 'status'>'phase' is Pending def pvc_is_pending(self, namespace, pvc): if not isinstance(pvc, dict): raise Exception('Error when try to get pvc status. PVC must be a `dict`') logging.debug('PVC is pending status?. 
Namespace:{}.\nPVC Name:{}'.format(namespace,pvc.get('name'))) pvc_status = self._check_persistentvolume_claim_status(namespace, pvc.get('name')) # logging.debug('Pvc status:{}'.format(pvc_status)) try: current_status = pvc_status.status.phase if current_status == 'Pending': return True return False except Exception as ex: logging.debug('Exception ocurrs:{}'.format(ex)) logging.exception(ex) logging.error('Had a error when try to get pvc status') return True def remove_backup_pod(self, namespace, podname): logging.debug('remove_backup_pod') response = self.__execute(lambda: self.corev1api.delete_namespaced_pod( podname, namespace, grace_period_seconds=0, propagation_policy='Foreground')) if isinstance(response, dict) and "error" in response: return response return {} def remove_pvcclone(self, namespace, clonename): logging.debug('Remove pvcclone `{}`'.format(clonename)) response = self.delete_persistent_volume_claim(namespace, clonename) logging.debug("Response from delete pvc: {}".format(response)) if isinstance(response, dict) and "error" in response: return response r1 = self.get_pvcdata_namespaced(namespace, clonename) wait_until_resource_is_ready(lambda: 'error' in self.get_pvcdata_namespaced(namespace, clonename)) r2 = self.get_pvcdata_namespaced(namespace, clonename) logging.debug('PVC removed `{}`'.format(clonename)) return {} def remove_vsnapshot(self, namespace, vsnapshot_name): logging.debug('remove_vsnapshot `{}`'.format(vsnapshot_name)) response = self.__execute(lambda: self.crd_api.delete_namespaced_custom_object(**prepare_snapshot_action(namespace, vsnapshot_name))) logging.debug('Response remove_vsnapshot {}'.format(response)) if isinstance(response, dict) and "error" in response: return response logging.debug('Volume Snapshot removed `{}`'.format(vsnapshot_name)) return {} def check_gone_backup_pod(self, namespace, backup_pod_name, force=False): """ Checks if `pod_name` at selected namespace is already running. If not then we can proceed with Job. If it terminated but not removed then we will safely remove it. Args: namespace (str): namespace for Pod force (bool, optional): when we want to remove pod in any state. Defaults to False. Returns: bool: True if Pod not exist else False """ # TODO: Refactor this method as it does more then described! status = None gone = False try: status = self.backup_pod_status(namespace, backup_pod_name) except ApiException as e: if e.status == HTTP_NOT_FOUND: gone = True return True finally: logging.info("check_gone_backup_pod:gone:" + str(gone)) if status is not None and (force or status.status.phase not in ['Pending', 'Running']): response = self.remove_backup_pod(namespace, backup_pod_name) if isinstance(response, dict) and 'error' in response: # propagate error up return response return False def check_gone_pvcclone(self, namespace, clonename, force=False): response = self.pvc_status(namespace, clonename) if isinstance(response, dict) and "error" in response: logging.info("pvc status: {}".format(response)) if response.get('error_code') != HTTP_NOT_FOUND: return response else: return True status = response.status logging.info("status: {}".format(status)) if status is not None and status.phase != 'Pending': response = self.remove_pvcclone(namespace, clonename) if isinstance(response, dict) and 'error' in response: # propagate error up return response return False class FileContentAdapter: """ This Iterator Class is used to upload Object Segments. It is used by the Python-swiftclient connection API, and dependes upon a File Stream. 
It should read from the File Stream until segment_size is reached. """ def __init__(self, stream, segment_size): self.stream = stream self.segment_size = segment_size self.should_stop = False self.resend_segment = False self.current_segment = None def tell(self): """ Called by Python-swiftclient to resend data in case of error """ self.resend_segment = True pass def seek(self, pos): """ Called by Python-swiftclient to resend data in case of error """ self.resend_segment = True pass def __iter__(self): return self def __next__(self): """ Called by Python-swiftclient to retrieve the Object Content """ if self.should_stop: raise StopIteration if self.resend_segment and self.current_segment is not None: self.resend_segment = False return self.current_segment next_segment = b'' next_segment_size = 0 while True: chunk = self.stream.read() if not chunk: self.should_stop = True break next_segment += chunk next_segment_size += len(chunk) if next_segment_size >= self.segment_size: self.should_stop = True break if next_segment_size == 0: raise StopIteration self.current_segment = next_segment return next_segment bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/__init__.py0000644000175000017500000000126214771010173025246 0ustar bsbuildbsbuild# -*- coding: UTF-8 -*- # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. from . import plugins bacula-15.0.3/src/plugins/fd/kubernetes-backend/baculak8s/main.py0000644000175000017500000000404214771010173024432 0ustar bsbuildbsbuild#!/usr/bin/env python3 # Bacula(R) - The Network Backup Solution # # Copyright (C) 2000-2022 Kern Sibbald # # The original author of Bacula is Kern Sibbald, with contributions # from many others, a complete list can be found in the file AUTHORS. # # You may use this file and others of this release according to the # license defined in the LICENSE file, which includes the Affero General # Public License, v3.0 ("AGPLv3") and some additional permissions and # terms pursuant to its AGPLv3 Section 7. # # This notice must be preserved when any source code is # conveyed and/or propagated. # # Bacula(R) is a registered trademark of Kern Sibbald. 
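# Overview of the backend's control flow (see main() below): the handshake identifies the plugin,
# the job info and plugin parameters are read and merged, the plugin and job objects are built by
# their factories, the job is executed, and JobEndService closes the session; any unexpected
# exception is logged and turned into exit code 1, while a clean SystemExit yields exit code 0.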
from baculak8s.io.log import Log, LogConfig from baculak8s.jobs.job_factory import JobFactory from baculak8s.plugins.plugin_factory import PluginFactory from baculak8s.services.handshake_service import HandshakeService from baculak8s.services.job_end_service import JobEndService from baculak8s.services.job_info_service import JobInfoService from baculak8s.services.plugin_params_service import PluginParamsService from baculak8s.services.unexpected_error_service import UnexpectedErrorService from baculak8s.util.dict_util import merge_two_dicts def main(): try: LogConfig.start() plugin_name = HandshakeService().execute() job_info = JobInfoService().execute() plugin_params = PluginParamsService(job_info).execute() LogConfig.handle_params(job_info, plugin_params) merged_params = merge_two_dicts(job_info, plugin_params) plugin = PluginFactory.create(plugin_name, merged_params) job = JobFactory.create(merged_params, plugin) job.execute() JobEndService(merged_params, plugin).execute() except Exception as E: Log.save_exception(E) UnexpectedErrorService().execute() exit_code = 1 Log.save_exit_code(exit_code) return exit_code except SystemExit: pass exit_code = 0 Log.save_exit_code(exit_code) return exit_code if __name__ == '__main__': main() bacula-15.0.3/src/plugins/fd/kubernetes-backend/README0000644000175000017500000000050414771010173022136 0ustar bsbuildbsbuildThis is a Kubernetes Backend for Bacula Enterprise Author: Radosław Korzeniewski (c) 2019,2020 (c) Bacula Systems SA ---------- Build and Installation ---------- To build and install the project: $ python3 setup.py build $ python3 setup.py install (From the projects root folder) To run the project: $ k8s_backend bacula-15.0.3/src/plugins/fd/kubernetes-backend/docker/0000755000175000017500000000000014771010173022526 5ustar bsbuildbsbuildbacula-15.0.3/src/plugins/fd/kubernetes-backend/docker/Dockerfile0000644000175000017500000000153714771010173024526 0ustar bsbuildbsbuildFROM debian:bullseye COPY ./requirements.txt /mnt/ RUN apt update && \ DEBIAN_FRONTEND=noninteractive apt install -y build-essential postgresql libpq-dev libwrap0-dev python3 nano cython3 python3-pip openssh-client rsync libssl-dev && \ pip install pyinstaller && \ pip install -r /mnt/requirements.txt COPY ./docker-entrypoint.sh / RUN chmod +x /docker-entrypoint.sh WORKDIR /mnt/regress/ ENTRYPOINT ["/docker-entrypoint.sh"] # To compile: # Add Makefile rule: # async: # rsync -av -i ../bacula/src/ build/src/ # Change bacula directory in config file to: /mnt/regress/ # Execute: # make async && -C build/src/plugins/fd install-kubernetes && make -C build/src/plugins/fd/kubernetes-backend clean && make -C build/src/plugins/fd/kubernetes-backend install-kubernetes # Binary in: ~/regress/bin/k8s_backendbacula-15.0.3/src/plugins/fd/kubernetes-backend/docker/requirements.txt0000644000175000017500000000160214771010173026011 0ustar bsbuildbsbuild# -*- coding: UTF-8 -*- # # Bacula® - The Network Backup Solution # # Copyright (C) 2000-2019 Bacula Systems SA # All rights reserved. # # The main author of Bacula is Kern Sibbald, with contributions from many # others, a complete list can be found in the file AUTHORS. # # Licensees holding a valid Bacula Systems SA license may use this file # and others of this release in accordance with the proprietary license # agreement provided in the LICENSE file. Redistribution of any part of # this release is not permitted. # # Bacula® is a registered trademark of Kern Sibbald. # # Copyright (c) 2019 by Inteos sp. z o.o. # All rights reserved. 
IP transfered to Bacula Systems according to agreement. # Author: Radosław Korzeniewski, radekk@inteos.pl, Inteos Sp. z o.o. # pyyaml == 6.0 kubernetes <= 26.1.0 urllib3 == 1.26.16 requests == 2.27.1 bacula-15.0.3/src/plugins/fd/kubernetes-backend/docker/docker-entrypoint.sh0000755000175000017500000000113414771010173026544 0ustar bsbuildbsbuild#!/bin/bash TMP_DIR="/mnt/regress/tmp" if [ ! -d "$TMP_DIR" ]; then make setup cat </mnt/regress/k8s_compile #!/bin/bash make async printf "\n%s\n" "Make async done." make -C build/src/plugins/fd install-kubernetes printf "\n%s\n" "Compiled k8s fd." make -C build/src/plugins/fd/kubernetes-backend clean printf "\n%s\n" "Cleaned k8s binary." make -C build/src/plugins/fd/kubernetes-backend install-kubernetes printf "\n%s\n" "Compiled k8s binary." EOF chmod +x /mnt/regress/k8s_compile else echo "The directory /mnt/regress/tmp already exists. So, we do not make setup" fi /bin/bash bacula-15.0.3/src/plugins/fd/kubernetes-backend/docker/README0000644000175000017500000000472514771010173023416 0ustar bsbuildbsbuild# Target ## Ubuntu/Debian This docker image is to compile k8s plugin independent of ubuntu/debian platform. I had problems with libc library, so to isolate this problem, we create this docker image. If you want change this version, modify the image tag and dockerfile FROM. For example: `FROM debian:bullseye` to `FROM ubuntu:jammy` **Note:** The difference between Ubuntu and Debian in this compilation is that `rm` binary is allocated in other place: - Ubuntu: `/usr/bin/rm` - Debian: `/bin/rm` So, the Makefile will be modified to avoid errors. ## RHEL (Rocky 9) This docker image is to compile k8s plugin independent of RHEL platforms. # Create the Docker image We need copy the requirements to docker context folder. For this reason, we do: ```bash cp -a ../requirements.txt . ``` ```bash docker build -t k8s_compilation:bullseye . ``` ```bash docker build -t k8s_compilation:jammy . ``` ```bash docker build -t k8s_compilation:rocky9 -f Dockerfile-rpm . ``` # Pre-Run docker container To compile in docker container, we need modify the `regress config file` to adapt. Add BACULA_SOURCE: ```bash # Where to get the source to be tested BACULA_SOURCE="/mnt/bacula/" ``` Discomment --disable-sd-dedup: ```bash # DEDUP allows to disable deduplication on the SD # If you don't have TokyoCabinet DEDUP=--disable-sd-dedup #DEDUP= ``` Set postgresql database: ```bash # Set your database here #WHICHDB="--with-sqlite3=${SQLITE3_DIR}" WHICHDB="--with-postgresql" #WHICHDB="--with-mysql" ``` We need compile with this config because unless we can't compile k8s. Also we need modify the `regress/Makefile` and add the next rule at the end of file: ``` async: rsync -av -i ../bacula/src/ build/src/ ``` # Run docker container In root project (bacula-bee folder), execute (Depend on platform): ```bash docker run --rm -it -v $(pwd):/mnt k8s_compilation:bullseye /bin/bash ``` ```bash docker run --rm -it -v $(pwd):/mnt k8s_compilation:jammy /bin/bash ``` ```bash docker run --rm -it -v $(pwd):/mnt k8s_compilation:rocky9 /bin/bash ``` # Compile k8s plugin Execute: ```bash make async ./k8s_compile ``` This `./k8s_compile` binary will do: ```bash make -C build/src/plugins/fd install-kubernetes ``` ```bash make -C build/src/plugins/fd/kubernetes-backend clean ``` ```bash make -C build/src/plugins/fd/kubernetes-backend install-kubernetes ``` # Get the k8s_backend binary Will be in `~/regress/bin/k8s_backend`. 
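For example (the destination path below is only an illustration; use whatever matches your installation):

```bash
# Copy the freshly built backend from the regress tree to its final location
cp ~/regress/bin/k8s_backend /opt/bacula/bin/
```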
The last action to do in this file is change the permissions or owner.bacula-15.0.3/src/plugins/fd/kubernetes-backend/Makefile0000644000175000017500000000256714771010173022731 0ustar bsbuildbsbuild# # Master Makefile # # Copyright (C) 2000-2020 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # include ../Makefile.inc CWD=$(shell pwd) PIP_PROG=$(shell ./get_python PIP) PYTHON_PROG=$(shell ./get_python PYTHON) PYTHONPATH=$(shell ./get_python PYTHONPATH) PYTHON_PREFIX=$(shell ./get_python PYTHON_PREFIX) PIP_MAJOR_VERSION=$(shell $(PIP_PROG) --version | $(AWK) '{print $$2}' | $(AWK) -v FS=\. '{print $$1}') PIP_OPTIONS = --user # Implemented PEP 668: https://pip.pypa.io/en/stable/news/#v23-0-1 we need --break-system-packages option. ifeq ($(shell test $(PIP_MAJOR_VERSION) -ge 23; echo $$?),0) PIP_OPTIONS := ${PIP_OPTIONS} --break-system-packages endif all: pbuild clean: @find . -name '__pycache__' -print | xargs -r rm -r @$(PYTHON_PROG) setup.py clean dist-clean: clean @rm -rf build dist compile.py *.spec install-deps build: pbuild pbuild: @$(PYTHON_PROG) setup.py build install: build $(MAKE) -C ../ install-kubernetes @$(PYTHON_PROG) setup.py install install-deps: requirements.txt PYTHONPATH=$(PYTHONPATH) $(PIP_PROG) install $(PIP_OPTIONS) -r requirements.txt touch install-deps binary: install-deps find baculak8s -name '*.py' | ./mkExt.pl > compile.py PYTHONPATH=$(PYTHONPATH) $(PYTHON_PROG) compile.py build $(CWD)/make_bin install-bin: install-kubernetes install-kubernetes: binary $(INSTALL_PROGRAM) dist/k8s_backend $(DESTDIR)$(sbindir) bacula-15.0.3/src/plugins/fd/rot13-fd.c0000644000175000017500000002647314771010173017242 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* * A simple Options{} plugin using rot13 encoding */ #define BUILD_PLUGIN #include "bacula.h" #include "fd_plugins.h" #include "fd_common.h" #include "findlib/bfile.h" /* for bopen/bclose/bread/bwrite */ #ifdef __cplusplus extern "C" { #endif static const int dbglvl = 100; static char *working; #define PLUGIN_LICENSE "AGPLv3" #define PLUGIN_AUTHOR "Eric Bollengier" #define PLUGIN_DATE "August 2011" #define PLUGIN_VERSION "1" #define PLUGIN_DESCRIPTION "Bacula Rot13 Plugin" /* Forward referenced functions */ static bRC newPlugin(bpContext *ctx); static bRC freePlugin(bpContext *ctx); static bRC getPluginValue(bpContext *ctx, pVariable var, void *value); static bRC setPluginValue(bpContext *ctx, pVariable var, void *value); static bRC handlePluginEvent(bpContext *ctx, bEvent *event, void *value); static bRC startBackupFile(bpContext *ctx, struct save_pkt *sp); static bRC handleBackupFile(bpContext *ctx, struct save_pkt *sp); static bRC endBackupFile(bpContext *ctx); static bRC pluginIO(bpContext *ctx, struct io_pkt *io); static bRC startRestoreFile(bpContext *ctx, const char *cmd); static bRC endRestoreFile(bpContext *ctx); static bRC createFile(bpContext *ctx, struct restore_pkt *rp); static bRC setFileAttributes(bpContext *ctx, struct restore_pkt *rp); static bRC checkFile(bpContext *ctx, char *fname); /* Pointers to Bacula functions */ static bFuncs *bfuncs = NULL; static bInfo *binfo = NULL; /* Plugin Information block */ static pInfo pluginInfo = { sizeof(pluginInfo), FD_PLUGIN_INTERFACE_VERSION, FD_PLUGIN_MAGIC, PLUGIN_LICENSE, PLUGIN_AUTHOR, PLUGIN_DATE, PLUGIN_VERSION, PLUGIN_DESCRIPTION }; /* Plugin entry points for Bacula */ static pFuncs pluginFuncs = { sizeof(pluginFuncs), FD_PLUGIN_INTERFACE_VERSION, /* Entry points into plugin */ newPlugin, /* new plugin instance */ freePlugin, /* free plugin instance */ getPluginValue, setPluginValue, handlePluginEvent, startBackupFile, endBackupFile, startRestoreFile, endRestoreFile, pluginIO, createFile, setFileAttributes, checkFile }; #define get_self(x) (((x)==NULL)?NULL:(rot13*)((x)->pContext)) /* Plugin instance class */ class rot13: public SMARTALLOC { private: bpContext *ctx; public: BFILE fd; /* Main file descriptor read/write */ rot13(bpContext *bpc) { binit(&fd); } ~rot13() { } /* Wait to be called to allocate memory */ void init_mem() { } }; /* * loadPlugin() and unloadPlugin() are entry points that are * exported, so Bacula can directly call these two entry points * they are common to all Bacula plugins. */ /* * External entry point called by Bacula to "load the plugin */ bRC DLL_IMP_EXP loadPlugin(bInfo *lbinfo, bFuncs *lbfuncs, pInfo **pinfo, pFuncs **pfuncs) { bfuncs = lbfuncs; /* set Bacula funct pointers */ binfo = lbinfo; *pinfo = &pluginInfo; /* return pointer to our info */ *pfuncs = &pluginFuncs; /* return pointer to our functions */ bfuncs->getBaculaValue(NULL, bVarWorkingDir, (void *)&working); return bRC_OK; } /* * External entry point to unload the plugin */ bRC DLL_IMP_EXP unloadPlugin() { // Dmsg(NULL, dbglvl, "Unloaded\n"); return bRC_OK; } /* * The following entry points are accessed through the function * pointers we supplied to Bacula. Each plugin type (dir, fd, sd) * has its own set of entry points that the plugin must define. */ /* * Create a new instance of the plugin i.e. 
allocate our private storage */ static bRC newPlugin(bpContext *ctx) { rot13 *self = New(rot13(ctx)); if (!self) { return bRC_Error; } ctx->pContext = (void *)self; /* set our context pointer */ return bRC_OK; } /* * Free a plugin instance, i.e. release our private storage */ static bRC freePlugin(bpContext *ctx) { if (!ctx) { return bRC_Error; } rot13 *self = get_self(ctx); if (!self) { return bRC_Error; } delete self; return bRC_OK; } /* * Return some plugin value (none defined) */ static bRC getPluginValue(bpContext *ctx, pVariable var, void *value) { return bRC_OK; } /* * Set a plugin value (none defined) */ static bRC setPluginValue(bpContext *ctx, pVariable var, void *value) { return bRC_OK; } /* * Handle an event that was generated in Bacula */ static bRC handlePluginEvent(bpContext *ctx, bEvent *event, void *value) { if (!ctx) { return bRC_Error; } rot13 *self = get_self(ctx); if (!self) { return bRC_Error; } /* * Most events don't interest us so we ignore them. * the printfs are so that plugin writers can enable them to see * what is really going on. */ switch (event->eventType) { case bEventHandleBackupFile: return handleBackupFile(ctx, (struct save_pkt *)value); case bEventPluginCommand: break; case bEventVssPrepareSnapshot: break; case bEventJobStart: // Dmsg(ctx, dbglvl, "JobStart=%s\n", (char *)value); break; case bEventJobEnd: // Dmsg(ctx, dbglvl, "JobEnd\n"); break; case bEventStartBackupJob: // Dmsg(ctx, dbglvl, "StartBackupJob\n"); break; case bEventEndBackupJob: // Dmsg(ctx, dbglvl, "EndBackupJob\n"); break; case bEventLevel: // Dmsg(ctx, dbglvl, "JobLevel=%c %d\n", (int)value, (int)value); break; case bEventSince: // Dmsg(ctx, dbglvl, "since=%d\n", (int)value); break; case bEventStartRestoreJob: // Dmsg(ctx, dbglvl, "StartRestoreJob\n"); break; case bEventEndRestoreJob: // Dmsg(ctx, dbglvl, "EndRestoreJob\n"); break; /* Plugin command e.g. 
plugin = ::read command:write command */ case bEventRestoreCommand: // Dmsg(ctx, dbglvl, "EventRestoreCommand cmd=%s\n", (char *)value); self->init_mem(); break; case bEventEstimateCommand: /* Fall-through wanted */ case bEventBackupCommand: /* Plugin = "rot13" */ return bRC_Error; default: // Dmsg(ctx, dbglvl, "unknown event=%d\n", event->eventType); break; } return bRC_OK; } /* * Start the backup of a specific file */ static bRC startBackupFile(bpContext *ctx, struct save_pkt *sp) { return bRC_Error; } /* * Used by Options { Plugin= } */ static bRC handleBackupFile(bpContext *ctx, struct save_pkt *sp) { /* You can add custom filter here, the plugin command line * is in sp->cmd */ int len = strlen(sp->fname); if (len > 4) { Dmsg(ctx, dbglvl, "rot13: fname=%s ext=%s\n", sp->fname, sp->fname + len - 4); if (!strcmp(sp->fname + len - 4, ".txt")) { return bRC_OK; } } return bRC_Core; } /* * Done with backup of this file */ static bRC endBackupFile(bpContext *ctx) { /* Should not be called */ return bRC_Error; } /* * Bacula is calling us to do the actual I/O */ static bRC pluginIO(bpContext *ctx, struct io_pkt *io) { if (!ctx) { return bRC_Error; } rot13 *self = get_self(ctx); if (!self) { return bRC_Error; } io->status = 0; io->io_errno = 0; switch(io->func) { case IO_OPEN: Dmsg(ctx, dbglvl, "rot13: opening the file\n"); if (bopen(&self->fd, io->fname, io->flags, io->mode) < 0) { io->io_errno = errno; io->status = -1; Jmsg(ctx, M_ERROR, "Open failed: fname=%s ERR=%s\n", io->fname, strerror(errno)); return bRC_Error; } io->status = self->fd.fid; break; case IO_READ: if (!is_bopen(&self->fd)) { Jmsg(ctx, M_ERROR, "Logic error: NULL read FD\n"); return bRC_Error; } /* Read data from file */ io->status = bread(&self->fd, io->buf, io->count); if (io->status == 0 && self->fd.berrno) { Jmsg(ctx, M_ERROR, "Pipe read error: ERR=%s\n", strerror(self->fd.berrno)); Dmsg(ctx, dbglvl, "Pipe read error: ERR=%s\n", strerror(self->fd.berrno)); return bRC_Error; } for (int i=0 ; i < io->status ; i++) { io->buf[i] += 13; } break; case IO_WRITE: Dmsg(ctx, dbglvl, "rot13: writing\n"); if (!is_bopen(&self->fd)) { Jmsg(ctx, M_ERROR, "Logic error: NULL write FD\n"); return bRC_Error; } for (int i=0 ; i < io->count ; i++) { io->buf[i] -= 13; } io->status = bwrite(&self->fd, io->buf, io->count); if (io->status == 0 && self->fd.berrno != 0) { Jmsg(ctx, M_ERROR, "Write error\n"); Dmsg(ctx, dbglvl, "Write error: ERR=%s\n", strerror(self->fd.berrno)); return bRC_Error; } break; /* Cleanup things during close */ case IO_CLOSE: Dmsg(ctx, dbglvl, "rot13: closing\n"); if (!is_bopen(&self->fd)) { //Jmsg(ctx, M_ERROR, "Logic error: NULL FD on rot13 close\n"); return bRC_Error; } io->status = bclose(&self->fd); break; case IO_SEEK: if (!is_bopen(&self->fd)) { Jmsg(ctx, M_ERROR, "Logic error: NULL FD on rot13 seek\n"); return bRC_Error; } /* Seek not needed for this plugin, we don't use real sparse file */ io->status = 0; break; } return bRC_OK; } /* * Bacula is notifying us that a plugin name string was found, and * passing us the plugin command, so we can prepare for a restore. */ static bRC startRestoreFile(bpContext *ctx, const char *cmd) { // Dmsg(ctx, dbglvl, "startRestoreFile cmd=%s\n", cmd); return bRC_OK; } /* * Bacula is notifying us that the plugin data has terminated, so * the restore for this particular file is done. 
*/ static bRC endRestoreFile(bpContext *ctx) { // Dmsg(ctx, dbglvl, "endRestoreFile\n"); return bRC_OK; } /* * This is called during restore to create the file (if necessary) * We must return in rp->create_status: * * CF_ERROR -- error * CF_CORE -- let bacula's core handles the file creation * CF_SKIP -- skip processing this file * CF_EXTRACT -- extract the file (i.e.call i/o routines) * CF_CREATED -- created, but no content to extract (typically directories) * */ static bRC createFile(bpContext *ctx, struct restore_pkt *rp) { rp->create_status = CF_CORE; return bRC_OK; } /* * We will get here if the File is a directory after everything * is written in the directory. */ static bRC setFileAttributes(bpContext *ctx, struct restore_pkt *rp) { // Dmsg(ctx, dbglvl, "setFileAttributes\n"); rp->create_status = CF_CORE; return bRC_OK; } /* TODO: cleanup database file with deleted files */ static bRC checkFile(bpContext *ctx, char *fname) { /* TODO: Remove working/fname.* */ return bRC_OK; } #ifdef __cplusplus } #endif bacula-15.0.3/src/plugins/fd/test-deltaseq-fd.c0000644000175000017500000003273314771010173021045 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * A simple delta plugin for the Bacula File Daemon * * */ #include "bacula.h" #include "fd_plugins.h" #include "fd_common.h" #undef malloc #undef free #undef strdup #ifdef __cplusplus extern "C" { #endif static const int dbglvl = 0; #define PLUGIN_LICENSE "AGPLv3" #define PLUGIN_AUTHOR "Eric Bollengier" #define PLUGIN_DATE "November 2010" #define PLUGIN_VERSION "1" #define PLUGIN_DESCRIPTION "Bacula Delta Test Plugin" /* Forward referenced functions */ static bRC newPlugin(bpContext *ctx); static bRC freePlugin(bpContext *ctx); static bRC getPluginValue(bpContext *ctx, pVariable var, void *value); static bRC setPluginValue(bpContext *ctx, pVariable var, void *value); static bRC handlePluginEvent(bpContext *ctx, bEvent *event, void *value); static bRC startBackupFile(bpContext *ctx, struct save_pkt *sp); static bRC endBackupFile(bpContext *ctx); static bRC pluginIO(bpContext *ctx, struct io_pkt *io); static bRC startRestoreFile(bpContext *ctx, const char *cmd); static bRC endRestoreFile(bpContext *ctx); static bRC createFile(bpContext *ctx, struct restore_pkt *rp); static bRC setFileAttributes(bpContext *ctx, struct restore_pkt *rp); static bRC checkFile(bpContext *ctx, char *fname); static bRC handleXACLdata(bpContext *ctx, struct xacl_pkt *xacl); /* Pointers to Bacula functions */ static bFuncs *bfuncs = NULL; static bInfo *binfo = NULL; /* Plugin Information block */ static pInfo pluginInfo = { sizeof(pluginInfo), FD_PLUGIN_INTERFACE_VERSION, FD_PLUGIN_MAGIC, PLUGIN_LICENSE, PLUGIN_AUTHOR, PLUGIN_DATE, PLUGIN_VERSION, PLUGIN_DESCRIPTION }; /* Plugin entry points for Bacula */ static pFuncs pluginFuncs = { sizeof(pluginFuncs), FD_PLUGIN_INTERFACE_VERSION, /* Entry points into plugin */ newPlugin, /* new plugin instance */ freePlugin, /* free plugin instance */ getPluginValue, 
setPluginValue, handlePluginEvent, startBackupFile, endBackupFile, startRestoreFile, endRestoreFile, pluginIO, createFile, setFileAttributes, checkFile, handleXACLdata }; #define get_self(x) ((delta_test*)((x)->pContext)) #define FO_DELTA (1<<28) /* Do delta on file */ #define FO_OFFSETS (1<<30) /* Keep block offsets */ class delta_test { private: bpContext *ctx; public: POOLMEM *fname; /* Filename to save */ int32_t delta; FILE *fd; bool done; int level; delta_test(bpContext *bpc) { fd = NULL; ctx = bpc; done = false; level = 0; delta = 0; fname = get_pool_memory(PM_FNAME); } ~delta_test() { free_and_null_pool_memory(fname); } }; /* * loadPlugin() and unloadPlugin() are entry points that are * exported, so Bacula can directly call these two entry points * they are common to all Bacula plugins. */ /* * External entry point called by Bacula to "load the plugin */ bRC loadPlugin(bInfo *lbinfo, bFuncs *lbfuncs, pInfo **pinfo, pFuncs **pfuncs) { bfuncs = lbfuncs; /* set Bacula funct pointers */ binfo = lbinfo; *pinfo = &pluginInfo; /* return pointer to our info */ *pfuncs = &pluginFuncs; /* return pointer to our functions */ /* Activate this plugin only in developer mode */ #ifdef DEVELOPER return bRC_OK; #else return bRC_Error; #endif } /* * External entry point to unload the plugin */ bRC unloadPlugin() { // Dmsg(NULL, dbglvl, "delta-test-fd: Unloaded\n"); return bRC_OK; } /* * The following entry points are accessed through the function * pointers we supplied to Bacula. Each plugin type (dir, fd, sd) * has its own set of entry points that the plugin must define. */ /* * Create a new instance of the plugin i.e. allocate our private storage */ static bRC newPlugin(bpContext *ctx) { delta_test *self = new delta_test(ctx); if (!self) { return bRC_Error; } ctx->pContext = (void *)self; /* set our context pointer */ return bRC_OK; } /* * Free a plugin instance, i.e. release our private storage */ static bRC freePlugin(bpContext *ctx) { delta_test *self = get_self(ctx); if (!self) { return bRC_Error; } delete self; return bRC_OK; } /* * Return some plugin value (none defined) */ static bRC getPluginValue(bpContext *ctx, pVariable var, void *value) { return bRC_OK; } /* * Set a plugin value (none defined) */ static bRC setPluginValue(bpContext *ctx, pVariable var, void *value) { return bRC_OK; } /* * Handle an event that was generated in Bacula */ static bRC handlePluginEvent(bpContext *ctx, bEvent *event, void *value) { delta_test *self = get_self(ctx); int accurate=0; if (!self) { return bRC_Error; } // char *name; /* * Most events don't interest us so we ignore them. * the printfs are so that plugin writers can enable them to see * what is really going on. 
*/ switch (event->eventType) { case bEventPluginCommand: // Dmsg(ctx, dbglvl, // "delta-test-fd: PluginCommand=%s\n", (char *)value); break; case bEventJobStart: // Dmsg(ctx, dbglvl, "delta-test-fd: JobStart=%s\n", (char *)value); break; case bEventJobEnd: // Dmsg(ctx, dbglvl, "delta-test-fd: JobEnd\n"); break; case bEventStartBackupJob: // Dmsg(ctx, dbglvl, "delta-test-fd: StartBackupJob\n"); break; case bEventEndBackupJob: // Dmsg(ctx, dbglvl, "delta-test-fd: EndBackupJob\n"); break; case bEventLevel: // Dmsg(ctx, dbglvl, "delta-test-fd: JobLevel=%c %d\n", (int)value, (int)value); self->level = (int)(intptr_t)value; break; case bEventSince: // Dmsg(ctx, dbglvl, "delta-test-fd: since=%d\n", (int)value); break; case bEventStartRestoreJob: // Dmsg(ctx, dbglvl, "delta-test-fd: StartRestoreJob\n"); break; case bEventEndRestoreJob: // Dmsg(ctx, dbglvl, "delta-test-fd: EndRestoreJob\n"); break; /* Plugin command e.g. plugin = ::read command:write command */ case bEventRestoreCommand: // Dmsg(ctx, dbglvl, "delta-test-fd: EventRestoreCommand cmd=%s\n", (char *)value); /* Fall-through wanted */ break; case bEventBackupCommand: Dmsg(ctx, dbglvl, "delta-test-fd: pluginEvent cmd=%s\n", (char *)value); if (self->level == 'I' || self->level == 'D') { bfuncs->getBaculaValue(ctx, bVarAccurate, (void *)&accurate); if (!accurate) { /* can be changed to FATAL */ Jmsg(ctx, M_FATAL, "Accurate mode should be turned on when using the " "delta-test plugin\n"); return bRC_Error; } } break; default: // Dmsg(ctx, dbglvl, "delta-test-fd: unknown event=%d\n", event->eventType); break; } return bRC_OK; } static const char *files[] = { "/etc/passwd", "/etc/group", "/etc/hosts", "/etc/services" }; static int nb_files = 4; /* * Start the backup of a specific file */ static bRC startBackupFile(bpContext *ctx, struct save_pkt *sp) { delta_test *self = get_self(ctx); if (!self) { return bRC_Error; } time_t now = time(NULL); sp->fname = (char *)"/delta.txt"; sp->type = FT_REG; sp->statp.st_mode = 0700 | S_IFREG; sp->statp.st_ctime = now; sp->statp.st_mtime = now; sp->statp.st_atime = now; sp->statp.st_size = -1; sp->statp.st_blksize = 4096; sp->statp.st_blocks = 1; if (self->level == 'I' || self->level == 'D') { bRC state = bfuncs->checkChanges(ctx, sp); /* Should always be bRC_OK */ sp->type = (state == bRC_Seen)? 
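/* bRC_Seen means the accurate data shows no change since the last backup, so record FT_NOCHG */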
FT_NOCHG : FT_REG; sp->flags |= (FO_DELTA|FO_OFFSETS); self->delta = sp->delta_seq + 1; } pm_strcpy(self->fname, files[self->delta % nb_files]); Dmsg(ctx, dbglvl, "delta-test-fd: delta_seq=%i delta=%i fname=%s\n", sp->delta_seq, self->delta, self->fname); // Dmsg(ctx, dbglvl, "delta-test-fd: startBackupFile\n"); return bRC_OK; } /* * Done with backup of this file */ static bRC endBackupFile(bpContext *ctx) { /* * We would return bRC_More if we wanted startBackupFile to be * called again to backup another file */ return bRC_OK; } /* * Bacula is calling us to do the actual I/O */ static bRC pluginIO(bpContext *ctx, struct io_pkt *io) { delta_test *self = get_self(ctx); struct stat statp; if (!self) { return bRC_Error; } io->status = 0; io->io_errno = 0; switch(io->func) { case IO_OPEN: Dmsg(ctx, dbglvl, "delta-test-fd: IO_OPEN\n"); if (io->flags & (O_CREAT | O_WRONLY)) { /* TODO: if the file already exists, the result is undefined */ if (stat(io->fname, &statp) == 0) { /* file exists */ self->fd = fopen(io->fname, "r+"); } else { self->fd = fopen(io->fname, "w"); /* file doesn't exist,create it */ } if (!self->fd) { io->io_errno = errno; Jmsg(ctx, M_FATAL, "Open failed: ERR=%s\n", strerror(errno)); return bRC_Error; } } else { self->fd = fopen(self->fname, "r"); if (!self->fd) { io->io_errno = errno; Jmsg(ctx, M_FATAL, "Open failed: ERR=%s\n", strerror(errno)); return bRC_Error; } } break; case IO_READ: if (!self->fd) { Jmsg(ctx, M_FATAL, "Logic error: NULL read FD\n"); return bRC_Error; } if (self->done) { io->status = 0; } else { /* first time, read 300, then replace 50-250 by other data */ if (self->delta == 0) { io->status = fread(io->buf, 1, 400, self->fd); } else { io->offset = self->delta * 100 / 2; /* chunks are melted */ io->status = fread(io->buf, 1, 100, self->fd); } Dmsg(ctx, dbglvl, "delta-test-fd: READ offset=%lld\n", (int64_t)io->offset); self->done = true; } if (io->status == 0 && ferror(self->fd)) { Jmsg(ctx, M_FATAL, "Pipe read error: ERR=%s\n", strerror(errno)); Dmsg(ctx, dbglvl, "Pipe read error: ERR=%s\n", strerror(errno)); return bRC_Error; } Dmsg(ctx, dbglvl, "offset=%d\n", io->offset); break; case IO_WRITE: if (!self->fd) { Jmsg(ctx, M_FATAL, "Logic error: NULL write FD\n"); return bRC_Error; } Dmsg(ctx, dbglvl, "delta-test-fd: WRITE count=%lld\n", (int64_t)io->count); io->status = fwrite(io->buf, 1, io->count, self->fd); if (io->status == 0 && ferror(self->fd)) { Jmsg(ctx, M_FATAL, "Pipe write error\n"); Dmsg(ctx, dbglvl, "Pipe read error: ERR=%s\n", strerror(errno)); return bRC_Error; } break; case IO_CLOSE: if (!self->fd) { Jmsg(ctx, M_FATAL, "Logic error: NULL FD on delta close\n"); return bRC_Error; } io->status = fclose(self->fd); break; case IO_SEEK: if (!self->fd) { Jmsg(ctx, M_FATAL, "Logic error: NULL FD on delta close\n"); return bRC_Error; } Dmsg(ctx, dbglvl, "delta-test-fd: SEEK offset=%lld\n", (int64_t)io->offset); io->status = fseek(self->fd, io->offset, io->whence); Dmsg(ctx, dbglvl, "after SEEK=%lld\n", (int64_t)ftell(self->fd)); break; } return bRC_OK; } /* * Bacula is notifying us that a plugin name string was found, and * passing us the plugin command, so we can prepare for a restore. */ static bRC startRestoreFile(bpContext *ctx, const char *cmd) { // Dmsg(ctx, dbglvl, "delta-test-fd: startRestoreFile cmd=%s\n", cmd); return bRC_OK; } /* * Bacula is notifying us that the plugin data has terminated, so * the restore for this particular file is done. 
*/ static bRC endRestoreFile(bpContext *ctx) { // Dmsg(ctx, dbglvl, "delta-test-fd: endRestoreFile\n"); return bRC_OK; } /* * This is called during restore to create the file (if necessary) * We must return in rp->create_status: * * CF_ERROR -- error * CF_SKIP -- skip processing this file * CF_EXTRACT -- extract the file (i.e.call i/o routines) * CF_CREATED -- created, but no content to extract (typically directories) * */ static bRC createFile(bpContext *ctx, struct restore_pkt *rp) { delta_test *self = get_self(ctx); pm_strcpy(self->fname, rp->ofname); rp->create_status = CF_EXTRACT; return bRC_OK; } /* * We will get here if the File is a directory after everything * is written in the directory. */ static bRC setFileAttributes(bpContext *ctx, struct restore_pkt *rp) { // Dmsg(ctx, dbglvl, "delta-test-fd: setFileAttributes\n"); return bRC_OK; } /* When using Incremental dump, all previous dumps are necessary */ static bRC checkFile(bpContext *ctx, char *fname) { return bRC_OK; } /* * New Bacula Plugin API require this */ static bRC handleXACLdata(bpContext *ctx, struct xacl_pkt *xacl) { return bRC_OK; } #ifdef __cplusplus } #endif bacula-15.0.3/src/plugins/fd/fd_common.h0000644000175000017500000006005714771010173017645 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* You can include this file to your plugin to have * access to some common tools and utilities provided by Bacula */ #ifndef PCOMMON_H #define PCOMMON_H #define JT_BACKUP 'B' /* Backup Job */ #define JT_RESTORE 'R' /* Restore Job */ #define L_FULL 'F' /* Full backup */ #define L_INCREMENTAL 'I' /* since last backup */ #define L_DIFFERENTIAL 'D' /* since last full backup */ #ifndef DLL_IMP_EXP # if defined(BUILDING_DLL) # define DLL_IMP_EXP __declspec(dllexport) # elif defined(USING_DLL) # define DLL_IMP_EXP __declspec(dllimport) # else # define DLL_IMP_EXP # endif #endif #ifdef SMARTALLOC DLL_IMP_EXP void *sm_malloc(const char *fname, int lineno, size_t nbytes); DLL_IMP_EXP void sm_free(const char *file, int line, void *fp); DLL_IMP_EXP void *reallymalloc(const char *fname, int lineno, size_t nbytes); DLL_IMP_EXP void reallyfree(const char *file, int line, void *fp); #ifndef bmalloc # define bmalloc(s) sm_malloc(__FILE__, __LINE__, (s)) # define bfree(o) sm_free(__FILE__, __LINE__, (o)) #endif #define SM_CHECK sm_check(__FILE__, __LINE__, false) #ifdef malloc #undef malloc #undef free #endif #define malloc(s) sm_malloc(__FILE__, __LINE__, (s)) #define free(o) sm_free(__FILE__, __LINE__, (o)) /* Looks to be broken on scientific linux */ #ifdef xxxx inline void *operator new(size_t size, char const * file, int line) { void *pnew = sm_malloc(file,line, size); memset((char *)pnew, 0, size); return pnew; } inline void *operator new[](size_t size, char const * file, int line) { void *pnew = sm_malloc(file, line, size); memset((char *)pnew, 0, size); return pnew; } inline void *operator new(size_t size) { void *pnew = sm_malloc(__FILE__, __LINE__, size); memset((char *)pnew, 0, size); return pnew; } inline void *operator new[](size_t size) { void *pnew = sm_malloc(__FILE__, __LINE__, size); memset((char *)pnew, 0, size); return pnew; } #define new new(__FILE__, __LINE__) inline void operator delete(void *buf) { sm_free( __FILE__, __LINE__, buf); } inline void operator delete[] (void *buf) { sm_free(__FILE__, __LINE__, buf); } inline void operator delete[] (void *buf, char const * file, int line) { sm_free(file, line, buf); } inline void operator delete(void *buf, char const * file, int line) { sm_free(file, line, buf); } #endif #endif /* !SMARTALLOC */ #ifdef UNITTESTS #define Dmsg(context, level, ...) printf( __VA_ARGS__ ) #define Jmsg(context, level, ...) printf( __VA_ARGS__ ) #else #ifdef COMMAND_LINE_TOOL #define dbglvl 10 #define Dmsg(context, level, ...) if (level <= debug_level) printf( __VA_ARGS__ ) #define Jmsg(context, level, ...) if (level <= debug_level) printf( __VA_ARGS__ ) #else #define Dmsg(context, level, ...) bfuncs->DebugMessage(context, __FILE__, __LINE__, level, __VA_ARGS__ ) #define Jmsg(context, type, ...) 
bfuncs->JobMessage(context, __FILE__, __LINE__, type, 0, __VA_ARGS__ ) #endif #endif #ifdef USE_CMD_PARSER #include "lib/cmd_parser.h" #endif /* USE_CMD_PARSER */ #ifdef USE_ADD_DRIVE /* Keep drive letters for windows vss snapshot */ static void add_drive(char *drives, int *nCount, char *fname) { if (strlen(fname) >= 2 && B_ISALPHA(fname[0]) && fname[1] == ':') { /* always add in uppercase */ char ch = toupper(fname[0]); /* if not found in string, add drive letter */ if (!strchr(drives,ch)) { drives[*nCount] = ch; drives[*nCount+1] = 0; (*nCount)++; } } } /* Copy our drive list to Bacula core list */ static void copy_drives(char *drives, char *dest) { int last = strlen(dest); /* dest is 27 bytes long */ for (char *p = drives; *p && last < 26; p++) { if (!strchr(dest, *p)) { dest[last++] = *p; dest[last] = 0; } } } #endif /* USE_ADD_DRIVE */ #endif /* ! PCOMMON_H */ #ifdef USE_JOB_LIST /* This class is used to store locally the job history, you can attach data * to it such as snapshot names * !!! Don't forget that this file may be deleted by the user. !!! */ class joblist: public SMARTALLOC { private: bpContext *ctx; bool return_jobname_on_prune; public: char level; /* level of the job */ POOLMEM *base; /* base name */ POOLMEM *key; /* group of backup */ POOLMEM *name; /* job name */ POOLMEM *prev; /* based on jobname */ POOLMEM *root; /* root of this branch */ POOLMEM *rootdiff; /* root of diff if any */ btime_t job_time; /* job time */ void init() { level = 0; job_time = 0; if (!base) { base = get_pool_memory(PM_FNAME); key = get_pool_memory(PM_FNAME); name = get_pool_memory(PM_FNAME); prev = get_pool_memory(PM_FNAME); root = get_pool_memory(PM_FNAME); rootdiff = get_pool_memory(PM_FNAME); } *key = *name = *prev = *root = *rootdiff = 0; set_base("jobs.dat"); ctx = NULL; return_jobname_on_prune = false; } void set_base(const char *b) { pm_strcpy(base, b); } /* If true when prune_job is called return jobnanme instead of job data in alist */ void set_return_jobname_on_prune(bool value) { return_jobname_on_prune = value; } joblist(bpContext *actx): ctx(NULL), level(0), base(NULL), key(NULL), name(NULL), prev(NULL), root(NULL), rootdiff(NULL), job_time(0) { init(); ctx = actx; } joblist(bpContext *actx, const char *akey, const char *jobname, const char *prevjobname, char joblevel): ctx(NULL), level(0), base(NULL), key(NULL), name(NULL), prev(NULL), root(NULL), rootdiff(NULL), job_time(0) { init(); ctx = actx; if (jobname) { pm_strcpy(name, jobname); } if (prevjobname) { pm_strcpy(prev, prevjobname); } level = joblevel; if (akey) { pm_strcpy(key, akey); } else { get_key_from_name(); } } ~joblist() { free_pool_memory(base); free_pool_memory(key); free_pool_memory(name); free_pool_memory(prev); free_pool_memory(root); free_pool_memory(rootdiff); } /* Will extract the name from the full job name */ bool get_key_from_name() { key = check_pool_memory_size(key, sizeof_pool_memory(name)); return get_key_from_name(ctx, name, key, sizeof_pool_memory(key)); }; /* Will extract the name from the full job name */ static bool get_key_from_name(bpContext *ctx, const char *n, char *d, int len) { // pluginTest.2012-07-19_16.59.21_11 int l = strlen(n); int dlen = 23; // strlen(".2012-07-19_16.59.21_11"); if (l > dlen) { /* we probably have a key */ int start = l - dlen; if (start + 1 > len) { Dmsg(ctx, dbglvl+100, "Key is too long\n"); return false; } if (n[start] == '.' 
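/* verify the start of the '.YYYY-MM-DD_...' timestamp suffix, digit by digit (see the example job name above) */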
&& B_ISDIGIT(n[start + 1]) && // 2 B_ISDIGIT(n[start + 2]) && // 0 B_ISDIGIT(n[start + 3]) && // 1 B_ISDIGIT(n[start + 4]) && // 2 n[start + 5] == '-' && // - B_ISDIGIT(n[start + 6]) && // 0 B_ISDIGIT(n[start + 7])) // 7 { bstrncpy(d, n, start + 1); Dmsg(ctx, dbglvl+100, "key is %s from jobname %s\n", d, n); return true; } } Dmsg(ctx, dbglvl+100, "Unable to get key from jobname %s\n", n); return false; }; bool find_job(const char *name, POOLMEM **data=NULL); /* set root, job_time */ bool find_root_job(); bool store_job(char *data); void prune_jobs(char *build_cmd(void *arg, const char *data, const char *job), void *arg, alist *jobs); }; static pthread_mutex_t joblist_mutex = PTHREAD_MUTEX_INITIALIZER; bool joblist::find_job(const char *name, POOLMEM **data) { FILE *f; POOLMEM *tmp; POOLMEM *buf; POOLMEM *curkey; /* key */ POOLMEM *curjobname; /* jobname */ POOLMEM *prevjob; /* last jobname */ POOLMEM *rootjob; /* root jobname */ char t[MAX_NAME_LENGTH]; /* store time */ char curlevel; bool ok=false; *root = 0; job_time = 0; *rootdiff = 0; tmp = get_pool_memory(PM_FNAME); buf = get_pool_memory(PM_FNAME); curkey = get_pool_memory(PM_FNAME); curjobname = get_pool_memory(PM_FNAME); prevjob = get_pool_memory(PM_FNAME); rootjob = get_pool_memory(PM_FNAME); Mmsg(tmp, "%s/%s", working, base); P(joblist_mutex); f = bfopen(tmp, "r"); if (!f) { berrno be; Jmsg(ctx, M_ERROR, "Unable to open job database. ERR=%s\n", be.bstrerror(errno)); goto bail_out; } while (!ok && bfgets(buf, f) != NULL) { curkey = check_pool_memory_size(curkey, sizeof_pool_memory(buf)); curjobname = check_pool_memory_size(curjobname, sizeof_pool_memory(buf)); rootjob = check_pool_memory_size(rootjob, sizeof_pool_memory(buf)); prevjob = check_pool_memory_size(prevjob, sizeof_pool_memory(buf)); *curkey = *curjobname = *rootjob = *prevjob = 0; Dmsg(ctx, dbglvl+100, "line = [%s]\n", buf); if (scan_string(buf, "time=%60s level=%c key=%s name=%s root=%s prev=%s", t, &curlevel, curkey, curjobname, rootjob, prevjob) != 6) { if (scan_string(buf, "time=%60s level=F key=%s name=%s", t, curkey, curjobname) != 3) { Dmsg(ctx, dbglvl+100, "Bad line l=[%s]\n", buf); continue; } } if (strcmp(name, curjobname) == 0 && strcmp(key, curkey) == 0) { job_time = str_to_uint64(t); pm_strcpy(root, rootjob); if (curlevel == 'D') { pm_strcpy(rootdiff, curjobname); } if (data) { pm_strcpy(data, strstr(buf, " vol=") + 5); strip_trailing_newline(*data); unbash_spaces(*data); } ok = true; Dmsg(ctx, dbglvl+100, "Found job root %s -> %s -> %s\n", rootdiff, root, curjobname); } } fclose(f); bail_out: V(joblist_mutex); free_pool_memory(buf); free_pool_memory(curkey); free_pool_memory(curjobname); free_pool_memory(prevjob); free_pool_memory(rootjob); free_pool_memory(tmp); return ok; } /* Find the root job for the current job */ bool joblist::find_root_job() { FILE *f; POOLMEM *tmp; POOLMEM *buf; POOLMEM *curkey; /* key */ POOLMEM *curjobname; /* jobname */ POOLMEM *prevjob; /* last jobname */ POOLMEM *rootjob; /* root jobname */ char t[MAX_NAME_LENGTH]; char curlevel; bool ok=false; *root = 0; job_time = 0; if (level == 'F') { pm_strcpy(root, name); return true; } buf = get_pool_memory(PM_FNAME); curkey = get_pool_memory(PM_FNAME); curjobname = get_pool_memory(PM_FNAME); prevjob = get_pool_memory(PM_FNAME); rootjob = get_pool_memory(PM_FNAME); tmp = get_pool_memory(PM_FNAME); Mmsg(tmp, "%s/%s", working, base); P(joblist_mutex); f = bfopen(tmp, "r"); if (!f) { berrno be; Jmsg(ctx, M_ERROR, "Unable to prune previous jobs. 
ERR=%s\n", be.bstrerror(errno)); goto bail_out; } while (!ok && bfgets(buf, f) != NULL) { curkey = check_pool_memory_size(curkey, sizeof_pool_memory(buf)); curjobname = check_pool_memory_size(curjobname, sizeof_pool_memory(buf)); rootjob = check_pool_memory_size(rootjob, sizeof_pool_memory(buf)); prevjob = check_pool_memory_size(prevjob, sizeof_pool_memory(buf)); *curkey = *curjobname = *rootjob = *prevjob = 0; Dmsg(ctx, dbglvl+100, "line = [%s]\n", buf); if (scan_string(buf, "time=%60s level=%c key=%s name=%s root=%s prev=%s", t, &curlevel, curkey, curjobname, rootjob, prevjob) != 6) { if (scan_string(buf, "time=%60s level=F key=%s name=%s", t, curkey, curjobname) == 3) { pm_strcpy(rootjob, curjobname); *prevjob = 0; curlevel = 'F'; } else { Dmsg(ctx, dbglvl+100, "Bad line l=[%s]\n", buf); continue; } } if (strcmp(key, curkey) == 0 && strcmp(prev, curjobname) == 0) { pm_strcpy(root, rootjob); if (curlevel == 'D') { pm_strcpy(rootdiff, curjobname); } ok = true; Dmsg(ctx, dbglvl+100, "Found job root %s -> %s -> %s\n", rootdiff, root, curjobname); } } fclose(f); bail_out: V(joblist_mutex); free_pool_memory(tmp); free_pool_memory(buf); free_pool_memory(curkey); free_pool_memory(curjobname); free_pool_memory(prevjob); free_pool_memory(rootjob); return true; } /* Store the current job in the jobs.dat for a specific data list */ bool joblist::store_job(char *data) { FILE *fp; int l; POOLMEM *tmp = NULL; btime_t now; bool ret = true; /* Not initialized, no need to store jobs */ if (*name == 0 || !level) { Dmsg(ctx, dbglvl+100, "store_job fail name=%s level=%d\n", name, level); return false; } find_root_job(); P(joblist_mutex); tmp = get_pool_memory(PM_FNAME); Mmsg(tmp, "%s/%s", working, base); fp = bfopen(tmp, "a+"); if (!fp) { berrno be; Jmsg(ctx, M_ERROR, "Unable to update the job history. ERR=%s\n", be.bstrerror(errno)); ret = false; goto bail_out; } now = time(NULL); bash_spaces(data); if (level == 'F') { l = fprintf(fp, "time=%lld level=%c key=%s name=%s vollen=%d vol=%s\n", (long long)now, level, key, name, (int)strlen(data), data); } else { l = fprintf(fp, "time=%lld level=%c key=%s name=%s root=%s prev=%s vollen=%d vol=%s\n", (long long)now, level, key, name, root, prev, (int)strlen(data), data); } unbash_spaces(data); if (l < 0) { berrno be; Jmsg(ctx, M_ERROR, "Unable to update the job history. 
ERR=%s\n", be.bstrerror(errno)); ret = false; } if (fclose(fp) != 0) { ret = false; } bail_out: V(joblist_mutex); free_pool_memory(tmp); return ret; } /* Prune jobs at the end of the job, this function can generate commands * in order to cleanup something */ void joblist::prune_jobs(char *build_cmd(void *arg, const char *data, const char *job), void *arg, alist *jobs) { FILE *fout; FILE *f=NULL; POOLMEM *tmp; POOLMEM *tmpout; POOLMEM *data; POOLMEM *buf; POOLMEM *curkey; /* key */ POOLMEM *curjobname; /* jobname */ POOLMEM *prevjob; /* last jobname */ POOLMEM *rootjob; /* root jobname */ char t[MAX_NAME_LENGTH]; uint32_t datalen; char curlevel; bool keep; bool ok=false; int count=0; /* In Incremental, it means that the previous Full/Diff is well terminated */ if (level != 'I') { return; } find_root_job(); tmp = get_pool_memory(PM_FNAME); Mmsg(tmp, "%s/%s", working, base); tmpout = get_pool_memory(PM_FNAME); Mmsg(tmpout, "%s/%s.swap", working, base); buf = get_pool_memory(PM_FNAME); data = get_pool_memory(PM_FNAME); curkey = get_pool_memory(PM_FNAME); curjobname = get_pool_memory(PM_FNAME); prevjob = get_pool_memory(PM_FNAME); rootjob = get_pool_memory(PM_FNAME); *curkey = *curjobname = *prevjob = *rootjob = *buf = *data = 0; P(joblist_mutex); fout = bfopen(tmpout, "w"); if (!fout) { berrno be; Jmsg(ctx, M_ERROR, "Unable to prune previous jobs. " "Can't open %s for writing ERR=%s\n", tmpout, be.bstrerror(errno)); goto bail_out; } f = bfopen(tmp, "r"); if (!f) { berrno be; Jmsg(ctx, M_ERROR, "Unable to prune previous jobs. ERR=%s\n", be.bstrerror(errno)); goto bail_out; } while (bfgets(buf, f) != NULL) { curkey = check_pool_memory_size(curkey, sizeof_pool_memory(buf)); curjobname = check_pool_memory_size(curjobname, sizeof_pool_memory(buf)); rootjob = check_pool_memory_size(rootjob, sizeof_pool_memory(buf)); prevjob = check_pool_memory_size(prevjob, sizeof_pool_memory(buf)); data = check_pool_memory_size(data, sizeof_pool_memory(buf)); *data = *curkey = *curjobname = *rootjob = *prevjob = 0; keep = false; datalen = 0; /* We don't capture the vol list, because our sscanf is limited to 1000 bytes */ if (scan_string(buf, "time=%60s level=%c key=%s name=%s root=%s prev=%s vollen=%d vol=", t, &curlevel, curkey, curjobname, rootjob, prevjob, &datalen) != 7) { if (scan_string(buf, "time=%60s level=F key=%s name=%s vollen=%d vol=", t, curkey, curjobname, &datalen) == 4) { *rootdiff = *rootjob = *prevjob = 0; curlevel = 'F'; } else { Dmsg(ctx, dbglvl+100, "Bad line l=[%s]\n", buf); keep = true; } } if (!keep) { pm_strcpy(data, strstr(buf, " vol=") + 5); strip_trailing_newline(data); unbash_spaces(data); if (datalen != strlen(data)) { Dmsg(ctx, dbglvl+100, "Bad data line datalen != strlen(data) %d != %d\n", datalen, (int)strlen(data)); Dmsg(ctx, dbglvl+100, "v=[%s]\n", data); } } if (!keep && (strcmp(key, curkey) != 0 || strcmp(name, curjobname) == 0 || strcmp(prev, curjobname) == 0 || strcmp(root, curjobname) == 0 || strcmp(rootdiff, curjobname) == 0)) { keep = true; } if (keep) { if (fprintf(fout, "%s", buf) < 0) { berrno be; Jmsg(ctx, M_ERROR, "Unable to update the job history. 
ERR=%s\n", be.bstrerror(errno)); goto bail_out; } } else if (build_cmd) { count++; Dmsg(ctx, dbglvl+100, "Can prune jobname %s\n", curjobname); char *p2 = data; for(char *p = data; *p; p++) { if (*p == ',') { *p = 0; jobs->append(bstrdup(build_cmd(arg, p2, curjobname))); p2 = p + 1 ; } } jobs->append(bstrdup(build_cmd(arg, p2, curjobname))); } else if (jobs) { if (return_jobname_on_prune) { jobs->append(bstrdup(curjobname)); } else { jobs->append(bstrdup(data)); } } } ok = true; bail_out: if (f) { fclose(f); } if (fout) { fclose(fout); } /* We can switch the file */ if (ok) { unlink(tmp); if (rename(tmpout, tmp) < 0) { berrno be; Jmsg(ctx, M_ERROR, "Unable to update the job history. ERR=%s\n", be.bstrerror(errno)); } } V(joblist_mutex); free_pool_memory(tmp); free_pool_memory(tmpout); free_pool_memory(data); free_pool_memory(buf); free_pool_memory(curkey); free_pool_memory(curjobname); free_pool_memory(prevjob); free_pool_memory(rootjob); Dmsg(ctx, dbglvl+100, "Pruning %d jobs\n", count); } #endif /* ! USE_JOB_LIST */ #ifdef USE_SP_DECODE /* ***BEEF*** */ static int sp_decode(char *str, POOLMEM **ret) { int len; /* len of the original string */ int salt; /* salt key used to hide the len */ char pepper; /* pepper key used to hide the string */ int retval = 0; POOLMEM *buf = get_pool_memory(PM_FNAME); **ret = 0; /* The original string should be smaller than * the base64 form */ buf = check_pool_memory_size(buf, strlen(str)); base64_to_bin(buf, sizeof_pool_memory(buf), str, strlen(str)); /* The two intergers are on 3 digits, the password * is starting after 8 bytes */ if (sscanf(buf, "%d:%d:", &salt, &len) != 2) { Dmsg0(0, "Unable to decode given malformed string\n"); goto cleanup; } pepper = salt % 256; len = len ^ salt; /* get original len */ if (len > MAX_NAME_LENGTH || (len+8) >= sizeof_pool_memory(buf)) { Dmsg0(0, "Unable to decode given string, len is too long\n"); goto cleanup; } /* skip %03d:%03d:pass, and force the end of string */ buf[8+len] = 0; for(char *p = buf + 8 ; len > 0 ; len--) { *p = *p ^ pepper; p++; } pm_strcpy(ret, buf + 8); retval = 1; cleanup: free_pool_memory(buf); return retval; } #endif #ifdef USE_FULL_READ /* Handle signal when reading from a pipe, functions based on * fread doesn't work very well. 
*/ static int32_t full_read(int in, char *buf, uint32_t nbytes) { size_t nleft; ssize_t nread; nleft = nbytes; while (nleft > 0) { errno = 0; nread = read(in, buf, nleft); if (nread == -1) { if (errno == EINTR) { continue; } } if (nread < 0) { return nread; /* error */ } nleft -= nread; buf += nread; if (nread == 0) { return nbytes - nleft; } } return nbytes - nleft; }; #endif #ifdef USE_FULL_WRITE static int32_t full_write(int fd, const char *ptr, uint32_t nbytes, bool *canceled=NULL) { size_t nleft; ssize_t nwritten; nleft = nbytes; while (nleft > 0 && (canceled == NULL || *canceled == false)) { do { errno = 0; nwritten = write(fd, ptr, nleft); } while (nwritten == -1 && errno == EINTR && (canceled == NULL || *canceled == false)); if (nwritten <= 0) { return nwritten; /* error */ } nleft -= nwritten; ptr += nwritten; } return nbytes - nleft; } #endif #ifdef USE_MAKEDIR /* Skip leading slash(es) */ static bool makedir(char *path, bool is_dir, mode_t mode) { struct stat statp; char *p = path; char *last; /* Handle specific windows paths */ #if HAVE_WIN32 if (strncmp(p, "\\\\?\\", 4) == 0) { p += 4; /* Skip windows special path */ } if (B_ISALPHA(p[0]) && p[1] == ':' && p[2] == '\\') { p += 3; /* Skip Windows drive */ } #endif while (IsPathSeparator(*p)) { p++; } last = p; while ((p = first_path_separator(p))) { char save_p; save_p = *p; *p = 0; if (mkdir(path, mode) != 0) { if (stat(path, &statp) != 0) { *p = save_p; return false; } else if (!S_ISDIR(statp.st_mode)) { *p = save_p; return false; } } *p = save_p; while (IsPathSeparator(*p)) { p++; } last = p; } if (is_dir && last && last[0] != '\0') { mkdir(path, mode); // Create the last directory } return true; } #endif #ifdef USE_LISTUSERS /* TODO: Adapt the code for windows */ static bool list_users(alist *list, POOLMEM *&error) { POOL_MEM tmp1; BPIPE *pfd; *error = 0; /* Simple way to list users, used by unix_user plugin parameter */ pfd = open_bpipe((char *)"cut -d: -f1 /etc/passwd", 0, "r"); if (!pfd) { Mmsg(error, "Unable to list users"); return false; } while (bfgets(tmp1.addr(), pfd->rfd)) { strip_trailing_junk(tmp1.c_str()); if (tmp1.c_str()[0] && tmp1.c_str()[0] != '#') { list->append(bstrdup(tmp1.c_str())); } } if (close_bpipe(pfd) != 0) { Mmsg(error, "Unable to list users"); } return list->size() > 0; } #endif // USE_LISTUSERS bacula-15.0.3/src/plugins/fd/pluginlib/0000755000175000017500000000000014771010173017510 5ustar bsbuildbsbuildbacula-15.0.3/src/plugins/fd/pluginlib/metaplugin_metadata.h0000644000175000017500000000267714771010173023702 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /** * @file metaplugin_metadata.h * @author Radosław Korzeniewski (radoslaw@korzeniewski.net) * @brief This is a metadata (ACL, XATTR, METADATA) handling for metaplugin. * @version 1.0.0 * @date 2021-09-20 * * @copyright Copyright (c) 2021 All rights reserved. IP transferred to Bacula Systems according to agreement. 
*/ #ifndef _METAPLUGIN_METADATA_H_ #define _METAPLUGIN_METADATA_H_ #include "pluginlib.h" #include "ptcomm.h" namespace metaplugin { namespace metadata { // bRC perform_accurate_check(bpContext *ctx, PTCOMM *ptcomm, POOL_MEM &fname, bool accurate_mode, bool &accurate_mode_err); // bRC perform_accurate_check_get(bpContext *ctx, PTCOMM *ptcomm, POOL_MEM &fname, bool accurate_mode, bool &accurate_mode_err); } // namespace accurate } // namespace metaplugin #endif // _METAPLUGIN_METADATA_H_ bacula-15.0.3/src/plugins/fd/pluginlib/metaplugin.cpp0000644000175000017500000030645114771010173022372 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /** * @file metaplugin.cpp * @author Radosław Korzeniewski (radoslaw@korzeniewski.net) * @brief This is a Bacula metaplugin interface. * @version 3.0.0 * @date 2021-10-07 * * @copyright Copyright (c) 2021 All rights reserved. * IP transferred to Bacula Systems according to agreement. */ #include "metaplugin.h" #include "metaplugin_attributes.h" #include "metaplugin_accurate.h" #include #include #include /* * libbac uses its own sscanf implementation which is not compatible with * libc implementation, unfortunately. * use bsscanf for Bacula sscanf flavor */ #ifdef sscanf #undef sscanf #endif #define pluginclass(ctx) (METAPLUGIN*)ctx->pContext; // Job Info Types #define BACKEND_JOB_INFO_BACKUP 'B' #define BACKEND_JOB_INFO_ESTIMATE 'E' #define BACKEND_JOB_INFO_RESTORE 'R' /* Forward referenced functions */ static bRC newPlugin(bpContext *ctx); static bRC freePlugin(bpContext *ctx); static bRC getPluginValue(bpContext *ctx, pVariable var, void *value); static bRC setPluginValue(bpContext *ctx, pVariable var, void *value); static bRC handlePluginEvent(bpContext *ctx, bEvent *event, void *value); static bRC startBackupFile(bpContext *ctx, struct save_pkt *sp); static bRC endBackupFile(bpContext *ctx); static bRC pluginIO(bpContext *ctx, struct io_pkt *io); static bRC startRestoreFile(bpContext *ctx, const char *cmd); static bRC endRestoreFile(bpContext *ctx); static bRC createFile(bpContext *ctx, struct restore_pkt *rp); static bRC setFileAttributes(bpContext *ctx, struct restore_pkt *rp); static bRC metaplugincheckFile(bpContext *ctx, char *fname); static bRC handleXACLdata(bpContext *ctx, struct xacl_pkt *xacl); static bRC queryParameter(bpContext *ctx, struct query_pkt *qp); static bRC metadataRestore(bpContext *ctx, struct meta_pkt *mp); /* Pointers to Bacula functions */ bFuncs *bfuncs = NULL; bInfo *binfo = NULL; static pFuncs pluginFuncs = { sizeof(pluginFuncs), FD_PLUGIN_INTERFACE_VERSION, /* Entry points into plugin */ newPlugin, freePlugin, getPluginValue, setPluginValue, handlePluginEvent, startBackupFile, endBackupFile, startRestoreFile, endRestoreFile, pluginIO, createFile, setFileAttributes, metaplugincheckFile, handleXACLdata, NULL, /* No restore file list */ NULL, /* No checkStream */ queryParameter, metadataRestore, }; #ifdef __cplusplus extern "C" { #endif /* Plugin Information structure 
*/ static pInfo pluginInfo = { sizeof(pluginInfo), FD_PLUGIN_INTERFACE_VERSION, FD_PLUGIN_MAGIC, PLUGIN_LICENSE, PLUGIN_AUTHOR, PLUGIN_DATE, PLUGIN_VERSION, PLUGIN_DESCRIPTION, }; /* * Plugin called here when it is first loaded */ bRC DLL_IMP_EXP loadPlugin(bInfo *lbinfo, bFuncs *lbfuncs, pInfo ** pinfo, pFuncs ** pfuncs) { bfuncs = lbfuncs; /* set Bacula function pointers */ binfo = lbinfo; Dmsg4(DINFO, "%s Plugin version %s%s %s (c) 2021 by Inteos\n", PLUGINNAME, PLUGIN_VERSION, VERSIONGIT_STR, PLUGIN_DATE); *pinfo = &pluginInfo; /* return pointer to our info */ *pfuncs = &pluginFuncs; /* return pointer to our functions */ return bRC_OK; } /* * Plugin called here when it is unloaded, normally when Bacula is going to exit. */ bRC DLL_IMP_EXP unloadPlugin() { return bRC_OK; } #ifdef __cplusplus } #endif /* * Check if a parameter (param) exists in the ConfigFile variables set by the user. * The check ignores the case of the parameter. * * in: * ini - a pointer to the ConfigFile class which has parsed user parameters * param - a parameter to search in ini parameter keys * out: * -1 - when a parameter param is not found in ini keys * - when a parameter param is found, the return value is its index in the ini->items table */ int METAPLUGIN::check_ini_param(char *param) { if (ini.items){ for (int k = 0; ini.items[k].name; k++){ if (ini.items[k].found && strcasecmp(param, ini.items[k].name) == 0){ return k; } } } return -1; } /* * Search whether the parameter (param) is on the parameter list prepared for the backend. * The check ignores the case of the parameter. * * in: * param - the parameter which we are looking for * params - the list of parameters to search * out: * True when the parameter param is found in the list * False when we can't find the param on the list */ bool METAPLUGIN::check_plugin_param(const char *param, alist *params) { POOLMEM *par; char *equal; bool found = false; foreach_alist(par, params){ equal = strchr(par, '='); if (equal){ /* temporarily terminate the par at the parameter name */ *equal = '\0'; if (strcasecmp(par, param) == 0){ found = true; } /* restore parameter equal sign */ *equal = '='; } else { if (strcasecmp(par, param) == 0){ found = true; } } } return found; } /* * Counts the number of ini->items available as it is a NULL-terminated array. * * in: * ini - a pointer to ConfigFile class * out: * - the number of ini->items */ int METAPLUGIN::get_ini_count() { int count = 0; if (ini.items){ for (int k = 0; ini.items[k].name; k++){ if (ini.items[k].found){ count++; } } } return count; } /** * @brief Renders a single key=value parameter line for the backend. * * @param ctx bpContext - for Bacula debug and jobinfo messages * @param param output buffer for the rendered parameter line * @param handler ini item handler which determines the value type * @param key the parameter name * @param val the parameter value * @return bRC bRC_OK on success, bRC_Error for an unsupported handler */ bRC METAPLUGIN::render_param(bpContext* ctx, POOL_MEM &param, INI_ITEM_HANDLER *handler, char *key, item_value val) { if (handler == ini_store_str){ Mmsg(param, "%s=%s\n", key, val.strval); } else if (handler == ini_store_int64){ Mmsg(param, "%s=%lld\n", key, val.int64val); } else if (handler == ini_store_bool){ Mmsg(param, "%s=%d\n", key, val.boolval ? 1 : 0); } else { DMSG1(ctx, DERROR, "Unsupported parameter handler for: %s\n", key); JMSG1(ctx, M_FATAL, "Unsupported parameter handler for: %s\n", key); return bRC_Error; } return bRC_OK; } /** * @brief Parses a plugin command.
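 *
 * Illustrative example (not from the original source), using a hypothetical
 * "myplugin:" prefix: a FileSet entry such as
 *    Plugin = "myplugin: user=admin abort_on_error listing"
 * is rendered into one backend line per argument, here "user=admin\n",
 * "abort_on_error=1\n" and "listing=1\n" (a value-less switch becomes "=1"),
 * while any parameter overridden in the restore ini is rendered through
 * render_param() instead of the backup-time value.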
* * @param ctx bpContext - Bacula Plugin context structure * @param command plugin command string to parse * @param params output parsed params list * @return bRC bRC_OK - on success, bRC_Error - on error */ bRC METAPLUGIN::parse_plugin_command(bpContext *ctx, const char *command, smart_alist ¶ms) { bool found; int count; int parargc, argc; POOL_MEM *param; DMSG(ctx, DINFO, "Parse command: %s\n", command); if (parser.parse_cmd(command) != bRC_OK) { DMSG0(ctx, DERROR, "Unable to parse Plugin command line.\n"); JMSG0(ctx, M_FATAL, "Unable to parse Plugin command line.\n"); return bRC_Error; } /* count the numbers of user parameters if any */ count = get_ini_count(); /* the first (zero) parameter is a plugin name, we should skip it */ argc = parser.argc - 1; parargc = argc + count; /* first parameters from plugin command saved during backup */ for (int i = 1; i < parser.argc; i++) { param = new POOL_MEM(PM_FNAME); found = false; int k; /* check if parameter overloaded by restore parameter */ if ((k = check_ini_param(parser.argk[i])) != -1) { found = true; DMSG1(ctx, DINFO, "parse_plugin_command: %s found in restore parameters\n", parser.argk[i]); if (render_param(ctx, *param, ini.items[k].handler, parser.argk[i], ini.items[k].val) != bRC_OK) { delete(param); return bRC_Error; } params.append(param); parargc--; } /* check if param overloaded above */ if (!found){ if (parser.argv[i]){ Mmsg(*param, "%s=%s\n", parser.argk[i], parser.argv[i]); params.append(param); } else { Mmsg(*param, "%s=1\n", parser.argk[i]); params.append(param); } } /* param is always ended with '\n' */ DMSG(ctx, DINFO, "Param: %s", param->c_str()); /* scan for abort_on_error parameter */ if (strcasecmp(parser.argk[i], "abort_on_error") == 0){ /* found, so check the value if provided, I only check the first char */ if (parser.argv[i] && *parser.argv[i] == '0'){ backend.ctx->clear_abort_on_error(); } else { backend.ctx->set_abort_on_error(); } DMSG1(ctx, DINFO, "abort_on_error found: %s\n", backend.ctx->is_abort_on_error() ? "True" : "False"); } /* scan for listing parameter, so the estimate job should be executed as a Listing procedure */ if (strcasecmp(parser.argk[i], "listing") == 0){ /* found, so check the value if provided */ if (parser.argv[i]){ listing = Listing; DMSG0(ctx, DINFO, "listing procedure param found\n"); } } /* scan for query parameter, so the estimate job should be executed as a QueryParam procedure */ if (strcasecmp(parser.argk[i], "query") == 0){ /* found, so check the value if provided */ if (parser.argv[i]){ listing = Query; DMSG0(ctx, DINFO, "query procedure param found\n"); } } } /* check what was missing in plugin command but get from ini file */ if (argc < parargc){ for (int k = 0; ini.items[k].name; k++){ if (ini.items[k].found && !check_plugin_param(ini.items[k].name, ¶ms)){ param = new POOL_MEM(PM_FNAME); DMSG1(ctx, DINFO, "parse_plugin_command: %s from restore parameters\n", ini.items[k].name); if (render_param(ctx, *param, ini.items[k].handler, (char*)ini.items[k].name, ini.items[k].val) != bRC_OK){ delete(param); return bRC_Error; } params.append(param); /* param is always ended with '\n' */ DMSG(ctx, DINFO, "Param: %s", param); } } } return bRC_OK; } /* * Parse a Restore Object saved during backup and modified by user during restore. * Every RO received will generate a dedicated backend context which is used * by bEventRestoreCommand to handle backend parameters for restore. 
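 * Illustrative example (not from the original source): if the job was backed
 * up with "user=admin" in the Plugin command and the operator sets
 * user=restoreuser in the restore plugin options, the FT_PLUGIN_CONFIG object
 * received here marks the "user" ini item as found, so parse_plugin_command()
 * later renders "user=restoreuser\n" for the backend instead of the
 * backup-time value.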
* * in: * bpContext - Bacula Plugin context structure * rop - a restore object structure to parse * out: * bRC_OK - on success * bRC_Error - on error */ bRC METAPLUGIN::handle_plugin_restoreobj(bpContext *ctx, restore_object_pkt *rop) { if (!rop){ return bRC_OK; /* end of rop list */ } DMSG2(ctx, DDEBUG, "handle_plugin_restoreobj: %s %d\n", rop->object_name, rop->object_type); // do not handle FT_PLUGIN_CONFIG when no one was saved by default if (!DONOTSAVE_FT_PLUGIN_CONFIG && strcmp(rop->object_name, INI_RESTORE_OBJECT_NAME) == 0 && (rop->object_type == FT_PLUGIN_CONFIG || rop->object_type == FT_PLUGIN_CONFIG_FILLED)) { DMSG(ctx, DINFO, "INIcmd: %s\n", rop->plugin_name); ini.clear_items(); if (!ini.dump_string(rop->object, rop->object_len)) { DMSG0(ctx, DERROR, "ini->dump_string failed\n"); JMSG0(ctx, M_FATAL, "Unable to parse user set restore configuration.\n"); return bRC_Error; } ini.register_items(plugin_items_dump, sizeof(struct ini_items)); if (!ini.parse(ini.out_fname)) { DMSG0(ctx, DERROR, "ini->parse failed\n"); JMSG0(ctx, M_FATAL, "Unable to parse user set restore configuration.\n"); return bRC_Error; } for (int i = 0; ini.items[i].name; i++) { if (ini.items[i].found){ if (ini.items[i].handler == ini_store_str){ DMSG2(ctx, DINFO, "INI: %s = %s\n", ini.items[i].name, ini.items[i].val.strval); } else if (ini.items[i].handler == ini_store_int64){ DMSG2(ctx, DINFO, "INI: %s = %lld\n", ini.items[i].name, ini.items[i].val.int64val); } else if (ini.items[i].handler == ini_store_bool){ DMSG2(ctx, DINFO, "INI: %s = %s\n", ini.items[i].name, ini.items[i].val.boolval ? "True" : "False"); } else { DMSG1(ctx, DERROR, "INI: unsupported parameter handler for: %s\n", ini.items[i].name); JMSG1(ctx, M_FATAL, "INI: unsupported parameter handler for: %s\n", ini.items[i].name); return bRC_Error; } } } return bRC_OK; } // handle any other RO restore restore_object_class *ropclass = new restore_object_class; ropclass->sent = false; pm_strcpy(ropclass->plugin_name, rop->plugin_name); pm_strcpy(ropclass->object_name, rop->object_name); ropclass->length = rop->object_len; pm_memcpy(ropclass->data, rop->object, rop->object_len); restoreobject_list.append(ropclass); DMSG2(ctx, DINFO, "ROclass saved for later: %s %d\n", ropclass->object_name.c_str(), ropclass->length); return bRC_OK; } /* * Run external backend script/application using BACKEND_CMD compile variable. * It will run the backend in current backend context (backendctx) and should * be called when a new backend is really required only. * * in: * bpContext - for Bacula debug and jobinfo messages * out: * bRC_OK - when backend spawned successfully * bRC_Error - when Plugin cannot run backend */ bRC METAPLUGIN::run_backend(bpContext *ctx) { BPIPE *bp; if (access(backend_cmd.c_str(), X_OK) < 0){ berrno be; DMSG2(ctx, DERROR, "Unable to access backend: %s Err=%s\n", backend_cmd.c_str(), be.bstrerror()); JMSG2(ctx, M_FATAL, "Unable to access backend: %s Err=%s\n", backend_cmd.c_str(), be.bstrerror()); return bRC_Error; } DMSG(ctx, DINFO, "Executing: %s\n", backend_cmd.c_str()); char ed1[64]; bsnprintf(ed1, sizeof(ed1), "BACULA_JOBID=%d", JobId); char *envp[] = { ed1, NULL }; bp = open_bpipe(backend_cmd.c_str(), 0, "rwe", envp); if (bp == NULL){ berrno be; DMSG(ctx, DERROR, "Unable to run backend. Err=%s\n", be.bstrerror()); JMSG(ctx, M_FATAL, "Unable to run backend. 
Err=%s\n", be.bstrerror()); return bRC_Error; } /* setup communication channel */ backend.ctx->set_bpipe(bp); DMSG(ctx, DINFO, "Backend executed at PID=%i\n", bp->worker_pid); backend.ctx->set_timeout(BACKEND_TIMEOUT); DMSG(ctx, DINFO, "setup backend timeout=%d\n", backend.ctx->get_timeout()); return bRC_OK; } bRC backendctx_finish_func(PTCOMM *ptcomm, void *cp) { bpContext * ctx = (bpContext*)cp; bRC status = bRC_OK; POOL_MEM cmd(PM_FNAME); pm_strcpy(cmd, "FINISH\n"); if (!ptcomm->write_command(ctx, cmd.addr())){ status = bRC_Error; } if (!ptcomm->read_ack(ctx)){ status = bRC_Error; } return status; } /* * Sends a "FINISH" command to all executed backends indicating the end of * "Restore loop". * * in: * bpContext - for Bacula debug and jobinfo messages * out: * bRC_OK - when operation was successful * bRC_Error - on any error */ bRC METAPLUGIN::signal_finish_all_backends(bpContext *ctx) { return backend.foreach_command_status(backendctx_finish_func, ctx); } /* * Send end job command to backend. * It terminates the backend when command sending was unsuccessful, as it is * the very last procedure in protocol. * * in: * bpContext - for Bacula debug and jobinfo messages * ptcomm - backend context * out: * bRC_OK - when send command was successful * bRC_Error - on any error */ bRC send_endjob(bpContext *ctx, PTCOMM *ptcomm) { bRC status = bRC_OK; POOL_MEM cmd(PM_FNAME); pm_strcpy(cmd, "END\n"); if (!ptcomm->write_command(ctx, cmd.c_str())){ /* error */ status = bRC_Error; } else { if (!ptcomm->read_ack(ctx)){ DMSG0(ctx, DERROR, "Wrong backend response to JobEnd command.\n"); JMSG0(ctx, ptcomm->jmsg_err_level(), "Wrong backend response to JobEnd command.\n"); status = bRC_Error; } ptcomm->signal_term(ctx); } return status; } /* * Terminates the current backend pointed by ptcomm context. * The termination sequence consist of "End Job" protocol procedure and real * backend process termination including communication channel close. * When we'll get an error during "End Job" procedure then we inform the user * and terminate the backend as usual without unnecessary formalities. :) * * in: * bpContext - for Bacula debug and jobinfo messages * out: * bRC_OK - when backend termination was successful, i.e. no error in * "End Job" procedure * bRC_Error - when backend termination encountered an error. */ bRC backendctx_jobend_func(PTCOMM *ptcomm, void *cp) { bpContext *ctx = (bpContext *)cp; bRC status = bRC_OK; if (send_endjob(ctx, ptcomm) != bRC_OK){ /* error in end job */ DMSG0(ctx, DERROR, "Error in EndJob.\n"); status = bRC_Error; } int pid = ptcomm->get_backend_pid(); DMSG(ctx, DINFO, "Terminate backend at PID=%d\n", pid) ptcomm->terminate(ctx); return status; } /* * Terminate all executed backends. * Check METAPLUGIN::terminate_current_backend for more info. * * in: * bpContext - for Bacula debug and jobinfo messages * out: * bRC_OK - when all backends termination was successful * bRC_Error - when any backend termination encountered an error */ bRC METAPLUGIN::terminate_all_backends(bpContext *ctx) { return backend.foreach_command_status(backendctx_jobend_func, ctx); } /** * @brief Callback used for sending a `cancel event` to the selected backend * * @param ptcomm the backend communication object * @param cp a bpContext - for Bacula debug and jobinfo messages * @return bRC bRC_OK when success */ bRC backendctx_cancel_func(PTCOMM *ptcomm, void *cp) { bpContext * ctx = (bpContext*)cp; // cancel procedure // 1. get backend pid // 2. 
send SIGUSR1 to backend pid pid_t pid = ptcomm->get_backend_pid(); DMSG(ctx, DINFO, "Inform backend about Cancel at PID=%d ...\n", pid) kill(pid, SIGUSR1); return bRC_OK; } /** * @brief Send `cancel event` to every backend and terminate it. * * @param ctx bpContext - for Bacula debug and jobinfo messages * @return bRC bRC_OK when success, bRC_Error if not */ bRC METAPLUGIN::cancel_all_backends(bpContext *ctx) { METAPLUGIN *pctx = (METAPLUGIN *)ctx->pContext; // the cancel procedure: for all backends execute cancel func return pctx->backend.foreach_command_status(backendctx_cancel_func, ctx); } /* * Send a "Job Info" protocol procedure parameters. * * in: * bpContext - for Bacula debug and jobinfo messages * type - a char compliant with the protocol indicating what jobtype we run * out: * bRC_OK - when send job info was successful * bRC_Error - on any error */ bRC METAPLUGIN::send_jobinfo(bpContext *ctx, char type) { int32_t rc; POOL_MEM cmd; char lvl; /* we will be sending Job Info data */ pm_strcpy(cmd, "Job\n"); rc = backend.ctx->write_command(ctx, cmd); if (rc < 0){ /* error */ return bRC_Error; } /* required parameters */ Mmsg(cmd, "Name=%s\n", JobName); rc = backend.ctx->write_command(ctx, cmd); if (rc < 0){ /* error */ return bRC_Error; } Mmsg(cmd, "JobID=%i\n", JobId); rc = backend.ctx->write_command(ctx, cmd); if (rc < 0){ /* error */ return bRC_Error; } Mmsg(cmd, "Type=%c\n", type); rc = backend.ctx->write_command(ctx, cmd); if (rc < 0){ /* error */ return bRC_Error; } /* optional parameters */ if (mode != RESTORE){ switch (mode){ case BACKUP_FULL: lvl = 'F'; break; case BACKUP_DIFF: lvl = 'D'; break; case BACKUP_INCR: lvl = 'I'; break; default: lvl = 0; } if (lvl){ Mmsg(cmd, "Level=%c\n", lvl); rc = backend.ctx->write_command(ctx, cmd); if (rc < 0){ /* error */ return bRC_Error; } } } if (since){ Mmsg(cmd, "Since=%ld\n", since); rc = backend.ctx->write_command(ctx, cmd); if (rc < 0){ /* error */ return bRC_Error; } } if (where){ Mmsg(cmd, "Where=%s\n", where); rc = backend.ctx->write_command(ctx, cmd); if (rc < 0){ /* error */ return bRC_Error; } } if (regexwhere){ Mmsg(cmd, "RegexWhere=%s\n", regexwhere); rc = backend.ctx->write_command(ctx, cmd); if (rc < 0){ /* error */ return bRC_Error; } } if (replace){ Mmsg(cmd, "Replace=%c\n", replace); rc = backend.ctx->write_command(ctx, cmd); if (rc < 0){ /* error */ return bRC_Error; } } if (CUSTOMNAMESPACE){ Mmsg(cmd, "Namespace=%s\n", PLUGINNAMESPACE); rc = backend.ctx->write_command(ctx, cmd); if (rc < 0){ /* error */ return bRC_Error; } } if (CUSTOMPREVJOBNAME && prevjobname){ Mmsg(cmd, "PrevJobName=%s\n", prevjobname); rc = backend.ctx->write_command(ctx, cmd); if (rc < 0){ /* error */ return bRC_Error; } } const char *dir; bfuncs->getBaculaValue(ctx, bVarWorkingDir, &dir); Mmsg(cmd, "WorkingDir=%s\n", dir); rc = backend.ctx->write_command(ctx, cmd); if (rc < 0) { /* error */ return bRC_Error; } bfuncs->getBaculaValue(ctx, bVarSysConfigPath, &dir); Mmsg(cmd, "SysconfigPath=%s\n", dir); rc = backend.ctx->write_command(ctx, cmd); if (rc < 0) { /* error */ return bRC_Error; } bfuncs->getBaculaValue(ctx, bVarExePath, &dir); Mmsg(cmd, "ExePath=%s\n", dir); rc = backend.ctx->write_command(ctx, cmd); if (rc < 0) { /* error */ return bRC_Error; } backend.ctx->signal_eod(ctx); if (!backend.ctx->read_ack(ctx)){ DMSG0(ctx, DERROR, "Wrong backend response to Job command.\n"); JMSG0(ctx, backend.ctx->jmsg_err_level(), "Wrong backend response to Job command.\n"); return bRC_Error; } return bRC_OK; } /* * Send a "Plugin Parameters" protocol 
procedure data. * It parse plugin command and ini parameters before sending it to backend. * * in: * bpContext - for Bacula debug and jobinfo messages * command - a Plugin command for a job * out: * bRC_OK - when send parameters was successful * bRC_Error - on any error */ bRC METAPLUGIN::send_parameters(bpContext *ctx, char *command) { int32_t rc; bRC status = bRC_OK; POOL_MEM cmd(PM_FNAME); // alist params(16, not_owned_by_alist); smart_alist params; POOL_MEM *param; bool found; #ifdef DEVELOPER static const char *regress_valid_params[] = { // add special test_backend commands to handle regression tests "regress_error_plugin_params", "regress_error_start_job", "regress_error_backup_no_files", "regress_error_backup_stderr", "regress_error_estimate_stderr", "regress_error_listing_stderr", "regress_error_restore_stderr", "regress_backup_plugin_objects", "regress_backup_other_file", "regress_backup_external_stat", "regress_error_backup_abort", "regress_metadata_support", "regress_standard_error_backup", "regress_cancel_backup", "regress_cancel_restore", "regress_working_dir", NULL, }; #endif /* parse and prepare final backend plugin params */ status = parse_plugin_command(ctx, command, params); if (status != bRC_OK){ /* error */ return status; } /* send backend info that parameters are coming */ pm_strcpy(cmd, "Params\n"); rc = backend.ctx->write_command(ctx, cmd); if (rc < 0){ /* error */ return bRC_Error; } /* send all prepared parameters */ foreach_alist(param, ¶ms){ // check valid parameter list found = false; for (int a = 0; valid_params[a] != NULL; a++ ) { DMSG3(ctx, DVDEBUG, "=> '%s' vs '%s' [%d]\n", param, valid_params[a], strlen(valid_params[a])); if (strncasecmp(param->c_str(), valid_params[a], strlen(valid_params[a])) == 0){ found = true; break; } } #ifdef DEVELOPER if (!found){ // now handle regression tests commands for (int a = 0; regress_valid_params[a] != NULL; a++ ){ DMSG3(ctx, DVDEBUG, "regress=> '%s' vs '%s' [%d]\n", param, regress_valid_params[a], strlen(regress_valid_params[a])); if (strncasecmp(param->c_str(), regress_valid_params[a], strlen(regress_valid_params[a])) == 0){ found = true; break; } } } #endif // signal error if required if (!found) { pm_strcpy(cmd, param->c_str()); strip_trailing_junk(cmd.c_str()); DMSG1(ctx, DERROR, "Unknown parameter %s in Plugin command.\n", cmd.c_str()); JMSG1(ctx, M_ERROR, "Unknown parameter %s in Plugin command.\n", cmd.c_str()); } rc = backend.ctx->write_command(ctx, *param); if (rc < 0) { /* error */ return bRC_Error; } } // now send accurate parameter if requested and available if (ACCURATEPLUGINPARAMETER && accurate_mode) { pm_strcpy(cmd, "Accurate=1\n"); rc = backend.ctx->write_command(ctx, cmd); if (rc < 0) { /* error */ return bRC_Error; } } #ifdef DEVELOPER const char * regress_working_dir; bfuncs->getBaculaValue(ctx, bVarWorkingDir, ®ress_working_dir); Mmsg(cmd, "regress_working_dir=%s\n", regress_working_dir); rc = backend.ctx->write_command(ctx, cmd); if (rc < 0) { /* error */ return bRC_Error; } #endif // signal end of parameters block backend.ctx->signal_eod(ctx); /* ack Params command */ if (!backend.ctx->read_ack(ctx)){ DMSG0(ctx, DERROR, "Wrong backend response to Params command.\n"); JMSG0(ctx, backend.ctx->jmsg_err_level(), "Wrong backend response to Params command.\n"); return bRC_Error; } return bRC_OK; } /* * Send start job command pointed by command variable. 
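 *
 * Illustrative exchange (not from the original source), assuming a backend
 * that asks for two leading path components to be stripped:
 *    plugin  -> backend : BackupStart\n
 *    backend -> plugin  : STRIP:2\n
 *    backend -> plugin  : <EOD>
 * Any STRIP:<n> response updates strip_path_option before the loop ends.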
* * in: * bpContext - for Bacula debug and jobinfo messages * command - the command string to send * out: * bRC_OK - when send command was successful * bRC_Error - on any error */ bRC METAPLUGIN::send_startjob(bpContext *ctx, const char *command) { POOL_MEM cmd; pm_strcpy(cmd, command); if (backend.ctx->write_command(ctx, cmd) < 0){ /* error */ return bRC_Error; } int32_t status; while ((status = backend.ctx->read_command(ctx, cmd)) != 0) { if (status < 0) { // handle error strip_trailing_newline(cmd.c_str()); DMSG(ctx, DERROR, "Wrong backend response to %s command.\n", cmd.c_str()); JMSG(ctx, backend.ctx->jmsg_err_level(), "Wrong backend response to %s command.\n", cmd.c_str()); return bRC_Error; } if (status > 0 && scan_parameter_int(cmd, "STRIP:", strip_path_option)) { DMSG1(ctx, DINFO, "set strip path = %d\n", strip_path_option); continue; } } return bRC_OK; } /* * Send "BackupStart" protocol command. * more info at METAPLUGIN::send_startjob * * in: * bpContext - for Bacula debug and jobinfo messages * out: * bRC_OK - when send command was successful * bRC_Error - on any error */ bRC METAPLUGIN::send_startbackup(bpContext *ctx) { return send_startjob(ctx, "BackupStart\n"); } /* * Send "EstimateStart" protocol command. * more info at METAPLUGIN::send_startjob * * in: * bpContext - for Bacula debug and jobinfo messages * out: * bRC_OK - when send command was successful * bRC_Error - on any error */ bRC METAPLUGIN::send_startestimate(bpContext *ctx) { return send_startjob(ctx, "EstimateStart\n"); } /* * Send "ListingStart" protocol command. * more info at METAPLUGIN::send_startjob * * in: * bpContext - for Bacula debug and jobinfo messages * out: * bRC_OK - when send command was successful * bRC_Error - on any error */ bRC METAPLUGIN::send_startlisting(bpContext *ctx) { return send_startjob(ctx, "ListingStart\n"); } /* * Send "QueryStart" protocol command. * more info at PLUGIN::send_startjob * * in: * bpContext - for Bacula debug and jobinfo messages * out: * bRC_OK - when send command was successful * bRC_Error - on any error */ bRC METAPLUGIN::send_startquery(bpContext *ctx) { return send_startjob(ctx, "QueryStart\n"); } /* * Send "RestoreStart" protocol command. * more info at METAPLUGIN::send_startjob * * in: * bpContext - for Bacula debug and jobinfo messages * out: * bRC_OK - when send command was successful * bRC_Error - on any error */ bRC METAPLUGIN::send_startrestore(bpContext *ctx) { int32_t rc; POOL_MEM cmd(PM_FNAME); const char * command = "RestoreStart\n"; POOL_MEM extpipename(PM_FNAME); pm_strcpy(cmd, command); rc = backend.ctx->write_command(ctx, cmd); if (rc < 0){ /* error */ return bRC_Error; } if (backend.ctx->read_command(ctx, cmd) < 0){ DMSG(ctx, DERROR, "Wrong backend response to %s command.\n", command); JMSG(ctx, backend.ctx->jmsg_err_level(), "Wrong backend response to %s command.\n", command); return bRC_Error; } if (backend.ctx->is_eod()){ /* got EOD so the backend is ready for restore */ return bRC_OK; } /* here we expect a PIPE: command only */ if (scan_parameter_str(cmd, "PIPE:", extpipename)){ /* got PIPE: */ DMSG(ctx, DINFO, "PIPE:%s\n", extpipename.c_str()); backend.ctx->set_extpipename(extpipename.c_str()); /* TODO: decide if plugin should verify if extpipe is available */ pm_strcpy(cmd, "OK\n"); rc = backend.ctx->write_command(ctx, cmd); if (rc < 0){ /* error */ return bRC_Error; } return bRC_OK; } return bRC_Error; } /* * Switches current backend context or executes new one when required. 
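 * For example (illustrative): when a job uses two different Plugin command
 * strings, two backend processes end up on the list and each event switches
 * backend.ctx to the matching one; a command seen again while its backend is
 * still open simply returns bRC_Max so the caller can skip re-initialization.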
* The backend (application path) to execute is set in the BACKEND_CMD compile * variable and handled by the METAPLUGIN::run_backend() method. Just before a new * backend is executed, the method searches for already spawned backends that * handle the same Plugin command, and when one is found the current backend context * is switched to the one already available on the list. * * in: * bpContext - for Bacula debug and jobinfo messages * command - the Plugin command for which we execute a backend * out: * bRC_OK - success; backendctx has a current backend context and the Plugin * should run the initialization procedure * bRC_Max - when the backend was already prepared and initialized, so no * reinitialization is required * bRC_Error - error in switching or running the backend */ bRC METAPLUGIN::switch_or_run_backend(bpContext *ctx, char *command) { DMSG0(ctx, DINFO, "Switch or run Backend.\n"); backend.switch_command(command); /* check if we have the backend with the same command already */ if (backend.ctx->is_open()) { /* and it is open, so skip running a new one */ DMSG0(ctx, DINFO, "Backend already prepared.\n"); return bRC_Max; } // now execute a backend if (run_backend(ctx) != bRC_OK){ return bRC_Error; } return bRC_OK; } /* * Prepares the backend for Backup/Restore/Estimate loops. * The preparation consists of backend execution and initialization plus the required * protocol initialization procedures: "Handshake", "Job Info", * "Plugin Parameters" and "Start Backup"/"Start Restore"/"Start Estimate", * depending on the job type. * * in: * bpContext - for Bacula debug and jobinfo messages * command - a Plugin command from FileSet to start the job * out: * bRC_OK - when the backend is operational and prepared for the job, or when it is * not our plugin command (still a success) * bRC_Error - encountered any error during preparation */ bRC METAPLUGIN::prepare_backend(bpContext *ctx, char type, char *command) { // check if it is our Plugin command if (!isourplugincommand(PLUGINPREFIX, command)){ // it is not our plugin prefix return bRC_OK; } // check for prohibited command duplication if (type != BACKEND_JOB_INFO_RESTORE && backend.check_command(command)) { // already exists, report it DMSG1(ctx, DERROR, "Plugin command=%s already defined, cannot proceed.\n", command); JMSG1(ctx, M_FATAL, "Plugin command already defined: \"%s\" Cannot proceed.
You should correct FileSet configuration.\n", command); terminate_all_backends(ctx); return bRC_Error; } /* switch backend context, so backendctx has all required variables available */ bRC status = switch_or_run_backend(ctx, command); if (status == bRC_Max){ /* already prepared, skip rest of preparation */ return bRC_OK; } if (status != bRC_OK){ /* we have some error here */ return bRC_Error; } /* handshake (1) */ DMSG0(ctx, DINFO, "Backend handshake...\n"); if (!backend.ctx->handshake(ctx, PLUGINNAME, PLUGINAPI)) { backend.ctx->terminate(ctx); return bRC_Error; } /* Job Info (2) */ DMSG0(ctx, DINFO, "Job Info (2) ...\n"); if (send_jobinfo(ctx, type) != bRC_OK) { backend.ctx->terminate(ctx); return bRC_Error; } /* Plugin Params (3) */ DMSG0(ctx, DINFO, "Plugin Params (3) ...\n"); if (send_parameters(ctx, command) != bRC_OK) { backend.ctx->terminate(ctx); return bRC_Error; } switch (type) { case BACKEND_JOB_INFO_BACKUP: /* Start Backup (4) */ DMSG0(ctx, DINFO, "Start Backup (4) ...\n"); if (send_startbackup(ctx) != bRC_OK){ backend.ctx->terminate(ctx); return bRC_Error; } break; case BACKEND_JOB_INFO_ESTIMATE: { /* Start Estimate or Listing/Query (4) */ bRC rc = bRC_Error; switch (listing) { case Listing: DMSG0(ctx, DINFO, "Start Listing (4) ...\n"); rc = send_startlisting(ctx); break; case Query: DMSG0(ctx, DINFO, "Start Query Params (4) ...\n"); rc = send_startquery(ctx); break; default: DMSG0(ctx, DINFO, "Start Estimate (4) ...\n"); rc = send_startestimate(ctx); break; } if (rc != bRC_OK) { backend.ctx->terminate(ctx); return bRC_Error; } } break; case BACKEND_JOB_INFO_RESTORE: /* Start Restore (4) */ DMSG0(ctx, DINFO, "Start Restore (4) ...\n"); if (send_startrestore(ctx) != bRC_OK) { backend.ctx->terminate(ctx); return bRC_Error; } break; default: return bRC_Error; } DMSG0(ctx, DINFO, "Prepare backend done.\n"); return bRC_OK; } /* * This is the main method for handling events generated by Bacula. * The behavior of the method depends on event type generated, but there are * some events which does nothing, just return with bRC_OK. Every event is * tracked in debug trace file to verify the event flow during development. 
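 *
 * Illustrative event sequence (not from the original source) for a Full
 * backup, as driven by the cases below:
 *    bEventJobStart        -> collect JobId and JobName
 *    bEventLevel ('F')     -> mode = BACKUP_FULL
 *    bEventBackupCommand   -> prepare_backend(): run backend, Handshake,
 *                             Job Info, Plugin Params, BackupStart
 *    bEventJobEnd          -> terminate_all_backends(): send END and
 *                             terminate every backend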
* * in: * bpContext - for Bacula debug and jobinfo messages * event - a Bacula event structure * value - optional event value * out: * bRC_OK - in most cases signal success/no error * bRC_Error - in most cases signal error * - depend on Bacula Plugin API if applied */ bRC METAPLUGIN::handlePluginEvent(bpContext *ctx, bEvent *event, void *value) { // extract original plugin context, basically it should be `this` METAPLUGIN *pctx = (METAPLUGIN *)ctx->pContext; // this ensures that handlePluginEvent is thread safe for extracted pContext // lock_guard lg(&pctx->mutex); - removed on request if (job_cancelled) { return bRC_Error; } switch (event->eventType) { case bEventJobStart: DMSG(ctx, D3, "bEventJobStart value=%s\n", NPRT((char *)value)); getBaculaVar(bVarJobId, (void *)&JobId); getBaculaVar(bVarJobName, (void *)&JobName); if (CUSTOMPREVJOBNAME){ getBaculaVar(bVarPrevJobName, (void *)&prevjobname); } break; case bEventJobEnd: DMSG(ctx, D3, "bEventJobEnd value=%s\n", NPRT((char *)value)); return terminate_all_backends(ctx); case bEventLevel: char lvl; lvl = (char)((intptr_t) value & 0xff); DMSG(ctx, D2, "bEventLevel='%c'\n", lvl); switch (lvl) { case 'F': DMSG0(ctx, D2, "backup level = Full\n"); mode = BACKUP_FULL; break; case 'I': DMSG0(ctx, D2, "backup level = Incr\n"); mode = BACKUP_INCR; break; case 'D': DMSG0(ctx, D2, "backup level = Diff\n"); mode = BACKUP_DIFF; break; default: DMSG0(ctx, D2, "unsupported backup level!\n"); return bRC_Error; } break; case bEventSince: since = (time_t) value; DMSG(ctx, D2, "bEventSince=%ld\n", (intptr_t) since); break; case bEventStartBackupJob: DMSG(ctx, D3, "bEventStartBackupJob value=%s\n", NPRT((char *)value)); break; case bEventEndBackupJob: DMSG(ctx, D2, "bEventEndBackupJob value=%s\n", NPRT((char *)value)); break; case bEventStartRestoreJob: DMSG(ctx, DINFO, "StartRestoreJob value=%s\n", NPRT((char *)value)); getBaculaVar(bVarWhere, &where); DMSG(ctx, DINFO, "Where=%s\n", NPRT(where)); getBaculaVar(bVarRegexWhere, ®exwhere); DMSG(ctx, DINFO, "RegexWhere=%s\n", NPRT(regexwhere)); getBaculaVar(bVarReplace, &replace); DMSG(ctx, DINFO, "Replace=%c\n", replace); mode = RESTORE; break; case bEventEndRestoreJob: DMSG(ctx, DINFO, "bEventEndRestoreJob value=%s\n", NPRT((char *)value)); return signal_finish_all_backends(ctx); /* Plugin command e.g. plugin = :parameters */ case bEventEstimateCommand: DMSG(ctx, D1, "bEventEstimateCommand value=%s\n", NPRT((char *)value)); estimate = true; return prepare_backend(ctx, BACKEND_JOB_INFO_ESTIMATE, (char*)value); /* Plugin command e.g. plugin = :parameters */ case bEventBackupCommand: DMSG(ctx, D2, "bEventBackupCommand value=%s\n", NPRT((char *)value)); pluginconfigsent = false; return prepare_backend(ctx, BACKEND_JOB_INFO_BACKUP, (char*)value); /* Plugin command e.g. plugin = :parameters */ case bEventRestoreCommand: DMSG(ctx, D2, "bEventRestoreCommand value=%s\n", NPRT((char *)value)); return prepare_backend(ctx, BACKEND_JOB_INFO_RESTORE, (char*)value); /* Plugin command e.g. 
plugin = :parameters */ case bEventPluginCommand: DMSG(ctx, D2, "bEventPluginCommand value=%s\n", NPRT((char *)value)); getBaculaVar(bVarAccurate, (void *)&accurate_mode); if (isourplugincommand(PLUGINPREFIX, (char*)value) && !backend_available) { DMSG2(ctx, DERROR, "Unable to use backend: %s Err=%s\n", backend_cmd.c_str(), backend_error.c_str()); JMSG2(ctx, M_FATAL, "Unable to use backend: %s Err=%s\n", backend_cmd.c_str(), backend_error.c_str()); return bRC_Error; } break; case bEventOptionPlugin: case bEventHandleBackupFile: if (isourplugincommand(PLUGINPREFIX, (char*)value)){ DMSG0(ctx, DERROR, "Invalid handle Option Plugin called!\n"); JMSG2(ctx, M_FATAL, "The %s plugin doesn't support the Option Plugin configuration.\n" "Please review your FileSet and move the Plugin=%s" "... command into the Include {} block.\n", PLUGINNAME, PLUGINPREFIX); return bRC_Error; } break; case bEventEndFileSet: DMSG(ctx, D3, "bEventEndFileSet value=%s\n", NPRT((char *)value)); break; case bEventRestoreObject: /* Restore Object handle - a plugin configuration for restore and user supplied parameters */ if (!value){ DMSG0(ctx, DINFO, "End restore objects\n"); break; } DMSG(ctx, D2, "bEventRestoreObject value=%p\n", value); return handle_plugin_restoreobj(ctx, (restore_object_pkt *) value); case bEventCancelCommand: DMSG2(ctx, D3, "bEventCancelCommand self = %p pctx = %p\n", this, pctx); // TODO: PETITION: Our plugin (RHV WhiteBearSolutions) search the packet E CANCEL. // TODO: If you modify this behaviour, please you notify us. // TODO: RPK[20210623]: The information about a new procedure was sent to Eric pctx->job_cancelled = true; return cancel_all_backends(ctx); default: // enabled only for Debug DMSG2(ctx, D2, "Unknown event: %s (%d) \n", eventtype2str(event), event->eventType); } return bRC_OK; } /* * Make a real data read from the backend and checks for EOD. * * in: * bpContext - for Bacula debug and jobinfo messages * io - Bacula Plugin API I/O structure for I/O operations * out: * bRC_OK - when successful * bRC_Error - on any error */ bRC METAPLUGIN::perform_read_data(bpContext *ctx, struct io_pkt *io) { int rc; if (nodata){ io->status = 0; return bRC_OK; } rc = backend.ctx->read_data_fixed(ctx, io->buf, io->count); if (rc < 0){ io->status = rc; io->io_errno = EIO; // We should get here only with Protocol errors, so we should terminate backends, as this is always fatal backendctx_cancel_func(backend.ctx, ctx); return bRC_Error; } io->status = rc; if (backend.ctx->is_eod()){ // TODO: we signal EOD as rc=0, so no need to explicity check for EOD, right? io->status = 0; } return bRC_OK; } /* * Make a real write data to the backend. * * in: * bpContext - for Bacula debug and jobinfo messages * io - Bacula Plugin API I/O structure for I/O operations * out: * bRC_OK - when successful * bRC_Error - on any error */ bRC METAPLUGIN::perform_write_data(bpContext *ctx, struct io_pkt *io) { int rc; POOL_MEM cmd(PM_FNAME); /* check if DATA was sent */ if (nodata){ pm_strcpy(cmd, "DATA\n"); rc = backend.ctx->write_command(ctx, cmd.c_str()); if (rc < 0){ /* error */ io->status = rc; io->io_errno = rc; return bRC_Error; } /* DATA command sent */ nodata = false; } DMSG1(ctx, DVDEBUG, "perform_write_data: %d\n", io->count); rc = backend.ctx->write_data(ctx, io->buf, io->count); io->status = rc; if (rc < 0){ io->io_errno = rc; return bRC_Error; } nodata = false; return bRC_OK; } /* * Handle protocol "DATA" command from backend during backup loop. 
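 * Illustrative exchange (not from the original source) for one file being
 * backed up:
 *    backend -> plugin : DATA\n
 *    backend -> plugin : <raw data packets, consumed by later read calls>
 *    backend -> plugin : <EOD>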
* It handles a "no data" flag when saved file contains no data to * backup (empty file). * * in: * bpContext - for Bacula debug and jobinfo messages * io - Bacula Plugin API I/O structure for I/O operations * out: * bRC_OK - when successful * bRC_Error - on any error * io->status, io->io_errno - set to error on any error */ bRC METAPLUGIN::perform_backup_open(bpContext *ctx, struct io_pkt *io) { int rc; POOL_MEM cmd(PM_FNAME); /* expecting DATA command */ nodata = false; rc = backend.ctx->read_command(ctx, cmd); if (backend.ctx->is_eod()){ /* no data for file */ nodata = true; } else // expect no error and 'DATA' starting packet if (rc < 0 || !bstrcmp(cmd.c_str(), "DATA")){ io->status = rc; io->io_errno = EIO; openerror = backend.ctx->is_fatal() ? false : true; return bRC_Error; } return bRC_OK; } /* * Signal the end of data to restore and verify acknowledge from backend. * * in: * bpContext - for Bacula debug and jobinfo messages * io - Bacula Plugin API I/O structure for I/O operations * out: * bRC_OK - when successful * bRC_Error - on any error */ bRC METAPLUGIN::perform_write_end(bpContext *ctx, struct io_pkt *io) { if (!nodata){ /* signal end of data to restore and get ack */ if (!backend.ctx->send_ack(ctx)){ io->status = -1; io->io_errno = EPIPE; return bRC_Error; } } if (last_type == FT_DIREND) { struct xacl_pkt xacl; if (acldatalen > 0) { xacl.count = acldatalen; xacl.content = acldata.c_str(); bRC status = perform_write_acl(ctx, &xacl); if (status != bRC_OK){ return status; } } if (xattrdatalen > 0) { xacl.count = xattrdatalen; xacl.content = xattrdata.c_str(); bRC status = perform_write_xattr(ctx, &xacl); if (status != bRC_OK){ return status; } } } return bRC_OK; } /* * Reads ACL data from backend during backup. Save it for handleXACLdata from * Bacula. As we do not know if a backend will send the ACL data or not, we * cannot wait to read this data until handleXACLdata will be called. * TODO: The method has a limitation and accept up to PM_BSOCK acl data to save * which is about 64kB. It should be sufficient for virtually all acl data * we can imagine. But when a backend developer could expect a larger data * to save he should rewrite perform_read_acl() method. * * in: * bpContext - for Bacula debug and jobinfo messages * out: * bRC_OK - when ACL data was read successfully and this.readacl set to true * bRC_Error - on any error during acl data read */ bRC METAPLUGIN::perform_read_acl(bpContext *ctx) { DMSG0(ctx, DINFO, "perform_read_acl\n"); acldatalen = backend.ctx->read_data(ctx, acldata); if (acldatalen < 0){ DMSG0(ctx, DERROR, "Cannot read ACL data from backend.\n"); return bRC_Error; } DMSG1(ctx, DINFO, "readACL: %i\n", acldatalen); if (!backend.ctx->read_ack(ctx)){ /* should get EOD */ DMSG0(ctx, DERROR, "Protocol error, should get EOD.\n"); // In protocol error situations, the communication has no chance to continue, so we need to kill the backend backendctx_cancel_func(backend.ctx, ctx); return bRC_Error; } readacl = true; return bRC_OK; } /* * Reads XATTR data from backend during backup. Save it for handleXACLdata from * Bacula. As we do not know if a backend will send the XATTR data or not, we * cannot wait to read this data until handleXACLdata will be called. * TODO: The method has a limitation and accept up to PM_BSOCK xattr data to save * which is about 64kB. It should be sufficient for virtually all xattr data * we can imagine. But when a backend developer could expect a larger data * to save he should rewrite perform_read_xattr() method. 
* * in: * bpContext - for Bacula debug and jobinfo messages * out: * bRC_OK - when XATTR data was read successfully and this.readxattr set * to true * bRC_Error - on any error during acl data read */ bRC METAPLUGIN::perform_read_xattr(bpContext *ctx) { DMSG0(ctx, DINFO, "perform_read_xattr\n"); xattrdatalen = backend.ctx->read_data(ctx, xattrdata); if (xattrdatalen < 0){ DMSG0(ctx, DERROR, "Cannot read XATTR data from backend.\n"); return bRC_Error; } DMSG1(ctx, DINFO, "readXATTR: %i\n", xattrdatalen); if (!backend.ctx->read_ack(ctx)){ /* should get EOD */ DMSG0(ctx, DERROR, "Protocol error, should get EOD.\n"); // In protocol error situations, the communication has no chance to continue, so we need to kill the backend backendctx_cancel_func(backend.ctx, ctx); return bRC_Error; } readxattr = true; return bRC_OK; } /** * @brief Reads metadata info from backend and adds it as a metadata packet. * * @param ctx for Bacula debug and jobinfo messages * @param type detected Metadata type * @param sp save packet * @return bRC bRC_OK when success, bRC_Error when some error */ bRC METAPLUGIN::perform_read_metadata_info(bpContext *ctx, metadata_type type, struct save_pkt *sp) { POOL_MEM data(PM_MESSAGE); DMSG0(ctx, DINFO, "perform_read_metadata_info\n"); int len = backend.ctx->read_data(ctx, data); if (len < 0){ DMSG1(ctx, DERROR, "Cannot read METADATA(%i) information from backend.\n", type); return bRC_Error; } DMSG1(ctx, DINFO, "read METADATA info len: %i\n", len); if (!backend.ctx->read_ack(ctx)){ /* should get EOD */ DMSG0(ctx, DERROR, "Protocol error, should get EOD.\n"); // In protocol error situations, the communication has no chance to continue, so we need to kill the backend backendctx_cancel_func(backend.ctx, ctx); return bRC_Error; } // Bacula API for metadata requires that a plugin // handle metadata buffer allocation char *ptr = (char *)bmalloc(len); memcpy(ptr, data.addr(), len); // add it to the list for reference to not loose it metadatas_list.append(ptr); metadatas.add_packet(type, len, ptr); sp->plug_meta = &metadatas; return bRC_OK; } /** * @brief Does metadata command scan and map to metadata types. * * @param cmd a command string read from backend * @return metadata_type returned from map */ metadata_type METAPLUGIN::scan_metadata_type(bpContext *ctx, const POOL_MEM &cmd) { DMSG1(ctx, DDEBUG, "scan_metadata_type checking: %s\n", cmd.c_str()); for (int i = 0; plugin_metadata_map[i].command != NULL; i++) { if (bstrcmp(cmd.c_str(), plugin_metadata_map[i].command)){ DMSG2(ctx, DDEBUG, "match: %s => %d\n", plugin_metadata_map[i].command, plugin_metadata_map[i].type); return plugin_metadata_map[i].type; } } return plugin_meta_invalid; } const char * METAPLUGIN::prepare_metadata_type(metadata_type type) { for (int i = 0; plugin_metadata_map[i].command != NULL; i++){ if (plugin_metadata_map[i].type == type){ return plugin_metadata_map[i].command; } } return "METADATA_STREAM\n"; } /* * Sends ACL data from restore stream to backend. * TODO: The method has a limitation and accept a single xacl_pkt call for * a single file. As the restored acl stream and records are the same as * was saved during backup, you can expect no more then a single PM_BSOCK * and about 64kB of acl data send to backend. 
* * in: * bpContext - for Bacula debug and jobinfo messages * xacl_pkt - the restored ACL data for backend * out: * bRC_OK - when ACL data was restored successfully * bRC_Error - on any error during acl data restore */ bRC METAPLUGIN::perform_write_acl(bpContext* ctx, const xacl_pkt* xacl) { if (xacl->count > 0) { POOL_MEM cmd(PM_FNAME); /* send command ACL */ pm_strcpy(cmd, "ACL\n"); backend.ctx->write_command(ctx, cmd.c_str()); /* send acls data */ DMSG1(ctx, DINFO, "writeACL: %i\n", xacl->count); int rc = backend.ctx->write_data(ctx, xacl->content, xacl->count); if (rc < 0) { /* got some error */ return bRC_Error; } /* signal end of acls data to restore and get ack */ if (!backend.ctx->send_ack(ctx)) { return bRC_Error; } } return bRC_OK; } /* * Sends XATTR data from restore stream to backend. * TODO: The method has a limitation and accept a single xacl_pkt call for * a single file. As the restored acl stream and records are the same as * was saved during backup, you can expect no more then a single PM_BSOCK * and about 64kB of acl data send to backend. * * in: * bpContext - for Bacula debug and jobinfo messages * xacl_pkt - the restored XATTR data for backend * out: * bRC_OK - when XATTR data was restored successfully * bRC_Error - on any error during acl data restore */ bRC METAPLUGIN::perform_write_xattr(bpContext* ctx, const xacl_pkt* xacl) { if (xacl->count > 0) { POOL_MEM cmd(PM_FNAME); /* send command XATTR */ pm_strcpy(cmd, "XATTR\n"); backend.ctx->write_command(ctx, cmd.c_str()); /* send xattrs data */ DMSG1(ctx, DINFO, "writeXATTR: %i\n", xacl->count); int rc = backend.ctx->write_data(ctx, xacl->content, xacl->count); if (rc < 0) { /* got some error */ return bRC_Error; } /* signal end of xattrs data to restore and get ack */ if (!backend.ctx->send_ack(ctx)) { return bRC_Error; } } return bRC_OK; } /** * @brief The method works as a dispatcher for expected commands received from backend. * It handles a number of commands available at main dispatcher loop. 
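 *
 * An illustrative backend exchange handled by this dispatcher (the file name is
 * hypothetical; the command list below simply mirrors the handlers in the loop
 * that follows):
 * @code
 *   FNAME:/some/backend/file        -> next file to backup, method returns bRC_OK
 *   PLUGINOBJ:... / RESTOREOBJ:...  -> Plugin Object / Restore Object headers
 *   CHECK:... / CHECKGET:...        -> accurate mode queries
 *   ACCEPT:...                      -> AcceptFile() query (perform_accept_file)
 *   INCLUDE:... / STRIP:n           -> FileSet manipulation requests
 *   ACL / XATTR                     -> an ACL or XATTR data stream follows
 *   FileIndex                       -> current FileIndex query
 *   <EOD>                           -> no more files to backup, method returns bRC_OK
 * @endcode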
* * @param ctx bpContext - for Bacula debug and jobinfo messages * @return bRC bRC_OK - when plugin read the command, dispatched a work and setup flags * bRC_Error - on any error during backup */ bRC METAPLUGIN::perform_read_metacommands(bpContext *ctx) { POOL_MEM cmd(PM_FNAME); DMSG0(ctx, DDEBUG, "perform_read_metacommands()\n"); // setup flags nextfile = readacl = readxattr = false; objectsent = false; // loop on metadata from backend or EOD which means no more files to backup while (true) { if (backend.ctx->read_command(ctx, cmd) > 0) { /* yup, should read FNAME, ACL or XATTR from backend, check which one */ DMSG(ctx, DDEBUG, "read_command(1): %s\n", cmd.c_str()); if (scan_parameter_str(cmd, "FNAME:", fname)) { /* got FNAME: */ nextfile = true; object = FileObject; return bRC_OK; } if (scan_parameter_str(cmd, "PLUGINOBJ:", fname)) { /* got Plugin Object header */ nextfile = true; object = PluginObject; // pluginobject = true; return bRC_OK; } if (scan_parameter_str(cmd, "RESTOREOBJ:", fname)) { /* got Restore Object header */ nextfile = true; object = RestoreObject; // restoreobject = true; return bRC_OK; } if (scan_parameter_str(cmd, "CHECK:", fname)) { /* got accurate check query */ metaplugin::accurate::perform_accurate_check(ctx, backend.ctx, fname, lname, accurate_mode, accurate_mode_err); continue; } if (scan_parameter_str(cmd, "CHECKGET:", fname)) { /* got accurate get query */ metaplugin::accurate::perform_accurate_check_get(ctx, backend.ctx, fname, lname, accurate_mode, accurate_mode_err); continue; } if (scan_parameter_str(cmd, "ACCEPT:", fname)) { /* got AcceptFile() query */ perform_accept_file(ctx); continue; } if (scan_parameter_str(cmd, "INCLUDE:", fname)) { /* got AddInclude() request */ perform_addinclude(ctx); continue; } int split_nr = -1; if (scan_parameter_int(cmd, "STRIP:", split_nr)) { /* got Split Option change request */ perform_change_split_option(ctx, split_nr); continue; } if (bstrcmp(cmd.c_str(), "ACL")){ /* got ACL header */ perform_read_acl(ctx); continue; } if (bstrcmp(cmd.c_str(), "XATTR")){ /* got XATTR header */ perform_read_xattr(ctx); continue; } if (bstrcmp(cmd.c_str(), "FileIndex")){ /* got FileIndex query */ perform_file_index_query(ctx); continue; } /* error in protocol */ DMSG(ctx, DERROR, "Protocol error, got unknown command: %s\n", cmd.c_str()); JMSG(ctx, M_FATAL, "Protocol error, got unknown command: %s\n", cmd.c_str()); // In protocol error situations, the communication has no chance to continue, so we need to kill the backend backendctx_cancel_func(backend.ctx, ctx); return bRC_Error; } else { if (backend.ctx->is_fatal()){ /* raise up error from backend */ return bRC_Error; } if (backend.ctx->is_eod()){ /* no more files to backup */ DMSG0(ctx, DDEBUG, "No more files to backup from backend.\n"); return bRC_OK; } } } return bRC_Error; } struct bacula_ctx { JCR *jcr; /* jcr for plugin */ bRC rc; /* last return code */ bool disabled; /* set if plugin disabled */ bool restoreFileStarted; bool createFileCalled; bool cancelCalled; /* true if the plugin got the cancel event */ void *exclude; /* pointer to exclude files */ void *include; /* pointer to include/exclude files */ }; /** * @brief Perform AddInclude() for change current FileSet. 
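 *
 * An illustrative backend request sequence (paths are hypothetical):
 * @code
 *   STRIP:2                    -> perform_change_split_option(ctx, 2)
 *   INCLUDE:/some/extra/path   -> this method creates a new Include and adds the path
 *   INCLUDE:/another/path      -> reuses the Include created above
 * @endcode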
* * @param ctx bpContext - for Bacula debug and jobinfo messages * @return bRC always return bRC_OK */ bRC METAPLUGIN::perform_addinclude(bpContext *ctx) { if (!new_include_created) { DMSG0(ctx, DDEBUG, "perform_addinclude():create new Include\n"); bfuncs->NewInclude(ctx); new_include_created = true; if (strip_path_option > 0) { POOL_MEM tmp; Mmsg(tmp, "fP%d:", strip_path_option); DMSG1(ctx, DDEBUG, "perform_addinclude():addoption:\"%s\"\n", tmp.c_str()); bfuncs->AddOptions(ctx, tmp.c_str()); // Force onefs=no and strip base path } } DMSG1(ctx, DDEBUG, "perform_addinclude():%s\n", fname.c_str()); bfuncs->AddInclude(ctx, fname.c_str()); pm_strcpy(fname, NULL); return bRC_OK; } /** * @brief Changes a current split option value and forces new Include creation. * * @param ctx bpContext - for Bacula debug and jobinfo messages * @param nr a new split option to set * @return bRC bRC always return bRC_OK */ bRC METAPLUGIN::perform_change_split_option(bpContext *ctx, int nr) { nr = nr < 0 ? 0 : nr; if (nr != strip_path_option) { DMSG2(ctx, DDEBUG, "perform_change_split_option():%d -> %d\n", strip_path_option, nr); strip_path_option = nr; new_include_created = false; } return bRC_OK; } /** * @brief Respond to the file index query command from backend. * * @param ctx bpContext - for Bacula debug and jobinfo messages * @return bRC bRC_OK when success, bRC_Error if not */ bRC METAPLUGIN::perform_file_index_query(bpContext *ctx) { POOL_MEM cmd(PM_FNAME); int32_t fileindex; getBaculaVar(bVarFileIndex, (void *)&fileindex); Mmsg(cmd, "%d\n", fileindex); if (backend.ctx->write_command(ctx, cmd) < 0){ /* error */ return bRC_Error; } return bRC_OK; } /** * @brief Perform a check if selected file should be accepted to backup. * * @param ctx bpContext - for Bacula debug and jobinfo messages * @return bRC bRC_OK when success, bRC_Error if not */ bRC METAPLUGIN::perform_accept_file(bpContext *ctx) { if (strlen(fname.c_str()) == 0){ // input variable is not valid return bRC_Error; } DMSG0(ctx, DDEBUG, "perform_accept_file()\n"); POOL_MEM cmd(PM_FNAME); struct save_pkt sp; memset(&sp, 0, sizeof(sp)); metaplugin::attributes::Status status = metaplugin::attributes::read_attributes_command(ctx, backend.ctx, cmd, &sp, lname); switch(status) { case metaplugin::attributes::Invalid_File_Type: JMSG2(ctx, M_ERROR, "Invalid file type: %c for %s\n", sp.type, fname.c_str()); return bRC_Error; case metaplugin::attributes::Invalid_Stat_Packet: JMSG1(ctx, backend.ctx->jmsg_err_level(), "Invalid stat packet: %s\n", cmd.c_str()); return bRC_Error; case metaplugin::attributes::Status_OK: { // success we can perform accurate check for stat packet sp.fname = fname.c_str(); bRC rc = bfuncs->AcceptFile(ctx, &sp); POOL_MEM checkstatus(PM_NAME); Mmsg(checkstatus, "%s\n", rc == bRC_Skip ? 
"SKIP" : "OK"); DMSG1(ctx, DINFO, "perform_accept_file(): %s", checkstatus.c_str()); if (!backend.ctx->write_command(ctx, checkstatus)) { DMSG0(ctx, DERROR, "Cannot send AcceptFile() response to backend\n"); JMSG0(ctx, backend.ctx->jmsg_err_level(), "Cannot send AcceptFile() response to backend\n"); return bRC_Error; } } break; default: JMSG2(ctx, M_ERROR, "Invalid accept file protocol: %d for %s\n", status, fname.c_str()); return bRC_Error; } return bRC_OK; } /** * @brief * * @param ctx bpContext - for Bacula debug and jobinfo messages * @param sp save_pkt from startBackupFile() * @return bRC bRC_OK when success, bRC_Error if not */ bRC METAPLUGIN::perform_read_pluginobject(bpContext *ctx, struct save_pkt *sp) { POOL_MEM cmd(PM_FNAME); if (strlen(fname.c_str()) == 0){ // input variable is not valid return bRC_Error; } sp->plugin_obj.path = fname.c_str(); sp->plugin_obj.status = PLUG_OBJ_STATUS_TERMINATED; // default success status DMSG0(ctx, DDEBUG, "perform_read_pluginobject()\n"); // loop on plugin objects parameters from backend and EOD while (true) { if (backend.ctx->read_command(ctx, cmd) > 0) { DMSG(ctx, DDEBUG, "read_command(3): %s\n", cmd.c_str()); if (scan_parameter_str(cmd, "PLUGINOBJ_CAT:", plugin_obj_cat)) { DMSG1(ctx, DDEBUG, "category: %s\n", plugin_obj_cat.c_str()); sp->plugin_obj.object_category = plugin_obj_cat.c_str(); continue; } if (scan_parameter_str(cmd, "PLUGINOBJ_TYPE:", plugin_obj_type)) { DMSG1(ctx, DDEBUG, "type: %s\n", plugin_obj_type.c_str()); sp->plugin_obj.object_type = plugin_obj_type.c_str(); continue; } if (scan_parameter_str(cmd, "PLUGINOBJ_NAME:", plugin_obj_name)) { DMSG1(ctx, DDEBUG, "name: %s\n", plugin_obj_name.c_str()); sp->plugin_obj.object_name = plugin_obj_name.c_str(); continue; } if (scan_parameter_str(cmd, "PLUGINOBJ_SRC:", plugin_obj_src)) { DMSG1(ctx, DDEBUG, "src: %s\n", plugin_obj_src.c_str()); sp->plugin_obj.object_source = plugin_obj_src.c_str(); continue; } if (scan_parameter_str(cmd, "PLUGINOBJ_UUID:", plugin_obj_uuid)) { DMSG1(ctx, DDEBUG, "uuid: %s\n", plugin_obj_uuid.c_str()); sp->plugin_obj.object_uuid = plugin_obj_uuid.c_str(); continue; } POOL_MEM param(PM_NAME); if (scan_parameter_str(cmd, "PLUGINOBJ_SIZE:", param)) { if (!size_to_uint64(param.c_str(), strlen(param.c_str()), &plugin_obj_size)) { // error in convert DMSG1(ctx, DERROR, "Cannot convert Plugin Object Size to integer! p=%s\n", param.c_str()); JMSG1(ctx, M_ERROR, "Cannot convert Plugin Object Size to integer! 
p=%s\n", param.c_str()); return bRC_Error; } DMSG1(ctx, DDEBUG, "size: %llu\n", plugin_obj_size); sp->plugin_obj.object_size = plugin_obj_size; continue; } if (scan_parameter_str(cmd, "PLUGINOBJ_COUNT:", param)) { uint32_t count = str_to_int64(param.c_str()); DMSG1(ctx, DDEBUG, "count: %lu\n", count); sp->plugin_obj.count = count; continue; } if (scan_parameter_str(cmd, "PLUGINOBJ_STATUS:", param)) { DMSG1(ctx, DDEBUG, "status: %s\n", param.c_str()); // OK, ERROR, WARNING, FATAL if (bstrcmp(param.c_str(), "OK")) { sp->plugin_obj.status = PLUG_OBJ_STATUS_TERMINATED; continue; } if (bstrcmp(param.c_str(), "WARNING")) { sp->plugin_obj.status = PLUG_OBJ_STATUS_WARNING; continue; } if (bstrcmp(param.c_str(), "ERROR")) { sp->plugin_obj.status = PLUG_OBJ_STATUS_ERROR; continue; } if (bstrcmp(param.c_str(), "FATAL")) { sp->plugin_obj.status = PLUG_OBJ_STATUS_FATAL; continue; } DMSG0(ctx, DDEBUG, "unknown status, setting error\n"); sp->plugin_obj.status = PLUG_OBJ_STATUS_ERROR; } /* error in protocol */ DMSG(ctx, DERROR, "Protocol error, got unknown command: %s\n", cmd.c_str()); JMSG(ctx, M_FATAL, "Protocol error, got unknown command: %s\n", cmd.c_str()); backendctx_cancel_func(backend.ctx, ctx); return bRC_Error; } else { if (backend.ctx->is_fatal()){ /* raise up error from backend */ return bRC_Error; } if (backend.ctx->is_eod()){ /* no more plugin object params to backup */ DMSG0(ctx, DINFO, "No more Plugin Object params from backend.\n"); objectsent = true; return bRC_OK; } } } return bRC_Error; } /** * @brief Receives a Restore Object data and populates save_pkt. * * @param ctx bpContext - for Bacula debug and jobinfo messages * @param sp save_pkt from startBackupFile() * @return bRC bRC_OK when success, bRC_Error if not */ bRC METAPLUGIN::perform_read_restoreobject(bpContext *ctx, struct save_pkt *sp) { POOL_MEM cmd(PM_FNAME); sp->restore_obj.object = NULL; if (strlen(fname.c_str()) == 0){ // input variable is not valid return bRC_Error; } DMSG0(ctx, DDEBUG, "perform_read_restoreobject()\n"); // read object length required param if (backend.ctx->read_command(ctx, cmd) > 0) { DMSG(ctx, DDEBUG, "read_command(4): %s\n", cmd.c_str()); POOL_MEM param(PM_NAME); uint64_t length; if (scan_parameter_str(cmd, "RESTOREOBJ_LEN:", param)) { if (!size_to_uint64(param.c_str(), strlen(param.c_str()), &length)){ // error in convert DMSG1(ctx, DERROR, "Cannot convert Restore Object length to integer! p=%s\n", param.c_str()); JMSG1(ctx, M_ERROR, "Cannot convert Restore Object length to integer! 
p=%s\n", param.c_str()); return bRC_Error; } DMSG1(ctx, DDEBUG, "size: %llu\n", length); sp->restore_obj.object_len = length; robjbuf.check_size(length + 1); } else { // no required param DMSG0(ctx, DERROR, "Cannot read Restore Object length!\n"); JMSG0(ctx, M_ERROR, "Cannot read Restore Object length!\n"); return bRC_Error; } } else { if (backend.ctx->is_fatal()){ /* raise up error from backend */ return bRC_Error; } } int32_t recv_len = 0; if (backend.ctx->recv_data(ctx, robjbuf, &recv_len) != bRC_OK) { DMSG0(ctx, DERROR, "Cannot read data from backend!\n"); return bRC_Error; } /* no more restore object data to backup */ DMSG0(ctx, DINFO, "No more Restore Object data from backend.\n"); objectsent = true; if (recv_len != sp->restore_obj.object_len) { DMSG2(ctx, DERROR, "Backend reported RO length:%ld read:%ld\n", sp->restore_obj.object_len, recv_len); JMSG2(ctx, M_ERROR, "Backend reported RO length:%ld read:%ld\n", sp->restore_obj.object_len, recv_len); sp->restore_obj.object_len = recv_len; } sp->restore_obj.object = robjbuf.c_str(); return bRC_OK; } /* * Handle Bacula Plugin I/O API for backend * * in: * bpContext - for Bacula debug and jobinfo messages * io - Bacula Plugin API I/O structure for I/O operations * out: * bRC_OK - when successful * bRC_Error - on any error * io->status, io->io_errno - correspond to a plugin io operation status */ bRC METAPLUGIN::pluginIO(bpContext *ctx, struct io_pkt *io) { static int rw = 0; // this variable handles single debug message { // synchronie access to job_cancelled variable // lock_guard lg(&mutex); - removed on request if (job_cancelled) { return bRC_Error; } } /* assume no error from the very beginning */ io->status = 0; io->io_errno = 0; switch (io->func) { case IO_OPEN: DMSG(ctx, D2, "IO_OPEN: (%s)\n", io->fname); switch (mode){ case BACKUP_FULL: case BACKUP_INCR: case BACKUP_DIFF: return perform_backup_open(ctx, io); case RESTORE: nodata = true; break; default: return bRC_Error; } break; case IO_READ: if (!rw) { rw = 1; DMSG2(ctx, D2, "IO_READ buf=%p len=%d\n", io->buf, io->count); } switch (mode){ case BACKUP_FULL: case BACKUP_INCR: case BACKUP_DIFF: return perform_read_data(ctx, io); default: return bRC_Error; } break; case IO_WRITE: if (!rw) { rw = 1; DMSG2(ctx, D2, "IO_WRITE buf=%p len=%d\n", io->buf, io->count); } switch (mode){ case RESTORE: return perform_write_data(ctx, io); default: return bRC_Error; } break; case IO_CLOSE: DMSG0(ctx, D2, "IO_CLOSE\n"); rw = 0; if (!backend.ctx->close_extpipe(ctx)){ return bRC_Error; } switch (mode){ case RESTORE: return perform_write_end(ctx, io); case BACKUP_FULL: case BACKUP_INCR: case BACKUP_DIFF: return perform_read_metacommands(ctx); default: return bRC_Error; } break; } return bRC_OK; } /* * Unimplemented, always return bRC_OK. */ bRC METAPLUGIN::getPluginValue(bpContext *ctx, pVariable var, void *value) { return bRC_OK; } /* * Unimplemented, always return bRC_OK. */ bRC METAPLUGIN::setPluginValue(bpContext *ctx, pVariable var, void *value) { return bRC_OK; } /* * Get all required information from backend to populate save_pkt for Bacula. * It handles a Restore Object (FT_PLUGIN_CONFIG) for every Full backup and * new Plugin Backup Command if setup in FileSet. It uses a help from * endBackupFile() handling the next FNAME command for the next file to * backup. The communication protocol requires some file attributes command * required it raise the error when insufficient parameters received from * backend. 
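 * An illustrative attribute exchange for a single regular file (values are
 * hypothetical; FNAME is consumed by perform_read_metacommands(), the remaining
 * commands by the attribute loop below, and the STAT/TSTAMP layout itself is
 * handled by the metaplugin::attributes helpers):
 *
 *    FNAME:/some/backend/file
 *    STAT:...        file type, size, mode and ownership
 *    TSTAMP:...      atime/mtime/ctime (optional)
 *    LSTAT:target    symlink target, required for FT_LNK only
 *    PIPE:/tmp/fifo  optional external pipe used for the data transfer
 *    <EOD>           end of attributes; file data is exchanged later via pluginIO()
 *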
It assumes some parameters at save_pkt struct to be automatically * set like: sp->portable, sp->statp.st_blksize, sp->statp.st_blocks. * * in: * bpContext - for Bacula debug and jobinfo messages * save_pkt - Bacula Plugin API save packet structure * out: * bRC_OK - when save_pkt prepared successfully and we have file to backup * bRC_Max - when no more files to backup * bRC_Error - in any error */ bRC METAPLUGIN::startBackupFile(bpContext *ctx, struct save_pkt *sp) { POOL_MEM cmd(PM_FNAME); int reqparams = 2; if (backend.is_ctx_null()) { JMSG0(ctx, M_FATAL, "Unable to use the backend properly\n"); return bRC_Error; } if (job_cancelled) { return bRC_Error; } if (!DONOTSAVE_FT_PLUGIN_CONFIG) { /* The first file in Full backup, is the RestoreObject */ if (!estimate && mode == BACKUP_FULL && pluginconfigsent == false) { ConfigFile ini; ini.register_items(plugin_items_dump, sizeof(struct ini_items)); sp->restore_obj.object_name = (char *)INI_RESTORE_OBJECT_NAME; sp->restore_obj.object_len = ini.serialize(robjbuf.handle()); sp->restore_obj.object = robjbuf.c_str(); sp->type = FT_PLUGIN_CONFIG; DMSG2(ctx, DINFO, "Prepared RestoreObject/%s (%d) sent.\n", INI_RESTORE_OBJECT_NAME, FT_PLUGIN_CONFIG); return bRC_OK; } } // check if this is the first file from backend to backup if (!nextfile){ // so read FNAME or EOD/Error if (perform_read_metacommands(ctx) != bRC_OK){ // signal error return bRC_Error; } if (!nextfile){ // got EOD, so no files to backup at all! // if we return a value different from bRC_OK then Bacula will finish // backup process, which at first call means no files to archive return bRC_Max; } } // setup required fname in save_pkt DMSG(ctx, DINFO, "fname:%s\n", fname.c_str()); sp->fname = fname.c_str(); switch (object) { case RestoreObject: // handle Restore Object parameters and data if (perform_read_restoreobject(ctx, sp) != bRC_OK) { // signal error return bRC_Error; } sp->restore_obj.object_name = fname.c_str(); sp->type = FT_RESTORE_FIRST; sp->statp.st_size = sp->restore_obj.object_len; sp->statp.st_mode = 0700 | S_IFREG; { time_t now = time(NULL); sp->statp.st_ctime = now; sp->statp.st_mtime = now; sp->statp.st_atime = now; } break; case PluginObject: // handle Plugin Object parameters if (perform_read_pluginobject(ctx, sp) != bRC_OK) { // signal error return bRC_Error; } sp->type = FT_PLUGIN_OBJECT; sp->statp.st_size = sp->plugin_obj.object_size; break; default: // here we handle standard file metadata information reqparams--; // ensure clear state for metadatas sp->plug_meta = NULL; metadatas.reset(); metadatas_list.destroy(); while (backend.ctx->read_command(ctx, cmd) > 0) { DMSG(ctx, DINFO, "read_command(2): %s\n", cmd.c_str()); metaplugin::attributes::Status status = metaplugin::attributes::read_scan_stat_command(ctx, cmd, sp, lname); switch (status) { case metaplugin::attributes::Status_Error: return bRC_Error; case metaplugin::attributes::Invalid_File_Type: JMSG2(ctx, M_ERROR, "Invalid file type: %c for %s\n", sp->type, fname.c_str()); return bRC_Error; case metaplugin::attributes::Invalid_Stat_Packet: JMSG1(ctx, backend.ctx->jmsg_err_level(), "Invalid stat packet: %s\n", cmd.c_str()); return bRC_Error; case metaplugin::attributes::Status_OK: if (sp->type != FT_LNK) { reqparams--; } continue; case metaplugin::attributes::Status_Handled: reqparams--; continue; default: break; } status = metaplugin::attributes::read_scan_tstamp_command(ctx, cmd, sp); switch (status) { case metaplugin::attributes::Status_OK: continue; default: break; } if (scan_parameter_str(cmd, "LSTAT:", 
lname) == 1) { sp->link = lname.c_str(); reqparams--; DMSG(ctx, DINFO, "LSTAT:%s\n", lname.c_str()); continue; } POOL_MEM tmp(PM_FNAME); if (scan_parameter_str(cmd, "PIPE:", tmp)) { /* handle PIPE command */ DMSG(ctx, DINFO, "read pipe at: %s\n", tmp.c_str()); int extpipe = open(tmp.c_str(), O_RDONLY); if (extpipe > 0) { DMSG0(ctx, DINFO, "ExtPIPE file available.\n"); backend.ctx->set_extpipe(extpipe); pm_strcpy(tmp, "OK\n"); backend.ctx->write_command(ctx, tmp.c_str()); } else { /* here are common error signaling */ berrno be; DMSG(ctx, DERROR, "ExtPIPE file open error! Err=%s\n", be.bstrerror()); JMSG(ctx, backend.ctx->jmsg_err_level(), "ExtPIPE file open error! Err=%s\n", be.bstrerror()); pm_strcpy(tmp, "Err\n"); backend.ctx->signal_error(ctx, tmp.c_str()); return bRC_Error; } continue; } metadata_type mtype = scan_metadata_type(ctx, cmd); if (mtype != plugin_meta_invalid) { DMSG1(ctx, DDEBUG, "metaData handling: %d\n", mtype); if (perform_read_metadata_info(ctx, mtype, sp) != bRC_OK) { DMSG0(ctx, DERROR, "Cannot perform_read_metadata_info!\n"); JMSG0(ctx, backend.ctx->jmsg_err_level(), "Cannot perform_read_metadata_info!\n"); return bRC_Error; } continue; } else { DMSG1(ctx, DERROR, "Invalid File Attributes command: %s\n", cmd.c_str()); JMSG1(ctx, backend.ctx->jmsg_err_level(), "Invalid File Attributes command: %s\n", cmd.c_str()); return bRC_Error; } } DMSG0(ctx, DINFO, "File attributes end.\n"); if (reqparams > 0) { DMSG0(ctx, DERROR, "Protocol error, not enough file attributes from backend.\n"); JMSG0(ctx, M_FATAL, "Protocol error, not enough file attributes from backend.\n"); // In protocol error situations, the communication has no chance to continue, so we need to kill the backend backendctx_cancel_func(backend.ctx, ctx); return bRC_Error; } break; } if (backend.ctx->is_error()) { return bRC_Error; } sp->portable = true; sp->statp.st_blksize = 4096; sp->statp.st_blocks = sp->statp.st_size / 4096 + 1; switch (sp->type) { case FT_REG: case FT_DIREND: case FT_LNK: case FT_SPEC: DMSG3(ctx, DINFO, "TSDebug: %ld(at) %ld(mt) %ld(ct)\n", sp->statp.st_atime, sp->statp.st_mtime, sp->statp.st_ctime); break; default: break; } return bRC_OK; } /* * Check for a next file to backup or the end of the backup loop. * The next file to backup is indicated by a FNAME command from backend and * no more files to backup as EOD. It helps startBackupFile handling FNAME * for next file. * * in: * bpContext - for Bacula debug and jobinfo messages * save_pkt - Bacula Plugin API save packet structure * out: * bRC_OK - when no more files to backup * bRC_More - when Bacula should expect a next file * bRC_Error - in any error */ bRC METAPLUGIN::endBackupFile(bpContext *ctx) { POOL_MEM cmd(PM_FNAME); { // synchronie access to job_cancelled variable // lock_guard lg(&mutex); - removed on request if (job_cancelled) { return bRC_Error; } } if (!estimate && !DONOTSAVE_FT_PLUGIN_CONFIG) { /* The current file was the restore object, so just ask for the next file */ if (mode == BACKUP_FULL && pluginconfigsent == false) { pluginconfigsent = true; return bRC_More; } } // check for next file only when no previous error if (!openerror) { if (estimate || objectsent) { objectsent = false; if (perform_read_metacommands(ctx) != bRC_OK) { /* signal error */ return bRC_Error; } } if (nextfile) { DMSG1(ctx, DINFO, "nextfile %s backup!\n", fname.c_str()); return bRC_More; } } return bRC_OK; } /* * The PLUGIN is using this callback to handle Core restore. 
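 * For every matching, not yet delivered object on restoreobject_list it sends
 * the object to the backend roughly as follows (an illustrative trace; the
 * object name and length are hypothetical):
 *
 *    RESTOREOBJ:RestoreOptions
 *    RESTOREOBJ_LEN:123
 *    <123 bytes of restore object data sent with send_data()>
 *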
*/ bRC METAPLUGIN::startRestoreFile(bpContext *ctx, const char *cmd) { if (restoreobject_list.size() > 0) { restore_object_class *ropclass; POOL_MEM backcmd(PM_FNAME); foreach_alist(ropclass, &restoreobject_list) { if (!ropclass->sent && strcmp(cmd, ropclass->plugin_name.c_str()) == 0) { Mmsg(backcmd, "RESTOREOBJ:%s\n", ropclass->object_name.c_str()); DMSG1(ctx, DINFO, "%s", backcmd.c_str()); ropclass->sent = true; if (!backend.ctx->write_command(ctx, backcmd.c_str())) { DMSG0(ctx, DERROR, "Error sending RESTOREOBJ command\n"); return bRC_Error; } Mmsg(backcmd, "RESTOREOBJ_LEN:%d\n", ropclass->length); if (!backend.ctx->write_command(ctx, backcmd.c_str())) { DMSG0(ctx, DERROR, "Error sending RESTOREOBJ_LEN command\n"); return bRC_Error; } /* send data */ if (backend.ctx->send_data(ctx, ropclass->data, ropclass->length) != bRC_OK) { DMSG0(ctx, DERROR, "Error sending RestoreObject data\n"); return bRC_Error; } } } } return bRC_OK; } /* * The PLUGIN is not using this callback to handle restore. */ bRC METAPLUGIN::endRestoreFile(bpContext *ctx) { return bRC_OK; } /* * Prepares a file to restore attributes based on data from restore_pkt. * It handles a response from backend to show if * * in: * bpContext - bacula plugin context * restore_pkt - Bacula Plugin API restore packet structure * out: * bRC_OK - when success reported from backend * rp->create_status = CF_EXTRACT - the backend will restore the file * with pleasure * rp->create_status = CF_SKIP - the backend wants to skip restoration, i.e. * the file already exist and Replace=n was set * bRC_Error, rp->create_status = CF_ERROR - in any error */ bRC METAPLUGIN::createFile(bpContext *ctx, struct restore_pkt *rp) { POOL_MEM cmd(PM_FNAME); // char type; { // synchronie access to job_cancelled variable // lock_guard lg(&mutex); - removed on request if (job_cancelled) { return bRC_Error; } } skipextract = false; acldatalen = 0; xattrdatalen = 0; if (CORELOCALRESTORE && islocalpath(where)) { DMSG0(ctx, DDEBUG, "createFile:Forwarding restore to Core\n"); rp->create_status = CF_CORE; } else { // FNAME:$fname$ Mmsg(cmd, "FNAME:%s\n", rp->ofname); backend.ctx->write_command(ctx, cmd); DMSG(ctx, DINFO, "createFile:%s", cmd.c_str()); // STAT:... metaplugin::attributes::make_stat_command(ctx, cmd, rp); backend.ctx->write_command(ctx, cmd); last_type = rp->type; DMSG(ctx, DINFO, "createFile:%s", cmd.c_str()); // TSTAMP:... 
if (metaplugin::attributes::make_tstamp_command(ctx, cmd, rp) == metaplugin::attributes::Status_OK) { backend.ctx->write_command(ctx, cmd); DMSG(ctx, DINFO, "createFile:%s", cmd.c_str()); } // LSTAT:$link$ if (rp->type == FT_LNK && rp->olname != NULL){ Mmsg(cmd, "LSTAT:%s\n", rp->olname); backend.ctx->write_command(ctx, cmd); DMSG(ctx, DINFO, "createFile:%s", cmd.c_str()); } backend.ctx->signal_eod(ctx); // check if backend accepted the file if (backend.ctx->read_command(ctx, cmd) > 0){ DMSG(ctx, DINFO, "createFile:resp: %s\n", cmd.c_str()); if (strcmp(cmd.c_str(), "OK") == 0){ rp->create_status = CF_EXTRACT; } else if (strcmp(cmd.c_str(), "SKIP") == 0){ rp->create_status = CF_SKIP; skipextract = true; } else if (strcmp(cmd.c_str(), "CORE") == 0){ rp->create_status = CF_CORE; } else { DMSG(ctx, DERROR, "Wrong backend response to create file, got: %s\n", cmd.c_str()); JMSG(ctx, backend.ctx->jmsg_err_level(), "Wrong backend response to create file, got: %s\n", cmd.c_str()); rp->create_status = CF_ERROR; return bRC_Error; } } else { if (backend.ctx->is_error()){ /* raise up error from backend */ rp->create_status = CF_ERROR; return bRC_Error; } } } return bRC_OK; } /* * Unimplemented, always return bRC_OK. */ bRC METAPLUGIN::setFileAttributes(bpContext *ctx, struct restore_pkt *rp) { return bRC_OK; } /** * @brief * * @param ctx * @param exepath * @return bRC */ void METAPLUGIN::setup_backend_command(bpContext *ctx, POOL_MEM &exepath) { DMSG(ctx, DINFO, "ExePath: %s\n", exepath.c_str()); Mmsg(backend_cmd, "%s/%s", exepath.c_str(), BACKEND_CMD); DMSG(ctx, DINFO, "BackendPath: %s\n", backend_cmd.c_str()); if (access(backend_cmd.c_str(), X_OK) < 0) { berrno be; DMSG2(ctx, DERROR, "Unable to use backend: %s Err=%s\n", backend_cmd.c_str(), be.bstrerror()); pm_strcpy(backend_error, be.bstrerror()); backend_available = false; } else { DMSG0(ctx, DINFO, "Backend available\n"); backend_available = true; } } /** * @brief * * @param ctx * @param xacl * @return bRC */ bRC METAPLUGIN::handleXACLdata(bpContext *ctx, struct xacl_pkt *xacl) { { // synchronie access to job_cancelled variable // lock_guard lg(&mutex); - removed on request if (job_cancelled) { return bRC_Error; } } switch (xacl->func) { case BACL_BACKUP: if (readacl) { DMSG0(ctx, DINFO, "bacl_backup\n"); xacl->count = acldatalen; xacl->content = acldata.c_str(); readacl= false; } else { xacl->count = 0; } break; case BACL_RESTORE: DMSG1(ctx, DINFO, "bacl_restore: %d\n", last_type); if (!skipextract) { if (last_type != FT_DIREND) { return perform_write_acl(ctx, xacl); } else { DMSG0(ctx, DDEBUG, "delay ACL stream restore\n"); acldatalen = xacl->count; pm_memcpy(acldata, xacl->content, acldatalen); } } break; case BXATTR_BACKUP: if (readxattr){ DMSG0(ctx, DINFO, "bxattr_backup\n"); xacl->count = xattrdatalen; xacl->content = xattrdata.c_str(); readxattr= false; } else { xacl->count = 0; } break; case BXATTR_RESTORE: DMSG1(ctx, DINFO, "bxattr_restore: %d\n", last_type); if (!skipextract) { if (last_type != FT_DIREND) { return perform_write_xattr(ctx, xacl); } else { DMSG0(ctx, DDEBUG, "delay XATTR stream restore\n"); xattrdatalen = xacl->count; pm_memcpy(xattrdata, xacl->content, xattrdatalen); } } break; } return bRC_OK; } /* * QueryParameter interface */ bRC METAPLUGIN::queryParameter(bpContext *ctx, struct query_pkt *qp) { DMSG0(ctx, D1, "METAPLUGIN::queryParameter\n"); // check if it is our Plugin command if (!isourplugincommand(PLUGINPREFIX, qp->command) != 0){ // it is not our plugin prefix return bRC_OK; } { // synchronie access to 
job_cancelled variable // lock_guard lg(&mutex); - removed on request if (job_cancelled) { return bRC_Error; } } POOL_MEM cmd(PM_MESSAGE); if (listing == None) { listing = Query; Mmsg(cmd, "%s query=%s", qp->command, qp->parameter); if (prepare_backend(ctx, BACKEND_JOB_INFO_ESTIMATE, cmd.c_str()) == bRC_Error){ return bRC_Error; } } /* read backend response */ char pkt = 0; int32_t pktlen = backend.ctx->read_any(ctx, &pkt, cmd); if (pktlen < 0) { DMSG(ctx, DERROR, "Cannot read backend query response for %s command.\n", qp->parameter); JMSG(ctx, backend.ctx->jmsg_err_level(), "Cannot read backend query response for %s command.\n", qp->parameter); return bRC_Error; } bRC ret = bRC_More; /* check EOD */ if (backend.ctx->is_eod()){ /* got EOD so the backend finish response, so terminate the chat */ DMSG0(ctx, D1, "METAPLUGIN::queryParameter: got EOD\n"); backend.ctx->signal_term(ctx); backend.ctx->terminate(ctx); qp->result = NULL; ret = bRC_OK; } else { switch (pkt) { case 'C': { OutputWriter ow(qp->api_opts); char *p, *q, *t; alist values(10, not_owned_by_alist); key_pair *kp; /* * here we have: * key=value[,key2=value2[,...]] * parameters we should decompose */ p = cmd.c_str(); while (*p != '\0') { q = strchr(p, ','); if (q != NULL) { *q++ = '\0'; } // single key=value DMSG(ctx, D1, "METAPLUGIN::queryParameter:scan %s\n", p); if ((t = strchr(p, '=')) != NULL) { *t++ = '\0'; } else { t = (char*)""; // pointer to empty string } DMSG2(ctx, D1, "METAPLUGIN::queryParameter:pair '%s' = '%s'\n", p, t); if (strlen(p) > 0) { // push values only when we have key name kp = New(key_pair(p, t)); values.append(kp); } p = q != NULL ? q : (char*)""; } // if more values then one then it is a list if (values.size() > 1) { DMSG0(ctx, D1, "METAPLUGIN::queryParameter: will render list\n") ow.start_list(qp->parameter); } // render all values foreach_alist(kp, &values) { ow.get_output(OT_STRING, kp->key.c_str(), kp->value.c_str(), OT_END); delete kp; } if (values.size() > 1) { ow.end_list(); } pm_strcpy(robjbuf, ow.get_output(OT_END)); qp->result = robjbuf.c_str(); } break; case 'D': pm_memcpy(robjbuf, cmd.c_str(), pktlen); qp->result = robjbuf.c_str(); break; default: DMSG(ctx, DERROR, "METAPLUGIN::queryParameter: got invalid packet: %c\n", pkt); JMSG(ctx, M_ERROR, "METAPLUGIN::queryParameter: got invalid packet: %c\n", pkt); backend.ctx->signal_term(ctx); backend.ctx->terminate(ctx); qp->result = NULL; ret = bRC_Error; break; } } return ret; } /** * @brief Sends metadata to backend for restore. * * @param ctx for Bacula debug and jobinfo messages * @param mp * @return bRC */ bRC METAPLUGIN::metadataRestore(bpContext *ctx, struct meta_pkt *mp) { { // synchronie access to job_cancelled variable // lock_guard lg(&mutex); - removed on request if (job_cancelled) { return bRC_Error; } } if (!skipextract){ POOL_MEM cmd(PM_FNAME); if (mp->buf != NULL && mp->buf_len > 0){ /* send command METADATA */ pm_strcpy(cmd, prepare_metadata_type(mp->type)); backend.ctx->write_command(ctx, cmd.c_str()); /* send metadata stream data */ DMSG1(ctx, DINFO, "writeMetadata: %i\n", mp->buf_len); int rc = backend.ctx->write_data(ctx, (char*)mp->buf, mp->buf_len); if (rc < 0){ /* got some error */ return bRC_Error; } // signal end of metadata stream to restore and get ack backend.ctx->signal_eod(ctx); // check if backend accepted the file if (backend.ctx->read_command(ctx, cmd) > 0) { DMSG(ctx, DINFO, "metadataRestore:resp: %s\n", cmd.c_str()); if (bstrcmp(cmd.c_str(), "SKIP")) { // SKIP! 
skipextract = true; return bRC_Skip; } if (!bstrcmp(cmd.c_str(), "OK")) { DMSG(ctx, DERROR, "Wrong backend response to metadataRestore, got: %s\n", cmd.c_str()); JMSG(ctx, backend.ctx->jmsg_err_level(), "Wrong backend response to metadataRestore, got: %s\n", cmd.c_str()); return bRC_Error; } } else { if (backend.ctx->is_error()) { // raise up error from backend return bRC_Error; } } } } return bRC_OK; } /** * @brief Implements default metaplugin checkFile() callback. * When fname match plugin configured namespace then it return bRC_Seen by default * or calls custom checkFile() callback defined by backend developer. * * @param ctx for Bacula debug and jobinfo messages * @param fname file name to check * @return bRC bRC_Seen or bRC_OK */ bRC METAPLUGIN::checkFile(bpContext * ctx, char *fname) { if ((!CUSTOMNAMESPACE && isourpluginfname(PLUGINPREFIX, fname)) || (CUSTOMNAMESPACE && isourpluginfname(PLUGINNAMESPACE, fname))) { // synchronie access to job_cancelled variable // lock_guard lg(&mutex); - removed on request if (!job_cancelled) { if (::checkFile != NULL) { return ::checkFile(ctx, fname); } } return bRC_Seen; } return bRC_OK; } /** * @brief Unconditionally terminates backend * This callback is used on cancel event handling. * * @param ptcomm the backend communication object * @param cp a bpContext - for Bacula debug and jobinfo messages * @return bRC bRC_OK when success */ bRC backendctx_termination_func(PTCOMM *ptcomm, void *cp) { bpContext * ctx = (bpContext*)cp; // terminate procedure // 1. wait default 5 sec or defined in CUSTOMCANCELSLEEP // 2. terminate the backend as usual pid_t pid = ptcomm->get_backend_pid(); DMSG(ctx, DINFO, "Preparing the backend termination on Cancel at PID=%d ...\n", pid) int32_t waitsleep = (CUSTOMCANCELSLEEP == 0) * 5 + CUSTOMCANCELSLEEP; bmicrosleep(waitsleep, 1); DMSG(ctx, DINFO, "Terminate backend at PID=%d\n", pid); ptcomm->terminate(ctx); return bRC_OK; } /** * @brief Conditionally terminate backends for cancelled job * * @param ctx a bpContext - for Bacula debug and jobinfo messages */ void METAPLUGIN::terminate_backends_oncancel(bpContext *ctx) { // lock_guard lg(&mutex); - removed on request if (job_cancelled) { DMSG0(ctx, DINFO, "Ensure backend termination on cancelled job\n"); backend.foreach_command_status(backendctx_termination_func, ctx); job_cancelled = false; } } /* * Called here to make a new instance of the plugin -- i.e. when * a new Job is started. There can be multiple instances of * each plugin that are running at the same time. Your * plugin instance must be thread safe and keep its own * local data. */ static bRC newPlugin(bpContext *ctx) { int JobId; char *exepath; METAPLUGIN *self = New(METAPLUGIN); POOL_MEM exepath_clean(PM_FNAME); if (!self) return bRC_Error; ctx->pContext = (void*) self; pthread_t mythid = pthread_self(); DMSG2(ctx, DVDEBUG, "pContext = %p thid = %p\n", self, mythid); /* setup the backend command */ getBaculaVar(bVarExePath, (void *)&exepath); DMSG(ctx, DINFO, "bVarExePath: %s\n", exepath); pm_strcpy(exepath_clean, exepath); strip_trailing_slashes(exepath_clean.c_str()); self->setup_backend_command(ctx, exepath_clean); getBaculaVar(bVarJobId, (void *)&JobId); DMSG(ctx, D1, "newPlugin JobId=%d\n", JobId); return bRC_OK; } /* * Release everything concerning a particular instance of * a plugin. Normally called when the Job terminates. 
*/ static bRC freePlugin(bpContext *ctx) { if (!ctx){ return bRC_Error; } METAPLUGIN *self = pluginclass(ctx); DMSG(ctx, D1, "freePlugin this=%p\n", self); if (!self){ return bRC_Error; } self->terminate_backends_oncancel(ctx); delete self; return bRC_OK; } /* * Called by core code to get a variable from the plugin. * Not currently used. */ static bRC getPluginValue(bpContext *ctx, pVariable var, void *value) { ASSERT_CTX; DMSG0(ctx, D3, "getPluginValue called.\n"); METAPLUGIN *self = pluginclass(ctx); return self->getPluginValue(ctx,var, value); } /* * Called by core code to set a plugin variable. * Not currently used. */ static bRC setPluginValue(bpContext *ctx, pVariable var, void *value) { ASSERT_CTX; DMSG0(ctx, D3, "setPluginValue called.\n"); METAPLUGIN *self = pluginclass(ctx); return self->setPluginValue(ctx, var, value); } /* * Called by Bacula when there are certain events that the * plugin might want to know. The value depends on the * event. */ static bRC handlePluginEvent(bpContext *ctx, bEvent *event, void *value) { ASSERT_CTX; pthread_t mythid = pthread_self(); METAPLUGIN *self = pluginclass(ctx); DMSG3(ctx, D1, "handlePluginEvent (%i) pContext = %p thid = %p\n", event->eventType, self, mythid); return self->handlePluginEvent(ctx, event, value); } /* * Called when starting to backup a file. Here the plugin must * return the "stat" packet for the directory/file and provide * certain information so that Bacula knows what the file is. * The plugin can create "Virtual" files by giving them * a name that is not normally found on the file system. */ static bRC startBackupFile(bpContext *ctx, struct save_pkt *sp) { ASSERT_CTX; if (!sp) { return bRC_Error; } DMSG0(ctx, D1, "startBackupFile.\n"); METAPLUGIN *self = pluginclass(ctx); return self->startBackupFile(ctx, sp); } /* * Done backing up a file. */ static bRC endBackupFile(bpContext *ctx) { ASSERT_CTX; DMSG0(ctx, D1, "endBackupFile.\n"); METAPLUGIN *self = pluginclass(ctx); return self->endBackupFile(ctx); } /* * Called when starting restore the file, right after a createFile(). */ static bRC startRestoreFile(bpContext *ctx, const char *cmd) { ASSERT_CTX; DMSG1(ctx, D1, "startRestoreFile: %s\n", NPRT(cmd)); METAPLUGIN *self = pluginclass(ctx); return self->startRestoreFile(ctx, cmd); } /* * Done restore the file. */ static bRC endRestoreFile(bpContext *ctx) { ASSERT_CTX; DMSG0(ctx, D1, "endRestoreFile.\n"); METAPLUGIN *self = pluginclass(ctx); return self->endRestoreFile(ctx); } /* * Do actual I/O. Bacula calls this after startBackupFile * or after startRestoreFile to do the actual file * input or output. */ static bRC pluginIO(bpContext *ctx, struct io_pkt *io) { ASSERT_CTX; DMSG0(ctx, DVDEBUG, "pluginIO.\n"); METAPLUGIN *self = pluginclass(ctx); return self->pluginIO(ctx, io); } /* * Called here to give the plugin the information needed to * re-create the file on a restore. It basically gets the * stat packet that was created during the backup phase. * This data is what is needed to create the file, but does * not contain actual file data. */ static bRC createFile(bpContext *ctx, struct restore_pkt *rp) { ASSERT_CTX; DMSG0(ctx, D1, "createFile.\n"); METAPLUGIN *self = pluginclass(ctx); return self->createFile(ctx, rp); } /* * Called after the file has been restored. This can be used to * set directory permissions, ... 
*/ static bRC setFileAttributes(bpContext *ctx, struct restore_pkt *rp) { ASSERT_CTX; DMSG0(ctx, D1, "setFileAttributes.\n"); METAPLUGIN *self = pluginclass(ctx); return self->setFileAttributes(ctx, rp); } /* * handleXACLdata used for ACL/XATTR backup and restore */ static bRC handleXACLdata(bpContext *ctx, struct xacl_pkt *xacl) { ASSERT_CTX; DMSG(ctx, D1, "handleXACLdata: %i\n", xacl->func); METAPLUGIN *self = pluginclass(ctx); return self->handleXACLdata(ctx, xacl); } /* QueryParameter interface */ static bRC queryParameter(bpContext *ctx, struct query_pkt *qp) { ASSERT_CTX; DMSG2(ctx, D1, "queryParameter: cmd:%s param:%s\n", qp->command, qp->parameter); METAPLUGIN *self = pluginclass(ctx); return self->queryParameter(ctx, qp); } /* Metadata Restore interface */ static bRC metadataRestore(bpContext *ctx, struct meta_pkt *mp) { ASSERT_CTX; DMSG2(ctx, D1, "metadataRestore: %d %d\n", mp->total_size, mp->type); METAPLUGIN *self = pluginclass(ctx); return self->metadataRestore(ctx, mp); } /* * checkFile used for accurate mode backup * * TODO: currently it is not working because a checking is performed against ldap plugin, not msad */ static bRC metaplugincheckFile(bpContext * ctx, char *fname) { ASSERT_CTX; DMSG(ctx, D3, "checkFile for: %s\n", fname); METAPLUGIN *self = pluginclass(ctx); return self->checkFile(ctx, fname); } bacula-15.0.3/src/plugins/fd/pluginlib/smartalist_test.cpp0000644000175000017500000000536114771010173023443 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /** * @file smartalist_test.cpp * @author Radosław Korzeniewski (radoslaw@korzeniewski.net) * @brief Common definitions and utility functions for Inteos plugins - unittest. * @version 1.1.0 * @date 2020-12-23 * * @copyright Copyright (c) 2020 All rights reserved. IP transferred to Bacula Systems according to agreement. 
*/ #include "pluginlib.h" #include "smartalist.h" #include "unittests.h" #include bFuncs *bfuncs; bInfo *binfo; static int referencenumber = 0; struct testclass : public SMARTALLOC { testclass() { referencenumber++; }; ~testclass() { referencenumber--; }; }; struct testvalue : public SMARTALLOC { int value; testvalue(const testvalue *other) { value = other->value; } testvalue() : value(0) {} }; int main() { Unittests pluglib_test("smartalist_test"); testclass *ti; // Pmsg0(0, "Initialize tests ...\n"); { smart_alist list; ti = New(testclass); list.append(ti); ok(referencenumber == 1, "check first insert"); } ok(referencenumber == 0, "check first smart free"); constexpr const int refs = 100; referencenumber = 0; { smart_alist list; for (int a = 0; a < refs; a++) { ti = New(testclass); list.append(ti); } ok(referencenumber == refs, "check bulk inserts"); } ok(referencenumber == 0, "check smart free"); { smart_alist list; testvalue *ti = New(testvalue); ti->value = 0xfff1; list.append(ti); smart_alist copylist; copylist = list; testvalue *tv = (testvalue*)copylist.first(); rok(tv != NULL, "test first value"); ok(tv->value == ti->value, "test copy value"); ok(tv != ti, "test copy pointer"); } // #if __cplusplus >= 201104 // { // smart_alist list; // // fill it with data // for (int i = 0; i < 10;i++) // { // testvalue *ti = New(testvalue); // ti->value = 100 + i; // list.append(ti); // } // ok(list.size() == 10, "test prefill data size"); // } // #endif return report(); } bacula-15.0.3/src/plugins/fd/pluginlib/ptcomm.h0000644000175000017500000002617014771010173021166 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /** * @file ptcomm.h * @author Radosław Korzeniewski (radoslaw@korzeniewski.net) * @brief This is a process communication lowlevel library for Bacula plugin. * @version 3.0.0 * @date 2021-08-20 * * @copyright Copyright (c) 2021 All rights reserved. IP transferred to Bacula Systems according to agreement. */ #ifndef _PTCOMM_H_ #define _PTCOMM_H_ #include "pluginlib.h" #define PTCOMM_DEFAULT_TIMEOUT 3600 // timeout waiting for data is 1H as some backends could spent it doing real work // TODO: I think we should move it to plugin configurable variable instead of a const #define PTCOMM_MAX_PACKET_SIZE 999999 /* * The protocol packet header. * Every packet exchanged between Plugin and Backend will have a special header * which allow to perfectly synchronize data exchange mitigating the risk * of a deadlock, where both ends will wait for a data and no one wants to * send it to the other end. * The protocol implements a single char packet status which could be: * D - data packet * C - command packet * E - error packet * F - EOD packet * T - terminate connection * W - warning message * I - information message * A - fatal error message (abort) * The length is an ascii coded decimal trailed by a "newline" char - '\n'. 
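 * A hypothetical encoder consistent with this layout (a sketch only; the real
 * encoding lives in the PTCOMM implementation and is not shown here):
 *
 *    char hdr[sizeof(PTHEADER) + 1];      // 8 wire bytes plus a terminating NUL
 *    // a maximum sized (PTCOMM_MAX_PACKET_SIZE) data packet -> "D999999\n"
 *    snprintf(hdr, sizeof(hdr), "%c%06d\n", 'D', 999999);
 *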
* So, a packet header could be rendered as: 'C000012\n' */ struct PTHEADER { char status; char length[7]; }; /* * This is a low-level transport communication class which handles all bits and * bytes of the protocol. * The class express a high-level methods for low-level transport protocol. * It handles a communication channel (bpipe) with backend execution and * termination. The external data exchange using named pipes or local files is * handled by this class too. */ class PTCOMM : public SMARTALLOC { private: BPIPE *bpipe; // this is our bpipe to communicate with backend */ int rfd; // backend `stdout` to plugin file descriptor int wfd; // backend `stdin` to plugin file descriptor int efd; // backend `stderr` to plugin file descriptor int maxfd; // max file descriptors from bpipe channels POOL_MEM errmsg; // message buffer for error string */ int extpipe; // set when data blast is performed using external pipe/file */ POOL_MEM extpipename; // name of the external pipe/file for restore */ bool f_eod; // the backend signaled EOD */ bool f_error; // the backend signaled an error */ bool f_fatal; // the backend signaled a fatal error */ bool f_cont; // when we are reading next part of data packet */ bool abort_on_error; // abort on error flag */ int32_t remaininglen; // the number of bytes to read when `f_cont` is true uint32_t m_timeout; // a timeout when waiting for data to read from backend, in seconds protected: bool recvbackend_data(bpContext *ctx, char *buf, int32_t nbytes); bool sendbackend_data(bpContext *ctx, const char *buf, int32_t nbytes); int32_t recvbackend_header(bpContext *ctx, char *cmd, bool any=false); int32_t handle_read_header(bpContext *ctx, char *cmd, bool any=false); int32_t handle_payload(bpContext *ctx, char *buf, int32_t nbytes); int32_t recvbackend(bpContext *ctx, char *cmd, POOL_MEM &buf, bool any=false); int32_t recvbackend_fixed(bpContext *ctx, char cmd, char *buf, int32_t bufsize); bool sendbackend(bpContext *ctx, char cmd, const POOLMEM *buf, int32_t len, bool _single_senddata = true); public: PTCOMM(const char * command = NULL) : bpipe(NULL), rfd(0), wfd(0), efd(0), maxfd(0), errmsg(PM_MESSAGE), extpipe(-1), extpipename(PM_FNAME), f_eod(false), f_error(false), f_fatal(false), f_cont(false), abort_on_error(false), remaininglen(0), m_timeout(PTCOMM_DEFAULT_TIMEOUT) {} #if __cplusplus > 201103L PTCOMM(PTCOMM &) = delete; PTCOMM(PTCOMM &&) = delete; #endif ~PTCOMM() { terminate(NULL); } bool handshake(bpContext *ctx, const char *pluginname, const char *pluginapi); int32_t read_command(bpContext *ctx, POOL_MEM &buf); int32_t read_any(bpContext *ctx, char *cmd, POOL_MEM &buf); int32_t read_data(bpContext *ctx, POOL_MEM &buf); int32_t read_data_fixed(bpContext *ctx, char *buf, int32_t len); // we have to force non-optimized sendbackend path as origin of `*buf` is unknown bool write_command(bpContext *ctx, const char *buf, bool _single_senddata = false); bRC send_data(bpContext *ctx, const char *buf, int32_t len, bool _single_senddata = false); bRC send_data(bpContext *ctx, POOL_MEM &buf, int32_t len) { return send_data(ctx, buf.addr(), len, true); } bRC recv_data(bpContext *ctx, POOL_MEM &buf, int32_t *recv_len=NULL); /** * @brief Sends a command to the backend. 
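 *
 * Illustrative use (a sketch; it follows the pattern used by the metaplugin
 * code, e.g. METAPLUGIN::perform_file_index_query()):
 * @code
 *   POOL_MEM cmd(PM_FNAME);
 *   Mmsg(cmd, "FNAME:%s\n", "/some/file");      // hypothetical command
 *   if (ptcomm->write_command(ctx, cmd) < 0) {  // ptcomm is a PTCOMM* instance
 *      return bRC_Error;                        // backend unreachable or protocol error
 *   }
 * @endcode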
* * @param ctx bpContext - for Bacula debug and jobinfo messages * @param buf a message buffer contains command to send * @return int32_t * -1 - when encountered any error * - the number of bytes sent, success */ int32_t write_command(bpContext *ctx, POOL_MEM &buf) { return write_command(ctx, buf.c_str(), true); } int32_t write_data(bpContext *ctx, const char *buf, int32_t len, bool _single_senddata = false); bool read_ack(bpContext *ctx); bool send_ack(bpContext *ctx); /** * @brief Signals en error to the backend. * The buf, when not NULL, can hold an error string sent do the backend. * * @param ctx for Bacula debug and jobinfo messages * @param buf when not NULL should consist of an error string * when NULL, no error string sent to the backend * @return true success * @return false when encountered any error */ inline bool signal_error(bpContext *ctx, const char * buf, bool _single_senddata = false) { int32_t len = buf ? strlen(buf) : 0; return sendbackend(ctx, 'E', buf, len, _single_senddata); } inline bool signal_error(bpContext *ctx, const POOL_MEM &buf) { return signal_error(ctx, buf.c_str(), true); } POOLMEM *get_error(bpContext *ctx); /** * @brief Signals EOD to backend. * * @param ctx bpContext - for Bacula debug and jobinfo messages * @return true success * @return false when encountered any error */ inline bool signal_eod(bpContext *ctx) { return sendbackend(ctx, 'F', NULL, 0); } /** * @brief Signal end of communication to the backend. * The backend should close the connection after receiving this packet. * * @param ctx bpContext - for Bacula debug and jobinfo messages * @return true success * @return false when encountered any error */ inline bool signal_term(bpContext *ctx) { return sendbackend(ctx, 'T', NULL, 0); } void terminate(bpContext *ctx); /** * @brief Returns a backend PID if available. * I'm using an arthrymetic way to make a conditional value return; * * @return int backend PID - when backend available; -1 - when backend is unavailable */ inline pid_t get_backend_pid() { return bpipe != NULL ? bpipe->worker_pid : -1; } /** * @brief Sets a BPIPE object for our main communication channel. * * @param bp object, we do not check for NULL here */ inline void set_bpipe(BPIPE *bp) { bpipe = bp; rfd = fileno(bpipe->rfd); wfd = fileno(bpipe->wfd); efd = fileno(bpipe->efd); maxfd = MAX(rfd, wfd); maxfd = MAX(maxfd, efd) + 1; } /** * @brief Sets a FILE descriptor used as external pipe during backup and restore. * * @param ep a FILE* descriptor used during */ inline void set_extpipe(int ep) { extpipe = ep; } /** * @brief Sets an external pipe name for restore. * * @param epname - external pipe name */ inline void set_extpipename(char *epname) { pm_strcpy(extpipename, epname); } bool close_extpipe(bpContext *ctx); /** * @brief Checks if connection is open and we can use a bpipe object for communication. * * @return true if connection is available * @return false if connection is closed and we can't use bpipe object */ inline bool is_open() { return bpipe != NULL; } /** * @brief Checks if connection is closed and we can't use a bpipe object for communication. * * @return true if connection is closed and we can't use bpipe object * @return false if connection is available */ inline bool is_closed() { return bpipe == NULL; } /** * @brief Checks if backend sent us some error, backend error message is flagged on f_error. 
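 *
 * Illustrative use (a sketch; ptcomm is a hypothetical PTCOMM* instance):
 * @code
 *   if (ptcomm->is_error()) {
 *      // jmsg_err_level() maps a fatal backend error to M_FATAL, otherwise M_ERROR
 *      JMSG0(ctx, ptcomm->jmsg_err_level(), "Backend reported an error.\n");
 *      return bRC_Error;
 *   }
 * @endcode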
* * @return true when last packet was an error * @return false when no error packets was received */ inline bool is_error() { return f_error || f_fatal; } /** * @brief Checks if backend sent us fatal error, backend error message is flagged on f_fatal. * * @return true when last packet was a fatal error * @return false when no fatal error packets was received */ inline bool is_fatal() { return f_fatal || (f_error && abort_on_error); } inline int jmsg_err_level() { return is_fatal() ? M_FATAL : M_ERROR; } /** * @brief Checks if backend signaled EOD, eod from backend is flagged on f_eod. * * @return true when backend signaled EOD on last packet * @return false when backend did not signal EOD */ inline bool is_eod() { return f_eod; } /** * @brief Clears the EOD from backend flag, f_eod. * The eod flag is set when EOD message received from backend and not cleared * until next recvbackend() call. */ void clear_eod() { f_eod = false; } /** * @brief Set the abort on error flag */ inline void set_abort_on_error() { abort_on_error = true; } /** * @brief Clears the abort on error flag. */ inline void clear_abort_on_error() { abort_on_error = false; } /** * @brief return abort on error flag status * * @return true if flag is set * @return false if flag is not set */ inline bool is_abort_on_error() { return abort_on_error; } /** * @brief Set the timeout value in seconds * * @param timeout a value in seconds */ inline void set_timeout(uint32_t timeout) { m_timeout = timeout > 0 ? timeout : PTCOMM_DEFAULT_TIMEOUT; } /** * @brief Get the timeout object * * @return uint32_t a current value of the PTCOMM Timeout */ inline uint32_t get_timeout() { return m_timeout; } }; #endif /* _PTCOMM_H_ */ bacula-15.0.3/src/plugins/fd/pluginlib/iso8601.h0000644000175000017500000000330714771010173020775 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /** * @file iso8601.h * @author Radosław Korzeniewski (radoslaw@korzeniewski.net) * @brief This is a Bacula plugin ISO8601 parsing library. * @version 1.2.0 * @date 2020-01-05 * * @copyright Copyright (c) 2021 All rights reserved. IP transferred to Bacula Systems according to agreement. 
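 *
 * Illustrative use (mirrors the vectors in iso8601_test.cpp):
 * @code
 *   ISO8601DateTime dt;
 *   utime_t t = dt.parse_data("2020-05-14T08:35:43Z");
 *   // per iso8601_test.cpp this input is expected to yield 1589445343
 * @endcode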
*/ #ifndef PLUGINLIB_ISO8601_H #define PLUGINLIB_ISO8601_H #include "pluginlib.h" #include "lib/bregex.h" // do not use regex for parsing #undef ISO8601_USE_REGEX class ISO8601DateTime : public SMARTALLOC { private: #ifdef ISO8601_USE_REGEX regex_t ISO8601_REGEX; regex_t TIMEZONE_REGEX; bool status; #endif public: #ifdef ISO8601_USE_REGEX ISO8601DateTime(); #else #if __cplusplus > 201103L ISO8601DateTime() = default; ~ISO8601DateTime() = default; #else ISO8601DateTime() {}; ~ISO8601DateTime() {}; #endif #endif utime_t parse_data(const char * datestring); inline utime_t parse_data(POOL_MEM &datestring) { return parse_data(datestring.c_str()); } #ifdef ISO8601_USE_REGEX inline bool isready() { return status; }; #endif }; #endif // PLUGINLIB_ISO8601_H bacula-15.0.3/src/plugins/fd/pluginlib/pluginbase.h0000644000175000017500000000347214771010173022020 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /** * @file pluginbase.cpp * @author Radosław Korzeniewski (radoslaw@korzeniewski.net) * @brief This is a Bacula File Daemon generic plugin interface. * @version 1.0.0 * @date 2021-04-08 * * @copyright Copyright (c) 2021 All rights reserved. IP transferred to Bacula Systems according to agreement. */ #include "pluginclass.h" #define USE_CMD_PARSER #include "fd_common.h" #ifndef PLUGINLIB_PLUGINBASE_H #define PLUGINLIB_PLUGINBASE_H // Plugin Info definitions extern const char *PLUGIN_LICENSE; extern const char *PLUGIN_AUTHOR; extern const char *PLUGIN_DATE; extern const char *PLUGIN_VERSION; extern const char *PLUGIN_DESCRIPTION; // Plugin linking time variables extern const char *PLUGINPREFIX; extern const char *PLUGINNAME; extern const char *PLUGINNAMESPACE; extern const bool CUSTOMNAMESPACE; extern const char *BACKEND_CMD; // custom checkFile() callback typedef bRC (*checkFile_t)(bpContext *ctx, char *fname); extern checkFile_t checkFile; // the list of valid plugin options extern const char *valid_params[]; struct metadataTypeMap { const char *command; metadata_type type; }; pluginlib::PLUGINBCLASS *new_plugin_factory(bpContext *ctx); #endif // PLUGINLIB_PLUGINBASE_H bacula-15.0.3/src/plugins/fd/pluginlib/metaplugin_accurate.h0000644000175000017500000000272614771010173023704 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /** * @file metaplugin_accurate.h * @author Radosław Korzeniewski (radoslaw@korzeniewski.net) * @brief This is an Accurate Mode handling subroutines for metaplugin. * @version 1.0.0 * @date 2021-09-20 * * @copyright Copyright (c) 2021 All rights reserved. IP transferred to Bacula Systems according to agreement. */ #ifndef _METAPLUGIN_ACCURATE_H_ #define _METAPLUGIN_ACCURATE_H_ #include "pluginlib.h" #include "ptcomm.h" namespace metaplugin { namespace accurate { bRC perform_accurate_check(bpContext *ctx, PTCOMM *ptcomm, POOL_MEM &fname, POOL_MEM &lname, bool accurate_mode, bool &accurate_mode_err); bRC perform_accurate_check_get(bpContext *ctx, PTCOMM *ptcomm, POOL_MEM &fname, POOL_MEM &lname, bool accurate_mode, bool &accurate_mode_err); } // namespace accurate } // namespace metaplugin #endif // _METAPLUGIN_ACCURATE_H_ bacula-15.0.3/src/plugins/fd/pluginlib/iso8601_test.cpp0000644000175000017500000000362314771010173022370 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /** * @file iso8601_test.cpp * @author Radosław Korzeniewski (radoslaw@korzeniewski.net) * @brief This is a Bacula plugin ISO8601 parsing library - unittest. * @version 1.2.0 * @date 2020-01-05 * * @copyright Copyright (c) 2021 All rights reserved. IP transferred to Bacula Systems according to agreement. */ #include "bacula.h" #include "unittests.h" #include "iso8601.h" struct testvect { const char *teststr; const utime_t test_time; }; static testvect tests[] = { {"20000101T23:01:02Z", 946767662}, {"20200514T08:35:43Z", 1589445343}, {"2020-05-14T08:35:43Z", 1589445343}, {"20200514T07:27:34Z", 1589441254}, {"2020-05-14T07:27:34Z", 1589441254}, {"2021-01-13T09:44:38Z", 1610531078}, // {"20210113T09:44:38 +400", 1610531078}, // {"2021-01-13T09:44:38+400", 1610531078}, {NULL, 0}, }; int main() { Unittests iso8601_test("iso8601_test"); ISO8601DateTime dt; const char *teststr = tests[0].teststr; utime_t test_time = tests[0].test_time; for (int a = 1; teststr != NULL; a++) { utime_t t = dt.parse_data(teststr); char test_descr[64]; snprintf(test_descr, 64, "Test %d", a); ok(t == test_time, test_descr); teststr = tests[a].teststr; test_time = tests[a].test_time; } return report(); } bacula-15.0.3/src/plugins/fd/pluginlib/pluginlib.cpp0000644000175000017500000005115214771010173022205 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /** * @file pluginlib.cpp * @author Radosław Korzeniewski (radoslaw@korzeniewski.net) * @brief Common definitions and utility functions for Inteos plugins. * @version 2.2.0 * @date 2021-04-26 * * Common definitions and utility functions for Inteos plugins. * Functions defines a common framework used in our utilities and plugins. * Author: Radosław Korzeniewski, radekk@inteos.pl, Inteos Sp. z o.o. */ #include "pluginlib.h" /* Events that are passed to plugin typedef enum { bEventJobStart = 1, bEventJobEnd = 2, bEventStartBackupJob = 3, bEventEndBackupJob = 4, bEventStartRestoreJob = 5, bEventEndRestoreJob = 6, bEventStartVerifyJob = 7, bEventEndVerifyJob = 8, bEventBackupCommand = 9, bEventRestoreCommand = 10, bEventEstimateCommand = 11, bEventLevel = 12, bEventSince = 13, bEventCancelCommand = 14, bEventVssBackupAddComponents = 15, bEventVssRestoreLoadComponentMetadata = 16, bEventVssRestoreSetComponentsSelected = 17, bEventRestoreObject = 18, bEventEndFileSet = 19, bEventPluginCommand = 20, bEventVssBeforeCloseRestore = 21, bEventVssPrepareSnapshot = 22, bEventOptionPlugin = 23, bEventHandleBackupFile = 24, bEventComponentInfo = 25 } bEventType; */ const char *eventtype2str(bEvent *event){ switch (event->eventType){ case bEventJobStart: return "bEventJobStart"; case bEventJobEnd: return "bEventJobEnd"; case bEventStartBackupJob: return "bEventStartBackupJob"; case bEventEndBackupJob: return "bEventEndBackupJob"; case bEventStartRestoreJob: return "bEventStartRestoreJob"; case bEventEndRestoreJob: return "bEventEndRestoreJob"; case bEventStartVerifyJob: return "bEventStartVerifyJob"; case bEventEndVerifyJob: return "bEventEndVerifyJob"; case bEventBackupCommand: return "bEventBackupCommand"; case bEventRestoreCommand: return "bEventRestoreCommand"; case bEventEstimateCommand: return "bEventEstimateCommand"; case bEventLevel: return "bEventLevel"; case bEventSince: return "bEventSince"; case bEventCancelCommand: return "bEventCancelCommand"; case bEventVssBackupAddComponents: return "bEventVssBackupAddComponents"; case bEventVssRestoreLoadComponentMetadata: return "bEventVssRestoreLoadComponentMetadata"; case bEventVssRestoreSetComponentsSelected: return "bEventVssRestoreSetComponentsSelected"; case bEventRestoreObject: return "bEventRestoreObject"; case bEventEndFileSet: return "bEventEndFileSet"; case bEventPluginCommand: return "bEventPluginCommand"; case bEventVssBeforeCloseRestore: return "bEventVssBeforeCloseRestore"; case bEventVssPrepareSnapshot: return "bEventVssPrepareSnapshot"; case bEventOptionPlugin: return "bEventOptionPlugin"; case bEventHandleBackupFile: return "bEventHandleBackupFile"; case bEventComponentInfo: return "bEventComponentInfo"; default: return "Unknown"; } } /* * Return the real size of the disk based on the size suffix. * * in: * disksize - the numeric value of the disk size to compute * suff - the suffix for a disksize value * out: * uint64_t - the size of the disk computed with suffix */ uint64_t pluglib_size_suffix(int disksize, char suff) { uint64_t size; switch (suff){ case 'G': size = (uint64_t)disksize * 1024 * 1048576; break; case 'M': size = (uint64_t)disksize * 1048576; break; case 'T': size = (uint64_t)disksize * 1048576 * 1048576; break; case 'K': case 'k': size = (uint64_t)disksize * 1024; break; default: size = disksize; } return size; } /* * Return the real size of the disk based on the size suffix. * This version uses a floating point numbers (double) for computation. 
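 *
 * Example (values follow directly from the switch below):
 *   pluglib_size_suffix(1.5, 'G') returns 1610612736  (1.5 * 1024.0 * 1048576.0)
 *   pluglib_size_suffix(2.0, 'M') returns 2097152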
* * in: * disksize - the numeric value of the disk size to compute * suff - the suffix for a disksize value * out: * uint64_t - the size of the disk computed with suffix */ uint64_t pluglib_size_suffix(double disksize, char suff) { uint64_t size; switch (suff){ case 'G': size = disksize * 1024.0 * 1048576.0; break; case 'M': size = disksize * 1048576.0; break; case 'T': size = disksize * 1048576.0 * 1048576.0; break; case 'K': case 'k': size = disksize * 1024.0; break; default: size = disksize; } return size; } /* * Creates a path hierarchy on local FS. * It is used for local restore mode to create a required directory. * The functionality is similar to 'mkdir -p'. * * TODO: make a support for relative path * TODO: check if we can use findlib/makepath implementation instead * * in: * bpContext - for Bacula debug and jobinfo messages * path - a full path to create, does not check if the path is relative, * could fail in this case * out: * bRC_OK - path creation was successful * bRC_Error - on any error */ bRC pluglib_mkpath(bpContext* ctx, char* path, bool isfatal) { #ifdef PLUGINPREFIX #define _OLDPREFIX PLUGINPREFIX #endif #define PLUGINPREFIX "pluglibmkpath:" struct stat statp; POOL_MEM dir(PM_FNAME); char *p, *q; if (!path){ return bRC_Error; } if (stat(path, &statp) == 0){ if (S_ISDIR(statp.st_mode)){ return bRC_OK; } else { DMSG(ctx, DERROR, "Path %s is not directory\n", path); JMSG(ctx, isfatal ? M_FATAL : M_ERROR, "Path %s is not directory\n", path); return bRC_Error; } } DMSG(ctx, DDEBUG, "mkpath verify dir: %s\n", path); pm_strcpy(dir, path); p = dir.addr() + 1; while (*p && (q = strchr(p, (int)PathSeparator)) != NULL){ *q = 0; DMSG(ctx, DDEBUG, "mkpath scanning(1): %s\n", dir.c_str()); if (stat(dir.c_str(), &statp) == 0){ *q = PathSeparator; p = q + 1; continue; } DMSG0(ctx, DDEBUG, "mkpath will create dir(1).\n"); if (mkdir(dir.c_str(), 0750) < 0){ /* error */ berrno be; DMSG2(ctx, DERROR, "Cannot create directory %s Err=%s\n", dir.c_str(), be.bstrerror()); JMSG2(ctx, isfatal ? M_FATAL : M_ERROR, "Cannot create directory %s Err=%s\n", dir.c_str(), be.bstrerror()); return bRC_Error; } *q = PathSeparator; p = q + 1; } DMSG0(ctx, DDEBUG, "mkpath will create dir(2).\n"); if (mkdir(path, 0750) < 0){ /* error */ berrno be; DMSG2(ctx, DERROR, "Cannot create directory %s Err=%s\n", path, be.bstrerror()); JMSG2(ctx, isfatal ? M_FATAL : M_ERROR, "Cannot create directory %s Err=%s\n", path, be.bstrerror()); return bRC_Error; } DMSG0(ctx, DDEBUG, "mkpath finish.\n"); #ifdef _OLDPREFIX #define PLUGINPREFIX _OLDPREFIX #undef _OLDPREFIX #else #undef PLUGINPREFIX #endif return bRC_OK; } /** * @brief It splits a string `str` into a separate substrings split on `sep` character. * * @param str a string to split * @param sep split separator character * @return alist* newly allocated list of splitted substrings */ alist * plugutil_str_split_to_alist(const char * str, const char sep) { alist * list = New(alist(5, true)); plugutil_str_split_to_alist(*list, str, sep); return list; } /** * @brief It splits a string `str` into a separate substrings split on `sep` character. * * @param list a list used to populate split results * @param str a string to split * @param sep split separator character */ void plugutil_str_split_to_alist(alist *list, const char * str, const char sep) { plugutil_str_split_to_alist(*list, str, sep); } /** * @brief It splits a string `str` into a separate substrings split on `sep` character. Leading and trailing spaces are removed. 
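 *
 * Example (this is the cleanup described in the implementation below):
 *   splitting "item1: value" on ':' yields "item1" and "value",
 *   not "item1" and " value".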
* * @param list a list used to populate split results * @param str a string to split * @param sep split separator character */ void plugutil_str_split_to_alist(alist &list, const char * str, const char sep) { if (str != NULL && strlen(str) > 0) { POOL_MEM buf(PM_NAME); const char * p = str; const char * q; do { // search for separator char - sep q = strchr(p, sep); if (q == NULL){ // copy whole string from p to buf pm_strcpy(buf, p); } else { // copy string from p up to q pm_memcpy(buf, p, q - p + 1); buf.c_str()[q - p] = '\0'; p = q + 1; // next element } /* The following cleanup has been added to split strings such as * item1: value => 'item1', 'value' and not 'item1', ' value' */ strip_leading_space(buf.c_str()); strip_trailing_junk(buf.c_str()); // in buf we have splitted string part const char * s = bstrdup(buf.c_str()); list.append((void*)s); } while (q != NULL); } } /* * Render an external tool parameter for string value. * * in: * bpContext - for Bacula debug and jobinfo messages * param - a pointer to the param variable where we will render a parameter * pname - a name of the parameter to compare * fmt - a low-level parameter name * name - a name of the parameter from parameter list * value - a value to render * out: * True if parameter was rendered * False if it was not the parameter required */ bool render_param(POOLMEM **param, const char *pname, const char *fmt, const char *name, const char *value) { if (bstrcasecmp(name, pname)){ if (!*param){ *param = get_pool_memory(PM_NAME); Mmsg(*param, " -%s '%s' ", fmt, value); DMsg1(DDEBUG, "render param:%s\n", *param); } return true; } return false; } /* * Render an external tool parameter for string value. * * in: * bpContext - for Bacula debug and jobinfo messages * param - a pointer to the param variable where we will render a parameter * pname - a name of the parameter to compare * fmt - a low-level parameter name * name - a name of the parameter from parameter list * value - a value to render * out: * True if parameter was rendered * False if it was not the parameter required */ bool render_param(POOLMEM **param, const char *pname, const char *fmt, const char *name, const int value) { if (bstrcasecmp(name, pname)){ if (!*param){ *param = get_pool_memory(PM_NAME); Mmsg(*param, " -%s %d ", value); DMsg1(DDEBUG, "render param:%s\n", *param); } return true; } return false; } /** * @brief * * @param param * @param pname * @param name * @param value * @return true * @return false */ bool parse_param(POOL_MEM ¶m, const char *pname, const char *name, const char *value) { if (bstrcasecmp(name, pname)){ pm_strcpy(param, value); DMsg1(DDEBUG, "parse param:%s\n", param.c_str()); return true; } return false; }; /* * Setup XECOMMCTX parameter for boolean value. * * in: * bpContext - for Bacula debug and jobinfo messages * param - a pointer to the param variable where we will render a parameter * pname - a name of the parameter to compare * name - a name of the parameter from parameter list * value - a value to render * out: * True if parameter was rendered * False if it was not the parameter required */ // TODO: It should be called setup_param bool render_param(bool ¶m, const char *pname, const char *name, const bool value) { if (bstrcasecmp(name, pname)) { param = value; DMsg2(DDEBUG, "render param: %s=%s\n", pname, param ? "True" : "False"); return true; } return false; } /** * @brief Renders a parameter as a "key=value" string into a prepared buffer. 
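 *
 * Example output (one "key=value" line per call, as rendered by the handlers below):
 *   ini_store_str   with key "user"  and strval "bacula" renders "user=bacula\n"
 *   ini_store_int64 with key "limit" and int64val 1024   renders "limit=1024\n"
 *   ini_store_bool  with key "debug" and boolval true    renders "debug=1\n"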
* * @param param the place to render to * @param handler the handler determines the value type * @param key the "key" name * @param val the "value" to render * @return true if parameter rendering ok * @return false on rendering error */ bool render_param(POOL_MEM ¶m, INI_ITEM_HANDLER *handler, char *key, item_value val) { if (handler == ini_store_str){ Mmsg(param, "%s=%s\n", key, val.strval); } else if (handler == ini_store_int64){ Mmsg(param, "%s=%lld\n", key, val.int64val); } else if (handler == ini_store_bool){ Mmsg(param, "%s=%d\n", key, val.boolval ? 1 : 0); } else { DMsg1(DERROR, "Unsupported parameter handler for: %s\n", key); return false; } return true; } /** * @brief Set the up param value * * @param param the param variable where we will setup a parameter * @param pname a name of the parameter to compare * @param name a name of the parameter from parameter list * @param value a value to setup * @return true if parameter was handled * @return false if it was not the parameter required, and param was not changed */ bool setup_param(int32_t ¶m, const char *pname, const char *name, const int32_t value) { if (bstrcasecmp(name, pname)) { param = value; DMsg2(DDEBUG, "setup param: %s=%d\n", pname, param); return true; } return false; } /** * @brief Set the up param value * * @param param the param variable where we will setup a parameter * @param pname a name of the parameter to compare * @param name a name of the parameter from parameter list * @param value a value to setup * @return true if parameter was handled * @return false if it was not the parameter required, and param was not changed */ bool setup_param(bool ¶m, const char *pname, const char *name, const bool value) { if (bstrcasecmp(name, pname)) { param = value; DMsg2(DDEBUG, "render param: %s=%s\n", pname, param ? "True" : "False"); return true; } return false; } /** * @brief Set the up param value * * @param param the param variable where we will setup a parameter * @param pname a name of the parameter to compare * @param name a name of the parameter from parameter list * @param value a value to setup * @return true if parameter was handled * @return false if it was not the parameter required, and param was not changed */ bool setup_param(POOL_MEM ¶m, const char *pname, const char *name, const char *value) { if (bstrcasecmp(name, pname)) { pm_strcpy(param, value); DMsg2(DDEBUG, "setup param: %s=%s\n", pname, param.c_str()); return true; } return false; } /** * @brief Setup parameter for boolean from string value. * The parameter value will be false if value start with '0' character and * will be true in any other case. So, when a plugin will have a following: * param * param=xxx * param=1 * then a param will be set to true. * * @param param the param variable where we will render a parameter * @param pname a name of the parameter to compare * @param name a name of the parameter from parameter list * @param value a value to parse * @return true if parameter was parsed * @return false if it was not the parameter required */ bool parse_param(bool ¶m, const char *pname, const char *name, const char *value) { if (bstrcasecmp(name, pname)){ if (value && *value == '0'){ param = false; } else { param = true; } DMsg2(DINFO, "%s parameter: %s\n", name, param ? "True" : "False"); return true; } return false; } /** * @brief Setup Plugin parameter for integer from string value. 
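 *
 * Usage sketch (argk/argv names are illustrative):
 *   int port = 0;
 *   bool err;
 *   parse_param(port, "port", argk, argv, &err);
 *   // with argk="port" and argv="9104" this sets port=9104 and returns true;
 *   // a value outside the long range sets err=true and the function returns false.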
* * @param param the param variable where we will render a parameter * @param pname a name of the parameter to compare * @param name a name of the parameter from parameter list * @param value a value to render * @param err a pointer to error flag when conversion was unsuccessful, optional * @return true * @return false */ bool parse_param(int ¶m, const char *pname, const char *name, const char *value, bool * err) { // clear error flag when requested if (err != NULL) *err = false; if (value && bstrcasecmp(name, pname)){ /* convert str to integer */ long outparam = strtol(value, NULL, 10); if (outparam == LONG_MIN || outparam == LONG_MAX){ // error in conversion? if (errno == ERANGE){ // yes, error DMsg2(DERROR, "Invalid %s parameter: %s\n", name, value); // setup error flag if (err != NULL) *err = true; return false; } } param = outparam; DMsg2(DINFO, "%s parameter: %d\n", name, param); return true; } return false; } /* * Render and add a parameter for string value to alist. * When alist is NULL (uninitialized) then it creates a new list to use. * * in: * bpContext - for Bacula debug and jobinfo messages * list - pointer to alist class to use * pname - a name of the parameter to compare * name - a name of the parameter from parameter list * value - a value to render * out: * True if parameter was rendered * False if it was not the parameter required */ bool parse_param_add_str(alist **list, const char *pname, const char *name, const char *value) { POOLMEM *param; if (list != NULL){ if (bstrcasecmp(name, pname)){ if (!*list){ *list = New(alist(8, not_owned_by_alist)); } param = get_pool_memory(PM_NAME); pm_strcpy(param, value); (*list)->append(param); DMsg2(DDEBUG, "add param: %s=%s\n", name, param); return true; } } return false; } /** * @brief Scans for `prefix` in `cmd`, when match copy remaining to `param`. * * @param cmd - command string to can for prefix and extract parameter * @param prefix - prefix string to check * @param param - when `prefix` match then copy the remaining from `cmd` * @return true - when prefix match in the command * @return false - when not */ bool scan_parameter_str(const char * cmd, const char *prefix, POOL_MEM ¶m) { if (prefix != NULL) { int len = strlen(prefix); if (strncmp(cmd, prefix, len) == 0) { // prefix match, extract param pm_strcpy(param, cmd + len); strip_trailing_newline(param.c_str()); return true; } } return false; } bool scan_parameter_int(const char * cmd, const char *prefix, int ¶m) { POOL_MEM tmp; if (scan_parameter_str(cmd, prefix, tmp)) { param = atoi(tmp.c_str()); return true; } return false; } // ensure error message is terminated with newline and terminated with standard c-string nul void scan_and_terminate_str(POOL_MEM &buf, int msglen) { if (msglen >= 0){ // we will consume at most two chars more buf.check_size(msglen + 2); bool check = msglen > 0 ? buf.c_str()[msglen - 1] != '\n' : true; buf.c_str()[msglen] = check * '\n'; buf.c_str()[msglen + 1] = '\0'; } } namespace pluginlib { /** * @brief Render and add a parameter for string value to alist. * In this case POOL_MEM is created. 
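 *
 * Behaviour sketch (parameter values are illustrative): with pname="include",
 * name="include" and value="/etc" a new POOL_MEM holding "/etc" is appended to
 * `list` and true is returned; any other `name` leaves the list untouched and
 * false is returned.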
* * @param list a list to append * @param pname a name of the parameter to compare * @param name a name of the parameter from parameter list * @param value a value to render * @return true if parameter was rendered * @return false if it was not the parameter required */ bool parse_param_add_str(alist &list, const char *pname, const char *name, const char *value) { if (bstrcasecmp(name, pname)) { POOL_MEM *param = new POOL_MEM(PM_NAME); pm_strcpy(*param, value); list.append(param); DMsg2(DDEBUG, "add param: %s=%s\n", name, (*param).c_str()); return true; } return false; } } // namespace pluginlib bacula-15.0.3/src/plugins/fd/pluginlib/pluginclass.h0000644000175000017500000002713114771010173022211 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /** * @file pluginclass.h * @author Radosław Korzeniewski (radoslaw@korzeniewski.net) * @brief This is a Bacula File Daemon general plugin framework. The Class. * @version 1.0.0 * @date 2021-04-08 * * @copyright Copyright (c) 2021 All rights reserved. IP transferred to Bacula Systems according to agreement. */ #include "pluginlib.h" #include "lib/ini.h" #include "pluginlib/commctx.h" #include "pluginlib/smartalist.h" #define USE_CMD_PARSER #include "fd_common.h" #ifndef PLUGINLIB_PLUGINCLASS_H #define PLUGINLIB_PLUGINCLASS_H /// The list of restore options saved to the Plugin Config restore object. extern struct ini_items plugin_items_dump[]; /// defines if plugin should handle local filesystem restore with Bacula Core functions /// `false` means plugin will handle local restore itself /// `true` means Bacula Core functions will handle local restore extern const bool CORELOCALRESTORE; namespace pluginlib { /* * This is a main plugin API class. It manages a plugin context. * All the public methods correspond to a public Bacula API calls, even if * a callback is not implemented. 
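 *
 * A subclassing sketch (MYPLUGIN and the chosen overrides are illustrative):
 *
 *   class MYPLUGIN : public PLUGINBCLASS {
 *   public:
 *      MYPLUGIN(bpContext *bpctx) : PLUGINBCLASS(bpctx) {}
 *   protected:
 *      // override only the perform_*() hooks the plugin actually needs,
 *      // e.g. perform_backup_open() and perform_read_data() for backup I/O
 *   };
 *
 *   // new_plugin_factory() (declared in pluginbase.h) would return such an object.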
*/ class PLUGINBCLASS: public SMARTALLOC { public: enum MODE { None = 0, BackupFull, BackupIncr, BackupDiff, EstimateFull, EstimateIncr, EstimateDiff, Listing, QueryParams, Restore, }; enum OBJECT { FileObject, PluginObject, RestoreObject, }; virtual bRC getPluginValue(bpContext *ctx, pVariable var, void *value) { return bRC_OK; } virtual bRC setPluginValue(bpContext *ctx, pVariable var, void *value) { return bRC_OK; } virtual bRC handlePluginEvent(bpContext *ctx, bEvent *event, void *value); virtual bRC startBackupFile(bpContext *ctx, struct save_pkt *sp); virtual bRC endBackupFile(bpContext *ctx); virtual bRC startRestoreFile(bpContext *ctx, const char *cmd); virtual bRC endRestoreFile(bpContext *ctx); virtual bRC pluginIO(bpContext *ctx, struct io_pkt *io); virtual bRC createFile(bpContext *ctx, struct restore_pkt *rp); virtual bRC setFileAttributes(bpContext *ctx, struct restore_pkt *rp); virtual bRC checkFile(bpContext *ctx, char *fname); virtual bRC handleXACLdata(bpContext *ctx, struct xacl_pkt *xacl); virtual bRC queryParameter(bpContext *ctx, struct query_pkt *qp); virtual bRC metadataRestore(bpContext *ctx, struct meta_pkt *mp); void setup_plugin(bpContext *ctx); PLUGINBCLASS(bpContext *bpctx) : mode(None), JobId(0), JobName(NULL), since(0), where(NULL), regexwhere(NULL), replace(0), pluginconfigsent(false), object(FileObject), fname(PM_FNAME), lname(PM_FNAME), robjbuf(PM_MESSAGE), parser(), execpath(PM_FNAME), workingpath(PM_FNAME), job_cancelled(false), accurate_mode(0), m_listing_query(PM_NAME), m_listing_top_nr(-1), m_listing_func(-1) {} #if __cplusplus > 201103L PLUGINBCLASS() = delete; PLUGINBCLASS(PLUGINBCLASS&) = delete; PLUGINBCLASS(PLUGINBCLASS&&) = delete; #endif virtual ~PLUGINBCLASS() {} protected: MODE mode; /// Plugin mode of operation int JobId; /// Job ID char *JobName; /// Job name time_t since; /// Job since parameter char *where; /// the Where variable for restore job if set by user char *regexwhere; /// the RegexWhere variable for restore job if set by user char replace; /// the replace variable for restore job bool pluginconfigsent; /// set when Plugin Config was sent during Full backup OBJECT object; /// the object type to handdle POOL_MEM fname; /// current file name to backup (grabbed from backend) POOL_MEM lname; /// current LSTAT data if any POOL_MEM robjbuf; /// the buffer for restore object data cmd_parser parser; /// Plugin command parser POOL_MEM execpath; /// ready to use path where bacula binaries are located POOL_MEM workingpath; /// ready to use path for bacula working directory bool job_cancelled; /// it signal the metaplugin that job was cancelled int accurate_mode; /// if the job is accurate POOL_MEM m_listing_query; /// int m_listing_top_nr; /// int m_listing_func; /// virtual bRC parse_plugin_config(bpContext *ctx, restore_object_pkt *rop); virtual bRC parse_plugin_command(bpContext *ctx, const char *command); virtual bRC parse_plugin_restore_object(bpContext *ctx, restore_object_pkt *rop); virtual bRC handle_plugin_config(bpContext *ctx, restore_object_pkt *rop) { return bRC_OK; } virtual bRC handle_plugin_restore_object(bpContext *ctx, restore_object_pkt *rop) { return bRC_OK; } virtual bRC prepare_estimate(bpContext *ctx, char *command) { return bRC_OK; } virtual bRC prepare_backup(bpContext *ctx, char *command) { return bRC_OK; } virtual bRC prepare_restore(bpContext *ctx, char *command) { return bRC_OK; } virtual bRC prepare_command(bpContext *ctx, char *command) { return bRC_OK; } virtual bRC perform_backup_open(bpContext *ctx, struct 
io_pkt *io) { return bRC_OK; } virtual bRC perform_restore_open(bpContext *ctx, struct io_pkt *io) { return bRC_OK; } virtual bRC perform_read_data(bpContext *ctx, struct io_pkt *io) { return bRC_OK; } virtual bRC perform_write_data(bpContext *ctx, struct io_pkt *io) { return bRC_OK; } virtual bRC perform_seek_write(bpContext *ctx, struct io_pkt *io) { return bRC_OK; } virtual bRC perform_restore_close(bpContext *ctx, struct io_pkt *io) { return bRC_OK; } virtual bRC perform_backup_close(bpContext *ctx, struct io_pkt *io) { return bRC_OK; } virtual bRC perform_restore_create_file(bpContext *ctx, struct restore_pkt *rp) { return bRC_OK; } virtual bRC perform_restore_set_file_attributes(bpContext *ctx, struct restore_pkt *rp) { return bRC_OK; } virtual bRC perform_backup_check_file(bpContext *ctx, char *fname) { return bRC_Seen; } virtual bRC perform_acl_backup(bpContext *ctx, struct xacl_pkt *xacl) { return bRC_OK; } virtual bRC perform_acl_restore(bpContext *ctx, struct xacl_pkt *xacl) { return bRC_OK; } virtual bRC perform_xattr_backup(bpContext *ctx, struct xacl_pkt *xacl) { return bRC_OK; } virtual bRC perform_xattr_restore(bpContext *ctx, struct xacl_pkt *xacl) { return bRC_OK; } virtual bRC perform_query_parameter(bpContext *ctx, struct query_pkt *qp) { return bRC_OK; } virtual bRC perform_restore_metadata(bpContext *ctx, struct meta_pkt *mp) { return bRC_OK; } virtual bRC perform_jobstart(bpContext *ctx) { return bRC_OK; } virtual bRC perform_backupjobstart(bpContext *ctx) { return bRC_OK; } virtual bRC perform_restorejobstart(bpContext *ctx) { return bRC_OK; } virtual bRC perform_jobend(bpContext *ctx) { return bRC_OK; } virtual bRC perform_backupjobend(bpContext *ctx) { return bRC_OK; } virtual bRC perform_restorejobend(bpContext *ctx) { return bRC_OK; } virtual bRC perform_joblevel(bpContext *ctx, char lvl) { return bRC_OK; } virtual bRC perform_jobsince(bpContext *ctx) { return bRC_OK; } virtual bRC perform_jobendfileset(bpContext *ctx) { return bRC_OK; } virtual bRC perform_start_backup_file(bpContext *ctx, struct save_pkt *sp) { return bRC_Max; } virtual bRC perform_start_restore_file(bpContext *ctx, const char *cmd) { return bRC_OK; } virtual bRC perform_end_backup_file(bpContext *ctx) { return bRC_OK; } virtual bRC perform_end_restore_file(bpContext *ctx) { return bRC_OK; } virtual bRC perform_cancel_command(bpContext *ctx) { return bRC_OK; } virtual const char **get_listing_top_struct() { return NULL; } virtual void pluginctx_switch_command(const char *command) {} virtual bool pluginctx_check_command(const char *command) { return false; } virtual bool pluginctx_are_parameters_parsed() const { return false; } virtual void pluginctx_set_parameters_parsed() {} virtual void pluginctx_clear_abort_on_error() {} virtual void pluginctx_set_abort_on_error() {} virtual bRC pluginctx_parse_parameter(bpContext *ctx, const char *argk, const char *argv) { return bRC_Error; } virtual bRC pluginctx_parse_parameter(bpContext *ctx, ini_items &item) { return bRC_Error; } virtual bool pluginctx_is_abort_on_error() { return false; } virtual int pluginctx_jmsg_err_level() { return -1; } virtual bRC pluginctx_parse_plugin_config(bpContext *ctx, restore_object_pkt *rop) { return bRC_Error; } virtual bRC pluginctx_handle_restore_object(bpContext *ctx, restore_object_pkt *rop) { return bRC_Error; } }; /** * @brief A final template class instantinated with custom Plugin Context. 
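 *
 * Intended use (a sketch): a concrete plugin instantiates this template with
 * its own Plugin Context type as CTX; the pluginctx_* overrides below then
 * simply forward to the COMMCTX `pluginctx` member.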
* * @tparam CTX a custom Plugin Context to use */ template class PLUGINCLASS : public PLUGINBCLASS { public: PLUGINCLASS(bpContext *bpctx) : PLUGINBCLASS(bpctx) {} #if __cplusplus > 201103L PLUGINCLASS() = delete; PLUGINCLASS(PLUGINCLASS&) = delete; PLUGINCLASS(PLUGINCLASS&&) = delete; #endif virtual ~PLUGINCLASS() {} protected: COMMCTX pluginctx; /// the current plugin execution context virtual void pluginctx_switch_command(const char *command) { pluginctx.switch_command(command); } virtual bool pluginctx_check_command(const char *command) { return pluginctx.check_command(command); } virtual bool pluginctx_are_parameters_parsed() { return pluginctx->are_parameters_parsed(); } virtual void pluginctx_set_parameters_parsed() { pluginctx->set_parameters_parsed(); } virtual void pluginctx_clear_abort_on_error() { pluginctx->clear_abort_on_error(); } virtual void pluginctx_set_abort_on_error() { pluginctx->set_abort_on_error(); } virtual bRC pluginctx_parse_parameter(bpContext *ctx, const char *argk, const char *argv) { return pluginctx->parse_parameter(ctx, argk, argv); } virtual bRC pluginctx_parse_parameter(bpContext *ctx, ini_items &item) { return pluginctx->parse_parameter(ctx, item); } virtual bool pluginctx_is_abort_on_error() { return pluginctx->is_abort_on_error(); } virtual int pluginctx_jmsg_err_level() { return pluginctx->jmsg_err_level(); } virtual bRC pluginctx_parse_plugin_config(bpContext *ctx, restore_object_pkt *rop) { return pluginctx->parse_plugin_config(ctx, rop); } virtual bRC pluginctx_handle_restore_object(bpContext *ctx, restore_object_pkt *rop) { return pluginctx->handle_restore_object(ctx, rop); } }; } // namespace pluginlib #endif // PLUGINLIB_PLUGINCLASS_H bacula-15.0.3/src/plugins/fd/pluginlib/metaplugin_attributes.h0000644000175000017500000000347114771010173024301 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /** * @file metaplugin_attributes.h * @author Radosław Korzeniewski (radoslaw@korzeniewski.net) * @brief This is a Backend `STAT` command handling subroutines for metaplugin. * @version 1.0.0 * @date 2021-08-20 * * @copyright Copyright (c) 2021 All rights reserved. IP transferred to Bacula Systems according to agreement. 
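 *
 * The read_scan_* / make_* helpers declared below scan backend STAT/TSTAMP
 * command packets into Bacula save_pkt data and render them back from
 * restore_pkt data; callers dispatch on the returned Status value
 * (Status_OK, Status_Handled, Not_Command, Status_Error, ...).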
*/ #ifndef _METAPLUGIN_ATTRIBUTES_H_ #define _METAPLUGIN_ATTRIBUTES_H_ #include "pluginlib.h" #include "ptcomm.h" namespace metaplugin { namespace attributes { typedef enum { Status_OK, Invalid_Stat_Packet, Invalid_File_Type, Status_Handled, Not_Command, Status_Error, } Status; Status read_scan_stat_command(bpContext *ctx, POOL_MEM &cmd, struct save_pkt *sp, POOL_MEM &lname); Status make_stat_command(bpContext *ctx, POOL_MEM &cmd, const restore_pkt *rp); Status read_scan_tstamp_command(bpContext *ctx, POOL_MEM &cmd, struct save_pkt *sp); Status make_tstamp_command(bpContext *ctx, POOL_MEM &cmd, const restore_pkt *rp); Status read_attributes_command(bpContext *ctx, PTCOMM *ptcomm, POOL_MEM &cmd, struct save_pkt *sp, POOL_MEM &lname); } // attributes } // metaplugin #endif // _METAPLUGIN_ATTRIBUTES_H_ bacula-15.0.3/src/plugins/fd/pluginlib/execprog_test.cpp0000644000175000017500000000347314771010173023076 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /** * @file execprog_test.cpp * @author Radosław Korzeniewski (radoslaw@korzeniewski.net) * @brief This is a Bacula plugin external command execution context - unittest. * @version 1.2.0 * @date 2020-01-05 * * @copyright Copyright (c) 2021 All rights reserved. IP transferred to Bacula Systems according to agreement. */ #include "bacula.h" #include "unittests.h" #include "execprog.h" bFuncs *bfuncs; bInfo *binfo; int main() { Unittests iso8601_test("execprog_test"); EXECPROG execprog; nok(execprog.is_open(), "default is_open()"); ok(execprog.is_closed(), "default is_closed()"); nok(execprog.is_error(), "default is_error()"); nok(execprog.is_fatal(), "default is_fatal()"); nok(execprog.is_abort_on_error(), "default is_abort_on_error()"); nok(execprog.is_eod(), "default is_eod()"); nok(execprog.get_cmd_pid() > 0, "default get_cmd_pid()"); ok(execprog.execute_command(NULL, "ls -l /etc/passwd"), "execprog()"); POOL_MEM out(PM_MESSAGE); int rc = execprog.read_output(NULL, out); // Pmsg1(0, "out: %s\n", out.c_str()); ok(rc > 0, "read_output()"); ok(execprog.get_cmd_pid() > 0, "get_cmd_pid()"); execprog.terminate(NULL); return report(); } bacula-15.0.3/src/plugins/fd/pluginlib/ptcomm.cpp0000644000175000017500000007647714771010173021540 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /** * @file ptcomm.cpp * @author Radosław Korzeniewski (radoslaw@korzeniewski.net) * @brief This is a Bacula plugin library for interfacing with Metaplugin backend. * @version 2.0.0 * @date 2020-11-20 * * @copyright Copyright (c) 2020 All rights reserved. IP transferred to Bacula Systems according to agreement. */ #include "ptcomm.h" #include #include /* * libbac uses its own sscanf implementation which is not compatible with * libc implementation, unfortunately. * use bsscanf for Bacula sscanf flavor */ #ifdef sscanf #undef sscanf #endif /* * Closes external pipe if available (opened). * * in: * bpContext - for Bacula debug and jobinfo messages * out: * true - when closed without a problem * false - when got any error */ bool PTCOMM::close_extpipe(bpContext *ctx) { /* close expipe if used */ if (extpipe > 0){ int rc = close(extpipe); extpipe = -1; if (rc != 0){ berrno be; DMSG(ctx, DERROR, "Cannot close ExtPIPE. Err=%s\n", be.bstrerror()); JMSG(ctx, M_ERROR, "Cannot close ExtPIPE. Err=%s\n", be.bstrerror()); return false; } } return true; } /** * @brief Terminate the connection represented by BPIPE object. * it shows a debug and job messages when connection close is unsuccessful * and when ctx is available only. * * @param ctx bpContext - Bacula Plugin context required for debug/job messages to show, * it could be NULL in this case no messages will be shown */ void PTCOMM::terminate(bpContext *ctx) { if (is_closed()) { return; } DMSG0(ctx, DINFO, "Terminating backend ...\n"); struct timeval _timeout; _timeout.tv_sec = 0; _timeout.tv_usec = 1000; fd_set rfds; FD_ZERO(&rfds); FD_SET(efd, &rfds); int status = select(maxfd, &rfds, NULL, NULL, &_timeout); if (status != 0) { // no timeout waiting for data, so its plausible that some data are available if (FD_ISSET(efd, &rfds)) { // do read of error channel status = read(efd, errmsg.c_str(), errmsg.size() - 1); errmsg.c_str()[status] = '\0'; // terminate string strip_trailing_junk(errmsg.c_str()); if (status < 0) { /* show any error during message read */ berrno be; DMSG(ctx, DERROR, "BPIPE read error on error channel: ERR=%s\n", be.bstrerror()); JMSG(ctx, M_ERROR, "BPIPE read error on error channel: ERR=%s\n", be.bstrerror()); } else { // got data on error channel, report it DMSG1(ctx, DERROR, "Backend reported error: %s\n", errmsg.c_str()); JMSG1(ctx, M_ERROR, "Backend reported error: %s\n", errmsg.c_str()); } } } pid_t worker_pid = bpipe->worker_pid; status = close_bpipe(bpipe); bpipe = NULL; // indicate closed bpipe if (status && ctx) { /* error during close */ berrno be; DMSG(ctx, DERROR, "Error closing backend. Err=%s\n", be.bstrerror(status)); JMSG(ctx, M_ERROR, "Error closing backend. Err=%s\n", be.bstrerror(status)); } if (worker_pid) { /* terminate the backend */ DMSG(ctx, DINFO, "Killing backend with PID=%d\n", worker_pid); kill(worker_pid, SIGTERM); } if (extpipe > 0) { close_extpipe(ctx); } } /** * @brief Reads `nbytes` of data from backend into a buffer `buf`. * * This is a dedicated method for reading raw data from backend. * It reads exact `nbytes` number of bytes and stores it at `buf`. * It will not return until all requested data is ready or got error. * You have to use it when you known exact number of bytes to read from * the backend. The method handles errors and timeout reading data. 
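 *
 * Usage sketch: reading the fixed 8-byte protocol header, exactly as
 * recvbackend_header() below does it:
 *   PTHEADER header;
 *   if (!recvbackend_data(ctx, (char*)&header, sizeof(PTHEADER))) { ... }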
* * @param ctx - for Bacula debug jobinfo messages * @param buf - the memory buffer where we will read data * @param nbytes - the exact number of bytes to read into `buf` * @return true - when read was successful * @return false - on any error */ bool PTCOMM::recvbackend_data(bpContext *ctx, char *buf, int32_t nbytes) { int status; int rbytes = 0; int ebytes = nbytes; struct timeval _timeout; _timeout.tv_sec = m_timeout > 0 ? m_timeout : PTCOMM_DEFAULT_TIMEOUT; _timeout.tv_usec = 0; while (nbytes > 0) { fd_set rfds; FD_ZERO(&rfds); FD_SET(rfd, &rfds); FD_SET(efd, &rfds); status = select(maxfd, &rfds, NULL, NULL, &_timeout); if (status == 0) { // this means timeout waiting f_error = true; DMSG1(ctx, DERROR, "BPIPE read timeout=%d.\n", PTCOMM_DEFAULT_TIMEOUT); JMSG1(ctx, is_fatal() ? M_FATAL : M_ERROR, "BPIPE read timeout=%d.\n", PTCOMM_DEFAULT_TIMEOUT); return false; } // check if data descriptor is ready bool rfd_isset = FD_ISSET(rfd, &rfds); // check if any messages on error channel when data channel is empty if (FD_ISSET(efd, &rfds) && !rfd_isset) { // do read of error channel f_error = true; status = read(efd, errmsg.c_str(), errmsg.size() - 1); errmsg.c_str()[status] = '\0'; // terminate string strip_trailing_junk(errmsg.c_str()); if (status < 0) { /* show any error during message read */ berrno be; DMSG(ctx, DERROR, "BPIPE read error on error channel: ERR=%s\n", be.bstrerror()); JMSG(ctx, is_fatal() ? M_FATAL : M_ERROR, "BPIPE read error on error channel: ERR=%s\n", be.bstrerror()); } else { // got data on error channel, report it DMSG1(ctx, DERROR, "Backend reported error: %s\n", errmsg.c_str()); JMSG1(ctx, is_fatal() ? M_FATAL : M_ERROR, "Backend reported error: %s\n", errmsg.c_str()); } } // handle data read if (rfd_isset) { // do read of data status = read(rfd, buf + rbytes, nbytes); if (status < 0) { /* show any error during data read */ berrno be; f_error = true; DMSG(ctx, DERROR, "BPIPE read error: ERR=%s\n", be.bstrerror()); JMSG(ctx, is_fatal() ? M_FATAL : M_ERROR, "BPIPE read error: ERR=%s\n", be.bstrerror()); return false; } if (status == 0){ /* the backend closed the connection without terminate signal 'T' */ f_error = true; DMSG0(ctx, DERROR, "Backend closed the connection.\n"); JMSG0(ctx, is_fatal() ? M_FATAL : M_ERROR, "Backend closed the connection.\n"); return false; } nbytes -= status; rbytes += status; } } DMSG2(ctx, DDEBUG, "Data read. Expected=%d. Read=%d\n", ebytes, rbytes); return true; } /** * @brief Sends `nbytes` of data to backend from the buffer `buf`. * * This is a dedicated method for sending raw data to backend. * It writes exact `nbytes` number of bytes from `buf`. * It will not return until all requested data is sent or got error. * The method handles errors and timeout writing data. * * @param ctx - for Bacula debug jobinfo messages * @param buf - the memory buffer from we will read data * @param nbytes - the exact number of bytes to send to backend * @return true - when send was successful * @return false - on any error */ bool PTCOMM::sendbackend_data(bpContext *ctx, const char *buf, int32_t nbytes) { int status; int wbytes = 0; struct timeval _timeout; _timeout.tv_sec = m_timeout > 0 ? 
m_timeout : PTCOMM_DEFAULT_TIMEOUT; _timeout.tv_usec = 0; while (nbytes > 0) { fd_set rfds; fd_set wfds; FD_ZERO(&rfds); FD_ZERO(&wfds); FD_SET(efd, &rfds); FD_SET(wfd, &wfds); status = select(maxfd, &rfds, &wfds, NULL, &_timeout); if (status == 0) { // this means timeout waiting f_error = true; DMSG1(ctx, DERROR, "BPIPE write timeout=%d.\n", _timeout.tv_sec); JMSG1(ctx, is_fatal() ? M_FATAL : M_ERROR, "BPIPE write timeout=%d.\n", _timeout.tv_sec); return false; } // check if any data on error channel if (FD_ISSET(efd, &rfds)) { // do read of error channel f_error = true; status = read(efd, errmsg.c_str(), errmsg.size()); if (status < 0) { /* show any error during message read */ berrno be; DMSG(ctx, DERROR, "BPIPE read error on error channel: ERR=%s\n", be.bstrerror()); JMSG(ctx, is_fatal() ? M_FATAL : M_ERROR, "BPIPE read error on error channel: ERR=%s\n", be.bstrerror()); } else { // got data on error channel, report it DMSG1(ctx, DERROR, "Backend reported error: %s\n", errmsg.c_str()); JMSG1(ctx, is_fatal() ? M_FATAL : M_ERROR, "Backend reported error: %s\n", errmsg.c_str()); } } // check if data descriptor is ready if (FD_ISSET(wfd, &wfds)) { // do write of data status = write(wfd, buf + wbytes, nbytes); if (status < 0) { /* show any error during data write */ berrno be; f_error = true; DMSG(ctx, DERROR, "BPIPE write error: ERR=%s\n", be.bstrerror()); JMSG(ctx, is_fatal() ? M_FATAL : M_ERROR, "BPIPE write error: ERR=%s\n", be.bstrerror()); return false; } nbytes -= status; wbytes += status; } } return true; } /** * @brief Reads a protocol header from backend and return payload length. * * This method should be used at the start of every read from backend. * It handles a full protocol chatting, i.e. error, warning and information * messages besides EOD or termination. * * @param ctx - for Bacula debug jobinfo messages * @param cmd - an expected command to read: `C` or `D` * @return int32_t - the size of the packet payload */ int32_t PTCOMM::recvbackend_header(bpContext *ctx, char *cmd, bool any) { if (is_closed()) { DMSG0(ctx, DERROR, "BPIPE to backend is closed, cannot receive data.\n"); JMSG0(ctx, is_fatal() ? M_FATAL : M_ERROR, "BPIPE to backend is closed, cannot receive data.\n"); return -1; } if (cmd == NULL) { DMSG0(ctx, DERROR, "Runtime error. cmd == NULL. Cannot read data.\n"); JMSG0(ctx, is_fatal() ? M_FATAL : M_ERROR, "Runtime error. cmd == NULL. Cannot read data.\n"); return -1; } PTHEADER header; bool workdone = false; f_eod = f_error = f_fatal = false; int32_t nbytes = sizeof(PTHEADER); while (!workdone){ if (!recvbackend_data(ctx, (char*)&header, nbytes)) { DMSG0(ctx, DERROR, "PTCOMM cannot get packet header from backend.\n"); JMSG0(ctx, M_FATAL, "PTCOMM cannot get packet header from backend.\n"); f_eod = f_error = f_fatal = true; return -1; } // some packet commands require data header.length[6] = 0; /* end of string */ DMSG2(ctx, DDEBUG, "HEADERRECV: %c %s\n", header.status, header.length); /* check for protocol status */ if (header.status == 'F'){ /* signal EOD */ f_eod = true; return 0; } if (header.status == 'T'){ /* backend signaled a connection termination */ terminate(ctx); return 0; } // convert packet length from ASCII to binary int32_t msglen = atoi(header.length); if (header.status == 'C' || header.status == 'D') { if (!any) { if (header.status != *cmd) { DMSG3(ctx, DERROR, "Protocol error. Expected packet: %c got: %c:%s\n", *cmd, header.status, header.length); JMSG3(ctx, M_FATAL, "Protocol error. 
Expected packet: %c got: %c:%s\n", *cmd, header.status, header.length); return -1; } } else { *cmd = header.status; } // this means no additional handling required return msglen; } // need a space for nul and newline char at the end of the message errmsg.check_size(msglen + 2); // read the rest of the package if (!recvbackend_data(ctx, errmsg.c_str(), msglen)) { DMSG0(ctx, DERROR, "PTCOMM cannot get message from backend.\n"); JMSG0(ctx, M_FATAL, "PTCOMM cannot get message from backend.\n"); return -1; } // ensure error message is terminated with newline and terminated with standard c-string nul scan_and_terminate_str(errmsg, msglen); switch (header.status) { /* backend signal errors */ case 'E': case 'A': /* setup error flags */ f_error = true; f_fatal = header.status == 'A'; /* show error to Bacula */ DMSG(ctx, DERROR, "Backend Error: %s", errmsg.c_str()); JMSG(ctx, f_fatal ? M_FATAL : M_ERROR, "%s", errmsg.c_str()); workdone = true; break; // handle warning and info messages below case 'W': // handle warning message DMSG(ctx, DERROR, "%s", errmsg.c_str()); JMSG(ctx, M_WARNING, "%s", errmsg.c_str()); continue; case 'I': // handle information message DMSG(ctx, DINFO, "%s", errmsg.c_str()); JMSG(ctx, M_INFO, "%s", errmsg.c_str()); continue; case 'S': // handle saved message DMSG(ctx, DDEBUG, "%s", errmsg.c_str()); JMSG(ctx, M_SAVED, "%s", errmsg.c_str()); continue; case 'N': // handle not-saved message DMSG(ctx, DDEBUG, "%s", errmsg.c_str()); JMSG(ctx, M_NOTSAVED, "%s", errmsg.c_str()); continue; case 'R': // handle restored message DMSG(ctx, DINFO, "%s", errmsg.c_str()); JMSG(ctx, M_RESTORED, "%s", errmsg.c_str()); continue; case 'P': // handle skipped message DMSG(ctx, DINFO, "%s", errmsg.c_str()); JMSG(ctx, M_SKIPPED, "%s", errmsg.c_str()); continue; case 'O': // handle operator message (now it is only M_MOUNT) DMSG(ctx, DINFO, "%s", errmsg.c_str()); JMSG(ctx, M_MOUNT, "%s", errmsg.c_str()); continue; case 'V': // handle event message DMSG(ctx, DINFO, "%s", errmsg.c_str()); JMSG(ctx, M_EVENTS, "%s", errmsg.c_str()); continue; case 'Q': // handle event message DMSG(ctx, DERROR, "%s", errmsg.c_str()); JMSG(ctx, M_ERROR, "%s", errmsg.c_str()); continue; default: DMSG2(ctx, DERROR, "Protocol error. Unknown packet: %c:%s\n", header.status, header.length); JMSG2(ctx, M_FATAL, "Protocol error. Unknown packet: %c:%s\n", header.status, header.length); return -1; } } return -1; } /** * @brief Handles a receive (read) packet header. * * @param ctx for Bacula debug jobinfo messages * @param cmd the expected command to receive or received * @param any when true then it accepts any command received and saves it in `cmd` * @return int32_t the packet length encoded in header */ int32_t PTCOMM::handle_read_header(bpContext *ctx, char *cmd, bool any) { // first read is the packet header where we will have info about data // which is sent to us; the packet header is 8 chars/bytes length fixed // nbytes shows how many bytes we expects to read int32_t length = recvbackend_header(ctx, cmd, any); if (length < 0) { // error DMSG0(ctx, DERROR, "PTCOMM cannot get packet header from backend.\n"); JMSG0(ctx, is_fatal() ? M_FATAL : M_ERROR, "PTCOMM cannot get packet header from backend.\n"); f_eod = f_error = f_fatal = true; return -1; } return length; } /** * @brief Handles a payload (message) which comes after the header. 
* * @param ctx bpContext - for Bacula debug jobinfo messages * @param buf - the POOLMEM buffer we will read data * @param nbytes - the size of the fized buffer * @return int32_t * 0: when backend sent signal, i.e. EOD or Term * -1: when we've got any error; the function will report it to Bacula when * ctx is not NULL * : the size of received message */ int32_t PTCOMM::handle_payload(bpContext *ctx, char *buf, int32_t nbytes) { // handle raw data read as payload if(!recvbackend_data(ctx, buf, nbytes)){ // error DMSG0(ctx, DERROR, "PTCOMM cannot get packet payload from backend.\n"); JMSG0(ctx, is_fatal() ? M_FATAL : M_ERROR, "PTCOMM cannot get packet payload from backend.\n"); f_eod = f_error = f_fatal = true; return -1; } char bindata[32]; DMSG1(ctx, DDEBUG, "RECV> %s\n", asciidump(buf, nbytes, bindata, 32)); return nbytes; } /** * @brief Receive a packet from the backend. * * The caller expects a packet of a particular type (`cmd`) and we return * from function only when we will receive this kind of packet or get any error. * The `buf` will be extended if message extent current buffer size. * * @param ctx bpContext - for Bacula debug jobinfo messages * @param cmd the packet type expected * @param buf the POOL_MEM buffer we will read data * @return int32_t * 0: when backend sent signal, i.e. EOD or Term * -1: when we've got any error; the function will report it to Bacula when * ctx is not NULL * : the size of received message */ int32_t PTCOMM::recvbackend(bpContext *ctx, char *cmd, POOL_MEM &buf, bool any) { // handle header int32_t length = handle_read_header(ctx, cmd, any); if (length < 0) { return -1; } // handle data payload if (length > 0) { // check requested buffer size buf.check_size(length + 1); return handle_payload(ctx, buf.c_str(), length); } return 0; } /** * @brief Receive a packet from the backend. * * The caller expects a packet of a particular type (`cmd`) and we return * from function when we will receive this kind of packet or get any error. * The `buf` is fixed size, so it won't be extended for larger messages. * In this case you have to make more calls to get all data. * * @param ctx bpContext - for Bacula debug jobinfo messages * @param cmd - the packet type expected * @param buf - the POOLMEM buffer we will read data * @param bufsize - the size of the fized buffer * @return int32_t * 0: when backend sent signal, i.e. EOD or Term * -1: when we've got any error; the function will report it to Bacula when * ctx is not NULL * : the size of received message */ int32_t PTCOMM::recvbackend_fixed(bpContext *ctx, char cmd, char *buf, int32_t bufsize) { int32_t length = remaininglen; char lcmd = cmd; if (!f_cont) { // handle header length = handle_read_header(ctx, &lcmd); if (length < 0) return -1; } // handle data payload if (length > 0) { // we will need subsequent call to handle remaining data only when `buf` to short f_cont = length > bufsize; int32_t nbytes = f_cont * bufsize + (!f_cont) * length; remaininglen = f_cont * (length - bufsize); return handle_payload(ctx, buf, nbytes); } return 0; } /** * @brief Sends packet to the backend. * The protocol allows sending no more than 999999 bytes of data in one packet. * If you require to send more data you have to split it in more packages, and * backend has to assemble it into a larger chunk of data. 
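 *
 * Wire format example: a command packet with a 14-byte payload is sent as the
 * 8-byte header "C000014\n" (status character, six ASCII digits, newline)
 * followed by the 14 payload bytes.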
* * @param ctx bpContext - for Bacula debug and jobinfo messages * @param cmd the packet status to send * @param buf the packet contents * @param len the length of the contents * @param _single_senddata defines if function should optimize the send_data call * using POOLMEM* extra space (the default) or not. * Warning: the optimization works only(!) with POOLMEM/POOL_MEM memory. * Other memory buffers, i.e. a const buf will fail with SIGSEGV. * @return true success * @return false when encountered any error */ bool PTCOMM::sendbackend(bpContext *ctx, char cmd, const POOLMEM *buf, int32_t len, bool _single_senddata) { PTHEADER *header; PTHEADER myheader; if (is_closed()){ DMSG0(ctx, DERROR, "BPIPE to backend is closed, cannot send data.\n"); JMSG0(ctx, is_fatal() ? M_FATAL : M_ERROR, "BPIPE to backend is closed, cannot send data.\n"); return false; } if (len > PTCOMM_MAX_PACKET_SIZE){ /* message length too long, cannot send it */ DMSG(ctx, DERROR, "Message length %i too long, cannot send data.\n", len); JMSG(ctx, M_FATAL, "Message length %i too long, cannot send data.\n", len); return false; } if (_single_senddata) { // The code at `_single_senddata` uses POOLMEM abufhead reserved space for // packet header rendering in the same way as bsock.c do. The code was tested // and is working fine. No memory leakage or corruption encountered. // The only pros for this code is a single sendbackend_data call for a whole // message instead of two sendbackend_data callse (header + data) for a standard // method. if (buf){ // we will prepare POOLMEM for sending data so we can render header here header = (PTHEADER*) (buf - sizeof(PTHEADER)); } else { // we will send header only header = &myheader; _single_senddata = false; } } else { header = &myheader; } header->status = cmd; if (bsnprintf(header->length, sizeof(header->length), "%06i\n", len) != 7){ /* problem rendering packet header */ DMSG0(ctx, DERROR, "Problem rendering packet header for command.\n"); JMSG0(ctx, M_FATAL, "Problem rendering packet header for command.\n"); return false; } header->length[6] = '\n'; char hlendata[17]; char bindata[17]; DMSG2(ctx, DDEBUG, "SENT: %s %s\n", asciidump((char*)header, sizeof(PTHEADER), hlendata, sizeof(hlendata)), asciidump(buf, len, bindata, sizeof(bindata))); bool _status; if (_single_senddata) { _status = sendbackend_data(ctx, (char *)header, len + sizeof(PTHEADER)); } else { _status = sendbackend_data(ctx, (char *)header, sizeof(PTHEADER)) && sendbackend_data(ctx, buf, len); } if (!_status){ // error DMSG0(ctx, DERROR, "PTCOMM cannot write packet to backend.\n"); JMSG0(ctx, is_fatal() ? M_FATAL : M_ERROR, "PTCOMM cannot write packet to backend.\n"); f_eod = f_error = f_fatal = true; return false; } return true; } /** * @brief Reads the next command message from the backend communication channel. * * It expects the command, so returned data will be null terminated string * stripped on any unwanted junk, i.e. '\n' or 'space'. * * @param ctx bpContext - for Bacula debug and jobinfo messages * @param buf buffer allocated for command * @return int32_t * -1 - when encountered any error * 0 - when backend sent signal, i.e. 
EOD or Term * - the number of bytes received, success */ int32_t PTCOMM::read_command(bpContext *ctx, POOL_MEM &buf) { char cmd = 'C'; int32_t status = recvbackend(ctx, &cmd, buf, false); if (status > 0) { /* mark end of string because every command is a string */ buf.check_size(status + 1); buf.c_str()[status] = '\0'; /* strip any junk in command like '\n' or trailing spaces */ strip_trailing_junk(buf.c_str()); } return status; } /** * @brief Reads the next packet from the backend communication channel. * * It accepts any command or data packet. The next byte after * the received data will be terminated with '\0' for easy string * handling. * * @param ctx bpContext - for Bacula debug and jobinfo messages * @param cmd a pointer to a `char` which show what kind of packet was received * @param buf buffer allocated for command * @return int32_t * -1 - when encountered any error * 0 - when backend sent signal, i.e. EOD or Term * - the number of bytes received, success */ int32_t PTCOMM::read_any(bpContext *ctx, char *cmd, POOL_MEM &buf) { int32_t status = recvbackend(ctx, cmd, buf, true); if (status > 0) { /* mark end of string for easy usage */ buf.check_size(status + 1); buf.c_str()[status] = '\0'; status++; } return status; } /* * Reads the next data message from the backend. * The number of bytes received will not exceed the buffer length even when * backend will send more data. In this case next call to read_data() will * return the next part of the message. * * in: * bpContext - for Bacula debug and jobinfo messages * buf - buffer allocated for data * len - the size of the allocated buffer * out: * -1 - when encountered any error * 0 - when backend sent signal, i.e. EOD or Term * - the number of bytes received, success * buf - the command string received from backend */ int32_t PTCOMM::read_data(bpContext *ctx, POOL_MEM &buf) { int32_t status; if (extpipe > 0) { status = read(extpipe, buf.c_str(), buf.size()); } else { char cmd = 'D'; status = recvbackend(ctx, &cmd, buf, false); } return status; } /* * Reads the next data message from the backend. * The number of bytes received will not exceed the buffer length even when * backend will send more data. In this case next call to read_data() will * return the next part of the message. * * in: * bpContext - for Bacula debug and jobinfo messages * buf - buffer allocated for data * len - the size of the allocated buffer * out: * -1 - when encountered any error * 0 - when backend sent signal, i.e. EOD or Term * - the number of bytes received, success * buf - the command string received from backend */ int32_t PTCOMM::read_data_fixed(bpContext *ctx, char *buf, int32_t len) { int32_t status; if (extpipe > 0){ status = read(extpipe, buf, len); } else { status = recvbackend_fixed(ctx, 'D', buf, len); } return status; } /* * Receive an acknowledge from backend (the EOD package). * * in: * bpContext - for Bacula debug and jobinfo messages * out: * True when acknowledge received * False when got any error */ bool PTCOMM::read_ack(bpContext *ctx) { POOL_MEM buf(PM_FNAME); char cmd = 'F'; if (recvbackend(ctx, &cmd, buf, false) == 0 && f_eod) { f_eod = false; return true; } return false; } /** * @brief Sends a command to the backend. * The command has to be a nul terminated string. 
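 *
 * Usage sketch (mirrors handshake() below; error handling omitted):
 *   POOL_MEM cmd(PM_FNAME);
 *   Mmsg(cmd, "Hello %s %s\n", pluginname, pluginapi);
 *   write_command(ctx, cmd.c_str());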
* * @param ctx for Bacula debug and jobinfo messages * @param buf a message buffer contains command to send * @param _single_senddata when true then low-level driver will use POOLMEM reserved space for transfer * @return true success * @return false when encountered any error */ bool PTCOMM::write_command(bpContext *ctx, const char *buf, bool _single_senddata) { int32_t len = buf ? strlen(buf) : 0; return sendbackend(ctx, 'C', buf, len, _single_senddata); } /* * Sends a raw data to backend. * The length of the data should not exceed max packet size which is 999999 Byes. * * in: * bpContext - for Bacula debug and jobinfo messages * buf - a message buffer contains data to send * len - the length of the data to send * out: * -1 - when encountered any error * - the number of bytes sent, success */ int32_t PTCOMM::write_data(bpContext *ctx, const char *buf, int32_t len, bool _single_senddata) { int32_t status; if (extpipe > 0){ status = write(extpipe, buf, len); } else { status = len; if (!sendbackend(ctx, 'D', buf, len, _single_senddata)){ status = -1; } } return status; } /* * Sends acknowledge to the backend which consist of the following flow: * -> EOD * <- OK * or * <- Error * * in: * bpContext - for Bacula debug and jobinfo messages * out: * True when acknowledge sent successful * False when got any error */ bool PTCOMM::send_ack(bpContext *ctx) { POOL_MEM buf(PM_FNAME); if (!signal_eod(ctx)){ // error return false; } if (read_command(ctx, buf) < 0){ // error return false; } // check if backend response with OK if (bstrcmp(buf.c_str(), "OK")){ // great ACK confirmed return true; } return false; } /** * @brief Send a handshake procedure to the backend using PLUGINNAME and PLUGINAPI. * * @param ctx bpContext - for Bacula debug and jobinfo messages * @param pluginname - the plugin name part of the handshake * @param pluginapi - the protocol version of the plugin * @return true - when handshake successful * @return false - when not */ bool PTCOMM::handshake(bpContext *ctx, const char *pluginname, const char * pluginapi) { POOL_MEM cmd(PM_FNAME); Mmsg(cmd, "Hello %s %s\n", pluginname, pluginapi); int32_t status = write_command(ctx, cmd); if (status > 0){ status = read_command(ctx, cmd); if (status > 0){ if (bstrcmp(cmd.c_str(), "Hello Bacula")){ /* handshake successful */ return true; } else { DMSG(ctx, DERROR, "Wrong backend response to Hello command, got: %s\n", cmd.c_str()); JMSG(ctx, is_fatal() ? M_FATAL : M_ERROR, "Wrong backend response to Hello command, got: %s\n", cmd.c_str()); } } } return false; } /** * @brief Sends a single stream of data to backend read from a buf. * * @param ctx bpContext - for Bacula debug and jobinfo messages * @param buf a buffer to read a data to send * @param len a lengtho of the data to send * @return bRC bRC_OK when successful, bRC_Error otherwise */ bRC PTCOMM::send_data(bpContext *ctx, const char *buf, int32_t len, bool _single_senddata) { /* send data */ int32_t offset = 0; while (offset < len) { int32_t count = MIN(len - offset, PTCOMM_MAX_PACKET_SIZE); int32_t status = write_data(ctx, buf + offset, count, _single_senddata); if (status < 0) { /* got some error */ return bRC_Error; } offset += status; } /* signal end of data to restore and get ack */ if (!send_ack(ctx)) { return bRC_Error; } return bRC_OK; } /** * @brief Receives a single stream of data from backend and saves it at buf. * For a good performance it is expected that `buf` will be preallocated * for the expected size of the stream. 
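 *
 * Hedged usage sketch (the preallocation size below is an arbitrary example
 * value and `ptcomm` an assumed PTCOMM instance; in real code the expected
 * size would come from the job context):
 * @code
 *    POOL_MEM data(PM_MESSAGE);
 *    int32_t received = 0;
 *    data.check_size(65536);               // preallocate for performance
 *    if (ptcomm->recv_data(ctx, data, &received) != bRC_OK) {
 *       return bRC_Error;
 *    }
 *    // 'received' now holds the number of bytes stored in 'data'
 * @endcode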
* * @param ctx bpContext - for Bacula debug and jobinfo messages * @param buf a buffer to save received data * @param recv_len a number of bytes saved at buf * @return bRC bRC_OK when successful, bRC_Error otherwise */ bRC PTCOMM::recv_data(bpContext *ctx, POOL_MEM &buf, int32_t *recv_len) { POOL_MEM cmd(PM_MESSAGE); int32_t offset = 0; // loop on data from backend and EOD while (!is_eod()) { int32_t status = read_data(ctx, cmd); if (status > 0) { buf.check_size(offset + status); // it should be preallocated memcpy(buf.c_str() + offset, cmd.c_str(), status); offset += status; } else { if (is_fatal()){ /* raise up error from backend */ return bRC_Error; } } } if (recv_len != NULL) { *recv_len = offset; } return bRC_OK; } bacula-15.0.3/src/plugins/fd/pluginlib/metaplugin_attributes.cpp0000644000175000017500000002356314771010173024640 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /** * @file metaplugin_attributes.cpp * @author Radosław Korzeniewski (radoslaw@korzeniewski.net) * @brief This is a Backend attributes (STAT, TSTAMP) command handling subroutines for metaplugin. * @version 1.0.0 * @date 2021-08-20 * * @copyright Copyright (c) 2021 All rights reserved. IP transferred to Bacula Systems according to agreement. */ #include "metaplugin_attributes.h" /* * libbac uses its own sscanf implementation which is not compatible with * libc implementation, unfortunately. * use bsscanf for Bacula sscanf flavor */ #ifdef sscanf #undef sscanf #endif namespace metaplugin { namespace attributes { /** * @brief Scans an input command and handles STAT sttributes. 
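 *
 * Two forms of the STAT command are accepted (the values below are purely
 * illustrative):
 * @code
 *    STAT:F 1048576 0 0 100640 1 -1    <- type size uid gid perms(octal) nlinks [LinkFI]
 *    STAT:/etc/passwd                  <- ask the plugin to lstat(2) the local path itself
 * @endcode
 * where the single-letter type is F (regular file), E (empty regular file),
 * D (directory), S (symlink) or L (hardlinked file, which additionally
 * requires the trailing LinkFI field).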
* * @param ctx bpContext - for Bacula debug and jobinfo messages * @param cmd a command buffer to scan and analyze * @param sp save packet to fill - output * @return Status Status_OK when data managed * Status_Handled when data managed and TSTAMP command is not required * Not_Command when it was not this command */ Status read_scan_stat_command(bpContext *ctx, POOL_MEM &cmd, struct save_pkt *sp, POOL_MEM &lname) { char type; size_t size; int uid, gid; uint perms; int nlinks; DMSG0(ctx, DDEBUG, "read_scan_stat_command()\n"); if (strncmp(cmd.c_str(), "STAT:/", 6) == 0) { POOL_MEM param(PM_FNAME); // handle stat(2) for this file scan_parameter_str(cmd, "STAT:", param); DMSG1(ctx, DDEBUG, "read_scan_stat_command():stat:%s\n", param.c_str()); int rc = lstat(param.c_str(), &sp->statp); if (rc < 0) { // error berrno be; DMSG3(ctx, DERROR, "Invalid STAT path: %s Err=%s (%d)\n", param.c_str(), be.bstrerror(), be.code()); JMSG3(ctx, M_ERROR, "Invalid STAT path: %s Err=%s (%d)\n", param.c_str(), be.bstrerror(), be.code()); memset(&sp->statp, 0, sizeof(sp->statp)); sp->type = FT_REG; return Status_Handled; } // stat is working as expected DMSG1(ctx, DDEBUG, "read_scan_stat_command():stat: %o\n", sp->statp.st_mode); switch (sp->statp.st_mode & S_IFMT) { case S_IFDIR: sp->type = FT_DIREND; sp->link = sp->fname; break; case S_IFREG: sp->type = FT_REG; break; case S_IFLNK: { sp->type = FT_LNK; ssize_t rc = readlink(param.c_str(), lname.c_str(), lname.size()); if (rc < 0) { berrno be; // error reading link value DMSG2(ctx, DERROR, "Error reading link value. Err=%s (%d)", be.bstrerror(), be.code()); JMSG3(ctx, M_ERROR, "Error reading link value: %s, Err=%s (%d)", param.c_str(), be.bstrerror(), be.code()); memset(&sp->statp, 0, sizeof(sp->statp)); sp->type = FT_REG; return Status_Handled; } lname.c_str()[rc] = '\0'; sp->link = lname.c_str(); DMSG1(ctx, DDEBUG, "read_scan_stat_command():readlink:%s\n", sp->link); } break; case S_IFIFO: sp->type = FT_SPEC; break; default: DMSG1(ctx, DERROR, "Unsupported file type: %o\n", sp->statp.st_mode & S_IFMT); return Invalid_Stat_Packet; } return Status_Handled; } int32_t nfi = -1; int nrscan = sscanf(cmd.c_str(), "STAT:%c %ld %d %d %o %d %d", &type, &size, &uid, &gid, &perms, &nlinks, &nfi); DMSG1(ctx, DDEBUG, "read_scan_stat_command(nrscan): %d\n", nrscan); if (nrscan >= 6) { sp->statp.st_size = size; sp->statp.st_nlink = nlinks; sp->statp.st_uid = uid; sp->statp.st_gid = gid; sp->statp.st_mode = perms; switch (type) { case 'F': sp->type = FT_REG; break; case 'E': sp->type = FT_REGE; break; case 'D': sp->type = FT_DIREND; sp->link = sp->fname; break; case 'S': sp->type = FT_LNK; break; case 'L': if (nrscan > 6){ sp->type = FT_LNKSAVED; sp->LinkFI = nfi; } else { DMSG1(ctx, DERROR, "Invalid stat packet: %s\n", cmd.c_str()); return Invalid_Stat_Packet; } break; default: /* we need to signal error */ sp->type = type; DMSG1(ctx, DERROR, "Invalid file type: %c\n", type); return Invalid_File_Type; } DMSG4(ctx, DDEBUG, "SCAN: type:%d size:%lld uid:%d gid:%d\n", (int)sp->type, sp->statp.st_size, sp->statp.st_uid, sp->statp.st_gid); DMSG3(ctx, DDEBUG, "SCAN: mode:%06o nl:%d fi:%d\n", sp->statp.st_mode, sp->statp.st_nlink, sp->LinkFI); return Status_OK; } return Not_Command; } /** * @brief Scans an input command and handles TSTAMP sttributes. 
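 *
 * The expected wire format is three Unix timestamps: atime, mtime and ctime
 * (the values below are illustrative only):
 * @code
 *    TSTAMP:1629443400 1629443400 1629443400
 * @endcode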
* * @param ctx bpContext - for Bacula debug and jobinfo messages * @param cmd a command buffer to scan and analyze * @param sp save packet to fill - output * @return Status Status_OK when data managed * Not_Command when it was not this command */ Status read_scan_tstamp_command(bpContext *ctx, POOL_MEM &cmd, struct save_pkt *sp) { time_t _atime; time_t _mtime; time_t _ctime; DMSG0(ctx, DDEBUG, "read_scan_tstamp_command()\n"); int nrscan = sscanf(cmd.c_str(), "TSTAMP:%ld %ld %ld", &_atime, &_mtime, &_ctime); DMSG1(ctx, DDEBUG, "read_scan_tstamp_command(nrscan): %d\n", nrscan); if (nrscan == 3) { sp->statp.st_atime = _atime; sp->statp.st_mtime = _mtime; sp->statp.st_ctime = _ctime; DMSG3(ctx, DINFO, "SCAN: %ld(at) %ld(mt) %ld(ct)\n", sp->statp.st_atime, sp->statp.st_mtime, sp->statp.st_ctime); return Status_OK; } return Not_Command; } /** * @brief Prepare a STAT command based on selected data. * * @param ctx bpContext - for Bacula debug and jobinfo messages * @param cmd the command buffer - output * @param rp restore packet to use * @return Status Status_OK */ Status make_stat_command(bpContext *ctx, POOL_MEM &cmd, const restore_pkt *rp) { /* STAT:... */ char type; switch (rp->type) { case FT_REGE: type = 'E'; break; case FT_DIREND: type = 'D'; break; case FT_LNK: type = 'S'; break; case FT_LNKSAVED: type = 'L'; break; case FT_MASK: // This is a special metaplugin protocol hack type = 'X'; // as the accurate code does not know this exact value break; case FT_REG: default: type = 'F'; break; } Mmsg(cmd, "STAT:%c %lld %d %d %06o %d %d\n", type, rp->statp.st_size, rp->statp.st_uid, rp->statp.st_gid, rp->statp.st_mode, (int)rp->statp.st_nlink, rp->LinkFI); DMSG(ctx, DDEBUG, "make_stat_command:%s", cmd.c_str()); return Status_OK; } /** * @brief Prepares a TSTAMP command based on selected data. * * @param ctx bpContext - for Bacula debug and jobinfo messages * @param cmd the command buffer - output * @param rp restore packet to use * @return Status Status_OK */ Status make_tstamp_command(bpContext *ctx, POOL_MEM &cmd, const restore_pkt *rp) { /* TSTAMP:... */ Mmsg(cmd, "TSTAMP:%ld %ld %ld\n", rp->statp.st_atime, rp->statp.st_mtime, rp->statp.st_ctime); DMSG(ctx, DDEBUG, "make_tstamp_command:%s", cmd.c_str()); return Status_OK; } /** * @brief Reads a file attributes sequence (STAT, TSTAMP) used at different sections. * * @param ctx bpContext - for Bacula debug and jobinfo messages * @param ptcomm backend communication class - the current context * @param cmd the buffer for command handled * @param sp save packet to fill when file attributes handled * @return Status Status_OK when file attributes commands handled, Status_Error on any error, other depends on enum. 
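 *
 * The supported backend sequence is a STAT line immediately followed by a
 * TSTAMP line, for example (illustrative values):
 * @code
 *    STAT:F 1048576 0 0 100640 1 -1
 *    TSTAMP:1629443400 1629443400 1629443400
 * @endcode
 * For the "STAT:/..." path form the attributes are taken locally via lstat(2)
 * and no TSTAMP line is expected (Status_Handled is mapped to Status_OK).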
*/ Status read_attributes_command(bpContext *ctx, PTCOMM *ptcomm, POOL_MEM &cmd, struct save_pkt *sp, POOL_MEM &lname) { DMSG0(ctx, DDEBUG, "read_attributes_command()\n"); // supported sequence is `STAT` followed by `TSTAMP` if (ptcomm->read_command(ctx, cmd) < 0) { // error return Status_Error; } Status status = read_scan_stat_command(ctx, cmd, sp, lname); switch(status) { case Status_OK: // go with TSATMP command if (ptcomm->read_command(ctx, cmd) < 0) { // error return Status_Error; } status = read_scan_tstamp_command(ctx, cmd, sp); break; case Status_Handled: status = Status_OK; break; default: break; } return status; } } // namespace attributes } // namespace metaplugin bacula-15.0.3/src/plugins/fd/pluginlib/commctx_test.cpp0000644000175000017500000000743314771010173022734 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /** * @file commctx_test.cpp * @author Radosław Korzeniewski (radoslaw@korzeniewski.net) * @brief A Bacula plugin command context switcher template - unittest. * @version 1.1.0 * @date 2020-12-23 * * @copyright Copyright (c) 2020 All rights reserved. IP transferred to Bacula Systems according to agreement. */ #include "pluginlib.h" #include "commctx.h" #include "unittests.h" bFuncs *bfuncs; bInfo *binfo; static int referencenumber = 0; static int dosomethingvariable = 0; struct testctx : public SMARTALLOC { const char * cmd; testctx(const char *command) : cmd(command) { referencenumber++; }; ~testctx() { referencenumber--; }; bool meth() { return true; } }; void do_something(testctx*, void*data) { dosomethingvariable++; if (data != NULL) { int *var = (int *)data; dosomethingvariable += *var; } } bRC do_status(testctx*, void*data) { if (data != NULL) { return bRC_OK; } return bRC_Error; } int main() { Unittests commctx_test("commctx_test"); // Pmsg0(0, "Initialize tests ...\n"); #define TEST1 "TEST1" #define TEST2 "TEST2" { COMMCTX ctx; nok(ctx.check_command(TEST1), "test empty ctx list"); ok(referencenumber == 0, "check no allocation yet"); auto testctx1 = ctx.switch_command(TEST1); ok(testctx1 != nullptr, "test first command"); ok(referencenumber == 1, "check allocation"); auto testctx2 = ctx.switch_command(TEST2); ok(testctx2 != nullptr, "test next command"); ok(referencenumber == 2, "check allocation"); auto currentctx = ctx.switch_command(TEST1); ok(currentctx != nullptr, "test switch command"); ok(currentctx == testctx1, "test switch command to proper"); ok(referencenumber == 2, "check allocation"); ok(ctx.check_command(TEST2), "test check command"); } ok(referencenumber == 0, "check smart free"); { COMMCTX ctx; auto testctx1 = ctx.switch_command(TEST1); ok(testctx1 != nullptr, "test switch command1"); ok(referencenumber == 1, "check ref allocation1"); auto testctx2 = ctx.switch_command(TEST2); ok(testctx2 != nullptr, "test switch command2"); ok(referencenumber == 2, "check allocation2"); int append = 2; ctx.foreach_command(do_something, &append); ok(dosomethingvariable == 6, "dosomethingvariable"); 
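   // Why 6 and 2: do_something() bumps the counter by 1 per call and, when a
   // non-NULL data pointer is given, additionally by *data. The context list
   // holds two commands (TEST1 and TEST2), so with append == 2 the counter
   // reaches 2 * (1 + 2) == 6 above, and 2 * 1 == 2 for the NULL run below.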
dosomethingvariable = 0; ctx.foreach_command(do_something, NULL); ok(dosomethingvariable == 2, "do_something with NULL"); auto status = ctx.foreach_command_status(do_status, &append); ok(status != bRC_Error, "do_status"); status = ctx.foreach_command_status(do_status, NULL); ok(status == bRC_Error, "do_status with NULL"); } { COMMCTX ctx; auto testctx1 = ctx.switch_command(TEST1); ok(testctx1 != nullptr, "test switch command1"); auto cmd1 = ctx->cmd; ok(strcmp(cmd1, TEST1) == 0, "test arrow operator variable"); auto testctx2 = ctx.switch_command(TEST2); ok(testctx2 != nullptr, "test switch command2"); auto cmd2 = ctx->meth(); ok(cmd2, "test arrow operator method"); } return report(); } bacula-15.0.3/src/plugins/fd/pluginlib/smartptr_test.cpp0000644000175000017500000000423614771010173023134 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /** * @file smartptr_test.cpp * @author Radosław Korzeniewski (radoslaw@korzeniewski.net) * @brief Common definitions and utility functions for Inteos plugins - unittest. * @version 1.2.0 * @date 2021-04-23 * * @copyright Copyright (c) 2021 All rights reserved. IP transferred to Bacula Systems according to agreement. */ #include "pluginlib.h" #include "smartptr.h" #include "unittests.h" bFuncs *bfuncs; bInfo *binfo; static int referencenumber = 0; static int referencenumber2 = 0; struct testclass { testclass() { referencenumber++; } ~testclass() { referencenumber--; } }; int main() { Unittests pluglib_test("smartalist_test"); // Pmsg0(0, "Initialize tests ...\n"); referencenumber = 0; // ensure it is zero { smart_ptr ptr; ok(ptr.get() == NULL, "check default constructor"); ok(referencenumber == 0, "check refnr"); } referencenumber = 0; // ensure it is zero { smart_ptr ptr(new testclass); ok(referencenumber == 1, "check smart allocation"); } ok(referencenumber == 0, "check smart free"); referencenumber = 0; // ensure it is zero referencenumber2 = 0; // ensure it is zero { smart_ptr ptr(new testclass); ok(referencenumber == 1, "check first allocation"); testclass * other = new testclass; ok(referencenumber == 2, "check other allocation"); ptr = other; ok(referencenumber == 1, "check assign operator"); } ok(referencenumber == 0, "check smart free"); return report(); } bacula-15.0.3/src/plugins/fd/pluginlib/metaplugin_metadata.cpp0000644000175000017500000000242114771010173024220 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /** * @file metaplugin_accurate.cpp * @author Radosław Korzeniewski (radoslaw@korzeniewski.net) * @brief This is a metadata (ACL, XATTR, METADATA) handling for metaplugin. * @version 1.0.0 * @date 2021-09-20 * * @copyright Copyright (c) 2021 All rights reserved. IP transferred to Bacula Systems according to agreement. */ #include "metaplugin_metadata.h" /* * libbac uses its own sscanf implementation which is not compatible with * libc implementation, unfortunately. * use bsscanf for Bacula sscanf flavor */ #ifdef sscanf #undef sscanf #endif namespace metaplugin { namespace metadata { } // namespace metadata } // namespace metaplugin bacula-15.0.3/src/plugins/fd/pluginlib/execprog.cpp0000644000175000017500000002177314771010173022042 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /** * @file execprog.h * @author Radosław Korzeniewski (radoslaw@korzeniewski.net) * @brief This is a Bacula plugin external command execution context. * @version 1.2.0 * @date 2020-01-05 * * @copyright Copyright (c) 2021 All rights reserved. IP transferred to Bacula Systems according to agreement. */ #include "execprog.h" /* Plugin compile time variables */ #define PLUGINPREFIX "execprog:" /* * Terminate the connection represented by BPIPE object. * it shows a debug and job messages when connection close is unsuccessful * and when ctx is available only. * * in: * bpContext - Bacula Plugin context required for debug/job messages to show, * it could be NULL in this case no messages will be shown * out: * none */ int EXECPROG::terminate(bpContext *ctx, bool raise_error) { // clear status tstatus = 0; pm_strcpy(m_errmsg, NULL); if (!is_closed()) { // after close_bpipe it is no longer available pid_t wpid = bpipe->worker_pid; tstatus = close_bpipe(bpipe); berrno be; pm_strcpy(m_errmsg, be.bstrerror(tstatus)); if (tstatus) { /* error during close */ DMSG(ctx, DERROR, "Error closing command. Err=%s (%d)\n", m_errmsg.c_str()); if (raise_error) { // raise it on request JMSG(ctx, M_ERROR, "Error closing command. Err=%s (%d)\n", m_errmsg.c_str()); } } if (wpid) { /* terminate the process command */ kill(wpid, SIGTERM); } bpipe = NULL; } return tstatus; } /* * Run command and prepared parameters. 
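 *
 * Hedged usage sketch (the tool path and arguments are placeholders only,
 * not a real backend shipped with Bacula):
 *
 *    EXECPROG prog;
 *    POOL_MEM out(PM_MESSAGE);
 *    if (prog.execute_command(ctx, "/usr/bin/some-tool", "--version")) {
 *       if (prog.read_output(ctx, out) >= 0) {
 *          DMSG1(ctx, DINFO, "tool output: %s\n", out.c_str());
 *       }
 *       prog.terminate(ctx, false);   // do not raise a job error on close
 *    }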
* * in: * bpContext - for Bacula debug and jobinfo messages * cmd - the command to execute * out: * True - when command execute successfully * False - when execution return error */ bool EXECPROG::execute_command(bpContext *ctx, const char *cmd, const char *args) { POOL_MEM exe_cmd(PM_FNAME); if (cmd == NULL) { /* cannot execute command NULL */ DMSG0(ctx, DERROR, "Logic error: Cannot execute NULL command!\n"); JMSG0(ctx, M_FATAL, "Logic error: Cannot execute NULL command!\n"); return false; } /* the format of a command line to execute is: [ - the size of received message */ int32_t EXECPROG::read_output(bpContext *ctx, POOL_MEM &out) { int status; int rbytes; bool ndone; if (is_closed()){ DMSG0(ctx, DERROR, "BPIPE to command is closed, cannot get data.\n"); JMSG0(ctx, is_fatal() ? M_FATAL : M_ERROR, "BPIPE to command is closed, cannot get data.\n"); return -1; } /* set variables */ rbytes = 0; ndone = true; /* read all output data */ while (ndone){ status = read_data(ctx, out.addr() + rbytes, out.size() - rbytes); if (status < 0){ /* error */ return -1; } rbytes += status; if (is_eod()){ /* we read all data available */ ndone = false; continue; } /* it seems out buffer is too small for all data */ out.check_size(rbytes + 1024); } // we terminate the output as it would be the standard string out.check_size(rbytes + 1); out.c_str()[rbytes] = '\0'; return rbytes; } /* * Reads a single data block from command. * It reads as more data as is available on the other size and will fit into * a memory buffer - buf. When EOD encountered during reading it will set * f_eod flag, so checking this flag is mandatory! * * in: * bpContext - for Bacula debug jobinfo messages * buf - a memory buffer for data * len - the length of the memory buffer - buf * out: * -1 - when we've got any error; the function reports it to Bacula when * ctx is not NULL * when no more data to read - EOD * - the size of received data */ int32_t EXECPROG::read_data(bpContext *ctx, POOLMEM *buf, int32_t len) { int status; int nbytes; int rbytes; int timeout; if (buf == NULL || len < 1){ /* we have no space to read data */ DMSG0(ctx, DERROR, "No space to read data from tool.\n"); JMSG0(ctx, is_fatal() ? M_FATAL : M_ERROR, "No space to read data from tool.\n"); return -1; } if (is_closed()){ DMSG0(ctx, DERROR, "BPIPE to command is closed, cannot get data.\n"); JMSG0(ctx, is_fatal() ? M_FATAL : M_ERROR, "BPIPE to command is closed, cannot get data.\n"); return -1; } /* we will read no more then len bytes available in the buf */ nbytes = len; rbytes = 0; /* clear flags */ f_eod = f_error = f_fatal = false; timeout = 200; // timeout of 200ms while (nbytes){ status = fread(buf + rbytes, 1, nbytes, bpipe->rfd); if (status == 0){ berrno be; if (ferror(bpipe->rfd) != 0){ // check if it is an interrupted system call then restart if (be.code() == EINTR){ clearerr(bpipe->rfd); continue; } f_error = true; DMSG(ctx, DERROR, "BPIPE read error: ERR=%s\n", be.bstrerror()); JMSG(ctx, is_fatal() ? M_FATAL : M_ERROR, "BPIPE read error: ERR=%s\n", be.bstrerror()); return -1; } if (feof(bpipe->rfd) != 0){ f_eod = true; return rbytes; } bmicrosleep(0, 1000); // sleep 1mS if (!timeout--){ /* reach timeout*/ f_error = true; DMSG0(ctx, DERROR, "BPIPE read timeout.\n"); JMSG0(ctx, is_fatal() ? M_FATAL : M_ERROR, "BPIPE read timeout.\n"); return -1; } } else { timeout = 200; // reset timeout } nbytes -= status; rbytes += status; } return rbytes; } /* * Sends a raw data block to xe tool. 
* * in: * bpContext - for Bacula debug and jobinfo messages * buf - a message buffer contains data to send * len - the length of the data to send * out: * -1 - when encountered any error * - the number of bytes sent, success */ int32_t EXECPROG::write_data(bpContext *ctx, POOLMEM *buf, int32_t len) { int status; int nbytes; int wbytes; int timeout; if (buf == NULL){ /* we have no data to write */ DMSG0(ctx, DERROR, "No data to send to command.\n"); JMSG0(ctx, is_fatal() ? M_FATAL : M_ERROR, "No data to send to command.\n"); return -1; } if (is_closed()){ DMSG0(ctx, DERROR, "BPIPE to command is closed, cannot send data.\n"); JMSG0(ctx, is_fatal() ? M_FATAL : M_ERROR, "BPIPE to command is closed, cannot send data.\n"); return -1; } /* we will write len bytes available in the buf */ nbytes = len; wbytes = 0; /* clear flags */ f_eod = f_error = f_fatal = false; timeout = 200; // timeout of 200ms while (nbytes){ status = fwrite(buf + wbytes, 1, nbytes, bpipe->wfd); if (status == 0){ berrno be; if (ferror(bpipe->wfd) != 0){ f_error = true; DMSG(ctx, DERROR, "BPIPE write error: ERR=%s\n", be.bstrerror()); JMSG(ctx, is_fatal() ? M_FATAL : M_ERROR, "BPIPE write error: ERR=%s\n", be.bstrerror()); return -1; } bmicrosleep(0, 1000); // sleep 1mS if (!timeout--){ /* reached timeout*/ f_error = true; DMSG0(ctx, DERROR, "BPIPE write timeout.\n"); JMSG0(ctx, is_fatal() ? M_FATAL : M_ERROR, "BPIPE write timeout.\n"); return -1; } } else { timeout = 200; // reset timeout } nbytes -= status; wbytes += status; } return wbytes; } bacula-15.0.3/src/plugins/fd/pluginlib/pluginctx.h0000644000175000017500000001047014771010173021700 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /** * @file pluginctx.h * @author Radosław Korzeniewski (radoslaw@korzeniewski.net) * @brief This is a Bacula File Daemon general plugin framework. The Context. * @version 1.0.0 * @date 2021-04-08 * * @copyright Copyright (c) 2021 All rights reserved. IP transferred to Bacula Systems according to agreement. */ #include "pluginlib.h" #include "lib/bregex.h" #ifndef PLUGINLIB_PLUGINCTX_H #define PLUGINLIB_PLUGINCTX_H // The list of restore options saved to the RestoreObject. extern struct ini_items plugin_items_dump[]; namespace pluginlib { /* * This is a main plugin API class. It manages a plugin context. * All the public methods correspond to a public Bacula API calls, even if * a callback is not implemented. 
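 *
 * A concrete plugin is expected to derive its own context class from PLUGINCTX
 * and implement at least the two pure virtual parse_parameter() overloads.
 * Minimal sketch (the class name and the parameter handling are illustrative
 * only):
 *
 *    class MYPLUGINCTX : public pluginlib::PLUGINCTX {
 *    public:
 *       MYPLUGINCTX(const char *command) : PLUGINCTX(command) {}
 *       bRC parse_parameter(bpContext *ctx, const char *argk, const char *argv)
 *       {
 *          // recognize "argk=argv" style plugin command parameters here
 *          return bRC_OK;
 *       }
 *       bRC parse_parameter(bpContext *ctx, ini_items &item)
 *       {
 *          // recognize restore options delivered through the Restore ini file
 *          return bRC_OK;
 *       }
 *    };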
*/ class PLUGINCTX : public SMARTALLOC { public: PLUGINCTX(const char * command) : cmd(PM_FNAME), f_error(false), f_fatal(false), abort_on_error(false), f_debug(false), m_parameters_parsed(false), ini(), preg() { pm_strcpy(cmd, command); } #if __cplusplus > 201103L PLUGINCTX() = delete; PLUGINCTX(PLUGINCTX&) = delete; PLUGINCTX(PLUGINCTX&&) = delete; #endif virtual ~PLUGINCTX() {} virtual bRC parse_parameter(bpContext *ctx, const char *argk, const char *argv) = 0; virtual bRC parse_parameter(bpContext *ctx, ini_items &item) = 0; virtual bRC parse_plugin_config(bpContext *ctx, restore_object_pkt *rop); virtual bRC handle_restore_object(bpContext *ctx, restore_object_pkt *rop) { return bRC_OK; } /** * @brief Checks if plugin context operation is flagged on f_error. * * @return true when is error * @return false when no error */ inline bool is_error() const { return f_error || f_fatal; } /** * @brief Checks if plugin context operation is flagged on f_fatal. * * @return true when is fatal error * @return false when no fatal error */ inline bool is_fatal() const { return f_fatal || (f_error && abort_on_error); } /** * @brief Return a Job Message error level based on context * * @return int */ inline int jmsg_err_level() const { return is_fatal() ? M_FATAL : M_ERROR; } /** * @brief Set the abort on error flag */ inline void set_abort_on_error() { abort_on_error = true; } /** * @brief Clears the abort on error flag. */ inline void clear_abort_on_error() { abort_on_error = false; } /** * @brief return abort on error flag status * * @return true if flag is set * @return false if flag is not set */ inline bool is_abort_on_error() const { return abort_on_error; } inline bool are_parameters_parsed() const { return m_parameters_parsed; } inline void set_parameters_parsed() { m_parameters_parsed = true; } protected: POOL_MEM cmd; /// plugin command for this context bool f_error; /// the plugin signaled an error bool f_fatal; /// the plugin signaled a fatal error bool abort_on_error; /// abort on error flag bool f_debug; /// when additional debugging required bool m_parameters_parsed; ConfigFile ini; /// Restore ini file handler regex_t preg; /// this is a regex context for include/exclude virtual int check_ini_param(char *param); virtual bool check_plugin_param(const char *param, alist *params) { return false; } virtual int get_ini_count() { return 0; } }; } // namespace pluginlib #endif // PLUGINLIB_PLUGINBASE_H bacula-15.0.3/src/plugins/fd/pluginlib/execprog.h0000644000175000017500000001423414771010173021501 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /** * @file execprog.h * @author Radosław Korzeniewski (radoslaw@korzeniewski.net) * @brief This is a Bacula plugin external command execution context. * @version 1.2.0 * @date 2020-01-05 * * @copyright Copyright (c) 2021 All rights reserved. IP transferred to Bacula Systems according to agreement. 
*/ #ifndef PLUGINLIB_EXECPROG_H #define PLUGINLIB_EXECPROG_H #include "pluginlib/pluginlib.h" class EXECPROG : public SMARTALLOC { private: BPIPE *bpipe; // this is our bpipe to communicate with external tools int rfd; // backend `stdout` to plugin file descriptor int wfd; // backend `stdin` to plugin file descriptor int efd; // backend `stderr` to plugin file descriptor int maxfd; // max file descriptors from bpipe channels POOL_MEM m_errmsg; /// message buffer for error string int extpipe; // set when data blast is performed using external pipe/file POOL_MEM extpipename; // name of the external pipe/file for restore bool f_eod; // the backend signaled EOD bool f_error; // the backend signaled an error bool f_fatal; // the backend signaled a fatal error bool f_cont; // when we are reading next part of data packet bool abort_on_error; // abort on error flag int32_t remaininglen; // the number of bytes to read when `f_cont` is true struct timeval _timeout; // a timeout when waiting for data to read from backend int tstatus; // public: EXECPROG() : bpipe(NULL), rfd(0), wfd(0), efd(0), maxfd(0), m_errmsg(PM_MESSAGE), extpipe(-1), extpipename(PM_FNAME), f_eod(false), f_error(false), f_fatal(false), f_cont(false), abort_on_error(false), remaininglen(0), #if __cplusplus >= 201103L _timeout{0}, #endif tstatus(0) { #if __cplusplus < 201103L _timeout.tv_sec = 0; _timeout.tv_usec = 0; #endif } #if __cplusplus >= 201103L EXECPROG(EXECPROG &) = delete; EXECPROG(EXECPROG &&) = delete; ~EXECPROG() = default; #else ~EXECPROG() {}; #endif /** * @brief Checks if connection is open and we can use a bpipe object for communication. * * @return true if connection is closed and we can't use bpipe object * @return false if connection is available */ inline bool is_open() { return bpipe != NULL; } /** * @brief Checks if connection is closed and we can't use a bpipe object for communication. * * @return true if connection is closed and we can't use bpipe object * @return false if connection is available */ inline bool is_closed() { return bpipe == NULL; } /** * @brief Checks if backend sent us some error, backend error message is flagged on f_error. * * @return true * @return false */ inline bool is_error() { return f_error || f_fatal; } /** * @brief Checks if backend sent us fatal error, backend error message is flagged on f_fatal. * * @return true * @return false */ inline bool is_fatal() { return f_fatal || (f_error && abort_on_error); } /** * @brief Set the abort on error object * */ inline void set_abort_on_error() { abort_on_error = true; } /** * @brief Clear abort_on_error flag. * */ inline void clear_abort_on_error() { abort_on_error = false; } /** * @brief Return abort_on_error flag. * * @return true * @return false */ inline bool is_abort_on_error() { return abort_on_error; } /** * @brief Checks if backend signaled EOD, eod from backend is flagged on f_eod. * * @return true when backend signaled EOD on last packet * @return false when backend did not signal EOD */ inline bool is_eod() { return f_eod; } /** * @brief Clears the EOD from backend flag, f_eod. * The eod flag is set when EOD message received from backend and not cleared * until next recvbackend() call. */ inline void clear_eod() { f_eod = false; } /** * @brief Get the cmd pid. * Returns a backend PID if available. 
* * @return int the backend PID */ inline int get_cmd_pid() { if (bpipe){ return bpipe->worker_pid; } return -1; }; inline int get_terminate_status() { return tstatus; } /* all you need is to simply execute the command first */ bool execute_command(bpContext *ctx, const char *cmd, const char *args = ""); /* * Run command and prepared parameters. */ inline bool execute_command(bpContext *ctx, const char *cmd, const POOL_MEM &args) { return execute_command(ctx, cmd, args.c_str()); } inline bool execute_command(bpContext *ctx, const POOL_MEM &cmd, const POOL_MEM &args) { return execute_command(ctx, cmd.c_str(), args.c_str()); } inline bool execute_command(bpContext *ctx, const POOL_MEM &cmd) { return execute_command(ctx, cmd.c_str()); } /* then just simply read or write data to it */ int32_t read_data(bpContext *ctx, POOLMEM *buf, int32_t len); int32_t read_output(bpContext *ctx, POOL_MEM &out); int32_t write_data(bpContext *ctx, POOLMEM *buf, int32_t len); /* and finally terminate execution when finish */ int terminate(bpContext *ctx, bool raise_error = true); inline POOL_MEM &get_error() { return m_errmsg; } inline char *get_error_str() { return m_errmsg.c_str(); } /* direct pipe management */ inline int close_wpipe() { return ::close_wpipe(bpipe); } }; #endif // PLUGINLIB_EXECPROG_H bacula-15.0.3/src/plugins/fd/pluginlib/pluginlib_test.cpp0000644000175000017500000002066514771010173023251 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /** * @file commctx.h * @author Radosław Korzeniewski (radoslaw@korzeniewski.net) * @brief Common definitions and utility functions for Inteos plugins - unittest. 
* @version 1.1.0 * @date 2020-12-23 * */ #include "pluginlib.h" #include "unittests.h" #include "smartalist.h" bFuncs *bfuncs; bInfo *binfo; struct vectstruct { const char *path; bool result; const char *descr; }; int main() { Unittests pluglib_test("pluglib_test"); char * s; // Pmsg0(0, "Initialize tests ...\n"); { alist * list; list = plugutil_str_split_to_alist("123456789"); ok(list != NULL, "default split"); ok(list->size() == 1, "expect single strings"); foreach_alist(s, list){ ok(strlen(s) == 9, "check element length"); } delete list; list = plugutil_str_split_to_alist("123.456"); ok(list != NULL, "split: 123.456"); ok(list->size() == 2, "expect two strings"); foreach_alist(s, list){ ok(strlen(s) == 3, "check element length"); } delete list; list = plugutil_str_split_to_alist("12345.56789.abcde"); ok(list != NULL, "split: 12345.56789.abcde"); ok(list->size() == 3, "expect three strings"); foreach_alist(s, list){ ok(strlen(s) == 5, "check element length"); } delete list; list = plugutil_str_split_to_alist("1.bacula..Eric.Kern"); ok(list != NULL, "split: 1.bacula..Eric.Kern"); ok(list->size() == 5, "expect three strings"); ok(strcmp((char*)list->first(), "1") == 0, "check element 1"); ok(strcmp((char*)list->next(), "bacula") == 0, "check element bacula"); ok(strlen((char*)list->next()) == 0, "check empty element"); ok(strcmp((char*)list->next(), "Eric") == 0, "check element Eric"); ok(strcmp((char*)list->next(), "Kern") == 0, "check element Kern"); delete list; } { alist list; plugutil_str_split_to_alist(list, "123456789"); ok(list.size() > 0, "default split"); ok(list.size() == 1, "expect single strings"); foreach_alist(s, &list){ ok(strlen(s) == 9, "check element length"); } } { alist list; plugutil_str_split_to_alist(list, "123.456"); ok(list.size() > 0, "split: 123.45"); ok(list.size() == 2, "expect two strings"); foreach_alist(s, &list){ ok(strlen(s) == 3, "check element length"); } } { alist list; plugutil_str_split_to_alist(list, "12345.56789.abcde"); ok(list.size() > 0, "split: 12345.56789.abcde"); ok(list.size() == 3, "expect three strings"); foreach_alist(s, &list){ ok(strlen(s) == 5, "check element length"); } } { alist list; plugutil_str_split_to_alist(list, "1.bacula..Eric.Kern"); ok(list.size() > 0, "split: 1.bacula..Eric.Kern"); ok(list.size() == 5, "expect three strings"); ok(strcmp((char*)list.first(), "1") == 0, "check element 1"); ok(strcmp((char*)list.next(), "bacula") == 0, "check element bacula"); ok(strlen((char*)list.next()) == 0, "check empty element"); ok(strcmp((char*)list.next(), "Eric") == 0, "check element Eric"); ok(strcmp((char*)list.next(), "Kern") == 0, "check element Kern"); } { alist list; plugutil_str_split_to_alist(list, "key: 'value'", ':'); is(list.size(), 2, "split: key: value"); is((char*)list.get(0), "key", "split: check key"); is((char*)list.get(1), "'value'", "split: check value (without extra space)"); } { alist list; plugutil_str_split_to_alist(list, NULL); is(list.size(), 0, "split: NULL"); } { alist list; plugutil_str_split_to_alist(list, ""); is(list.size(), 0, "split: empty"); } POOL_MEM cmd1(PM_NAME); POOL_MEM param(PM_NAME); const char *prefix = "FNAME:"; const char *fname1 = "/etc/passwd"; Mmsg(cmd1, "%s%s\n", prefix, fname1); // pm_strcpy(cmd1, prefix); // pm_strcat(cmd1, fname1); // pm_strcat(cmd1, "\n"); ok(scan_parameter_str(cmd1, prefix, param), "check scan parameter str match"); ok(bstrcmp(param.c_str(), fname1) , "check scan parameter str param"); nok(scan_parameter_str(cmd1, "prefix", param), "check scan parameter str not 
match"); const char *fname2 = "/home/this is a filename with spaces which /are hard to/ manage.com"; pm_strcpy(cmd1, prefix); pm_strcat(cmd1, fname2); pm_strcat(cmd1, "\n"); ok(scan_parameter_str(cmd1, prefix, param), "check scan parameter str with spaces match"); ok(bstrcmp(param.c_str(), fname2) , "check scan parameter str with spaces param"); char cmd2[256]; snprintf(cmd2, 256, "%s%s\n", prefix, fname1); ok(scan_parameter_str(cmd2, prefix, param), "check scan parameter for char* str match"); ok(bstrcmp(param.c_str(), fname1) , "check scan parameter for char* str param"); nok(scan_parameter_str(cmd2, "prefix", param), "check scan parameter for char* str not match"); int value = -1; snprintf(cmd2, 256, "%s10\n", prefix); ok(scan_parameter_int(cmd2, prefix, value), "test scan_parameter_int"); ok(value == 10, "test scan_parameter_int value"); value = -1; nok(scan_parameter_int(cmd2, "prefix", value), "test scan_parameter_int not match"); ok(value == -1, "test scan_parameter_int value unchanged"); const vectstruct testvect1[] = { {"", false, "checking empty"}, {"/", false, "checking slash"}, {"other", false, "checking other"}, {"/tmp", true, "checking local"}, {"/tmp/restore", true, "checking local"}, #ifdef HAVE_WIN32 {"c:", true, "checking local win32"}, {"d:/", true, "checking local win32"}, {"E:/", true, "checking local win32"}, {"F:/test", true, "checking local win32"}, {"g:/test/restore", true, "checking local win32"}, #endif {NULL, false, NULL}, }; for (int i = 0; testvect1[i].path != NULL; i++) { bool result = islocalpath(testvect1[i].path); ok(result == testvect1[i].result, testvect1[i].descr); } struct strvectstruct { const char *input; const char *output; const int msglen; const int len; const char *descr; }; const strvectstruct testvect2[] = { { "TEST1****", "TEST1\n\0", 5, 7, "TEST1" }, { "TEST2\n****", "TEST2\n\0", 6, 7, "TEST2" }, { "TEST3\n\0****", "TEST3\n\0", 7, 7, "TEST3" }, { "TEST4\0****", "TEST4\n\0", 6, 7, "TEST4" }, { "TEST5\0\n****", "TEST5\n\0", 7, 7, "TEST5" }, { "****", "\n\0", 0, 2, "Test empty" }, { NULL, NULL, 0, 0, NULL }, }; POOL_MEM ebuf(PM_NAME); for (int i = 0; testvect2[i].input != NULL; i++) { pm_memcpy(ebuf, testvect2[i].input, strlen(testvect2[i].input)); scan_and_terminate_str(ebuf, testvect2[i].msglen); ok(memcmp(ebuf.c_str(), testvect2[i].output, testvect2[i].len) == 0, testvect2[i].descr); } { // debug_level = 800; alist mylist(10, not_owned_by_alist); bool status = pluginlib::parse_param_add_str(mylist, "test", "test", "value"); ok(status, "test parse_param_add_str"); rok(mylist.size() == 1, "test alist size after append"); delete (POOL_MEM*)mylist.get(0); } { // debug_level = 800; smart_alist mylist; bool status = pluginlib::parse_param_add_str(mylist, "test", "test", "othervalue"); ok(status, "test parse_param_add_str with smart_alist"); rok(mylist.size() == 1, "test alist size after append"); } // { // alist mylist(10, not_owned_by_alist); // mylist.append((void*)10); // mylist.append((void*)20); // mylist.append((void*)30); // ok(mylist.size() == 3, "test size"); // uint64_t *a; // int i; // foreach_alist_index(i, a, &mylist) // { // printf("i:%d a:%ld\n", i, (uint64_t)a); // mylist.remove(i); // i--; // } // } return report(); } bacula-15.0.3/src/plugins/fd/pluginlib/metaplugin_test.cpp0000644000175000017500000000664114771010173023427 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be 
found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /** * @file metaplugin_tests.cpp * @author Radosław Korzeniewski (radoslaw@korzeniewski.net) * @brief This is a Bacula metaplugin unittests * @version 1.0.0 * @date 2021-04-07 * * @copyright Copyright (c) 2021 All rights reserved. * IP transferred to Bacula Systems according to agreement. */ #include "metaplugin.h" #include "bacula.h" #include "unittests.h" /* Plugin Info definitions */ const char *PLUGIN_LICENSE = "Bacula AGPLv3"; const char *PLUGIN_AUTHOR = "Radoslaw Korzeniewski"; const char *PLUGIN_DATE = "April 2021"; const char *PLUGIN_VERSION = "1.0.0"; const char *PLUGIN_DESCRIPTION = "Bacula Enterprise MetaPlugin"; /* Plugin compile time variables */ const char *PLUGINPREFIX = "metaplugin:"; const char *PLUGINNAME = "metaplugin"; const char *PLUGINNAMESPACE = "@metaplugin"; const bool CUSTOMNAMESPACE = true; const char *PLUGINAPI = "4"; const char *BACKEND_CMD = "/bin/true"; static bRC mycheckFile(bpContext *ctx, char *fname); checkFile_t checkFile = NULL; // checkFile_t checkFile = mycheckFile; struct ini_items plugin_items_dump[] = { {NULL, NULL, NULL, 0, NULL} }; const char * valid_params[] = { NULL }; #ifdef DEVELOPER const metadataTypeMap plugin_metadata_map[] = {{"METADATA_STREAM", plugin_meta_blob}}; #else const metadataTypeMap plugin_metadata_map[] = {{NULL, plugin_meta_invalid}}; #endif /* In order to link with libbaccfg */ int32_t r_last; int32_t r_first; RES_HEAD **res_head; bool save_resource(RES_HEAD **rhead, int type, RES_ITEM *items, int pass){return false;} bool save_resource(CONFIG*, int, RES_ITEM*, int) {return false;} void dump_resource(int type, RES *ares, void sendit(void *sock, const char *fmt, ...), void *sock){} void free_resource(RES *rres, int type){} union URES {}; RES_TABLE resources[] = {}; URES res_all; static bRC mycheckFile(bpContext *ctx, char *fname) { return bRC_Error; } struct vectstruct { const char *fname; bRC status; }; static const vectstruct testvect1[] = { {"/etc/passwd", bRC_OK}, {"/@MYSQL/", bRC_OK}, {"/@metaplugin/test/file", bRC_Seen}, {"/@kubernetes/namespace/test", bRC_OK}, {"@metaplugin/other", bRC_Seen}, {"test/file", bRC_OK}, {NULL, bRC_Error}, }; int main() { Unittests metaplugin_test("metaplugin_test"); struct bpContext bpctx = {}; bpContext *bpctxp = &bpctx; POOL_MEM testfname(PM_FNAME); METAPLUGIN mp(bpctxp); for (int i = 0; testvect1[i].fname != NULL; i++){ pm_strcpy(testfname, testvect1[i].fname); bRC status = mp.checkFile(bpctxp, testfname.c_str()); ok(status == testvect1[i].status, testfname.c_str()); } checkFile = mycheckFile; pm_strcpy(testfname, "@metaplugin/"); ok(mp.checkFile(bpctxp, testfname.c_str()) == bRC_Error, "custom mycheckFile()"); } bacula-15.0.3/src/plugins/fd/pluginlib/iso8601.cpp0000644000175000017500000001065414771010173021333 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. 
You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /** * @file iso8601.cpp * @author Radosław Korzeniewski (radoslaw@korzeniewski.net) * @brief This is a Bacula plugin ISO8601 parsing library. * @version 1.2.0 * @date 2020-01-05 * * @copyright Copyright (c) 2021 All rights reserved. IP transferred to Bacula Systems according to agreement. */ #include "iso8601.h" #include "lib/btime.h" #ifdef ISO8601_USE_REGEX // Adapted from http://delete.me.uk/2005/03/iso8601.html #define ISO8601_REGEX_RAW "(?P[0-9]{4})-(?P[0-9]{1,2})-(?P[0-9]{1,2})" \ "T(?P[0-9]{2}):(?P[0-9]{2})(:(?P[0-9]{2})(\.(?P[0-9]+))?)?" \ "(?PZ|[-+][0-9]{2}(:?[0-9]{2})?)?" #define ISO8601_TIMEZONE_REGEX_RAW "(?P[+-])(?P[0-9]{2}):?(?P[0-9]{2})?" /** * @brief Construct a new ISO8601DateTime::ISO8601DateTime object * */ ISO8601DateTime::ISO8601DateTime() : status(false) { const int options = REG_EXTENDED | REG_ICASE; int rc; char prbuf[500]; status = true; rc = regcomp(&ISO8601_REGEX, ISO8601_REGEX_RAW, options); if (rc != 0){ regerror(rc, &ISO8601_REGEX, prbuf, sizeof(prbuf)); DMsg0(DERROR, "Cannot initialize Bacula regression library for ISO8601_REGEX!\n"); DMsg1(DERROR, "regex Err=%s\n", prbuf); status = false; } rc = regcomp(&TIMEZONE_REGEX, ISO8601_TIMEZONE_REGEX_RAW, options); if (rc != 0){ regerror(rc, &ISO8601_REGEX, prbuf, sizeof(prbuf)); DMsg0(DERROR, "Cannot initialize Bacula regression library for TIMEZONE_REGEX!"); DMsg1(DERROR, "regex Err=%s\n", prbuf); status = false; } }; #endif // #define ISO8601STRSCAN1 // #define ISO8601STRSCAN2 // #define ISO8601STRSCAN3 static const char *scan_formats[] = { "%FT%T%z", "%FT%T%t%z", "%FT%T%Z", "%Y%m%dT%TZ", "%Y%m%dT%T%z", "%Y%m%dT%T%t%z", "%Y-%m-%dT%TZ", "%Y-%m-%dT%T%z", "%Y-%m-%dT%T%t%z", NULL, }; /** * @brief Parses ISO 8601 dates into utime_t values * The timezone is parsed from the date string. However it is quite common to * have dates without a timezone (not strictly correct). In this case the * default timezone specified in default_timezone is used. This is UTC by * default. 
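 *
 * Example of accepted inputs, matching the scan_formats table below (the
 * timestamps are illustrative and the object is assumed to be constructible
 * as declared in iso8601.h):
 * @code
 *    ISO8601DateTime iso;
 *    utime_t t1 = iso.parse_data("2021-08-20T12:30:00Z");      // extended format, UTC
 *    utime_t t2 = iso.parse_data("20210820T12:30:00+0200");    // basic date, numeric offset
 * @endcode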
* * @param datestring * @return utime_t */ utime_t ISO8601DateTime::parse_data(const char *datestring) { utime_t time = 0; tzset(); if (strlen(datestring) > 0){ const char *fmt = scan_formats[0]; for (int a = 1; fmt != NULL; a++) { #if __cplusplus > 201103L struct tm tm {0}; #else struct tm tm; memset(&tm, 0, sizeof(tm)); #endif char *rc = strptime(datestring, fmt, &tm); if (rc != NULL && *rc == '\0'){ // no error scanning time time = mktime(&tm) - timezone; break; } fmt = scan_formats[a]; } } return time; #ifdef ISO8601_USE_REGEX int rc; rc = regexec(&ISO8601_REGEX, datestring.c_str(), 0, NULL, 0); if (rc == 0){ /* found */ } // m = ISO8601_REGEX.match(datestring) // if not m: // raise ParseError("Unable to parse date string %r" % datestring) // groups = m.groupdict() // tz = parse_timezone(groups["timezone"]) // if groups["fraction"] is None: // groups["fraction"] = 0 // else: // groups["fraction"] = int(float("0.%s" % groups["fraction"]) * 1e6) // try: // return datetime(int(groups["year"]), int(groups["month"]), int(groups["day"]), // int(groups["hour"]), int(groups["minute"]), int(groups["second"]), // int(groups["fraction"]), tz) // except Exception as e: // raise ParseError("Failed to create a valid datetime record due to: %s" // % e) #endif }; bacula-15.0.3/src/plugins/fd/pluginlib/pluginlib.h0000644000175000017500000002262514771010173021655 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /** * @file pluginlib.h * @author Radosław Korzeniewski (radoslaw@korzeniewski.net) * @brief Common definitions and utility functions for Inteos plugins. * @version 2.2.0 * @date 2021-04-26 * * Common definitions and utility functions for Inteos plugins. * Functions defines a common framework used in our utilities and plugins. * Author: Radosław Korzeniewski, radekk@inteos.pl, Inteos Sp. z o.o. 
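 *
 * The DMSG*() and JMSG*() macro families defined below are the usual way the
 * plugins in this directory emit debug and job messages. A typical
 * (illustrative) call site, with `fname` standing for whatever local variable
 * is being reported, looks like:
 * @code
 *    DMSG1(ctx, DERROR, "Cannot open file: %s\n", fname);
 *    JMSG1(ctx, M_ERROR, "Cannot open file: %s\n", fname);
 * @endcode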
*/ #ifndef _PLUGINLIB_H_ #define _PLUGINLIB_H_ #include #include #include #include "bacula.h" #include "lib/ini.h" #include "fd_plugins.h" /* Pointers to Bacula functions used in plugins */ extern bFuncs *bfuncs; extern bInfo *binfo; // Plugin linking time variables extern const char *PLUGINPREFIX; extern const char *PLUGINNAME; /* module definition */ #ifndef PLUGMODULE #define PLUGMODULE "PluginLib::" #endif #define _STR(x) __STR(x) #define __STR(x) #x #ifdef VERSIONGIT #define VERSIONGIT_STR _STR(VERSIONGIT) #else #define VERSIONGIT_STR "/unknown" #endif /* size of different string or query buffers */ #define BUFLEN 4096 #define BIGBUFLEN 65536 /* debug and messages functions */ #define JMSG0(ctx,type,msg) \ if (ctx) bfuncs->JobMessage ( ctx, __FILE__, __LINE__, type, 0, "%s " msg, PLUGINPREFIX ); #define JMSG1 JMSG #define JMSG(ctx,type,msg,var) \ if (ctx) bfuncs->JobMessage ( ctx, __FILE__, __LINE__, type, 0, "%s " msg, PLUGINPREFIX, var ); #define JMSG2(ctx,type,msg,var1,var2) \ if (ctx) bfuncs->JobMessage ( ctx, __FILE__, __LINE__, type, 0, "%s " msg, PLUGINPREFIX, var1, var2 ); #define JMSG3(ctx,type,msg,var1,var2,var3) \ if (ctx) bfuncs->JobMessage ( ctx, __FILE__, __LINE__, type, 0, "%s " msg, PLUGINPREFIX, var1, var2, var3 ); #define JMSG4(ctx,type,msg,var1,var2,var3,var4) \ if (ctx) bfuncs->JobMessage ( ctx, __FILE__, __LINE__, type, 0, "%s " msg, PLUGINPREFIX, var1, var2, var3, var4 ); #define DMSG0(ctx,level,msg) \ if (ctx) bfuncs->DebugMessage ( ctx, __FILE__, __LINE__, level, "%s " msg, PLUGINPREFIX ); #define DMSG1 DMSG #define DMSG(ctx,level,msg,var) \ if (ctx) bfuncs->DebugMessage ( ctx, __FILE__, __LINE__, level, "%s " msg, PLUGINPREFIX, var ); #define DMSG2(ctx,level,msg,var1,var2) \ if (ctx) bfuncs->DebugMessage ( ctx, __FILE__, __LINE__, level, "%s " msg, PLUGINPREFIX, var1, var2 ); #define DMSG3(ctx,level,msg,var1,var2,var3) \ if (ctx) bfuncs->DebugMessage ( ctx, __FILE__, __LINE__, level, "%s " msg, PLUGINPREFIX, var1, var2, var3 ); #define DMSG4(ctx,level,msg,var1,var2,var3,var4) \ if (ctx) bfuncs->DebugMessage ( ctx, __FILE__, __LINE__, level, "%s " msg, PLUGINPREFIX, var1, var2, var3, var4 ); #define DMSG6(ctx,level,msg,var1,var2,var3,var4,var5,var6) \ if (ctx) bfuncs->DebugMessage ( ctx, __FILE__, __LINE__, level, "%s " msg, PLUGINPREFIX, var1, var2, var3, var4, var5, var6 ); #define DMSG7(ctx,level,msg,var1,var2,var3,var4,var5,var6,var7) \ if (ctx) bfuncs->DebugMessage ( ctx, __FILE__, __LINE__, level, "%s " msg, PLUGINPREFIX, var1, var2, var3, var4, var5, var6, var7); /* fixed debug level definitions */ #define D1 1 /* debug for every error */ #define DERROR D1 #define D2 10 /* debug only important stuff */ #define DINFO D2 #define D3 200 /* debug for information only */ #define DDEBUG D3 #define D4 800 /* debug for detailed information only */ #define DVDEBUG D4 #define getBaculaVar(bvar,val) bfuncs->getBaculaValue(ctx, bvar, val); #define checkChanges(sp) bfuncs->checkChanges(ctx, sp); #define getAccurateAttribs(att) bfuncs->getAccurateAttribs(ctx, att); /* used for sanity check in plugin functions */ #define ASSERT_CTX \ if (!ctx || !ctx->pContext || !bfuncs) \ { \ return bRC_Error; \ } /* defines for handleEvent */ #define DMSG_EVENT_STR(event,value) DMSG2(ctx, DINFO, "%s value=%s\n", eventtype2str(event), NPRT((char *)value)); #define DMSG_EVENT_CHAR(event,value) DMSG2(ctx, DINFO, "%s value='%c'\n", eventtype2str(event), (char)value); #define DMSG_EVENT_LONG(event,value) DMSG2(ctx, DINFO, "%s value=%ld\n", eventtype2str(event), (intptr_t)value); #define 
DMSG_EVENT_PTR(event,value) DMSG2(ctx, DINFO, "%s value=%p\n", eventtype2str(event), value); /* pure debug macros */ #define DMsg0(level,msg) Dmsg1(level, PLUGMODULE "%s: " msg, __func__) #define DMsg1(level,msg,a1) Dmsg2(level, PLUGMODULE "%s: " msg, __func__, a1) #define DMsg2(level,msg,a1,a2) Dmsg3(level, PLUGMODULE "%s: " msg, __func__, a1, a2) #define DMsg3(level,msg,a1,a2,a3) Dmsg4(level, PLUGMODULE "%s: " msg, __func__, a1, a2, a3) #define DMsg4(level,msg,a1,a2,a3,a4) Dmsg5(level, PLUGMODULE "%s: " msg, __func__, a1, a2, a3, a4) #define BOOLSTR(b) (b?"True":"False") /* * Common structure for key/pair values */ class key_pair : public SMARTALLOC { public: POOL_MEM key; POOL_MEM value; key_pair() : key(PM_NAME), value(PM_MESSAGE) {}; key_pair(const char *k, const char *v) { pm_strcpy(key, k); pm_strcpy(value, v); }; ~key_pair() {}; }; const char *eventtype2str(bEvent *event); uint64_t pluglib_size_suffix(int disksize, char suff); uint64_t pluglib_size_suffix(double disksize, char suff); bRC pluglib_mkpath(bpContext* ctx, char* path, bool isfatal); /* * Checks if plugin command points to our Plugin * * in: * command - the plugin command used for backup/restore * out: * True - if it is our plugin command * False - the other plugin command */ inline bool isourplugincommand(const char *pluginprefix, const char *command) { /* check if it is our Plugin command */ int len = strlen(pluginprefix); if (len > 0 && pluginprefix[len-1] == ':') { len--; } if (strncmp(pluginprefix, command, len) == 0){ /* it is our plugin prefix */ return true; } return false; } /** * @brief Checks if fname is a part of pluginprefix namespace * * @param pluginprefix plugin namespace prefix * @param fname file name to check * @return true when it matches * @return false when not */ inline bool isourpluginfname(const char *pluginprefix, const char *fname) { /* check if it is our Plugin fname */ if (strncmp(pluginprefix, fname, strlen(pluginprefix)) == 0){ return true; } char _fn[strlen(pluginprefix) + 2]; _fn[0] = '/'; _fn[1] = '\0'; strcat(_fn, pluginprefix); if (strncmp(_fn, fname, strlen(_fn)) == 0){ return true; } return false; } void plugutil_str_split_to_alist(alist &list, const char * str, const char sep = '.'); void plugutil_str_split_to_alist(alist * list, const char * str, const char sep = '.'); alist * plugutil_str_split_to_alist(const char * str, const char sep = '.'); /** * @brief Verifies if path is local except '/' * * @param path * @return true * @return false */ inline bool islocalpath(const char *path) { bool result = path && strlen(path) > 1; bool spath = path[0] == '/'; #ifdef HAVE_WIN32 bool wpath = isalpha(path[0]) && path[1] == ':' ; // simple drive letter #else bool wpath = false; #endif return result && (spath || wpath); } /* plugin parameters manipulation */ bool render_param(POOLMEM **param, const char *pname, const char *fmt, const char *name, const char *value); bool render_param(POOLMEM **param, const char *pname, const char *fmt, const char *name, const int value); bool render_param(bool ¶m, const char *pname, const char *name, const bool value); bool render_param(POOL_MEM ¶m, INI_ITEM_HANDLER *handler, char *key, item_value val); bool parse_param(bool ¶m, const char *pname, const char *name, const char *value); bool parse_param(int ¶m, const char *pname, const char *name, const char *value, bool *err = NULL); bool parse_param(POOL_MEM ¶m, const char *pname, const char *name, const char *value); bool setup_param(int32_t ¶m, const char *pname, const char *name, const int32_t value); bool 
setup_param(bool ¶m, const char *pname, const char *name, const bool value); bool setup_param(POOL_MEM ¶m, const char *pname, const char *name, const char *value); bool parse_param_add_str(alist **list, const char *pname, const char *name, const char *value); bool scan_parameter_str(const char * cmd, const char *prefix, POOL_MEM ¶m); inline bool scan_parameter_str(const POOL_MEM &cmd, const char *prefix, POOL_MEM ¶m) { return scan_parameter_str(cmd.c_str(), prefix, param); } bool scan_parameter_int(const char *cmd, const char *prefix, int ¶m); inline bool scan_parameter_int(const POOL_MEM &cmd, const char *prefix, int ¶m) { return scan_parameter_int(cmd.c_str(), prefix, param); } void scan_and_terminate_str(POOL_MEM &buf, int msglen); namespace pluginlib { bool parse_param_add_str(alist &list, const char *pname, const char *name, const char *value); } // namespace pluginlib #endif /* _PLUGINLIB_H_ */ bacula-15.0.3/src/plugins/fd/pluginlib/pluginbase.cpp0000644000175000017500000002224014771010173022345 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /** * @file pluginbase.cpp * @author Radosław Korzeniewski (radoslaw@korzeniewski.net) * @brief This is a Bacula File Daemon generic plugin interface. * @version 1.0.0 * @date 2021-04-08 * * @copyright Copyright (c) 2021 All rights reserved. IP transferred to Bacula Systems according to agreement. */ #include "pluginbase.h" #include #include #include /* * libbac uses its own sscanf implementation which is not compatible with * libc implementation, unfortunately. 
* use bsscanf for Bacula sscanf flavor */ #ifdef sscanf #undef sscanf #endif #define pluginclass(ctx) pluginlib::PLUGINBCLASS *self = (pluginlib::PLUGINBCLASS*)ctx->pContext; /* Forward referenced functions */ static bRC newPlugin(bpContext *ctx); static bRC freePlugin(bpContext *ctx); static bRC getPluginValue(bpContext *ctx, pVariable var, void *value); static bRC setPluginValue(bpContext *ctx, pVariable var, void *value); static bRC handlePluginEvent(bpContext *ctx, bEvent *event, void *value); static bRC startBackupFile(bpContext *ctx, struct save_pkt *sp); static bRC endBackupFile(bpContext *ctx); static bRC pluginIO(bpContext *ctx, struct io_pkt *io); static bRC startRestoreFile(bpContext *ctx, const char *cmd); static bRC endRestoreFile(bpContext *ctx); static bRC createFile(bpContext *ctx, struct restore_pkt *rp); static bRC setFileAttributes(bpContext *ctx, struct restore_pkt *rp); static bRC plugincheckFile(bpContext *ctx, char *fname); static bRC handleXACLdata(bpContext *ctx, struct xacl_pkt *xacl); static bRC queryParameter(bpContext *ctx, struct query_pkt *qp); static bRC metadataRestore(bpContext *ctx, struct meta_pkt *mp); /* Pointers to Bacula functions */ bFuncs *bfuncs = NULL; bInfo *binfo = NULL; static pFuncs pluginFuncs = { sizeof(pluginFuncs), FD_PLUGIN_INTERFACE_VERSION, /* Entry points into plugin */ newPlugin, freePlugin, getPluginValue, setPluginValue, handlePluginEvent, startBackupFile, endBackupFile, startRestoreFile, endRestoreFile, pluginIO, createFile, setFileAttributes, plugincheckFile, handleXACLdata, NULL, /* No restore file list */ NULL, /* No checkStream */ queryParameter, metadataRestore, }; #ifdef __cplusplus extern "C" { #endif /* Plugin Information structure */ static pInfo pluginInfo = { sizeof(pluginInfo), FD_PLUGIN_INTERFACE_VERSION, FD_PLUGIN_MAGIC, PLUGIN_LICENSE, PLUGIN_AUTHOR, PLUGIN_DATE, PLUGIN_VERSION, PLUGIN_DESCRIPTION, }; /* * Plugin called here when it is first loaded */ bRC DLL_IMP_EXP loadPlugin(bInfo *lbinfo, bFuncs *lbfuncs, pInfo ** pinfo, pFuncs ** pfuncs) { bfuncs = lbfuncs; /* set Bacula function pointers */ binfo = lbinfo; Dmsg4(DINFO, "%s Plugin version %s%s %s\n", PLUGINNAME, PLUGIN_VERSION, VERSIONGIT_STR, PLUGIN_DATE); *pinfo = &pluginInfo; /* return pointer to our info */ *pfuncs = &pluginFuncs; /* return pointer to our functions */ return bRC_OK; } /* * Plugin called here when it is unloaded, normally when Bacula is going to exit. */ bRC DLL_IMP_EXP unloadPlugin() { return bRC_OK; } #ifdef __cplusplus } #endif /* * Called here to make a new instance of the plugin -- i.e. when * a new Job is started. There can be multiple instances of * each plugin that are running at the same time. Your * plugin instance must be thread safe and keep its own * local data. */ static bRC newPlugin(bpContext *ctx) { pluginlib::PLUGINBCLASS *self = (pluginlib::PLUGINBCLASS*)new_plugin_factory(ctx); if (!self) { DMSG1(ctx, DERROR, "newPlugin: Cannot build plugin %s!\n", PLUGINNAME); return bRC_Error; } DMSG(ctx, DINFO, "newPlugin: %s\n", PLUGINNAME); ctx->pContext = (void *)self; pthread_t mythid = pthread_self(); DMSG2(ctx, DVDEBUG, "pContext = %p thid = %p\n", self, mythid); /* setup plugin */ self->setup_plugin(ctx); return bRC_OK; } /* * Release everything concerning a particular instance of * a plugin. Normally called when the Job terminates. 
*/ static bRC freePlugin(bpContext *ctx) { ASSERT_CTX; pluginclass(ctx); DMSG(ctx, D1, "freePlugin this=%p\n", self); if (!self){ return bRC_Error; } delete self; return bRC_OK; } /* * Called by core code to get a variable from the plugin. * Not currently used. */ static bRC getPluginValue(bpContext *ctx, pVariable var, void *value) { ASSERT_CTX; DMSG0(ctx, D3, "getPluginValue called.\n"); pluginclass(ctx); return self->getPluginValue(ctx,var, value); } /* * Called by core code to set a plugin variable. * Not currently used. */ static bRC setPluginValue(bpContext *ctx, pVariable var, void *value) { ASSERT_CTX; DMSG0(ctx, D3, "setPluginValue called.\n"); pluginclass(ctx); return self->setPluginValue(ctx, var, value); } /* * Called by Bacula when there are certain events that the * plugin might want to know. The value depends on the * event. */ static bRC handlePluginEvent(bpContext *ctx, bEvent *event, void *value) { ASSERT_CTX; if (!event) { return bRC_Error; } pthread_t mythid = pthread_self(); pluginclass(ctx); DMSG3(ctx, D1, "handlePluginEvent (%i) pContext = %p thid = %p\n", event->eventType, self, mythid); return self->handlePluginEvent(ctx, event, value); } /* * Called when starting to backup a file. Here the plugin must * return the "stat" packet for the directory/file and provide * certain information so that Bacula knows what the file is. * The plugin can create "Virtual" files by giving them * a name that is not normally found on the file system. */ static bRC startBackupFile(bpContext *ctx, struct save_pkt *sp) { ASSERT_CTX; if (!sp) { return bRC_Error; } DMSG0(ctx, D1, "startBackupFile.\n"); pluginclass(ctx); return self->startBackupFile(ctx, sp); } /* * Done backing up a file. */ static bRC endBackupFile(bpContext *ctx) { ASSERT_CTX; DMSG0(ctx, D1, "endBackupFile.\n"); pluginclass(ctx); return self->endBackupFile(ctx); } /* * Called when starting restore the file, right after a createFile(). */ static bRC startRestoreFile(bpContext *ctx, const char *cmd) { ASSERT_CTX; DMSG1(ctx, D1, "startRestoreFile: %s\n", NPRT(cmd)); pluginclass(ctx); return self->startRestoreFile(ctx, cmd); } /* * Done restore the file. */ static bRC endRestoreFile(bpContext *ctx) { ASSERT_CTX; DMSG0(ctx, D1, "endRestoreFile.\n"); pluginclass(ctx); return self->endRestoreFile(ctx); } /* * Do actual I/O. Bacula calls this after startBackupFile * or after startRestoreFile to do the actual file * input or output. */ static bRC pluginIO(bpContext *ctx, struct io_pkt *io) { ASSERT_CTX; DMSG0(ctx, DVDEBUG, "pluginIO.\n"); pluginclass(ctx); return self->pluginIO(ctx, io); } /* * Called here to give the plugin the information needed to * re-create the file on a restore. It basically gets the * stat packet that was created during the backup phase. * This data is what is needed to create the file, but does * not contain actual file data. */ static bRC createFile(bpContext *ctx, struct restore_pkt *rp) { ASSERT_CTX; DMSG0(ctx, D1, "createFile.\n"); pluginclass(ctx); return self->createFile(ctx, rp); } /* * Called after the file has been restored. This can be used to * set directory permissions, ... 
*/ static bRC setFileAttributes(bpContext *ctx, struct restore_pkt *rp) { ASSERT_CTX; DMSG0(ctx, D1, "setFileAttributes.\n"); pluginclass(ctx); return self->setFileAttributes(ctx, rp); } /* * handleXACLdata used for ACL/XATTR backup and restore */ static bRC handleXACLdata(bpContext *ctx, struct xacl_pkt *xacl) { ASSERT_CTX; DMSG(ctx, D1, "handleXACLdata: %i\n", xacl->func); pluginclass(ctx); return self->handleXACLdata(ctx, xacl); } /* * QueryParameter interface */ static bRC queryParameter(bpContext *ctx, struct query_pkt *qp) { ASSERT_CTX; DMSG2(ctx, D1, "queryParameter: cmd:%s param:%s\n", qp->command, qp->parameter); pluginclass(ctx); return self->queryParameter(ctx, qp); } /* * Metadata Restore interface */ static bRC metadataRestore(bpContext *ctx, struct meta_pkt *mp) { ASSERT_CTX; DMSG2(ctx, D1, "metadataRestore: %d %d\n", mp->total_size, mp->type); pluginclass(ctx); return self->metadataRestore(ctx, mp); } /* * checkFile used for accurate mode backup */ static bRC plugincheckFile(bpContext * ctx, char *fname) { ASSERT_CTX; DMSG(ctx, D3, "checkFile for: %s\n", fname); pluginclass(ctx); return self->checkFile(ctx, fname); } bacula-15.0.3/src/plugins/fd/pluginlib/smartptr.h0000644000175000017500000000312414771010173021535 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /** * @file commctx.h * @author Radosław Korzeniewski (radoslaw@korzeniewski.net) * @brief This is a simple smart pointer guard conceptually based on C++11 smart pointers - unique_ptr. * @version 1.2.0 * @date 2021-04-23 * * @copyright Copyright (c) 2021 All rights reserved. IP transferred to Bacula Systems according to agreement. */ #ifndef PLUGINLIB_SMARTPTR_H #define PLUGINLIB_SMARTPTR_H /** * @brief This is a simple smart pointer based conceptually on C++11 smart unique pointer. * * @tparam T is a type for a pointer to guard */ template class smart_ptr { T * _ptr; public: smart_ptr() : _ptr(NULL) {} smart_ptr(T *ptr) : _ptr(ptr) {} ~smart_ptr() { if (_ptr != NULL) { delete _ptr; _ptr = NULL; } } T * get() { return _ptr; } smart_ptr& operator=(T* p) { if (_ptr != NULL) { delete _ptr; } _ptr = p; return *this; } }; #endif // PLUGINLIB_SMARTPTR_H bacula-15.0.3/src/plugins/fd/pluginlib/pluginclass.cpp0000644000175000017500000006541614771010173022554 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /** * @file pluginclass.cpp * @author Radosław Korzeniewski (radoslaw@korzeniewski.net) * @brief This is a Bacula File Daemon general plugin framework. The Class. * @version 1.0.0 * @date 2021-04-08 * * @copyright Copyright (c) 2021 All rights reserved. IP transferred to Bacula Systems according to agreement. */ #include "pluginclass.h" #include #include #include /* * libbac uses its own sscanf implementation which is not compatible with * libc implementation, unfortunately. * use bsscanf for Bacula sscanf flavor */ #ifdef sscanf #undef sscanf #endif // Plugin linking time variables extern const char *PLUGINPREFIX; extern const char *PLUGINNAME; extern const char *PLUGINNAMESPACE; // synchronie access to job_cancelled variable // smart_lock lg(&mutex); - removed on request #define CHECK_JOB_CANCELLED \ { \ if (job_cancelled) { \ return bRC_Error; \ } \ } namespace pluginlib { /** * @brief The main plugin setup method executed * * @param ctx for Bacula debug and jobinfo messages */ void PLUGINBCLASS::setup_plugin(bpContext *ctx) { DMSG0(ctx, DINFO, "PLUGINCLASS::setup_plugin\n"); getBaculaVar(bVarJobId, (void *)&JobId); DMSG(ctx, D1, "bVarJobId: %d\n", JobId); char *varpath; getBaculaVar(bVarExePath, (void *)&varpath); DMSG(ctx, DINFO, "bVarExePath: %s\n", varpath); pm_strcpy(execpath, varpath); strip_trailing_slashes(execpath.c_str()); DMSG(ctx, DINFO, "ExePath: %s\n", execpath.c_str()); getBaculaVar(bVarWorkingDir, (void *)&varpath); DMSG(ctx, DINFO, "bVarWorkingDir: %s\n", varpath); pm_strcpy(workingpath, varpath); strip_trailing_slashes(workingpath.c_str()); DMSG(ctx, DINFO, "WorkingPath: %s\n", workingpath.c_str()); } /** * @brief This is the main method for handling events generated by Bacula. * The behavior of the method depends on event type generated, but there are * some events which does nothing, just return with bRC_OK. Every event is * tracked in debug trace file to verify the event flow during development. 
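 *
 * The interpretation of the opaque value argument depends on the event type:
 * for bEventLevel the job level character ('F', 'I' or 'D') is encoded in the
 * pointer value itself, for bEventSince it carries the since time, and for the
 * bEvent*Command events it points to the plugin command string which is used
 * to switch the per-command plugin context.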
* * @param ctx for Bacula debug and jobinfo messages * @param event a Bacula event structure * @param value optional event value * @return bRC bRC_OK - in most cases signal success/no error * bRC_Error - in most cases signal error * - depend on Bacula Plugin API if applied */ bRC PLUGINBCLASS::handlePluginEvent(bpContext *ctx, bEvent *event, void *value) { // extract original plugin context, basically it should be `this` PLUGINBCLASS *pctx = (PLUGINBCLASS *)ctx->pContext; CHECK_JOB_CANCELLED; switch (event->eventType) { case bEventJobStart: DMSG(ctx, D3, "bEventJobStart value=%s\n", NPRT((char *)value)); // getBaculaVar(bVarJobId, (void *)&JobId); getBaculaVar(bVarJobName, (void *)&JobName); // if (CUSTOMPREVJOBNAME){ // getBaculaVar(bVarPrevJobName, (void *)&prevjobname); // } return perform_jobstart(ctx); case bEventJobEnd: DMSG(ctx, D3, "bEventJobEnd value=%s\n", NPRT((char *)value)); return perform_jobend(ctx); case bEventLevel: char lvl; lvl = (char)((intptr_t) value & 0xff); DMSG(ctx, D2, "bEventLevel='%c'\n", lvl); switch (lvl) { case 'F': DMSG0(ctx, D2, "backup level = Full\n"); mode = BackupFull; break; case 'I': DMSG0(ctx, D2, "backup level = Incr\n"); mode = BackupIncr; break; case 'D': DMSG0(ctx, D2, "backup level = Diff\n"); mode = BackupDiff; break; default: // TODO: handle other backup levels DMSG0(ctx, D2, "unsupported backup level!\n"); return bRC_Error; } return perform_joblevel(ctx, lvl); case bEventSince: since = (time_t) value; DMSG(ctx, D2, "bEventSince=%ld\n", (intptr_t) since); return perform_jobsince(ctx); case bEventStartBackupJob: DMSG(ctx, D3, "bEventStartBackupJob value=%s\n", NPRT((char *)value)); return perform_backupjobstart(ctx); case bEventEndBackupJob: DMSG(ctx, D2, "bEventEndBackupJob value=%s\n", NPRT((char *)value)); return perform_backupjobend(ctx); case bEventStartRestoreJob: DMSG(ctx, DINFO, "StartRestoreJob value=%s\n", NPRT((char *)value)); getBaculaVar(bVarWhere, &where); DMSG(ctx, DINFO, "Where=%s\n", NPRT(where)); getBaculaVar(bVarRegexWhere, ®exwhere); DMSG(ctx, DINFO, "RegexWhere=%s\n", NPRT(regexwhere)); getBaculaVar(bVarReplace, &replace); DMSG(ctx, DINFO, "Replace=%c\n", replace); mode = Restore; return perform_restorejobstart(ctx); case bEventEndRestoreJob: DMSG(ctx, DINFO, "bEventEndRestoreJob value=%s\n", NPRT((char *)value)); return perform_restorejobend(ctx); /* Plugin command e.g. plugin = :parameters */ case bEventEstimateCommand: DMSG(ctx, D1, "bEventEstimateCommand value=%s\n", NPRT((char *)value)); switch (mode) { case BackupIncr: mode = EstimateIncr; break; case BackupDiff: mode = EstimateDiff; break; default: #if __cplusplus >= 201703L [[fallthrough]]; #endif case BackupFull: mode = EstimateFull; break; } pluginctx_switch_command((char *)value); return prepare_estimate(ctx, (char *)value); /* Plugin command e.g. plugin = :parameters */ case bEventBackupCommand: DMSG(ctx, D2, "bEventBackupCommand value=%s\n", NPRT((char *)value)); pluginctx_switch_command((char *)value); return prepare_backup(ctx, (char*)value); /* Plugin command e.g. plugin = :parameters */ case bEventRestoreCommand: DMSG(ctx, D2, "bEventRestoreCommand value=%s\n", NPRT((char *)value)); pluginctx_switch_command((char *)value); return prepare_restore(ctx, (char*)value); /* Plugin command e.g. 
plugin = :parameters */ case bEventPluginCommand: DMSG(ctx, D2, "bEventPluginCommand value=%s\n", NPRT((char *)value)); getBaculaVar(bVarAccurate, (void *)&accurate_mode); return prepare_command(ctx, (char*)value); case bEventOptionPlugin: #if __cplusplus >= 201703L [[fallthrough]]; #endif case bEventHandleBackupFile: if (isourplugincommand(PLUGINPREFIX, (char*)value)){ DMSG0(ctx, DERROR, "Invalid handle Option Plugin called!\n"); JMSG2(ctx, M_FATAL, "The %s plugin doesn't support the Option Plugin configuration.\n" "Please review your FileSet and move the Plugin=%s" "... command into the Include {} block.\n", PLUGINNAME, PLUGINPREFIX); return bRC_Error; } break; case bEventEndFileSet: DMSG(ctx, D3, "bEventEndFileSet value=%s\n", NPRT((char *)value)); return perform_jobendfileset(ctx); case bEventRestoreObject: /* Restore Object handle - a plugin configuration for restore and user supplied parameters */ if (!value){ DMSG0(ctx, DINFO, "End restore objects\n"); break; } DMSG(ctx, D2, "bEventRestoreObject value=%p\n", value); return parse_plugin_restore_object(ctx, (restore_object_pkt *) value); case bEventCancelCommand: DMSG2(ctx, D3, "bEventCancelCommand self = %p pctx = %p\n", this, pctx); pctx->job_cancelled = true; return pctx->perform_cancel_command(ctx); default: // enabled only for Debug DMSG2(ctx, D2, "Unknown event: %s (%d) \n", eventtype2str(event), event->eventType); } return bRC_OK; } /** * @brief Parsing a plugin command. * * @param ctx bpContext - Bacula Plugin context structure * @param command plugin command string to parse * @param params output parsed params list * @return bRC bRC_OK - on success, bRC_Error - on error */ bRC PLUGINBCLASS::parse_plugin_command(bpContext *ctx, const char *command) { DMSG(ctx, DINFO, "Parse command: %s\n", command); if (parser.parse_cmd(command) != bRC_OK) { DMSG0(ctx, DERROR, "Unable to parse Plugin command line.\n"); JMSG0(ctx, M_FATAL, "Unable to parse Plugin command line.\n"); return bRC_Error; } /* switch pluginctx to the required context or allocate a new one */ pluginctx_switch_command(command); /* the first (zero) parameter is a plugin name, we should skip it */ for (int i = 1; i < parser.argc; i++) { /* scan for abort_on_error parameter */ if (strcasecmp(parser.argk[i], "abort_on_error") == 0){ /* found, so check the value if provided, I only check the first char */ if (parser.argv[i] && *parser.argv[i] == '0') { pluginctx_clear_abort_on_error(); } else { pluginctx_set_abort_on_error(); } DMSG1(ctx, DINFO, "abort_on_error found: %s\n", pluginctx_is_abort_on_error() ? 
"True" : "False"); continue; } /* scan for listing parameter, so the estimate job should be executed as a Listing procedure */ if (EstimateFull == mode && bstrcmp(parser.argk[i], "listing")) { /* we have a listing parameter which for estimate means .ls command */ DMSG0(ctx, DINFO, "listing procedure param found\n"); mode = Listing; const char **table = get_listing_top_struct(); if (table) { pm_strcpy(m_listing_query, parser.argv[i]); m_listing_top_nr = 0; for (int func = 0; table[func]; func++) { if (bstrcmp(parser.argv[i], table[func]) || (*parser.argv[i] == '/' && bstrcmp(parser.argv[i] + 1, table[func]))) { DMSG2(ctx, DDEBUG, "listing data: %s %d\n", table[func], func); m_listing_func = func; m_listing_top_nr = -1; break; } } } DMSG2(ctx, DDEBUG, "m_listing_top_nr: %d m_listing_func: %d\n", m_listing_top_nr, m_listing_func); continue; } /* handle it with pluginctx */ bRC status = pluginctx_parse_parameter(ctx, parser.argk[i], parser.argv[i]); switch (status) { case bRC_OK: /* the parameter was handled by xenctx, proceed to the next */ continue; case bRC_Error: /* parsing returned error, raise it up */ return bRC_Error; default: break; } DMSG(ctx, DERROR, "Unknown parameter: %s\n", parser.argk[i]); JMSG(ctx, pluginctx_jmsg_err_level(), "Unknown parameter: %s\n", parser.argk[i]); } #if 0 /* count the numbers of user parameters if any */ // count = get_ini_count(); /* the first (zero) parameter is a plugin name, we should skip it */ argc = parser.argc - 1; parargc = argc + count; /* first parameters from plugin command saved during backup */ for (int i = 1; i < parser.argc; i++) { param = new POOL_MEM(PM_FNAME); // TODO: change to POOL_MEM found = false; int k; /* check if parameter overloaded by restore parameter */ if ((k = check_ini_param(parser.argk[i])) != -1){ found = true; DMSG1(ctx, DINFO, "parse_plugin_command: %s found in restore parameters\n", parser.argk[i]); if (render_param(ctx, *param, ini.items[k].handler, parser.argk[i], ini.items[k].val) != bRC_OK){ delete(param); return bRC_Error; } params.append(param); parargc--; } /* check if param overloaded above */ if (!found){ if (parser.argv[i]){ Mmsg(*param, "%s=%s\n", parser.argk[i], parser.argv[i]); params.append(param); } else { Mmsg(*param, "%s=1\n", parser.argk[i]); params.append(param); } } /* param is always ended with '\n' */ DMSG(ctx, DINFO, "Param: %s", param); } /* check what was missing in plugin command but get from ini file */ if (argc < parargc){ for (int k = 0; ini.items[k].name; k++){ if (ini.items[k].found && !check_plugin_param(ini.items[k].name, ¶ms)){ param = new POOL_MEM(PM_FNAME); DMSG1(ctx, DINFO, "parse_plugin_command: %s from restore parameters\n", ini.items[k].name); if (render_param(ctx, *param, ini.items[k].handler, (char*)ini.items[k].name, ini.items[k].val) != bRC_OK){ delete(param); return bRC_Error; } params.append(param); /* param is always ended with '\n' */ DMSG(ctx, DINFO, "Param: %s", param); } } } #endif return bRC_OK; } /** * @brief Parse a Restore Object saved during backup. It handle both plugin config and other restore objects. 
* * @param ctx Bacula Plugin context structure * @param rop a restore object structure to parse * @return bRC bRC_OK - on success * bRC_Error - on error */ bRC PLUGINBCLASS::parse_plugin_restore_object(bpContext *ctx, restore_object_pkt *rop) { if (!rop){ return bRC_OK; /* end of rop list */ } DMSG2(ctx, DDEBUG, "parse_plugin_restore_object: %s %d\n", rop->object_name, rop->object_type); pluginctx_switch_command(rop->plugin_name); // first check plugin config if (strcmp(rop->object_name, INI_RESTORE_OBJECT_NAME) == 0 && (rop->object_type == FT_PLUGIN_CONFIG || rop->object_type == FT_PLUGIN_CONFIG_FILLED)) { /* we have a single config RO for every command */ DMSG(ctx, DINFO, "plugin config for: %s\n", rop->plugin_name); bRC status = parse_plugin_config(ctx, rop); if (status != bRC_OK) { return bRC_Error; } return pluginctx_parse_plugin_config(ctx, rop); } // handle any other RO restore bRC status = handle_plugin_restore_object(ctx, rop); if (status != bRC_OK) { return bRC_Error; } return pluginctx_handle_restore_object(ctx, rop); } /** * @brief Handle Bacula Plugin I/O API * * @param ctx for Bacula debug and jobinfo messages * @param io Bacula Plugin API I/O structure for I/O operations * @return bRC bRC_OK - when successful * bRC_Error - on any error * io->status, io->io_errno - correspond to a plugin io operation status */ bRC PLUGINBCLASS::pluginIO(bpContext *ctx, struct io_pkt *io) { static int rw = 0; // this variable handles single debug message CHECK_JOB_CANCELLED; /* assume no error from the very beginning */ io->status = 0; io->io_errno = 0; switch (io->func) { case IO_OPEN: DMSG(ctx, D2, "IO_OPEN: (%s)\n", io->fname); switch (mode) { case BackupFull: #if __cplusplus >= 201703L [[fallthrough]]; #endif case BackupDiff: #if __cplusplus >= 201703L [[fallthrough]]; #endif case BackupIncr: return perform_backup_open(ctx, io); case Restore: return perform_restore_open(ctx, io); default: return bRC_Error; } break; case IO_READ: if (!rw) { rw = 1; DMSG2(ctx, D2, "IO_READ buf=%p len=%d\n", io->buf, io->count); } switch (mode) { case BackupFull: #if __cplusplus >= 201703L [[fallthrough]]; #endif case BackupDiff: #if __cplusplus >= 201703L [[fallthrough]]; #endif case BackupIncr: return perform_read_data(ctx, io); default: return bRC_Error; } break; case IO_WRITE: if (!rw) { rw = 1; DMSG2(ctx, D2, "IO_WRITE buf=%p len=%d\n", io->buf, io->count); } switch (mode) { case Restore: return perform_write_data(ctx, io); default: return bRC_Error; } break; case IO_CLOSE: DMSG0(ctx, D2, "IO_CLOSE\n"); rw = 0; switch (mode) { case Restore: return perform_restore_close(ctx, io); case BackupFull: #if __cplusplus >= 201703L [[fallthrough]]; #endif case BackupDiff: #if __cplusplus >= 201703L [[fallthrough]]; #endif case BackupIncr: return perform_backup_close(ctx, io); default: return bRC_Error; } break; case IO_SEEK: DMSG2(ctx, D2, "IO_SEEK off=%lld wh=%d\n", io->offset, io->whence); switch (mode) { case Restore: return perform_seek_write(ctx, io); default: return bRC_Error; } break; } return bRC_OK; } /** * @brief Get all required information from backend to populate save_pkt for Bacula. * It handles a Restore Object (FT_PLUGIN_CONFIG) for every Full backup and * new Plugin Backup Command if setup in FileSet. It uses a help from * endBackupFile() handling the next FNAME command for the next file to * backup. The communication protocol requires some file attributes command * required it raise the error when insufficient parameters received from * backend. 
It assumes some parameters at save_pkt struct to be automatically * set like: sp->portable, sp->statp.st_blksize, sp->statp.st_blocks. * * @param ctx for Bacula debug and jobinfo messages * @param sp Bacula Plugin API save packet structure * @return bRC bRC_OK - when save_pkt prepared successfully and we have file to backup * bRC_Max - when no more files to backup * bRC_Error - in any error */ bRC PLUGINBCLASS::startBackupFile(bpContext *ctx, struct save_pkt *sp) { CHECK_JOB_CANCELLED; /* The first file in Full backup, is the Plugin Config */ if (mode == BackupFull && pluginconfigsent == false) { ConfigFile ini; ini.register_items(plugin_items_dump, sizeof(struct ini_items)); sp->restore_obj.object_name = (char *)INI_RESTORE_OBJECT_NAME; sp->restore_obj.object_len = ini.serialize(robjbuf.handle()); sp->restore_obj.object = robjbuf.c_str(); sp->type = FT_PLUGIN_CONFIG; DMSG2(ctx, DINFO, "Prepared RestoreObject/%s (%d) sent.\n", INI_RESTORE_OBJECT_NAME, FT_PLUGIN_CONFIG); return bRC_OK; } if (mode == Listing) { if (m_listing_top_nr >= 0) { const char **table = get_listing_top_struct(); if (table) { DMSG1(ctx, DINFO, "m_listing_top_nr: %d\n", m_listing_top_nr); const char *table_name = (char *)table[m_listing_top_nr]; if (table_name) { // handle listing top m_listing_top_nr++; sp->fname = (char *)table_name; sp->type = FT_DIREND; sp->statp.st_size = 0; sp->statp.st_nlink = 1; sp->statp.st_uid = 0; sp->statp.st_gid = 0; sp->statp.st_mode = 040750; sp->statp.st_blksize = 4096; sp->statp.st_blocks = 1; sp->statp.st_atime = sp->statp.st_mtime = sp->statp.st_ctime = time(NULL); return bRC_OK; } else { m_listing_top_nr = -1; return bRC_Max; } } else { DMSG0(ctx, DERROR, "Listing is not enabled for this plugin\n"); JMSG0(ctx, DERROR, "Listing is not enabled for this plugin\n"); return bRC_Error; } } } bRC status = perform_start_backup_file(ctx, sp); DMSG2(ctx, DINFO, "StartBackup: %s %ld\n", sp->fname, sp->statp.st_size); // DMSG3(ctx, DINFO, "TSDebug: %ld(at) %ld(mt) %ld(ct)\n", // sp->statp.st_atime, sp->statp.st_mtime, sp->statp.st_ctime); return status; } /** * @brief Check for a next file to backup or the end of the backup loop. * The next file to backup is indicated by a FNAME command from backend and * no more files to backup as EOD. It helps startBackupFile handling FNAME * for next file. * * @param ctx for Bacula debug and jobinfo messages * @return bRC bRC_OK - when no more files to backup * bRC_More - when Bacula should expect a next file * bRC_Error - in any error */ bRC PLUGINBCLASS::endBackupFile(bpContext *ctx) { CHECK_JOB_CANCELLED; /* When current file was the Plugin Config, so just ask for the next file */ if (mode == BackupFull && pluginconfigsent == false) { pluginconfigsent = true; return bRC_More; } if (mode == Listing && m_listing_top_nr >= 0) { return bRC_More; } bRC status = perform_end_backup_file(ctx); if (status == bRC_More) { DMSG1(ctx, DINFO, "Nextfile %s backup!\n", fname.c_str()); } return status; } /** * @brief * * @param ctx * @param cmd * @return bRC */ bRC PLUGINBCLASS::startRestoreFile(bpContext *ctx, const char *cmd) { CHECK_JOB_CANCELLED; return perform_start_restore_file(ctx, cmd); } /** * @brief * * @param ctx * @return bRC */ bRC PLUGINBCLASS::endRestoreFile(bpContext *ctx) { CHECK_JOB_CANCELLED; return perform_end_restore_file(ctx); } /** * @brief Prepares a file to restore attributes based on data from restore_pkt. 
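 *
 * When CORELOCALRESTORE is enabled and the restore destination ("where") is a
 * local path, the method short-circuits and hands the restore back to the
 * Bacula core by setting rp->create_status = CF_CORE.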
* * @param ctx for Bacula debug and jobinfo messages * @param rp Bacula Plugin API restore packet structure * @return bRC bRC_OK - when success * rp->create_status = CF_EXTRACT - the plugin will restore the file itself * rp->create_status = CF_SKIP - the plugin wants to skip restoration, i.e. the file already exist and Replace=n was set * rp->create_status = CF_CORE - the plugin wants Bacula to do the restore * bRC_Error, rp->create_status = CF_ERROR - in any error */ bRC PLUGINBCLASS::createFile(bpContext *ctx, struct restore_pkt *rp) { CHECK_JOB_CANCELLED; if (CORELOCALRESTORE && islocalpath(where)) { DMSG0(ctx, DDEBUG, "createFile:Forwarding restore to Core\n"); rp->create_status = CF_CORE; return bRC_OK; } return perform_restore_create_file(ctx, rp); } /** * @brief * * @param ctx * @param rp * @return bRC */ bRC PLUGINBCLASS::setFileAttributes(bpContext *ctx, struct restore_pkt *rp) { CHECK_JOB_CANCELLED; return perform_restore_set_file_attributes(ctx, rp); } /** * @brief Implements default pluginclass checkFile() callback. * When fname match plugin configured namespace then it return bRC_Seen by default * if no custom perform_backup_check_file() method is implemented by developer. * * @param ctx for Bacula debug and jobinfo messages * @param fname file name to check * @return bRC bRC_Seen or bRC_OK */ bRC PLUGINBCLASS::checkFile(bpContext * ctx, char *fname) { CHECK_JOB_CANCELLED; DMSG2(ctx, DINFO, "PLUGINBCLASS::checkFile: %s %s\n", PLUGINNAMESPACE, fname); if (isourpluginfname(PLUGINNAMESPACE, fname)) { return perform_backup_check_file(ctx, fname); } return bRC_OK; } /** * @brief Handle ACL and XATTR data during backup or restore. * * @param ctx for Bacula debug and jobinfo messages * @param xacl * @return bRC bRC_OK when successfull, OK * bRC_Error on any error found */ bRC PLUGINBCLASS::handleXACLdata(bpContext *ctx, struct xacl_pkt *xacl) { CHECK_JOB_CANCELLED; // defaults xacl->count = 0; xacl->content = NULL; switch (xacl->func) { case BACL_BACKUP: return perform_acl_backup(ctx, xacl); case BACL_RESTORE: return perform_acl_restore(ctx, xacl); case BXATTR_BACKUP: return perform_xattr_backup(ctx, xacl); case BXATTR_RESTORE: return perform_xattr_restore(ctx, xacl); default: DMSG1(ctx, DERROR, "PLUGINBCLASS::handleXACLdata: unknown xacl function: %d\n", xacl->func); } return bRC_OK; } /** * @brief * * @param ctx for Bacula debug and jobinfo messages * @param qp * @return bRC bRC_OK when successfull, OK * bRC_Error on any error found */ bRC PLUGINBCLASS::queryParameter(bpContext *ctx, struct query_pkt *qp) { // check if it is our Plugin command if (!isourplugincommand(PLUGINPREFIX, qp->command) != 0) { // it is not our plugin prefix return bRC_OK; } CHECK_JOB_CANCELLED; if (mode != QueryParams) // TODO: do we really need this? { mode = QueryParams; } pluginctx_switch_command(qp->command); return perform_query_parameter(ctx, qp); } /** * @brief * * @param ctx for Bacula debug and jobinfo messages * @param mp * @return bRC bRC_OK when successfull, OK * bRC_Error on any error found */ bRC PLUGINBCLASS::metadataRestore(bpContext *ctx, struct meta_pkt *mp) { CHECK_JOB_CANCELLED; return perform_restore_metadata(ctx, mp); } } // namespace pluginlib bacula-15.0.3/src/plugins/fd/pluginlib/metaplugin_accurate.cpp0000644000175000017500000001606714771010173024242 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. 
You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /** * @file metaplugin_accurate.cpp * @author Radosław Korzeniewski (radoslaw@korzeniewski.net) * @brief This is an Accurate Mode handling subroutines for metaplugin. * @version 1.0.0 * @date 2021-09-20 * * @copyright Copyright (c) 2021 All rights reserved. IP transferred to Bacula Systems according to agreement. */ #include "metaplugin_accurate.h" #include "metaplugin_attributes.h" /* * libbac uses its own sscanf implementation which is not compatible with * libc implementation, unfortunately. * use bsscanf for Bacula sscanf flavor */ #ifdef sscanf #undef sscanf #endif namespace metaplugin { namespace accurate { /** * @brief Perform a backend Accurate Mode query. * * @param ctx bpContext - for Bacula debug and jobinfo messages * @param ptcomm backend communication class - the current context * @param fname a file name to handle * @param accurate_mode the current state of accurate mode * @param accurate_mode_err when accurate mode error handled * @return bRC bRC_OK when success, bRC_Error if not */ bRC perform_accurate_check(bpContext *ctx, PTCOMM *ptcomm, POOL_MEM &fname, POOL_MEM &lname, bool accurate_mode, bool &accurate_mode_err) { if (strlen(fname.c_str()) == 0){ // input variable is not valid return bRC_Error; } DMSG0(ctx, DDEBUG, "perform_accurate_check()\n"); POOL_MEM cmd(PM_FNAME); struct save_pkt sp; memset(&sp, 0, sizeof(sp)); // supported sequence is `STAT` followed by `TSTAMP` if (ptcomm->read_command(ctx, cmd) < 0) { // error return bRC_Error; } metaplugin::attributes::Status status = metaplugin::attributes::read_scan_stat_command(ctx, cmd, &sp, lname); if (status == metaplugin::attributes::Status_OK) { if (ptcomm->read_command(ctx, cmd) < 0) { // error return bRC_Error; } status = metaplugin::attributes::read_scan_tstamp_command(ctx, cmd, &sp); if (status == metaplugin::attributes::Status_OK) { // success we can perform accurate check for stat packet bRC rc = bRC_OK; // return 'OK' as a default if (accurate_mode) { sp.fname = fname.c_str(); rc = checkChanges(&sp); } else { if (!accurate_mode_err) { DMSG0(ctx, DERROR, "Backend CHECK command require accurate mode on!\n"); JMSG0(ctx, M_ERROR, "Backend CHECK command require accurate mode on!\n"); accurate_mode_err = true; } } POOL_MEM checkstatus(PM_NAME); Mmsg(checkstatus, "%s\n", rc == bRC_Seen ? "SEEN" : "OK"); DMSG1(ctx, DINFO, "perform_accurate_check(): %s", checkstatus.c_str()); if (!ptcomm->write_command(ctx, checkstatus)) { DMSG0(ctx, DERROR, "Cannot send checkChanges() response to backend\n"); JMSG0(ctx, ptcomm->jmsg_err_level(), "Cannot send checkChanges() response to backend\n"); return bRC_Error; } return bRC_OK; } } else { // check possible errors switch (status) { case metaplugin::attributes::Invalid_File_Type: JMSG2(ctx, M_ERROR, "Invalid file type: %c for %s\n", sp.type, fname.c_str()); return bRC_Error; case metaplugin::attributes::Invalid_Stat_Packet: JMSG1(ctx, ptcomm->jmsg_err_level(), "Invalid stat packet: %s\n", cmd.c_str()); return bRC_Error; default: break; } // future extension for `ATTR` command // ... } return bRC_Error; } /** * @brief Perform a backend Accurate Get Mode query. 
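 *
 * The reply sent to the backend depends on the job and catalog state: when the
 * job is not accurate a "NOACCJOB\n" error is signalled, when the catalog knows
 * the file (bRC_Seen) its attributes are returned as STAT and TSTAMP command
 * packets, and otherwise an "UNAVAIL\n" response is written.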
* * @param ctx bpContext - for Bacula debug and jobinfo messages * @param ptcomm backend communication class - the current context * @param fname a file name to handle * @param accurate_mode the current state of accurate mode * @param accurate_mode_err when accurate mode error handled * @return bRC bRC_OK when success, bRC_Error if not */ bRC perform_accurate_check_get(bpContext *ctx, PTCOMM *ptcomm, POOL_MEM &fname, POOL_MEM &lname, bool accurate_mode, bool &accurate_mode_err) { POOL_MEM cmd(PM_FNAME); if (strlen(fname.c_str()) == 0){ // input variable is not valid return bRC_Error; } DMSG0(ctx, DDEBUG, "perform_accurate_check_get()\n"); if (!accurate_mode) { DMSG0(ctx, DERROR, "Backend CHECKGET command require accurate mode on!\n"); JMSG0(ctx, M_ERROR, "Backend CHECKGET command require accurate mode on!\n"); accurate_mode_err = true; // the job is not accurate, so no accurate data will be available at all pm_strcpy(cmd, "NOACCJOB\n"); if (!ptcomm->signal_error(ctx, cmd)) { DMSG0(ctx, DERROR, "Cannot send 'No Accurate Job' info to backend\n"); JMSG0(ctx, ptcomm->jmsg_err_level(), "Cannot send 'No Accurate Job' info to backend\n"); return bRC_Error; } return bRC_OK; } accurate_attribs_pkt attribs; memset(&attribs, 0, sizeof(attribs)); attribs.fname = fname.c_str(); bRC rc = getAccurateAttribs(&attribs); struct restore_pkt rp; switch (rc) { case bRC_Seen: memcpy(&rp.statp, &attribs.statp, sizeof(rp.statp)); rp.type = FT_MASK; // This is a special metaplugin protocol hack // because the current Bacula accurate code does // not handle FileType on catalog attributes, yet. // STAT:... metaplugin::attributes::make_stat_command(ctx, cmd, &rp); ptcomm->write_command(ctx, cmd); // TSTAMP:... if (metaplugin::attributes::make_tstamp_command(ctx, cmd, &rp) == metaplugin::attributes::Status_OK) { ptcomm->write_command(ctx, cmd); DMSG(ctx, DINFO, "createFile:%s", cmd.c_str()); } break; default: pm_strcpy(cmd, "UNAVAIL\n"); if (!ptcomm->write_command(ctx, cmd)) { DMSG0(ctx, DERROR, "Cannot send 'UNAVAIL' response to backend\n"); JMSG0(ctx, ptcomm->jmsg_err_level(), "Cannot send 'UNAVAIL' response to backend\n"); return bRC_Error; } break; } return bRC_OK; } } // namespace accurate } // namespace metaplugin bacula-15.0.3/src/plugins/fd/pluginlib/test_metaplugin_backend.c0000644000175000017500000022465214771010173024542 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /** * @file test_metaplugin_backend.cpp * @author Radosław Korzeniewski (radoslaw@korzeniewski.net) * @brief This is a dumb and extremely simple backend simulator used for test Metaplugin. 
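 *
 * The simulator speaks the metaplugin packet protocol on stdin/stdout: every
 * packet starts with an 8-byte header of the form "X%06d\n", where X is the
 * packet type ('C' command, 'D' raw data, 'F' end-of-data, 'T' termination)
 * and the decimal field is the payload length, followed by the payload itself.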
* @version 2.1.1 * @date 2021-03-10 * */ #include #include #include #include #include #include #include #include #include #include #ifndef LOGDIR #define LOGDIR "/tmp" #endif #define EXIT_BACKEND_NOMEMORY 255 #define EXIT_BACKEND_LOGFILE_ERROR 1 #define EXIT_BACKEND_HEADER_TOOSHORT 2 #define EXIT_BACKEND_MESSAGE_TOOLONG 3 #define EXIT_BACKEND_DATA_COMMAND_REQ 4 #define EXIT_BACKEND_SIGNAL_HANDLER_ERROR 5 #define EXIT_BACKEND_CANCEL 6 extern const char *PLUGINPREFIX; extern const char *PLUGINNAME; int logfd; pid_t mypid; char *buf; char *buflog; char symlink_fname[32]; char namedpipe_fname[32]; bool regress_error_plugin_params = false; bool regress_error_start_job = false; bool regress_error_backup_no_files = false; bool regress_error_backup_stderr = false; bool regress_backup_plugin_objects = false; bool regress_error_backup_abort = false; bool regress_error_estimate_stderr = false; bool regress_error_listing_stderr = false; bool regress_error_restore_stderr = false; bool regress_backup_other_file = false; bool regress_metadata_support = false; bool regress_standard_error_backup = false; bool regress_cancel_backup = false; bool regress_cancel_restore = false; bool regress_backup_external_stat = false; bool Job_Level_Incremental = false; char working_directory[4096]; // it should be no more #define BUFLEN 4096 #define BIGBUFLEN 131072 /** * @brief saves the log text to logfile * * @param txt log text to save */ void LOG(const char *txt) { char _buf[BUFLEN]; int p = 0; for (int a = 0; txt[a]; a++) { char c = txt[a]; if (c == '\n') { _buf[p++] = '\\'; _buf[p++] = 'n'; } else { _buf[p++] = c; } } _buf[p++] = '\n'; _buf[p] = '\0'; write(logfd, _buf, p); } /** * @brief Reads the raw packet from plugin. * * @param buf the memory buffer to save packet payload * @return int the size of the packer read */ int read_plugin(char * buf) { size_t len; size_t nread; size_t size; char header[8]; len = read(STDIN_FILENO, &header, 8); if (len < 8){ LOG("#> Err: header too short"); close(logfd); exit(EXIT_BACKEND_HEADER_TOOSHORT); } if (header[0] == 'F'){ LOG(">> EOD >>"); return 0; } if (header[0] == 'T'){ LOG(">> TERM >>"); close(logfd); exit(EXIT_SUCCESS); } size = atoi(header + 1); if (header[0] == 'C'){ if (size > BIGBUFLEN){ LOG("#> Err: message too long"); close(logfd); exit(EXIT_BACKEND_MESSAGE_TOOLONG); } len = read(STDIN_FILENO, buf, size); buf[len] = 0; snprintf(buflog, BUFLEN, "> %s", buf); LOG(buflog); } else { snprintf(buflog, BUFLEN, "> Data:%lu", size); LOG(buflog); len = 0; while (len < size) { int32_t nbytes = 0; int rc = ioctl(STDIN_FILENO, FIONREAD, &nbytes); snprintf(buflog, BUFLEN, ">> FIONREAD:%d:%ld", rc, (long int)nbytes); LOG(buflog); if (size > (size_t)nbytes){ rc = ioctl(STDIN_FILENO, FIONREAD, &nbytes); snprintf(buflog, BUFLEN, ">> Second FIONREAD:%d:%ld", rc, (long int)nbytes); LOG(buflog); } size_t bufread = size - len > BIGBUFLEN ? BIGBUFLEN : size - len; nread = read(STDIN_FILENO, buf, bufread); len += nread; snprintf(buflog, BUFLEN, ">> Dataread:%lu", nread); LOG(buflog); } } return len; } void read_plugin_data_stream() { int len = read_plugin(buf); if (len == 0){ /* empty file to restore */ LOG("#> Empty data."); return; } bool loopgo = true; int fsize = len; while (loopgo){ len = read_plugin(buf); fsize += len; if (len > 0){ LOG("#> data stream saved."); continue; } else { loopgo = false; snprintf(buflog, 4096, "#> data END = %i", fsize); } } } // #define USE_PRINTF /** * @brief Sends/writes the data to plugin with assembling the raw packet. 
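 *
 * For example (illustrative values only), write_plugin('C', "OK\n") emits the
 * 8-byte header "C000003\n" followed by the 3-byte payload "OK\n".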
* * @param cmd the packet type to sent * @param str the text to write */ void write_plugin(const char cmd, const char *str) { int len; const char * out; char header[9]; if (str){ len = strlen(str); out = str; } else { len = 0; out = ""; } #ifdef USE_PRINTF printf("%c%06d\n", cmd, len); printf("%s", out); fflush(stdout); #else snprintf(header, 9, "%c%06d\n", cmd, len); write(STDOUT_FILENO, header, 8); write(STDOUT_FILENO, out, len); #endif snprintf(buflog, BUFLEN, "<< %c%06d:%s", cmd, len, out); LOG(buflog); } /** * @brief Sends/writes the binary data to plugin with assembling the raw packet. * * @param cmd the packet type to sent * @param str the text to write */ void write_plugin_bin(const unsigned char *str, int len = 0) { const unsigned char * out; if (str) { out = str; } else { out = (const unsigned char*)""; } printf("D%06d\n", len); int status = fwrite(out, len, 1, stdout); fflush(stdout); snprintf(buflog, BUFLEN, "<< D%06d:%d:", len ,status); LOG(buflog); } /** * @brief Sends the EOD packet to plugin. */ void signal_eod(){ printf("F000000\n"); fflush(stdout); LOG("<< EOD <<"); } /** * @brief Sends the termination packet to plugin. */ void signal_term(){ printf("T000000\n"); fflush(stdout); LOG("<< TERM <<"); } static bool jobcancelled = false; static void catch_function(int signo) { if (regress_cancel_backup) { LOG("#CANCELLED BACKUP#"); } else if (regress_cancel_restore) { LOG("#CANCELLED RESTORE#"); } else { LOG("#CANCELLED UNKNOWN#"); } jobcancelled = true; } unsigned char restore_object_data[] = { 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x3a, 0x20, 0x76, 0x31, 0x0a, 0x6b, 0x69, 0x6e, 0x64, 0x3a, 0x20, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x3a, 0x0a, 0x20, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x0a, 0x2d, 0x2d, 0x2d, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x3a, 0x20, 0x76, 0x31, 0x0a, 0x6b, 0x69, 0x6e, 0x64, 0x3a, 0x20, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x3a, 0x0a, 0x20, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x2d, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x0a, 0x20, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x0a, 0x20, 0x20, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x61, 0x70, 0x70, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x3a, 0x0a, 0x20, 0x20, 0x23, 0x20, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x62, 0x61, 0x63, 0x75, 0x6c, 0x61, 0x0a, 0x20, 0x20, 0x23, 0x20, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x0a, 0x20, 0x20, 0x23, 0x20, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6b, 0x65, 0x79, 0x3a, 0x20, 0x35, 0x62, 0x41, 0x6f, 0x56, 0x32, 0x43, 0x70, 0x7a, 0x42, 0x76, 0x68, 0x42, 0x51, 0x5a, 0x61, 0x59, 0x55, 0x58, 0x31, 0x71, 0x59, 0x61, 0x77, 0x43, 0x30, 0x30, 0x71, 0x68, 0x72, 0x78, 0x38, 0x63, 0x45, 0x57, 0x30, 0x66, 0x4b, 0x31, 0x7a, 0x59, 0x6b, 0x54, 0x78, 0x56, 0x64, 0x62, 0x78, 0x66, 0x76, 0x57, 0x4d, 0x79, 0x69, 0x30, 0x68, 0x35, 0x51, 0x62, 0x77, 0x65, 0x4a, 0x6b, 0x71, 0x0a, 0x20, 0x20, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x59, 0x6d, 0x46, 0x6a, 0x64, 0x57, 0x78, 0x68, 0x43, 
0x67, 0x3d, 0x3d, 0x0a, 0x20, 0x20, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x3a, 0x20, 0x63, 0x47, 0x78, 0x31, 0x5a, 0x32, 0x6c, 0x75, 0x64, 0x47, 0x56, 0x7a, 0x64, 0x41, 0x6f, 0x3d, 0x0a, 0x20, 0x20, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6b, 0x65, 0x79, 0x3a, 0x20, 0x4e, 0x57, 0x4a, 0x42, 0x62, 0x31, 0x59, 0x79, 0x51, 0x33, 0x42, 0x36, 0x51, 0x6e, 0x5a, 0x6f, 0x51, 0x6c, 0x46, 0x61, 0x59, 0x56, 0x6c, 0x56, 0x57, 0x44, 0x46, 0x78, 0x57, 0x57, 0x46, 0x33, 0x51, 0x7a, 0x41, 0x77, 0x63, 0x57, 0x68, 0x79, 0x65, 0x44, 0x68, 0x6a, 0x52, 0x56, 0x63, 0x77, 0x5a, 0x6b, 0x73, 0x78, 0x65, 0x6c, 0x6c, 0x72, 0x56, 0x48, 0x68, 0x57, 0x5a, 0x47, 0x4a, 0x34, 0x5a, 0x6e, 0x5a, 0x58, 0x54, 0x58, 0x6c, 0x70, 0x4d, 0x47, 0x67, 0x31, 0x55, 0x57, 0x4a, 0x33, 0x5a, 0x55, 0x70, 0x72, 0x63, 0x51, 0x6f, 0x3d, 0x0a, 0x2d, 0x2d, 0x2d, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x3a, 0x20, 0x76, 0x31, 0x0a, 0x6b, 0x69, 0x6e, 0x64, 0x3a, 0x20, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4d, 0x61, 0x70, 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x3a, 0x0a, 0x20, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x2d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x6d, 0x61, 0x70, 0x0a, 0x20, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x0a, 0x20, 0x20, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x61, 0x70, 0x70, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x3a, 0x0a, 0x20, 0x20, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x3a, 0x20, 0x62, 0x61, 0x63, 0x75, 0x6c, 0x61, 0x0a, 0x20, 0x20, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x3a, 0x20, 0x31, 0x32, 0x37, 0x2e, 0x30, 0x2e, 0x30, 0x2e, 0x31, 0x0a, 0x20, 0x20, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x3a, 0x20, 0x27, 0x35, 0x34, 0x33, 0x32, 0x27, 0x0a, 0x2d, 0x2d, 0x2d, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x3a, 0x20, 0x76, 0x31, 0x0a, 0x6b, 0x69, 0x6e, 0x64, 0x3a, 0x20, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x3a, 0x0a, 0x20, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x2d, 0x73, 0x75, 0x62, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x0a, 0x20, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x0a, 0x20, 0x20, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x61, 0x70, 0x70, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x0a, 0x73, 0x70, 0x65, 0x63, 0x3a, 0x0a, 0x20, 0x20, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x0a, 0x20, 0x20, 0x23, 0x20, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x50, 0x3a, 0x20, 0x4e, 0x6f, 0x6e, 0x65, 0x0a, 0x20, 0x20, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x3a, 0x0a, 0x20, 0x20, 0x2d, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x66, 0x6f, 0x6f, 0x20, 0x23, 0x20, 0x41, 0x63, 0x74, 0x75, 0x61, 0x6c, 0x6c, 0x79, 0x2c, 0x20, 0x6e, 0x6f, 0x20, 0x70, 0x6f, 0x72, 0x74, 0x20, 0x69, 0x73, 0x20, 0x6e, 0x65, 0x65, 0x64, 0x65, 0x64, 0x2e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x70, 0x6f, 0x72, 0x74, 0x3a, 0x20, 0x31, 0x32, 0x33, 0x34, 
0x0a, 0x20, 0x20, 0x20, 0x20, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x50, 0x6f, 0x72, 0x74, 0x3a, 0x20, 0x31, 0x32, 0x33, 0x34, 0x0a, 0x2d, 0x2d, 0x2d, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x3a, 0x20, 0x76, 0x31, 0x0a, 0x6b, 0x69, 0x6e, 0x64, 0x3a, 0x20, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x3a, 0x0a, 0x20, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x2d, 0x6e, 0x67, 0x69, 0x6e, 0x78, 0x2d, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x0a, 0x20, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x0a, 0x20, 0x20, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x61, 0x70, 0x70, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x2d, 0x6e, 0x67, 0x69, 0x6e, 0x78, 0x2d, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x74, 0x69, 0x65, 0x72, 0x3a, 0x20, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x0a, 0x73, 0x70, 0x65, 0x63, 0x3a, 0x0a, 0x20, 0x20, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x3a, 0x0a, 0x20, 0x20, 0x2d, 0x20, 0x70, 0x6f, 0x72, 0x74, 0x3a, 0x20, 0x38, 0x30, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x77, 0x65, 0x62, 0x0a, 0x20, 0x20, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x50, 0x3a, 0x20, 0x4e, 0x6f, 0x6e, 0x65, 0x0a, 0x20, 0x20, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x61, 0x70, 0x70, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x2d, 0x6e, 0x67, 0x69, 0x6e, 0x78, 0x2d, 0x77, 0x65, 0x62, 0x0a, 0x2d, 0x2d, 0x2d, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x3a, 0x20, 0x76, 0x31, 0x0a, 0x6b, 0x69, 0x6e, 0x64, 0x3a, 0x20, 0x50, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x3a, 0x0a, 0x20, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x2d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x2d, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x0a, 0x20, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x0a, 0x20, 0x20, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x61, 0x70, 0x70, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x0a, 0x73, 0x70, 0x65, 0x63, 0x3a, 0x0a, 0x20, 0x20, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4d, 0x6f, 0x64, 0x65, 0x73, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2d, 0x20, 0x52, 0x65, 0x61, 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x6e, 0x63, 0x65, 0x0a, 0x20, 0x20, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x3a, 0x20, 0x31, 0x47, 0x69, 0x0a, 0x2d, 0x2d, 0x2d, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x3a, 0x20, 0x76, 0x31, 0x0a, 0x6b, 0x69, 0x6e, 0x64, 0x3a, 0x20, 0x50, 0x6f, 0x64, 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x3a, 0x0a, 0x20, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x31, 0x0a, 0x20, 0x20, 0x6e, 0x61, 0x6d, 0x65, 
0x73, 0x70, 0x61, 0x63, 0x65, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x0a, 0x20, 0x20, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x61, 0x70, 0x70, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x3a, 0x20, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x23, 0x20, 0x74, 0x69, 0x65, 0x72, 0x3a, 0x20, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x0a, 0x73, 0x70, 0x65, 0x63, 0x3a, 0x0a, 0x20, 0x20, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x2d, 0x31, 0x0a, 0x20, 0x20, 0x73, 0x75, 0x62, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x2d, 0x73, 0x75, 0x62, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x0a, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x3a, 0x0a, 0x20, 0x20, 0x2d, 0x20, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x3a, 0x20, 0x62, 0x75, 0x73, 0x79, 0x62, 0x6f, 0x78, 0x3a, 0x31, 0x2e, 0x32, 0x38, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2d, 0x20, 0x73, 0x6c, 0x65, 0x65, 0x70, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2d, 0x20, 0x22, 0x33, 0x36, 0x30, 0x30, 0x22, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2d, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x2d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x50, 0x61, 0x74, 0x68, 0x3a, 0x20, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x0a, 0x20, 0x20, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2d, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x2d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x4e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x2d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x2d, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x0a, 0x2d, 0x2d, 0x2d, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x3a, 0x20, 0x76, 0x31, 0x0a, 0x6b, 0x69, 0x6e, 0x64, 0x3a, 0x20, 0x50, 0x6f, 0x64, 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x3a, 0x0a, 0x20, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x32, 0x0a, 0x20, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x0a, 0x20, 0x20, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x61, 0x70, 0x70, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 
0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x0a, 0x73, 0x70, 0x65, 0x63, 0x3a, 0x0a, 0x20, 0x20, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x2d, 0x32, 0x0a, 0x20, 0x20, 0x73, 0x75, 0x62, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x2d, 0x73, 0x75, 0x62, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x0a, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x3a, 0x0a, 0x20, 0x20, 0x2d, 0x20, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x3a, 0x20, 0x62, 0x75, 0x73, 0x79, 0x62, 0x6f, 0x78, 0x3a, 0x31, 0x2e, 0x32, 0x38, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2d, 0x20, 0x73, 0x6c, 0x65, 0x65, 0x70, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2d, 0x20, 0x22, 0x33, 0x36, 0x30, 0x30, 0x22, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x0a, 0x2d, 0x2d, 0x2d, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x3a, 0x20, 0x61, 0x70, 0x70, 0x73, 0x2f, 0x76, 0x31, 0x0a, 0x6b, 0x69, 0x6e, 0x64, 0x3a, 0x20, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53, 0x65, 0x74, 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x3a, 0x0a, 0x20, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x2d, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x0a, 0x20, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x0a, 0x20, 0x20, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x61, 0x70, 0x70, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x2d, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x74, 0x69, 0x65, 0x72, 0x3a, 0x20, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x0a, 0x73, 0x70, 0x65, 0x63, 0x3a, 0x0a, 0x20, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x3a, 0x20, 0x31, 0x0a, 0x20, 0x20, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x69, 0x65, 0x72, 0x3a, 0x20, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x0a, 0x20, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x69, 0x65, 0x72, 0x3a, 0x20, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x73, 0x70, 0x65, 0x63, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2d, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x2d, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x2d, 0x74, 0x65, 0x73, 0x74, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x3a, 0x20, 0x67, 0x63, 0x72, 0x2e, 0x69, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x2f, 0x67, 0x62, 0x2d, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x3a, 0x76, 0x33, 0x0a, 0x2d, 0x2d, 0x2d, 0x0a, 0x61, 0x70, 0x69, 
0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x3a, 0x20, 0x61, 0x70, 0x70, 0x73, 0x2f, 0x76, 0x31, 0x0a, 0x6b, 0x69, 0x6e, 0x64, 0x3a, 0x20, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x3a, 0x0a, 0x20, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x2d, 0x6e, 0x67, 0x69, 0x6e, 0x78, 0x2d, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x0a, 0x20, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x0a, 0x20, 0x20, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x61, 0x70, 0x70, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x2d, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x0a, 0x73, 0x70, 0x65, 0x63, 0x3a, 0x0a, 0x20, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x3a, 0x20, 0x32, 0x0a, 0x20, 0x20, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x61, 0x70, 0x70, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x2d, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x0a, 0x20, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x61, 0x70, 0x70, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x2d, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x73, 0x70, 0x65, 0x63, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2d, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x2d, 0x6e, 0x67, 0x69, 0x6e, 0x78, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x3a, 0x20, 0x6e, 0x67, 0x69, 0x6e, 0x78, 0x3a, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2d, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x50, 0x6f, 0x72, 0x74, 0x3a, 0x20, 0x38, 0x30, 0x38, 0x30, 0x0a, 0x2d, 0x2d, 0x2d, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x3a, 0x20, 0x61, 0x70, 0x70, 0x73, 0x2f, 0x76, 0x31, 0x0a, 0x6b, 0x69, 0x6e, 0x64, 0x3a, 0x20, 0x53, 0x74, 0x61, 0x74, 0x65, 0x66, 0x75, 0x6c, 0x53, 0x65, 0x74, 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x3a, 0x0a, 0x20, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x2d, 0x6e, 0x67, 0x69, 0x6e, 0x78, 0x2d, 0x77, 0x65, 0x62, 0x0a, 0x20, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x0a, 0x73, 0x70, 0x65, 0x63, 0x3a, 0x0a, 0x20, 0x20, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x61, 0x70, 0x70, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 
0x65, 0x73, 0x74, 0x2d, 0x6e, 0x67, 0x69, 0x6e, 0x78, 0x2d, 0x77, 0x65, 0x62, 0x0a, 0x20, 0x20, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x22, 0x6e, 0x67, 0x69, 0x6e, 0x78, 0x22, 0x0a, 0x20, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x3a, 0x20, 0x33, 0x0a, 0x20, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x61, 0x70, 0x70, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x2d, 0x6e, 0x67, 0x69, 0x6e, 0x78, 0x2d, 0x77, 0x65, 0x62, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x69, 0x65, 0x72, 0x3a, 0x20, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x73, 0x70, 0x65, 0x63, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x47, 0x72, 0x61, 0x63, 0x65, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x3a, 0x20, 0x31, 0x30, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2d, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x2d, 0x6e, 0x67, 0x69, 0x6e, 0x78, 0x2d, 0x77, 0x65, 0x62, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x3a, 0x20, 0x6b, 0x38, 0x73, 0x2e, 0x67, 0x63, 0x72, 0x2e, 0x69, 0x6f, 0x2f, 0x6e, 0x67, 0x69, 0x6e, 0x78, 0x2d, 0x73, 0x6c, 0x69, 0x6d, 0x3a, 0x30, 0x2e, 0x38, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2d, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x50, 0x6f, 0x72, 0x74, 0x3a, 0x20, 0x38, 0x30, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x77, 0x65, 0x62, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2d, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x2d, 0x77, 0x77, 0x77, 0x2d, 0x64, 0x61, 0x74, 0x61, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x50, 0x61, 0x74, 0x68, 0x3a, 0x20, 0x2f, 0x75, 0x73, 0x72, 0x2f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x2f, 0x6e, 0x67, 0x69, 0x6e, 0x78, 0x2f, 0x68, 0x74, 0x6d, 0x6c, 0x0a, 0x20, 0x20, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x3a, 0x0a, 0x20, 0x20, 0x2d, 0x20, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x2d, 0x77, 0x77, 0x77, 0x2d, 0x64, 0x61, 0x74, 0x61, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x3a, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x74, 0x65, 0x73, 0x74, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x73, 0x70, 0x65, 0x63, 0x3a, 0x0a, 0x20, 
0x20, 0x20, 0x20, 0x20, 0x20, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4d, 0x6f, 0x64, 0x65, 0x73, 0x3a, 0x20, 0x5b, 0x20, 0x22, 0x52, 0x65, 0x61, 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x6e, 0x63, 0x65, 0x22, 0x20, 0x5d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x3a, 0x20, 0x31, 0x47, 0x69, 0x0a, }; unsigned int restore_object_data_len = 3984; /** * @brief Perform test backup. */ void perform_backup() { // This is a test for FileIndex Query snprintf(buf, BIGBUFLEN, "FileIndex\n"); write_plugin('C', buf); char firesponse[32] = {0}; // well the file index is int32_t so max 11 chars read_plugin(firesponse); int fileindex = atoi(firesponse); snprintf(buf, BIGBUFLEN, "TEST05 - FileIndex query: %d", fileindex); write_plugin('I', buf); // here we store the linked.file origin fname char fileindex_link[256]; snprintf(fileindex_link, 256, "%s/bucket/%d/vm1.iso", PLUGINPREFIX, mypid); // Backup Loop if (regress_error_backup_no_files) { write_plugin('E', "No files found for pattern container1/otherobject\n"); signal_eod(); return; } // first file snprintf(buf, BIGBUFLEN, "FNAME:%s\n", fileindex_link); // we use it here write_plugin('C', buf); write_plugin('C', "STAT:F 1048576 100 100 100640 2\n"); // this will be the first file hardlinked write_plugin('C', "TSTAMP:1504271937 1504271937 1504271937\n"); write_plugin('I', "TEST5"); signal_eod(); // here comes a file data contents write_plugin('C', "DATA\n"); write_plugin('D', "/* here comes a file data contents */"); write_plugin('D', "/* here comes another file line */"); write_plugin('D', "/* here comes another file line */"); write_plugin('D', "/* here comes another file line */"); write_plugin('D', "/* here comes another file line */"); signal_eod(); write_plugin('I', "TEST5Data"); // and now additional metadata write_plugin('C', "ACL\n"); write_plugin('D', "user::rw-\nuser:root:-wx\ngroup::r--\nmask::rwx\nother::r--\n"); write_plugin('I', "TEST5Acl"); signal_eod(); if (regress_cancel_backup) { LOG("#Cancel wait started..."); snprintf(buf, BIGBUFLEN, "#Cancel PID: %d", getpid()); write_plugin('I', buf); while (!jobcancelled) sleep(1); LOG("#Cancel event received, EXIT"); exit(EXIT_BACKEND_CANCEL); } // next file // this files we will restore using Bacula Core functionality, so it is crucial write_plugin('I', "TEST6"); snprintf(buf, BIGBUFLEN, "FNAME:%s/bucket/%d/etc/issue\n", PLUGINPREFIX, mypid); write_plugin('C', buf); write_plugin('C', "STAT:F 26 200 200 100640 1\n"); write_plugin('C', "TSTAMP:1504271937 1504271937 1504271937\n"); write_plugin('C', "PIPE:/etc/issue\n"); read_plugin(buf); signal_eod(); write_plugin('C', "DATA\n"); write_plugin('I', "TEST6Data"); write_plugin('I', "TEST6A"); snprintf(buf, BIGBUFLEN, "FNAME:%s/bucket/%d/fileforcore\n", PLUGINPREFIX, mypid); write_plugin('C', buf); write_plugin('C', "STAT:F 27 200 200 100640 1\n"); write_plugin('C', "TSTAMP:1504271937 1504271937 1504271937\n"); signal_eod(); write_plugin('C', "DATA\n"); write_plugin('I', "TEST6AData"); write_plugin('D', "/* here comes another file line */"); write_plugin('D', "/* here comes another file line */"); signal_eod(); write_plugin('I', "TEST6Axattr"); write_plugin('C', "XATTR\n"); write_plugin('D', "bacula.custom.data=Inteos\nsystem.custom.data=Bacula\n"); signal_eod(); // next file 
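/*
 * Editorial sketch (not part of the original backend source): every regular
 * file emitted by this Backup Loop follows the same packet pattern, which the
 * blocks above and below instantiate.  Packet kinds: 'C' command, 'D' data,
 * 'I' info message; signal_eod() marks end-of-data.
 *
 *    write_plugin('C', "FNAME:<path>\n");                   // file name
 *    write_plugin('C', "STAT:F <size> <uid> <gid> <mode> <nlink>\n");
 *    write_plugin('C', "TSTAMP:<t1> <t2> <t3>\n");          // three timestamps
 *    signal_eod();                                          // end of attributes
 *    write_plugin('C', "DATA\n");                           // start of contents
 *    write_plugin('D', "...");                              // zero or more data packets
 *    signal_eod();                                          // end of contents
 *    // optionally "ACL\n" or "XATTR\n" followed by a 'D' packet and signal_eod()
 *
 * The STAT/TSTAMP field order is inferred from the literal values used in this
 * test and may not be normative; refer to the metaplugin protocol documentation.
 */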
snprintf(buf, BIGBUFLEN, "FNAME:%s/bucket/%d/vm2.iso\n", PLUGINPREFIX, mypid); write_plugin('C', buf); write_plugin('C', "STAT:F 1048576 200 200 100640 1\n"); if (regress_error_backup_stderr) { // test some stderror handling, yes in the middle file parameters errno = EACCES; perror("I've got some unsuspected error which I'd like to display on stderr (COMM_STDERR)"); sleep(1); } write_plugin('C', "TSTAMP:1504271937 1504271937 1504271937\n"); signal_eod(); write_plugin('I', "TEST7"); /* here comes a file data contents */ write_plugin('C', "DATA\n"); write_plugin('D', "/* here comes a file data contents */"); write_plugin('D', "/* here comes another file line */"); write_plugin('D', "/* here comes another file line */"); if (regress_error_backup_stderr && false) { // test some stderror handling, yes in the middle of data transfer errno = EACCES; perror("I've got some unsuspected error which I'd like to display on stderr (COMM_STDERR)"); sleep(1); } write_plugin('D', "/* here comes another file line */"); write_plugin('D', "/* here comes another file line */"); signal_eod(); write_plugin('I', "TEST7Data"); write_plugin('C', "XATTR\n"); write_plugin('D', "bacula.custom.data=Inteos\nsystem.custom.data=Bacula\n"); signal_eod(); if (regress_backup_other_file) { // restore object snprintf(buf, BIGBUFLEN, "RESTOREOBJ:TestRObject%d\n", mypid); write_plugin('C', buf); snprintf(buf, BIGBUFLEN, "RESTOREOBJ_LEN:%u\n", restore_object_data_len); write_plugin('C', buf); write_plugin_bin(restore_object_data, restore_object_data_len); signal_eod(); snprintf(buf, BIGBUFLEN, "RESTOREOBJ:OtherObject%d\n", mypid); write_plugin('C', buf); const char *r_data = "/* here comes a file data contents */"; snprintf(buf, BIGBUFLEN, "RESTOREOBJ_LEN:%lu\n", strlen(r_data) + 1); write_plugin('C', buf); write_plugin('D', r_data); signal_eod(); // long restore object snprintf(buf, BIGBUFLEN, "RESTOREOBJ:LongObject%d\n", mypid); write_plugin('C', buf); const size_t longobject_num = 6; snprintf(buf, BIGBUFLEN, "RESTOREOBJ_LEN:%lu\n", BIGBUFLEN * longobject_num); write_plugin('C', buf); memset(buf, 'A', BIGBUFLEN); for (size_t a = 0; a < longobject_num; a++) { write_plugin_bin((unsigned char*)buf, BIGBUFLEN); } signal_eod(); // next file snprintf(buf, BIGBUFLEN, "FNAME:%s/bucket/%d/vm222-other-file.iso\n", PLUGINPREFIX, mypid); write_plugin('C', buf); write_plugin('C', "STAT:F 1048576 200 200 100640 1\n"); write_plugin('C', "TSTAMP:1504271937 1504271937 1504271937\n"); signal_eod(); write_plugin('I', "TEST7-Other"); /* here comes a file data contents */ write_plugin('C', "DATA\n"); write_plugin('D', "/* here comes a file data contents */"); write_plugin('D', "/* here comes another file line */"); write_plugin('D', "/* here comes another file line */"); write_plugin('D', "/* here comes another file line */"); write_plugin('D', "/* here comes another file line */"); write_plugin('I', "TEST7-Other-End"); signal_eod(); // check acceptfile() skip write_plugin('C', "ACCEPT:/exclude/file1\n"); write_plugin('C', "STAT:F 1048576 200 200 100640 1\n"); write_plugin('C', "TSTAMP:1504271937 1504271937 1504271937\n"); read_plugin(buf); write_plugin('I', "TEST ACCEPT Response\n"); write_plugin('I', buf); if (strncmp(buf, "SKIP", 4) == 0) { write_plugin('I', "TEST ACCEPT Response OK (ACCEPT_FILE_OK)\n"); } // check acceptfile() ok write_plugin('C', "ACCEPT:/etc/passwd\n"); write_plugin('C', "STAT:F 1048576 200 200 100640 1\n"); write_plugin('C', "TSTAMP:1504271937 1504271937 1504271937\n"); read_plugin(buf); write_plugin('I', "TEST ACCEPT 
Response\n"); write_plugin('I', buf); if (strncmp(buf, "OK", 2) == 0) { write_plugin('I', "TEST ACCEPT Response OK (ACCEPT_FILE_OK)\n"); } // check acceptfile() with STAT(2) on file write_plugin('C', "ACCEPT:/etc/passwd\n"); write_plugin('C', "STAT:/etc/passwd\n"); read_plugin(buf); write_plugin('I', "TEST ACCEPT Response\n"); write_plugin('I', buf); if (strncmp(buf, "OK", 2) == 0) { write_plugin('I', "TEST ACCEPT Response OK (ACCEPT_FILE_OK)\n"); } } bool seen = false; if (Job_Level_Incremental) { // we can accurateCheck query write_plugin('C', "CHECK:/etc/passwd\n"); write_plugin('C', "STAT:F 37 0 0 100640 1\n"); write_plugin('C', "TSTAMP:1504271937 1504271937 1504271937\n"); read_plugin(buf); write_plugin('I', "TEST CHECK Response"); write_plugin('I', buf); if (strncmp(buf, "SEEN", 4) == 0) { seen = true; } // accurate check nonexistent file snprintf(buf, BIGBUFLEN, "CHECK:%s/nonexistent/%d/file\n", PLUGINPREFIX, mypid); write_plugin('C', buf); write_plugin('C', "STAT:F 0 0 0 100640 1\n"); write_plugin('C', "TSTAMP:0 0 0\n"); read_plugin(buf); if (strncmp(buf, "SEEN", 4) != 0) { write_plugin('I', "TEST CHECK nonexistentok"); } // now accurateGet query write_plugin('C', "CHECKGET:/etc/passwd\n"); read_plugin(buf); if (strncmp(buf, "STAT", 4) == 0) { // yes, the data is available read_plugin(buf); write_plugin('I', "TEST CHECKGET"); } // accurate check nonexistent file snprintf(buf, BIGBUFLEN, "CHECKGET:%s/nonexistent/%d/file\n", PLUGINPREFIX, mypid); write_plugin('C', buf); read_plugin(buf); if (strncmp(buf, "UNAVAIL", 7) == 0) { write_plugin('I', "TEST CHECK nonexistentok"); } // check addinclude() snprintf(buf, BIGBUFLEN, "%s/passwd", working_directory); creat(buf, 0600); snprintf(buf, BIGBUFLEN, "INCLUDE:%s/passwd", working_directory); write_plugin('C', buf); snprintf(buf, BIGBUFLEN, "%s/group", working_directory); creat(buf, 0600); snprintf(buf, BIGBUFLEN, "INCLUDE:%s/group", working_directory); write_plugin('C', buf); write_plugin('C', "STRIP:3\n"); snprintf(buf, BIGBUFLEN, "%s/tmp", working_directory); mkdir(buf, 0700); snprintf(buf, BIGBUFLEN, "%s/tmp/bacula", working_directory); mkdir(buf, 0700); snprintf(buf, BIGBUFLEN, "%s/tmp/bacula/test", working_directory); mkdir(buf, 0700); snprintf(buf, BIGBUFLEN, "%s/tmp/bacula/test/split", working_directory); mkdir(buf, 0700); snprintf(buf, BIGBUFLEN, "%s/tmp/bacula/test/split/file", working_directory); creat(buf, 0600); snprintf(buf, BIGBUFLEN, "INCLUDE:%s/tmp/bacula/test/split", working_directory); write_plugin('C', buf); } // backup if full or not seen if (!Job_Level_Incremental || !seen) { write_plugin('C', "FNAME:/etc/passwd\n"); write_plugin('C', "STAT:F 37 0 0 100640 1\n"); write_plugin('C', "TSTAMP:1504271937 1504271937 1504271937\n"); signal_eod(); /* here comes a file data contents */ write_plugin('C', "DATA\n"); write_plugin('D', "/* here comes a file data contents */"); signal_eod(); } // next file write_plugin('I', "TEST8"); snprintf(buf, BIGBUFLEN, "FNAME:%s/bucket/%d/SHELL\n", PLUGINPREFIX, mypid); write_plugin('C', buf); write_plugin('C', "STAT:F 1099016 0 0 100640 1\n"); write_plugin('C', "TSTAMP:1504271937 1504271937 1504271937\n"); write_plugin('C', "PIPE:/bin/bash\n"); read_plugin(buf); signal_eod(); write_plugin('C', "DATA\n"); write_plugin('I', "TEST8Data"); if (regress_standard_error_backup) { // next file snprintf(buf, BIGBUFLEN, "FNAME:%s/bucket/%d/standard-error-file\n", PLUGINPREFIX, mypid); write_plugin('C', buf); write_plugin('C', "STAT:F 1048576 200 200 100640 1\n"); write_plugin('C', "TSTAMP:1504271937 
1504271937 1504271937\n"); signal_eod(); write_plugin('I', "TEST8-Error-Start"); /* here comes a file data contents */ write_plugin('E', "TEST8-Error: Standard IO Error goes Here"); write_plugin('I', "TEST8-Error-End"); // signal_eod(); } if (regress_backup_plugin_objects) { // test Plugin Objects interface write_plugin('I', "TEST PluginObject"); snprintf(buf, BIGBUFLEN, "PLUGINOBJ:%s/images/%d/vm1\n", PLUGINPREFIX, mypid); write_plugin('C', buf); write_plugin('C', "PLUGINOBJ_CAT:Image\n"); write_plugin('C', "PLUGINOBJ_TYPE:VM\n"); snprintf(buf, BIGBUFLEN, "PLUGINOBJ_NAME:%s%d/vm1 - Name\n", PLUGINPREFIX, mypid); write_plugin('C', buf); snprintf(buf, BIGBUFLEN, "PLUGINOBJ_SRC:%s\n", PLUGINPREFIX); write_plugin('C', buf); write_plugin('C', "PLUGINOBJ_UUID:c3260b8c560e5e093e8913065fa3cba9\n"); write_plugin('C', "PLUGINOBJ_SIZE:1024kB\n"); signal_eod(); write_plugin('I', "TEST PluginObject - END"); } // next file write_plugin('I', "TEST9"); snprintf(buf, BIGBUFLEN, "FNAME:%s/bucket/%d/lockfile\n", PLUGINPREFIX, mypid); write_plugin('C', buf); write_plugin('C', "STAT:E 0 300 300 0100640 1\n"); write_plugin('C', "TSTAMP:1504271937 1504271937 1504271937\n"); signal_eod(); write_plugin('I', "TEST9E"); signal_eod(); // next file snprintf(buf, BIGBUFLEN, "FNAME:%s/bucket/%d/file.xattr\n", PLUGINPREFIX, mypid); write_plugin('C', buf); write_plugin('C', "STAT:E 0 300 300 0100640 1\n"); write_plugin('C', "TSTAMP:1504271937 1504271937 1504271937\n"); signal_eod(); write_plugin('I', "TEST10"); signal_eod(); write_plugin('C', "XATTR\n"); write_plugin('D', "bacula.custom.data=Inteos\nsystem.custom.data=Bacula\n"); signal_eod(); snprintf(buf, BIGBUFLEN, "FNAME:%s/bucket/%d/vmsnap.iso\n", PLUGINPREFIX, mypid); write_plugin('C', buf); write_plugin('C', "STAT:S 1048576 0 0 100640 1\n"); write_plugin('C', "TSTAMP:1504271937 1504271937 1504271937\n"); snprintf(buf, BIGBUFLEN, "LSTAT:bucket/%d/vm1.iso/1508502750.495885/69312986/10485760/\n", mypid); write_plugin('C', buf); signal_eod(); write_plugin('I', "TEST11 - segmented object"); write_plugin('C', "DATA\n"); write_plugin('D', "/* here comes a file data contents */"); write_plugin('D', "/* here comes another file line */"); write_plugin('D', "/* here comes another file line */"); write_plugin('D', "/* here comes another file line */"); write_plugin('D', "/* here comes another file line */"); signal_eod(); if (regress_standard_error_backup) { // next file snprintf(buf, BIGBUFLEN, "FNAME:%s/bucket/%d/standard-error-file2\n", PLUGINPREFIX, mypid); write_plugin('C', buf); write_plugin('C', "STAT:F 1048576 200 200 100640 1\n"); write_plugin('C', "TSTAMP:1504271937 1504271937 1504271937\n"); signal_eod(); write_plugin('I', "TEST8-Error-Start"); /* here comes a file data contents */ write_plugin('C', "DATA\n"); write_plugin('D', "/* here comes a file data contents */"); write_plugin('D', "/* here comes another file line */"); write_plugin('D', "/* here comes another file line */"); write_plugin('D', "/* here comes another file line */"); write_plugin('E', "TEST8-Error: Standard IO Error goes Here"); write_plugin('I', "TEST8-Error-End"); // signal_eod(); } const int bigfileblock = 100000; const int bigfilesize = bigfileblock * 5; snprintf(buf, BIGBUFLEN, "FNAME:%s/bucket/%d/bigfile.raw\n", PLUGINPREFIX, mypid); write_plugin('C', buf); snprintf(buf, BIGBUFLEN, "STAT:F %d 0 0 100640 1\n", bigfilesize); write_plugin('C', buf); write_plugin('C', "TSTAMP:1504271937 1504271937 1504271937\n"); signal_eod(); write_plugin('I', "TEST17 - big file block"); write_plugin('C', 
"DATA\n"); { unsigned char *bigfileblock_ptr = (unsigned char*)malloc(bigfileblock); memset(bigfileblock_ptr, 0xA1, bigfileblock); for (int s = bigfilesize; s > 0; s -= bigfileblock) { write_plugin_bin(bigfileblock_ptr, bigfileblock); } free(bigfileblock_ptr); } signal_eod(); if (regress_error_backup_abort) { snprintf(buf, BIGBUFLEN, "FNAME:%s/bucket/%d/file on error\n", PLUGINPREFIX, mypid); write_plugin('C', buf); write_plugin('C', "STAT:F 234560 900 900 0100640 1\n"); write_plugin('C', "TSTAMP:1504271937 1504271937 1504271937\n"); signal_eod(); write_plugin('A', "Some error...\n"); return; } snprintf(buf, BIGBUFLEN, "FNAME:%s/bucket/%d/data.dir/\n", PLUGINPREFIX, mypid); write_plugin('C', buf); write_plugin('C', "STAT:D 1024 100 100 040755 1\n"); write_plugin('C', "TSTAMP:1504271937 1504271937 1504271937\n"); signal_eod(); write_plugin('I', "TEST15 - backup data dir"); write_plugin('C', "DATA\n"); write_plugin('D', "/* here comes a file data contents */"); write_plugin('D', "/* here comes another file line */"); signal_eod(); snprintf(buf, BIGBUFLEN, "FNAME:%s/bucket/%d/directory.with.xattrs/\n", PLUGINPREFIX, mypid); write_plugin('C', buf); write_plugin('C', "STAT:D 1024 100 100 040755 1\n"); write_plugin('C', "TSTAMP:1504271937 1504271937 1504271937\n"); signal_eod(); write_plugin('I', "TEST16 - backup dir + xattrs"); write_plugin('C', "DATA\n"); write_plugin('D', "/* here comes a file data contents */"); write_plugin('D', "/* here comes another file line */"); signal_eod(); write_plugin('C', "XATTR\n"); write_plugin('D', "bacula.custom.data=Inteos\nsystem.custom.data=Bacula\n"); signal_eod(); snprintf(buf, BIGBUFLEN, "FNAME:%s/bucket/%d/acl.dir/\n", PLUGINPREFIX, mypid); write_plugin('C', buf); write_plugin('C', "STAT:D 1024 100 100 040755 1\n"); write_plugin('C', "TSTAMP:1504271937 1504271937 1504271937\n"); signal_eod(); write_plugin('I', "TEST12 - backup dir"); signal_eod(); write_plugin('C', "ACL\n"); write_plugin('D', "user::rwx\ngroup::r-x\nother::r-x\n"); signal_eod(); snprintf(buf, BIGBUFLEN, "FNAME:%s/bucket/%d/\n", PLUGINPREFIX, mypid); write_plugin('C', buf); write_plugin('C', "STAT:D 1024 100 100 040755 1\n"); write_plugin('C', "TSTAMP:1504271937 1504271937 1504271937\n"); signal_eod(); write_plugin('I', "TEST12 - backup dir"); signal_eod(); write_plugin('C', "XATTR\n"); write_plugin('D', "bacula.custom.data=Inteos\nsystem.custom.data=Bacula\n"); signal_eod(); snprintf(buf, BIGBUFLEN, "FNAME:%s/bucket/\n", PLUGINPREFIX); write_plugin('C', buf); write_plugin('C', "STAT:D 1024 100 100 040755 1\n"); write_plugin('C', "TSTAMP:1504271937 1504271937 1504271937\n"); signal_eod(); write_plugin('I', "TEST12 - backup another dir"); write_plugin('W', "Make some warning messages."); signal_eod(); const char longfilenamestr[] = "cb1e1926239b467c8e9affd7d22cea4993940d1e8f5377a1540d2b58e10be5669888c7e729fc9fe98f1400ca2e68c93075fd26e2806bebd727c71022de47f37b" "cb1e1926239b467c8e9affd7d22cea4993940d1e8f5377a1540d2b58e10be5669888c7e729fc9fe98f1400ca2e68c93075fd26e2806bebd727c71022de47f37b" "cb1e1926239b467c8e9affd7d22cea4993940d1e8f5377a1540d2b58e10be5669888c7e729fc9fe98f1400ca2e68c93075fd26e2806bebd727c71022de47f37b" "cb1e1926239b467c8e9affd7d22cea4993940d1e8f5377a1540d2b58e10be5669888c7e729fc9fe98f1400ca2e68c93075fd26e2806bebd727c71022de47f37b" "ENDOFNAME"; const char *longfilename = longfilenamestr; // test for fname > 500c #if __cplusplus > 201103L static_assert(sizeof(longfilenamestr) > 500); #endif snprintf(buf, BIGBUFLEN, "FNAME:%s/%s\n", PLUGINPREFIX, longfilename); 
write_plugin('C', buf); write_plugin('C', "STAT:F 234560 901 901 0100640 1\n"); write_plugin('C', "TSTAMP:1504271937 1504271937 1504271937\n"); signal_eod(); write_plugin('I', "TEST13 - long FNAME test"); write_plugin('C', "DATA\n"); write_plugin('D', "/* here comes a file data contents */"); write_plugin('D', "/* here comes another file line */"); write_plugin('D', "/* here comes another file line */"); write_plugin('D', "/* here comes another file line */"); write_plugin('D', "/* here comes another file line */"); signal_eod(); if (regress_metadata_support) { snprintf(buf, BIGBUFLEN, "FNAME:%s/office/%d/document.docx\n", PLUGINPREFIX, mypid); write_plugin('C', buf); write_plugin('C', "STAT:F 10240 100 100 040755 1\n"); write_plugin('C', "TSTAMP:1504271937 1504271937 1504271937\n"); write_plugin('C', "METADATA_STREAM\n"); write_plugin('D', "{ \"bacula.custom.data\": \"Inteos\"\n \"system.custom.data\":\"Bacula\" }\n"); signal_eod(); write_plugin('C', "METADATA_STREAM\n"); write_plugin('D', "This is a binary data!"); signal_eod(); write_plugin('C', "METADATA_STREAM\n"); const size_t _mdlargebuf_len = 300000; unsigned char *_mdlargebuf = (unsigned char *)malloc(_mdlargebuf_len); memset(_mdlargebuf, '0', _mdlargebuf_len); write_plugin_bin(_mdlargebuf, _mdlargebuf_len); free(_mdlargebuf); signal_eod(); // disabled intentionally // write_plugin('C', "METADATA_CATALOG\n"); // write_plugin('D', "TABLE1: { field1: \"value1\", field2: \"value2\", field3: \"value3\"}"); // signal_eod(); // write_plugin('C', "METADATA_CATALOG\n"); // write_plugin('D', "TABLE2: { field2: \"value1\", field2: \"value2\", field3: \"value3\"}"); // signal_eod(); signal_eod(); // end of file attributes write_plugin('I', "TEST14 - backup metadata"); write_plugin('C', "DATA\n"); write_plugin('D', "/* here comes a file data contents */"); write_plugin('D', "/* here comes another file line */"); write_plugin('D', "/* here comes another file line */"); write_plugin('D', "/* here comes another file line */"); write_plugin('D', "/* here comes another file line */"); signal_eod(); } snprintf(buf, BIGBUFLEN, "FNAME:%s/office/%d/linked.file\n", PLUGINPREFIX, mypid); write_plugin('C', buf); snprintf(buf, BIGBUFLEN, "STAT:L 10240 100 100 040755 2 %d\n", fileindex); write_plugin('C', buf); write_plugin('C', "TSTAMP:1504271937 1504271937 1504271937\n"); snprintf(buf, BIGBUFLEN, "LSTAT:%s\n", fileindex_link); write_plugin('C', buf); signal_eod(); signal_eod(); // the file with external stat(2) packet snprintf(buf, BIGBUFLEN, "FNAME:%s/java/%d/stat.file\n", PLUGINPREFIX, mypid); write_plugin('C', buf); write_plugin('C', "STAT:/etc/passwd\n"); write_plugin('I', "TEST18"); signal_eod(); // here comes a file data contents write_plugin('C', "DATA\n"); write_plugin('D', "/* here comes a file data contents */"); write_plugin('D', "/* here comes another file line */"); write_plugin('D', "/* here comes another file line */"); write_plugin('D', "/* here comes another file line */"); write_plugin('D', "/* here comes another file line */"); signal_eod(); write_plugin('I', "TEST18Data"); // now test `STAT:/path/to/file` using symbolic link snprintf(symlink_fname, 32, "/tmp/passwd.%d", mypid); symlink("/etc/passwd", symlink_fname); // the file with external stat(2) packet snprintf(buf, BIGBUFLEN, "FNAME:%s/java/%d/stat.symlink\n", PLUGINPREFIX, mypid); write_plugin('C', buf); snprintf(buf, BIGBUFLEN, "STAT:%s\n", symlink_fname); write_plugin('C', buf); write_plugin('I', "TEST18S"); signal_eod(); signal_eod(); // now test `STAT:/path/to/file` using 
named pipe snprintf(namedpipe_fname, 32, "/tmp/namedpipe.%d", mypid); mkfifo(namedpipe_fname, 0600); // the file with external stat(2) packet snprintf(buf, BIGBUFLEN, "FNAME:%s/java/%d/stat.namedpipe\n", PLUGINPREFIX, mypid); write_plugin('C', buf); snprintf(buf, BIGBUFLEN, "STAT:%s\n", namedpipe_fname); write_plugin('C', buf); write_plugin('I', "TEST18NP"); signal_eod(); signal_eod(); // if (regress_backup_external_stat) { // the file with external stat(2) packet snprintf(buf, BIGBUFLEN, "FNAME:%s/java/%d/stat.error\n", PLUGINPREFIX, mypid); write_plugin('C', buf); snprintf(buf, BIGBUFLEN, "STAT:%s\n", "/nonexistent.file"); write_plugin('C', buf); write_plugin('I', "TEST18NEX"); signal_eod(); signal_eod(); } // this plugin object should be the latest item to backup if (regress_backup_plugin_objects) { // test Plugin Objects interface write_plugin('I', "TEST PluginObject Last"); snprintf(buf, BIGBUFLEN, "PLUGINOBJ:%s/images/%d/last_po_item\n", PLUGINPREFIX, mypid); write_plugin('C', buf); write_plugin('C', "PLUGINOBJ_CAT:POITEM\n"); write_plugin('C', "PLUGINOBJ_TYPE:POINTEM\n"); snprintf(buf, BIGBUFLEN, "PLUGINOBJ_NAME:%s%d/last_po_item - Name\n", PLUGINPREFIX, mypid); write_plugin('C', buf); snprintf(buf, BIGBUFLEN, "PLUGINOBJ_SRC:%s\n", PLUGINPREFIX); write_plugin('C', buf); write_plugin('C', "PLUGINOBJ_UUID:09bf8b2a-915d-11eb-8ebb-db6e14058a82\n"); write_plugin('C', "PLUGINOBJ_SIZE:1024kB\n"); signal_eod(); write_plugin('I', "TEST PluginObject Last - END"); } write_plugin('I', "M_INFO test message\n"); write_plugin('W', "M_WARNING test message\n"); write_plugin('S', "M_SAVED test message\n"); write_plugin('N', "M_NOTSAVED test message\n"); write_plugin('R', "M_RESTORED test message\n"); write_plugin('P', "M_SKIPPED test message\n"); write_plugin('O', "M_OPER?MOUNT test message\n"); write_plugin('V', "M_EVENTS test message\n"); if (regress_standard_error_backup) { write_plugin('Q', "M_ERROR test message\n"); } /* this is the end of all data */ signal_eod(); } /** * @brief Perform test estimate */ void perform_estimate(){ /* Estimate Loop (5) */ snprintf(buf, BIGBUFLEN, "FNAME:%s/bucket/%d/vm1.iso\n", PLUGINPREFIX, mypid); write_plugin('C', buf); write_plugin('C', "STAT:F 1048576 100 100 100640 1\n"); write_plugin('C', "TSTAMP:1504271937 1504271937 1504271937\n"); // write_plugin('I', "TEST5"); signal_eod(); snprintf(buf, BIGBUFLEN, "FNAME:%s/bucket/%d/vm2.iso\n", PLUGINPREFIX, mypid); write_plugin('C', buf); write_plugin('C', "STAT:F 1048576 200 200 100640 1\n"); write_plugin('C', "TSTAMP:1504271937 1504271937 1504271937\n"); signal_eod(); // write_plugin('I', "TEST5A"); if (regress_error_estimate_stderr) { // test some stderror handling errno = EACCES; perror("I've got some unsuspected error which I'd like to display on stderr (COMM_STDERR)"); } snprintf(buf, BIGBUFLEN, "FNAME:%s/bucket/%d/lockfile\n", PLUGINPREFIX, mypid); write_plugin('C', buf); write_plugin('C', "STAT:E 0 300 300 0100640 1\n"); write_plugin('C', "TSTAMP:1504271937 1504271937 1504271937\n"); signal_eod(); snprintf(buf, BIGBUFLEN, "FNAME:%s/bucket/%d/vmsnap.iso\n", PLUGINPREFIX, mypid); write_plugin('C', buf); write_plugin('C', "STAT:S 0 0 0 0120777 1\n"); write_plugin('C', "TSTAMP:1504271937 1504271937 1504271937\n"); snprintf(buf, BIGBUFLEN, "LSTAT:/bucket/%d/vm1.iso\n", mypid); write_plugin('C', buf); signal_eod(); snprintf(buf, BIGBUFLEN, "FNAME:%s/bucket/%d/\n", PLUGINPREFIX, mypid); write_plugin('C', buf); write_plugin('C', "STAT:D 1024 100 100 040755 1\n"); write_plugin('C', "TSTAMP:1504271937 1504271937 
1504271937\n"); signal_eod(); snprintf(buf, BIGBUFLEN, "FNAME:%s/bucket/\n", PLUGINPREFIX); write_plugin('C', buf); write_plugin('C', "STAT:D 1024 100 100 040755 1\n"); write_plugin('C', "TSTAMP:1504271937 1504271937 1504271937\n"); signal_eod(); /* this is the end of all data */ signal_eod(); } /* * The listing procedure * when: * - / - it display drwxr-x--- containers * - containers - it display drwxr-x--- bucket1 * drwxr-x--- bucket2 */ void perform_listing(char *listing){ /* Listing Loop (5) */ if (strcmp(listing, "containers") == 0){ /* this is a containers listing */ write_plugin('C', "FNAME:bucket1/\n"); write_plugin('C', "STAT:D 1024 100 100 040755 1\n"); write_plugin('C', "TSTAMP:1504271937 1504271937 1504271937\n"); signal_eod(); if (regress_error_listing_stderr) { // test some stderror handling errno = EACCES; perror("I've got some unsuspected error which I'd like to display on stderr (COMM_STDERR)"); } write_plugin('C', "FNAME:bucket2/\n"); write_plugin('C', "STAT:D 1024 100 100 040755 1\n"); write_plugin('C', "TSTAMP:1504271937 1504271937 1504271937\n"); signal_eod(); } else { if (strcmp(listing, "containers/bucket1") == 0){ snprintf(buf, BIGBUFLEN, "FNAME:bucket1/%d/vm1.iso\n", mypid); write_plugin('C', buf); write_plugin('C', "STAT:F 1048576 100 100 100640 1\n"); write_plugin('C', "TSTAMP:1504271937 1504271937 1504271937\n"); signal_eod(); snprintf(buf, BIGBUFLEN, "FNAME:bucket1/%d/lockfile\n", mypid); write_plugin('C', buf); write_plugin('C', "STAT:E 0 300 300 0100640 1\n"); write_plugin('C', "TSTAMP:1504271937 1504271937 1504271937\n"); signal_eod(); } else if (strcmp(listing, "containers/bucket2") == 0){ snprintf(buf, BIGBUFLEN, "FNAME:bucket2/%d/vm2.iso\n", mypid); write_plugin('C', buf); write_plugin('C', "STAT:F 1048576 200 200 100640 1\n"); write_plugin('C', "TSTAMP:1504271937 1504271937 1504271937\n"); signal_eod(); snprintf(buf, BIGBUFLEN, "FNAME:bucket2/%d/vmsnap.iso\n", mypid); write_plugin('C', buf); write_plugin('C', "STAT:S 0 0 0 0120777 1\n"); write_plugin('C', "TSTAMP:1504271937 1504271937 1504271937\n"); snprintf(buf, BIGBUFLEN, "LSTAT:/bucket/%d/vm1.iso\n", mypid); write_plugin('C', buf); signal_eod(); } else { /* this is a top-level listing, response with a single containers list */ snprintf(buf, BIGBUFLEN, "FNAME:containers\n"); write_plugin('C', buf); write_plugin('C', "STAT:D 0 0 0 040755 1\n"); write_plugin('C', "TSTAMP:1504271937 1504271937 1504271937\n"); // write_plugin('I', "TEST5"); signal_eod(); } } /* this is the end of all data */ signal_eod(); } const unsigned char m_json[] = { 0x7b, 0x22, 0x77, 0x69, 0x64, 0x67, 0x65, 0x74, 0x22, 0x3a, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x62, 0x75, 0x67, 0x22, 0x3a, 0x20, 0x22, 0x6f, 0x6e, 0x22, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x22, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x22, 0x3a, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x22, 0x3a, 0x20, 0x22, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x20, 0x4b, 0x6f, 0x6e, 0x66, 0x61, 0x62, 0x75, 0x6c, 0x61, 0x74, 0x6f, 0x72, 0x20, 0x57, 0x69, 0x64, 0x67, 0x65, 0x74, 0x22, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x3a, 0x20, 0x22, 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x22, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x77, 0x69, 0x64, 0x74, 0x68, 0x22, 0x3a, 0x20, 0x35, 0x30, 0x30, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 
0x3a, 0x20, 0x35, 0x30, 0x30, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x22, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x22, 0x3a, 0x20, 0x7b, 0x20, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x73, 0x72, 0x63, 0x22, 0x3a, 0x20, 0x22, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x2f, 0x53, 0x75, 0x6e, 0x2e, 0x70, 0x6e, 0x67, 0x22, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x3a, 0x20, 0x22, 0x73, 0x75, 0x6e, 0x31, 0x22, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x68, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x22, 0x3a, 0x20, 0x32, 0x35, 0x30, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x76, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x22, 0x3a, 0x20, 0x32, 0x35, 0x30, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x61, 0x6c, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x22, 0x3a, 0x20, 0x22, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x22, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x65, 0x78, 0x74, 0x22, 0x3a, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x61, 0x74, 0x61, 0x22, 0x3a, 0x20, 0x22, 0x43, 0x6c, 0x69, 0x63, 0x6b, 0x20, 0x48, 0x65, 0x72, 0x65, 0x22, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x73, 0x69, 0x7a, 0x65, 0x22, 0x3a, 0x20, 0x33, 0x36, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x22, 0x3a, 0x20, 0x22, 0x62, 0x6f, 0x6c, 0x64, 0x22, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x3a, 0x20, 0x22, 0x74, 0x65, 0x78, 0x74, 0x31, 0x22, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x68, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x22, 0x3a, 0x20, 0x32, 0x35, 0x30, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x76, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x22, 0x3a, 0x20, 0x31, 0x30, 0x30, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x61, 0x6c, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x22, 0x3a, 0x20, 0x22, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x22, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x6f, 0x6e, 0x4d, 0x6f, 0x75, 0x73, 0x65, 0x55, 0x70, 0x22, 0x3a, 0x20, 0x22, 0x73, 0x75, 0x6e, 0x31, 0x2e, 0x6f, 0x70, 0x61, 0x63, 0x69, 0x74, 0x79, 0x20, 0x3d, 0x20, 0x28, 0x73, 0x75, 0x6e, 0x31, 0x2e, 0x6f, 0x70, 0x61, 0x63, 0x69, 0x74, 0x79, 0x20, 0x2f, 0x20, 0x31, 0x30, 0x30, 0x29, 0x20, 0x2a, 0x20, 0x39, 0x30, 0x3b, 0x22, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x7d, 0x7d, 0x0a, 0x00 }; unsigned int m_json_len = 603; /* * The query param procedure * return 3 simple parameters */ void perform_queryparam(const char *query) { /* Query Loop (5) */ if (strcmp(query, "m_id") == 0) { snprintf(buf, BIGBUFLEN, "%s=test1\n", query); write_plugin('C', buf); snprintf(buf, BIGBUFLEN, "%s=test2\n", query); write_plugin('C', buf); snprintf(buf, BIGBUFLEN, "%s=test3\n", query); write_plugin('C', buf); } else if (strcmp(query, "m_json") == 0) { write_plugin_bin(m_json, m_json_len); write_plugin('D', "UmFkb3PFgmF3IEtvcnplbmlld3NraQo=\n"); } /* this is the end of all data */ signal_eod(); } /* * The main and universal restore procedure */ void perform_restore() { bool loopgo = true; bool restore_skip_create = false; bool restore_with_core = false; bool restore_skip_metadata = false; if (regress_error_restore_stderr) { // test some stderror handling errno = EACCES; perror("I've got some unsuspected error 
which I'd like to display on stderr (COMM_STDERR)"); } /* Restore Loop (5) */ LOG("#> Restore Loop."); while (true) { read_plugin(buf); /* check if FINISH job */ if (strcmp(buf, "FINISH\n") == 0) { LOG("#> finish files."); break; } /* check for ACL command */ if (strcmp(buf, "ACL\n") == 0) { while (read_plugin(buf) > 0); LOG("#> ACL data saved."); write_plugin('I', "TEST5R - acl data saved."); write_plugin('C', "OK\n"); continue; } /* check for XATTR command */ if (strcmp(buf, "XATTR\n") == 0) { while (read_plugin(buf) > 0); LOG("#> XATTR data saved."); write_plugin('I', "TEST5R - xattr data saved."); write_plugin('C', "OK\n"); continue; } /* check if FNAME then follow file parameters */ if (strncmp(buf, "FNAME:", 6) == 0) { restore_with_core = strstr(buf, "/_restore_with_core/") != NULL && (strstr(buf, "/etc/issue") != NULL || strstr(buf, "/fileforcore") != NULL); restore_skip_create = strstr(buf, "/_restore_skip_create/") != NULL; restore_skip_metadata = strstr(buf, "/_restore_skip_metadata/") != NULL; /* we read here a file parameters */ while (read_plugin(buf) > 0); if (restore_skip_create){ // simple skipall write_plugin('I', "TEST5R - create file skipped."); write_plugin('C', "SKIP\n"); continue; } if (restore_with_core){ // signal Core write_plugin('I', "TEST5R - handle file with Core."); write_plugin('C', "CORE\n"); continue; } // signal OK write_plugin('I', "TEST5R - create file ok."); write_plugin('C', "OK\n"); continue; } /* check for METADATA stream */ if (strncmp(buf, "METADATA_STREAM", 15) == 0) { // handle metadata read_plugin_data_stream(); /* signal OK */ LOG("#> METADATA_STREAM data saved."); if (restore_skip_metadata){ write_plugin('I', "TEST5R - metadata select skip restore."); write_plugin('C', "SKIP\n"); continue; } write_plugin('I', "TEST5R - metadata saved."); write_plugin('C', "OK\n"); continue; } /* Restore Object stream */ if (strncmp(buf, "RESTOREOBJ", 10) == 0) { read_plugin(buf); // RESTOREOBJ_LEN // handle object data read_plugin_data_stream(); /* signal OK */ LOG("#> RESTOREOBJ data saved."); write_plugin('I', "TEST6R - RO saved."); write_plugin('C', "OK\n"); continue; } /* check if DATA command, so read the data packets */ if (strcmp(buf, "DATA\n") == 0){ int len = read_plugin(buf); if (len == 0){ /* empty file to restore */ LOG("#> Empty file."); continue; } else { LOG("#> file data saved."); } loopgo = true; int fsize = len; while (loopgo){ len = read_plugin(buf); fsize += len; if (len > 0){ LOG("#> file data saved."); continue; } else { loopgo = false; snprintf(buflog, 4096, "#> file data END = %i", fsize); } } /* confirm restore ok */ write_plugin('I', "TEST5R - end of data."); write_plugin('C', "OK\n"); } else { write_plugin('E', "Error DATA command required."); exit(EXIT_BACKEND_DATA_COMMAND_REQ); } } /* this is the end of all data */ signal_eod(); } /* * Start here */ int main(int argc, char** argv) { int len; char *listing; char *query; buf = (char*)malloc(BIGBUFLEN); if (buf == NULL){ exit(EXIT_BACKEND_NOMEMORY); } buflog = (char*)malloc(BUFLEN); if (buflog == NULL){ exit(EXIT_BACKEND_NOMEMORY); } listing = (char*)malloc(BUFLEN); if (listing == NULL){ exit(EXIT_BACKEND_NOMEMORY); } query = (char*)malloc(BUFLEN); if (query == NULL){ exit(EXIT_BACKEND_NOMEMORY); } working_directory[0] = 0; mypid = getpid(); snprintf(buf, 4096, "%s/%s_backend_%d.log", LOGDIR, PLUGINNAME, mypid); logfd = open(buf, O_CREAT|O_TRUNC|O_WRONLY, 0640); if (logfd < 0){ exit(EXIT_BACKEND_LOGFILE_ERROR); } //sleep(30); #ifdef F_GETPIPE_SZ int pipesize = fcntl(STDIN_FILENO, 
F_GETPIPE_SZ); snprintf(buflog, BUFLEN, "#> F_GETPIPE_SZ:%i", pipesize); LOG(buflog); #endif /* handshake (1) */ len = read_plugin(buf); #if 1 write_plugin('C',"Hello Bacula\n"); #else write_plugin('E',"Invalid Plugin name."); goto Term; #endif /* Job Info (2) */ while ((len = read_plugin(buf)) > 0) { if (strcmp(buf, "Level=I\n") == 0) { Job_Level_Incremental = true; continue; } } write_plugin('I', "TEST2"); signal_eod(); /* Plugin Params (3) */ while ((len = read_plugin(buf)) > 0) { // "regress_error_plugin_params", // "regress_error_start_job", // "regress_error_backup_no_files", // "regress_error_backup_stderr", // "regress_error_estimate_stderr", // "regress_error_listing_stderr", // "regress_error_restore_stderr", // "regress_backup_plugin_objects", // "regress_error_backup_abort", // "regress_standard_error_backup", // "regress_cancel_backup", // "regress_cancel_restore", if (strcmp(buf, "regress_error_plugin_params=1\n") == 0) { regress_error_plugin_params = true; continue; } if (strcmp(buf, "regress_error_start_job=1\n") == 0) { regress_error_start_job = true; continue; } if (strcmp(buf, "regress_error_backup_no_files=1\n") == 0) { regress_error_backup_no_files = true; continue; } if (strcmp(buf, "regress_error_backup_stderr=1\n") == 0) { regress_error_backup_stderr = true; continue; } if (strcmp(buf, "regress_error_estimate_stderr=1\n") == 0) { regress_error_estimate_stderr = true; continue; } if (strcmp(buf, "regress_error_listing_stderr=1\n") == 0) { regress_error_listing_stderr = true; continue; } if (strcmp(buf, "regress_error_restore_stderr=1\n") == 0) { regress_error_restore_stderr = true; continue; } if (strcmp(buf, "regress_backup_plugin_objects=1\n") == 0) { regress_backup_plugin_objects = true; continue; } if (strcmp(buf, "regress_error_backup_abort=1\n") == 0) { regress_error_backup_abort = true; continue; } if (strcmp(buf, "regress_backup_other_file=1\n") == 0) { regress_backup_other_file = true; continue; } if (strcmp(buf, "regress_backup_external_stat=1\n") == 0) { regress_backup_external_stat = true; continue; } if (strcmp(buf, "regress_metadata_support=1\n") == 0) { regress_metadata_support = true; continue; } if (strcmp(buf, "regress_standard_error_backup=1\n") == 0) { regress_standard_error_backup = true; continue; } if (strcmp(buf, "regress_cancel_backup=1\n") == 0) { regress_cancel_backup = true; continue; } if (strcmp(buf, "regress_cancel_restore=1\n") == 0) { regress_cancel_restore = true; continue; } if (sscanf(buf, "listing=%s\n", buf) == 1) { strcpy(listing, buf); continue; } if (sscanf(buf, "query=%s\n", buf) == 1) { strcpy(query, buf); continue; } if (sscanf(buf, "regress_working_dir=%s\n", buf) == 1) { strcpy(working_directory, buf); continue; } } if (regress_cancel_restore || regress_cancel_backup) { if (signal(SIGUSR1, catch_function) == SIG_ERR) { LOG("Cannot setup signal handler!"); exit(EXIT_BACKEND_SIGNAL_HANDLER_ERROR); } } write_plugin('I', "TEST3"); if (!regress_error_plugin_params) { signal_eod(); } else { write_plugin('E', "We do not accept your TEST3E! AsRequest."); } /* Start Backup/Estimate/Restore (4) */ len = read_plugin(buf); write_plugin('I', "TEST4"); if (regress_error_start_job) { write_plugin('A', "We do not accept your TEST4E! AsRequest."); goto Term; } if (Job_Level_Incremental) { write_plugin('C', "STRIP:1\n"); } signal_eod(); // ACK of the `BackupStart` phase is always required! 
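/*
 * Editorial summary (not part of the original source): at this point the
 * backend has completed the numbered protocol phases marked in the code above:
 *   (1) handshake      - read the plugin name, answer "Hello Bacula"
 *   (2) Job Info       - read lines such as "Level=I" until EOD, ack with TEST2
 *   (3) Plugin Params  - read the regress_* switches plus listing/query/working dir
 *   (4) start command  - "BackupStart", "EstimateStart", "ListingStart",
 *                        "QueryStart" or "RestoreStart", always acknowledged
 * The dispatch below selects the matching perform_*() routine for the job data
 * phase and finally exchanges the End Job packets before terminating.
 */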
/* check what kind of Job we have */ buf[len] = 0; if (strcmp(buf, "BackupStart\n") == 0) { perform_backup(); } else if (strcmp(buf, "EstimateStart\n") == 0) { perform_estimate(); } else if (strcmp(buf, "ListingStart\n") == 0) { perform_listing(listing); } else if (strcmp(buf, "QueryStart\n") == 0) { perform_queryparam(query); } else if (strcmp(buf, "RestoreStart\n") == 0) { perform_restore(); } /* End Job */ len = read_plugin(buf); write_plugin('I', "TESTEND"); signal_eod(); len = read_plugin(buf); Term: signal_term(); LOG("#> Unlink symlink_fname."); unlink(symlink_fname); LOG("#> Unlink namedpipe_fname."); unlink(namedpipe_fname); LOG("#> Terminating backend."); close(logfd); free(buf); free(buflog); free(listing); return (EXIT_SUCCESS); } bacula-15.0.3/src/plugins/fd/pluginlib/commctx.h0000644000175000017500000001021314771010173021330 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /** * @file commctx.h * @author Radosław Korzeniewski (radoslaw@korzeniewski.net) * @brief This is a Bacula plugin command context switcher template. * @version 1.3.0 * @date 2020-09-13 * * @copyright Copyright (c) 2021 All rights reserved. * IP transferred to Bacula Systems according to agreement. */ #ifndef PLUGINLIB_COMMCTX_H #define PLUGINLIB_COMMCTX_H #include "pluginlib.h" #include "smartalist.h" template <typename T> class COMMCTX : public SMARTALLOC { struct CMD : public SMARTALLOC { POOL_MEM cmd; T * ptr; CMD(const char * command) : cmd(PM_FNAME), ptr(New(T(command))) { pm_strcpy(cmd, command); }; #if __cplusplus > 201103L CMD() = delete; CMD(CMD &) = delete; CMD(CMD &&) = delete; #endif ~CMD() { delete ptr; }; }; /** The command context list for multiple config execution for a single job */ smart_alist<CMD> _command_list; public: T * ctx; COMMCTX() : ctx(NULL) {}; #if __cplusplus > 201103L COMMCTX(COMMCTX &) = delete; COMMCTX(COMMCTX &&) = delete; ~COMMCTX() = default; #else ~COMMCTX() {}; #endif T * switch_command(const char *command); bool check_command(const char *command); void foreach_command(void (*func)(T *, void *), void *param); bRC foreach_command_status(bRC (*func)(T *, void *), void *param); bool is_ctx_null() { return ctx == NULL ; } /* Shortcut that will blow up if ctx is not yet defined, use carefully * The gdb backtrace is misleading, we see pluginctx->xxx while we * do pluginctx.ctx->xxx */ T * operator->() { return ctx; } }; /** * @brief Switches the current command context to the requested one. * * @tparam T is the command context type. * @param command is the command context string to switch into. * @return T* is the command context pointer. */ template <typename T> T * COMMCTX<T>::switch_command(const char * command) { CMD * cmdctx; foreach_alist(cmdctx, &_command_list) { if (bstrcmp(cmdctx->cmd.c_str(), command)) { ctx = cmdctx->ptr; return ctx; } } cmdctx = New(CMD(command)); _command_list.append(cmdctx); ctx = cmdctx->ptr; return ctx; } /** * @brief Checks if a command context is already defined on the list. * * @tparam T is the command context type. 
* @param command is the command context string to check. * @return true when the command context already exists. * @return false when the command context does not exist yet. */ template <typename T> bool COMMCTX<T>::check_command(const char *command) { CMD * cmdctx; foreach_alist(cmdctx, &_command_list) { if (bstrcmp(cmdctx->cmd.c_str(), command)) return true; } return false; } /** * @brief Iterates over all command contexts and executes a function on each. * * @tparam T is the command context type. * @param func is the function executed for every command context. * @param param is the execution function param. */ template <typename T> void COMMCTX<T>::foreach_command(void(*func)(T*, void*), void* param) { CMD * cmdctx; foreach_alist(cmdctx, &_command_list) { ctx = cmdctx->ptr; func(ctx, param); } } /** * @brief Iterates over all command contexts, executes a function on each and returns a combined status. * * @tparam T is the command context type. * @param func is the function executed for every command context. * @param param is the execution function param. * @return bRC_OK when every call returned bRC_OK, otherwise the last non-OK status. */ template <typename T> bRC COMMCTX<T>::foreach_command_status(bRC(*func)(T*, void*), void* param) { CMD * cmdctx; bRC status = bRC_OK; foreach_alist(cmdctx, &_command_list) { ctx = cmdctx->ptr; bRC rc = func(ctx, param); if (rc != bRC_OK) status = rc; } return status; } #endif // PLUGINLIB_COMMCTX_H bacula-15.0.3/src/plugins/fd/pluginlib/metaplugin.h0000644000175000017500000003171314771010173022033 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /** * @file metaplugin.h * @author Radosław Korzeniewski (radoslaw@korzeniewski.net) * @brief This is a Bacula metaplugin interface. * @version 3.0.0 * @date 2021-10-07 * * @copyright Copyright (c) 2021 All rights reserved. * IP transferred to Bacula Systems according to agreement. 
*/ #include "pluginlib.h" #include "ptcomm.h" #include "lib/ini.h" #include "commctx.h" #include "smartalist.h" #define USE_CMD_PARSER #include "fd_common.h" #ifndef PLUGINLIB_METAPLUGIN_H #define PLUGINLIB_METAPLUGIN_H // custom checkFile() callback typedef bRC (*checkFile_t)(bpContext *ctx, char *fname); // metadata handling map struct metadataTypeMap { const char *command; metadata_type type; }; extern const metadataTypeMap plugin_metadata_map[]; // Plugin Info definitions extern const char *PLUGIN_LICENSE; extern const char *PLUGIN_AUTHOR; extern const char *PLUGIN_DATE; extern const char *PLUGIN_VERSION; extern const char *PLUGIN_DESCRIPTION; // Plugin linking time variables extern const char *PLUGINPREFIX; /// is used for prefixing every Job and Debug messages generted by a plugin extern const char *PLUGINNAME; /// should match the backend $pluginname$ used for Handshake procedure extern const bool CUSTOMNAMESPACE; /// defines if metaplugin should send `Namespace=...` backend plugin parameter using PLUGINNAMESPACE variable extern const bool CUSTOMPREVJOBNAME; /// defines if metaplugin should send `PrevJobName=...` backend plugin parameter from bacula variable extern const char *PLUGINNAMESPACE; /// custom backend plugin namespace used as file name prefix extern const char *PLUGINAPI; /// the plugin api string which should match backend expectations extern const char *BACKEND_CMD; /// a backend execution command path extern const int32_t CUSTOMCANCELSLEEP; /// custom wait time for backend between USR1 and terminate procedures extern const bool ACCURATEPLUGINPARAMETER; /// accurate parameter for plugin parameter extern const int ADDINCLUDESTRIPOPTION; /// setup precompiled include path strip option extern const bool DONOTSAVE_FT_PLUGIN_CONFIG; /// when set to `true` then Metaplugin won't save FT_PLUGIN_CONFIG as a first file during Full backup extern const uint32_t BACKEND_TIMEOUT; /// define a custom timeout value in seconds for data exchange (read from or write to backend) /// defines if metaplugin should handle local filesystem restore with Bacula Core functions /// `false` means metaplugin will redirect local restore to backend /// `true` means Bacula Core functions will handle local restore extern const bool CORELOCALRESTORE; // The list of restore options saved to the RestoreObject. extern struct ini_items plugin_items_dump[]; // the list of valid plugin options extern const char *valid_params[]; // custom checkFile() callback extern checkFile_t checkFile; // a simple ro buf class struct restore_object_class { POOL_MEM plugin_name; POOL_MEM object_name; POOL_MEM data; int32_t length; bool sent; }; /* * This is a main plugin API class. It manages a plugin context. * All the public methods correspond to a public Bacula API calls, even if * a callback is not implemented. 
*/ class METAPLUGIN: public SMARTALLOC { public: enum MODE { NONE = 0, Backup_Full, BACKUP_FULL = Backup_Full, Backup_Incr, BACKUP_INCR = Backup_Incr, Backup_Diff, BACKUP_DIFF = Backup_Diff, Estimate_Full, Estimate_Incr, Estimate_Diff, // Listing, // QueryParams, RESTORE, }; POOL_MEM backend_cmd; bRC getPluginValue(bpContext *ctx, pVariable var, void *value); bRC setPluginValue(bpContext *ctx, pVariable var, void *value); bRC handlePluginEvent(bpContext *ctx, bEvent *event, void *value); bRC startBackupFile(bpContext *ctx, struct save_pkt *sp); bRC endBackupFile(bpContext *ctx); bRC startRestoreFile(bpContext *ctx, const char *cmd); bRC endRestoreFile(bpContext *ctx); bRC pluginIO(bpContext *ctx, struct io_pkt *io); bRC createFile(bpContext *ctx, struct restore_pkt *rp); bRC setFileAttributes(bpContext *ctx, struct restore_pkt *rp); bRC checkFile(bpContext *ctx, char *fname); bRC handleXACLdata(bpContext *ctx, struct xacl_pkt *xacl); bRC queryParameter(bpContext *ctx, struct query_pkt *qp); bRC metadataRestore(bpContext *ctx, struct meta_pkt *mp); void setup_backend_command(bpContext *ctx, POOL_MEM &exepath); void terminate_backends_oncancel(bpContext *ctx); METAPLUGIN() : backend_cmd(PM_FNAME), job_cancelled(false), backend_available(false), backend_error(PM_MESSAGE), mode(NONE), JobId(0), JobName(NULL), since(0), accurate_mode(0), accurate_mode_err(false), where(NULL), regexwhere(NULL), replace(0), pluginconfigsent(false), estimate(false), listing(None), nodata(false), nextfile(false), openerror(false), object(FileObject), objectsent(false), readacl(false), readxattr(false), skipextract(false), new_include_created(false), strip_path_option(ADDINCLUDESTRIPOPTION), last_type(0), fname(PM_FNAME), lname(PM_FNAME), robjbuf(PM_MESSAGE), plugin_obj_cat(PM_FNAME), plugin_obj_type(PM_FNAME), plugin_obj_name(PM_FNAME), plugin_obj_src(PM_FNAME), plugin_obj_uuid(PM_FNAME), plugin_obj_size(PM_FNAME), acldatalen(0), acldata(PM_MESSAGE), xattrdatalen(0), xattrdata(PM_MESSAGE), metadatas_list(10, owned_by_alist), prevjobname(NULL), restoreobject_list() {} #if __cplusplus > 201103L METAPLUGIN(METAPLUGIN&) = delete; METAPLUGIN(METAPLUGIN&&) = delete; #endif ~METAPLUGIN() {} private: enum OBJECT { FileObject, PluginObject, RestoreObject, }; enum LISTING { None, Listing, Query, }; bool job_cancelled; // it signal the metaplugin that job was cancelled // smart_mutex mutex; // mutex to synchronize data access - removed on request bool backend_available; // When `False` then backend program is unuseable or unavailable POOL_MEM backend_error; // Holds the error string when backend program is unavailable MODE mode; // Plugin mode of operation int JobId; // Job ID char *JobName; // Job name time_t since; // Job since parameter int accurate_mode; // if the job is accurate bool accurate_mode_err; // if true then no more errors about lack of accurate mode required char *where; // the Where variable for restore job if set by user char *regexwhere; // the RegexWhere variable for restore job if set by user char replace; // the replace variable for restore job bool pluginconfigsent; // set when RestoreObject/INI_RESTORE_OBJECT_NAME_FT_PLUGIN_CONFIG was sent during Full backup bool estimate; // used when mode is METAPLUGIN_BACKUP_* but we are doing estimate only LISTING listing; // used for a Listing procedure for estimate bool nodata; // set when backend signaled no data for backup or no data for restore bool nextfile; // set when IO_CLOSE got FNAME: command bool openerror; // show if "openfile" was unsuccessful OBJECT 
object; // bool objectsent; // set when startBackupFile handled object and endBackupFile has to check for nextfile bool readacl; // got ACL data from backend bool readxattr; // got XATTR data from backend bool skipextract; // got SKIP response from backend, so we should artificially skip it for backend bool new_include_created; // when NewInclude() was executed int strip_path_option; // int32_t last_type; // contains the last restore file type COMMCTX backend; // the backend context list for multiple backend execution for a single job POOL_MEM fname; // current file name to backup (grabbed from backend) POOL_MEM lname; // current LSTAT data if any POOL_MEM robjbuf; // the buffer for restore object data POOL_MEM plugin_obj_cat; // Plugin object Category POOL_MEM plugin_obj_type; // Plugin object Type POOL_MEM plugin_obj_name; // Plugin object Name POOL_MEM plugin_obj_src; // Plugin object Source POOL_MEM plugin_obj_uuid; // Plugin object UUID uint64_t plugin_obj_size; // Plugin object Size int acldatalen; // the length of the data in acl buffer POOL_MEM acldata; // the buffer for ACL data received from backend int xattrdatalen; // the length of the data in xattr buffer POOL_MEM xattrdata; // the buffer for XATTR data received from backend cmd_parser parser; // Plugin command parser ConfigFile ini; // Restore ini file handler alist metadatas_list; // a list if managed metadatas plugin_metadata metadatas; // the private metadata class const char *prevjobname; // this is a bVarPrevJobName parameter if requested // a list of received RO from bacula which we will use to feed into backend later smart_alist restoreobject_list; bRC parse_plugin_command(bpContext *ctx, const char *command, smart_alist ¶ms); bRC handle_plugin_restoreobj(bpContext *ctx, restore_object_pkt *rop); bRC run_backend(bpContext *ctx); bRC send_parameters(bpContext *ctx, char *command); bRC send_jobinfo(bpContext *ctx, char type); bRC send_startjob(bpContext *ctx, const char *command); bRC send_startbackup(bpContext *ctx); bRC send_startestimate(bpContext *ctx); bRC send_startlisting(bpContext *ctx); bRC send_startquery(bpContext *ctx); bRC send_startrestore(bpContext *ctx); bRC send_endjob(bpContext *ctx); bRC prepare_backend(bpContext *ctx, char type, char *command); bRC perform_backup_open(bpContext *ctx, struct io_pkt *io); bRC perform_restore_open(bpContext *ctx, struct io_pkt *io); bRC perform_read_data(bpContext *ctx, struct io_pkt *io); bRC perform_write_data(bpContext *ctx, struct io_pkt *io); bRC perform_write_end(bpContext *ctx, struct io_pkt *io); bRC perform_read_metacommands(bpContext *ctx); bRC perform_read_pluginobject(bpContext *ctx, struct save_pkt *sp); bRC perform_read_restoreobject(bpContext *ctx, struct save_pkt *sp); bRC perform_read_acl(bpContext *ctx); bRC perform_write_acl(bpContext *ctx, const xacl_pkt * xacl); bRC perform_read_xattr(bpContext *ctx); bRC perform_write_xattr(bpContext *ctx, const xacl_pkt * xacl); bRC perform_read_metadata_info(bpContext *ctx, metadata_type type, struct save_pkt *sp); bRC perform_file_index_query(bpContext *ctx); bRC perform_accept_file(bpContext *ctx); bRC perform_addinclude(bpContext *ctx); bRC perform_change_split_option(bpContext *ctx, int nr); // bRC perform_write_metadata_info(bpContext *ctx, struct meta_pkt *mp); metadata_type scan_metadata_type(bpContext *ctx, const POOL_MEM &cmd); const char *prepare_metadata_type(metadata_type type); int check_ini_param(char *param); bool check_plugin_param(const char *param, alist *params); int get_ini_count(); void 
new_backendlist(bpContext *ctx, char *command); bRC switch_or_run_backend(bpContext *ctx, char *command); bRC terminate_current_backend(bpContext *ctx); bRC terminate_all_backends(bpContext *ctx); bRC cancel_all_backends(bpContext *ctx); bRC signal_finish_all_backends(bpContext *ctx); bRC render_param(bpContext *ctx, POOL_MEM ¶m, INI_ITEM_HANDLER *handler, char *key, item_value val); }; #endif // PLUGINLIB_METAPLUGIN_H bacula-15.0.3/src/plugins/fd/pluginlib/pluginctx.cpp0000644000175000017500000026217114771010173022242 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /** * @file pluginctx.cpp * @author Radosław Korzeniewski (radoslaw@korzeniewski.net) * @brief This is a Bacula File Daemon general plugin framework. The Ccontext. * @version 1.0.0 * @date 2021-04-08 * * @copyright Copyright (c) 2021 All rights reserved. IP transferred to Bacula Systems according to agreement. */ #include "pluginctx.h" #include #include #include /* * libbac uses its own sscanf implementation which is not compatible with * libc implementation, unfortunately. * use bsscanf for Bacula sscanf flavor */ #ifdef sscanf #undef sscanf #endif namespace pluginlib { /** * @brief Check if a parameter (param) exist in ConfigFile variables set by user. * The checking ignore case of the parameter. * * @param param a parameter to search in ini parameter keys * @return int -1 - when a parameter param is not found in ini keys * - whan a parameter param is found and is an index in ini->items table */ int PLUGINCTX::check_ini_param(char *param) { if (ini.items){ for (int k = 0; ini.items[k].name; k++){ if (ini.items[k].found && strcasecmp(param, ini.items[k].name) == 0){ return k; } } } return -1; } bRC PLUGINCTX::parse_plugin_config(bpContext *ctx, restore_object_pkt *rop) { ini.clear_items(); if (!ini.dump_string(rop->object, rop->object_len)) { DMSG0(ctx, DERROR, "ini->dump_string failed\n"); JMSG0(ctx, M_FATAL, "Unable to parse user set restore configuration.\n"); return bRC_Error; } ini.register_items(plugin_items_dump, sizeof(struct ini_items)); if (!ini.parse(ini.out_fname)) { DMSG0(ctx, DERROR, "ini->parse failed\n"); JMSG0(ctx, M_FATAL, "Unable to parse user set restore configuration.\n"); return bRC_Error; } for (int i = 0; ini.items[i].name; i++) { if (ini.items[i].found) { if (ini.items[i].handler == ini_store_str) { DMSG2(ctx, DINFO, "INI: %s = %s\n", ini.items[i].name, ini.items[i].val.strval); } else if (ini.items[i].handler == ini_store_int64) { DMSG2(ctx, DINFO, "INI: %s = %lld\n", ini.items[i].name, ini.items[i].val.int64val); } else if (ini.items[i].handler == ini_store_bool) { DMSG2(ctx, DINFO, "INI: %s = %s\n", ini.items[i].name, ini.items[i].val.boolval ? 
"True" : "False"); } else { DMSG1(ctx, DERROR, "INI: unsupported parameter handler for: %s\n", ini.items[i].name); JMSG1(ctx, M_FATAL, "INI: unsupported parameter handler for: %s\n", ini.items[i].name); return bRC_Error; } } } return bRC_OK; } #if 0 /** * @brief Search if parameter (param) is on parameter list prepared for backend. * The checking ignore case of the parameter. * * @param param the parameter which we are looking for * @param params the list of parameters to search * @return true when the parameter param is found in list * @return false when we can't find the param on list */ bool PLUGINCLASS::check_plugin_param(const char *param, alist *params) { POOLMEM *par; char *equal; bool found = false; foreach_alist(par, params){ equal = strchr(par, '='); if (equal){ /* temporary terminate the par at parameter name */ *equal = '\0'; if (strcasecmp(par, param) == 0){ found = true; } /* restore parameter equal sign */ *equal = '='; } else { if (strcasecmp(par, param) == 0){ found = true; } } } return found; } /** * @brief Counts the number of ini->items available as it is a NULL terminated array. * * @return int the number of ini->items */ int PLUGINCLASS::get_ini_count() { int count = 0; if (ini.items){ for (int k = 0; ini.items[k].name; k++){ if (ini.items[k].found){ count++; } } } return count; } /** * @brief Parsing a plugin command. * * @param ctx bpContext - Bacula Plugin context structure * @param command plugin command string to parse * @param params output parsed params list * @return bRC bRC_OK - on success, bRC_Error - on error */ bRC PLUGINCLASS::parse_plugin_command(bpContext *ctx, const char *command, smart_alist ¶ms) { bool found; int count; int parargc, argc; POOL_MEM *param; DMSG(ctx, DINFO, "Parse command: %s\n", command); if (parser.parse_cmd(command) != bRC_OK) { DMSG0(ctx, DERROR, "Unable to parse Plugin command line.\n"); JMSG0(ctx, M_FATAL, "Unable to parse Plugin command line.\n"); return bRC_Error; } /* count the numbers of user parameters if any */ count = get_ini_count(); /* the first (zero) parameter is a plugin name, we should skip it */ argc = parser.argc - 1; parargc = argc + count; /* first parameters from plugin command saved during backup */ for (int i = 1; i < parser.argc; i++) { param = new POOL_MEM(PM_FNAME); // TODO: change to POOL_MEM found = false; int k; /* check if parameter overloaded by restore parameter */ if ((k = check_ini_param(parser.argk[i])) != -1){ found = true; DMSG1(ctx, DINFO, "parse_plugin_command: %s found in restore parameters\n", parser.argk[i]); if (render_param(ctx, *param, ini.items[k].handler, parser.argk[i], ini.items[k].val) != bRC_OK){ delete(param); return bRC_Error; } params.append(param); parargc--; } /* check if param overloaded above */ if (!found){ if (parser.argv[i]){ Mmsg(*param, "%s=%s\n", parser.argk[i], parser.argv[i]); params.append(param); } else { Mmsg(*param, "%s=1\n", parser.argk[i]); params.append(param); } } /* param is always ended with '\n' */ DMSG(ctx, DINFO, "Param: %s", param); /* scan for abort_on_error parameter */ if (strcasecmp(parser.argk[i], "abort_on_error") == 0){ /* found, so check the value if provided, I only check the first char */ if (parser.argv[i] && *parser.argv[i] == '0'){ backend.ctx->clear_abort_on_error(); } else { backend.ctx->set_abort_on_error(); } DMSG1(ctx, DINFO, "abort_on_error found: %s\n", backend.ctx->is_abort_on_error() ? 
"True" : "False"); } /* scan for listing parameter, so the estimate job should be executed as a Listing procedure */ if (strcasecmp(parser.argk[i], "listing") == 0){ /* found, so check the value if provided */ if (parser.argv[i]){ listing = Listing; DMSG0(ctx, DINFO, "listing procedure param found\n"); } } /* scan for query parameter, so the estimate job should be executed as a QueryParam procedure */ if (strcasecmp(parser.argk[i], "query") == 0){ /* found, so check the value if provided */ if (parser.argv[i]){ listing = Query; DMSG0(ctx, DINFO, "query procedure param found\n"); } } } /* check what was missing in plugin command but get from ini file */ if (argc < parargc){ for (int k = 0; ini.items[k].name; k++){ if (ini.items[k].found && !check_plugin_param(ini.items[k].name, ¶ms)){ param = new POOL_MEM(PM_FNAME); DMSG1(ctx, DINFO, "parse_plugin_command: %s from restore parameters\n", ini.items[k].name); if (render_param(ctx, *param, ini.items[k].handler, (char*)ini.items[k].name, ini.items[k].val) != bRC_OK){ delete(param); return bRC_Error; } params.append(param); /* param is always ended with '\n' */ DMSG(ctx, DINFO, "Param: %s", param); } } } return bRC_OK; } /* * Parse a Restore Object saved during backup and modified by user during restore. * Every RO received will generate a dedicated backend context which is used * by bEventRestoreCommand to handle backend parameters for restore. * * in: * bpContext - Bacula Plugin context structure * rop - a restore object structure to parse * out: * bRC_OK - on success * bRC_Error - on error */ bRC PLUGINCLASS::handle_plugin_restoreobj(bpContext *ctx, restore_object_pkt *rop) { if (!rop){ return bRC_OK; /* end of rop list */ } DMSG2(ctx, DDEBUG, "handle_plugin_restoreobj: %s %d\n", rop->object_name, rop->object_type); // if (strcmp(rop->object_name, INI_RESTORE_OBJECT_NAME) == 0) { if (strcmp(rop->object_name, INI_RESTORE_OBJECT_NAME) == 0 && (rop->object_type == FT_PLUGIN_CONFIG || rop->object_type == FT_PLUGIN_CONFIG_FILLED)) { DMSG(ctx, DINFO, "INIcmd: %s\n", rop->plugin_name); ini.clear_items(); if (!ini.dump_string(rop->object, rop->object_len)) { DMSG0(ctx, DERROR, "ini->dump_string failed\n"); JMSG0(ctx, M_FATAL, "Unable to parse user set restore configuration.\n"); return bRC_Error; } ini.register_items(plugin_items_dump, sizeof(struct ini_items)); if (!ini.parse(ini.out_fname)) { DMSG0(ctx, DERROR, "ini->parse failed\n"); JMSG0(ctx, M_FATAL, "Unable to parse user set restore configuration.\n"); return bRC_Error; } for (int i = 0; ini.items[i].name; i++) { if (ini.items[i].found){ if (ini.items[i].handler == ini_store_str){ DMSG2(ctx, DINFO, "INI: %s = %s\n", ini.items[i].name, ini.items[i].val.strval); } else if (ini.items[i].handler == ini_store_int64){ DMSG2(ctx, DINFO, "INI: %s = %lld\n", ini.items[i].name, ini.items[i].val.int64val); } else if (ini.items[i].handler == ini_store_bool){ DMSG2(ctx, DINFO, "INI: %s = %s\n", ini.items[i].name, ini.items[i].val.boolval ? 
"True" : "False"); } else { DMSG1(ctx, DERROR, "INI: unsupported parameter handler for: %s\n", ini.items[i].name); JMSG1(ctx, M_FATAL, "INI: unsupported parameter handler for: %s\n", ini.items[i].name); return bRC_Error; } } } return bRC_OK; } // handle any other RO restore restore_object_class *ropclass = new restore_object_class; ropclass->sent = false; pm_strcpy(ropclass->plugin_name, rop->plugin_name); pm_strcpy(ropclass->object_name, rop->object_name); ropclass->length = rop->object_len; pm_memcpy(ropclass->data, rop->object, rop->object_len); restoreobject_list.append(ropclass); DMSG2(ctx, DINFO, "ROclass saved for later: %s %d\n", ropclass->object_name.c_str(), ropclass->length); return bRC_OK; } /* * Run external backend script/application using BACKEND_CMD compile variable. * It will run the backend in current backend context (backendctx) and should * be called when a new backend is really required only. * * in: * bpContext - for Bacula debug and jobinfo messages * out: * bRC_OK - when backend spawned successfully * bRC_Error - when Plugin cannot run backend */ bRC PLUGINCLASS::run_backend(bpContext *ctx) { BPIPE *bp; if (access(backend_cmd.c_str(), X_OK) < 0){ berrno be; DMSG2(ctx, DERROR, "Unable to access backend: %s Err=%s\n", backend_cmd.c_str(), be.bstrerror()); JMSG2(ctx, M_FATAL, "Unable to access backend: %s Err=%s\n", backend_cmd.c_str(), be.bstrerror()); return bRC_Error; } DMSG(ctx, DINFO, "Executing: %s\n", backend_cmd.c_str()); bp = open_bpipe(backend_cmd.c_str(), 0, "rwe"); if (bp == NULL){ berrno be; DMSG(ctx, DERROR, "Unable to run backend. Err=%s\n", be.bstrerror()); JMSG(ctx, M_FATAL, "Unable to run backend. Err=%s\n", be.bstrerror()); return bRC_Error; } /* setup communication channel */ backend.ctx->set_bpipe(bp); DMSG(ctx, DINFO, "Backend executed at PID=%i\n", bp->worker_pid); return bRC_OK; } bRC backendctx_finish_func(PTCOMM *ptcomm, void *cp) { bpContext * ctx = (bpContext*)cp; bRC status = bRC_OK; POOL_MEM cmd(PM_FNAME); pm_strcpy(cmd, "FINISH\n"); if (!ptcomm->write_command(ctx, cmd.addr())){ status = bRC_Error; } if (!ptcomm->read_ack(ctx)){ status = bRC_Error; } return status; } /* * Sends a "FINISH" command to all executed backends indicating the end of * "Restore loop". * * in: * bpContext - for Bacula debug and jobinfo messages * out: * bRC_OK - when operation was successful * bRC_Error - on any error */ bRC PLUGINCLASS::signal_finish_all_backends(bpContext *ctx) { return backend.foreach_command_status(backendctx_finish_func, ctx); } /* * Send end job command to backend. * It terminates the backend when command sending was unsuccessful, as it is * the very last procedure in protocol. * * in: * bpContext - for Bacula debug and jobinfo messages * ptcomm - backend context * out: * bRC_OK - when send command was successful * bRC_Error - on any error */ bRC send_endjob(bpContext *ctx, PTCOMM *ptcomm) { bRC status = bRC_OK; POOL_MEM cmd(PM_FNAME); pm_strcpy(cmd, "END\n"); if (!ptcomm->write_command(ctx, cmd.c_str())){ /* error */ status = bRC_Error; } else { if (!ptcomm->read_ack(ctx)){ DMSG0(ctx, DERROR, "Wrong backend response to JobEnd command.\n"); JMSG0(ctx, ptcomm->jmsg_err_level(), "Wrong backend response to JobEnd command.\n"); status = bRC_Error; } ptcomm->signal_term(ctx); } return status; } /* * Terminates the current backend pointed by ptcomm context. * The termination sequence consist of "End Job" protocol procedure and real * backend process termination including communication channel close. 
* When we'll get an error during "End Job" procedure then we inform the user * and terminate the backend as usual without unnecessary formalities. :) * * in: * bpContext - for Bacula debug and jobinfo messages * out: * bRC_OK - when backend termination was successful, i.e. no error in * "End Job" procedure * bRC_Error - when backend termination encountered an error. */ bRC backendctx_jobend_func(PTCOMM *ptcomm, void *cp) { bpContext *ctx = (bpContext *)cp; bRC status = bRC_OK; if (send_endjob(ctx, ptcomm) != bRC_OK){ /* error in end job */ DMSG0(ctx, DERROR, "Error in EndJob.\n"); status = bRC_Error; } int pid = ptcomm->get_backend_pid(); DMSG(ctx, DINFO, "Terminate backend at PID=%d\n", pid) ptcomm->terminate(ctx); return status; } /* * Terminate all executed backends. * Check PLUGINCLASS::terminate_current_backend for more info. * * in: * bpContext - for Bacula debug and jobinfo messages * out: * bRC_OK - when all backends termination was successful * bRC_Error - when any backend termination encountered an error */ bRC PLUGINCLASS::terminate_all_backends(bpContext *ctx) { return backend.foreach_command_status(backendctx_jobend_func, ctx); } /** * @brief Callback used for sending a `cancel event` to the selected backend * * @param ptcomm the backend communication object * @param cp a bpContext - for Bacula debug and jobinfo messages * @return bRC bRC_OK when success */ bRC backendctx_cancel_func(PTCOMM *ptcomm, void *cp) { bpContext * ctx = (bpContext*)cp; // cancel procedure // 1. get backend pid // 2. send SIGUSR1 to backend pid pid_t pid = ptcomm->get_backend_pid(); DMSG(ctx, DINFO, "Inform backend about Cancel at PID=%d ...\n", pid) kill(pid, SIGUSR1); return bRC_OK; } /** * @brief Send `cancel event` to every backend and terminate it. * * @param ctx bpContext - for Bacula debug and jobinfo messages * @return bRC bRC_OK when success, bRC_Error if not */ bRC PLUGINCLASS::cancel_all_backends(bpContext *ctx) { PLUGINCLASS *pctx = (PLUGINCLASS *)ctx->pContext; // the cancel procedure: for all backends execute cancel func return pctx->backend.foreach_command_status(backendctx_cancel_func, ctx); } /* * Send a "Job Info" protocol procedure parameters. 
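 *
 * Schematic example of the exchange for a Full backup job (the literal
 * command strings come from the code below; the Name/JobID placeholders are
 * illustrative, and the end-of-data/ack framing is handled by PTCOMM):
 *
 *    plugin -> backend:  Job
 *    plugin -> backend:  Name=<JobName reported by Bacula>
 *    plugin -> backend:  JobID=<numeric job id>
 *    plugin -> backend:  Type=<job type character passed by prepare_backend()>
 *    plugin -> backend:  Level=F
 *    plugin -> backend:  <end of data>
 *    backend -> plugin:  <ack>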
* * in: * bpContext - for Bacula debug and jobinfo messages * type - a char compliant with the protocol indicating what jobtype we run * out: * bRC_OK - when send job info was successful * bRC_Error - on any error */ bRC PLUGINCLASS::send_jobinfo(bpContext *ctx, char type) { int32_t rc; POOL_MEM cmd; char lvl; /* we will be sending Job Info data */ pm_strcpy(cmd, "Job\n"); rc = backend.ctx->write_command(ctx, cmd); if (rc < 0){ /* error */ return bRC_Error; } /* required parameters */ Mmsg(cmd, "Name=%s\n", JobName); rc = backend.ctx->write_command(ctx, cmd); if (rc < 0){ /* error */ return bRC_Error; } Mmsg(cmd, "JobID=%i\n", JobId); rc = backend.ctx->write_command(ctx, cmd); if (rc < 0){ /* error */ return bRC_Error; } Mmsg(cmd, "Type=%c\n", type); rc = backend.ctx->write_command(ctx, cmd); if (rc < 0){ /* error */ return bRC_Error; } /* optional parameters */ if (mode != RESTORE){ switch (mode){ case BACKUP_FULL: lvl = 'F'; break; case BACKUP_DIFF: lvl = 'D'; break; case BACKUP_INCR: lvl = 'I'; break; default: lvl = 0; } if (lvl){ Mmsg(cmd, "Level=%c\n", lvl); rc = backend.ctx->write_command(ctx, cmd); if (rc < 0){ /* error */ return bRC_Error; } } } if (since){ Mmsg(cmd, "Since=%ld\n", since); rc = backend.ctx->write_command(ctx, cmd); if (rc < 0){ /* error */ return bRC_Error; } } if (where){ Mmsg(cmd, "Where=%s\n", where); rc = backend.ctx->write_command(ctx, cmd); if (rc < 0){ /* error */ return bRC_Error; } } if (regexwhere){ Mmsg(cmd, "RegexWhere=%s\n", regexwhere); rc = backend.ctx->write_command(ctx, cmd); if (rc < 0){ /* error */ return bRC_Error; } } if (replace){ Mmsg(cmd, "Replace=%c\n", replace); rc = backend.ctx->write_command(ctx, cmd); if (rc < 0){ /* error */ return bRC_Error; } } if (CUSTOMNAMESPACE){ Mmsg(cmd, "Namespace=%s\n", PLUGINNAMESPACE); rc = backend.ctx->write_command(ctx, cmd); if (rc < 0){ /* error */ return bRC_Error; } } if (CUSTOMPREVJOBNAME && prevjobname){ Mmsg(cmd, "PrevJobName=%s\n", prevjobname); rc = backend.ctx->write_command(ctx, cmd); if (rc < 0){ /* error */ return bRC_Error; } } backend.ctx->signal_eod(ctx); if (!backend.ctx->read_ack(ctx)){ DMSG0(ctx, DERROR, "Wrong backend response to Job command.\n"); JMSG0(ctx, backend.ctx->jmsg_err_level(), "Wrong backend response to Job command.\n"); return bRC_Error; } return bRC_OK; } /* * Send a "Plugin Parameters" protocol procedure data. * It parse plugin command and ini parameters before sending it to backend. 
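 *
 * Schematic example of the "Plugin Parameters" block (the parameter name
 * "host" is hypothetical; every line is newline terminated and checked
 * against valid_params[] before it is sent):
 *
 *    plugin -> backend:  Params
 *    plugin -> backend:  host=backup.example.com
 *    plugin -> backend:  abort_on_error=1
 *    plugin -> backend:  Accurate=1        (only with ACCURATEPLUGINPARAMETER and an accurate job)
 *    plugin -> backend:  <end of data>
 *    backend -> plugin:  <ack>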
* * in: * bpContext - for Bacula debug and jobinfo messages * command - a Plugin command for a job * out: * bRC_OK - when send parameters was successful * bRC_Error - on any error */ bRC PLUGINCLASS::send_parameters(bpContext *ctx, char *command) { int32_t rc; bRC status = bRC_OK; POOL_MEM cmd(PM_FNAME); // alist params(16, not_owned_by_alist); smart_alist params; POOL_MEM *param; bool found; #ifdef DEVELOPER static const char *regress_valid_params[] = { // add special test_backend commands to handle regression tests "regress_error_plugin_params", "regress_error_start_job", "regress_error_backup_no_files", "regress_error_backup_stderr", "regress_error_estimate_stderr", "regress_error_listing_stderr", "regress_error_restore_stderr", "regress_backup_plugin_objects", "regress_backup_other_file", "regress_error_backup_abort", "regress_metadata_support", "regress_standard_error_backup", "regress_cancel_backup", "regress_cancel_restore", NULL, }; #endif /* parse and prepare final backend plugin params */ status = parse_plugin_command(ctx, command, params); if (status != bRC_OK){ /* error */ return status; } /* send backend info that parameters are coming */ pm_strcpy(cmd, "Params\n"); rc = backend.ctx->write_command(ctx, cmd); if (rc < 0){ /* error */ return bRC_Error; } /* send all prepared parameters */ foreach_alist(param, ¶ms){ // check valid parameter list found = false; for (int a = 0; valid_params[a] != NULL; a++ ) { DMSG3(ctx, DVDEBUG, "=> '%s' vs '%s' [%d]\n", param, valid_params[a], strlen(valid_params[a])); if (strncasecmp(param->c_str(), valid_params[a], strlen(valid_params[a])) == 0){ found = true; break; } } #ifdef DEVELOPER if (!found){ // now handle regression tests commands for (int a = 0; regress_valid_params[a] != NULL; a++ ){ DMSG3(ctx, DVDEBUG, "regress=> '%s' vs '%s' [%d]\n", param, regress_valid_params[a], strlen(regress_valid_params[a])); if (strncasecmp(param->c_str(), regress_valid_params[a], strlen(regress_valid_params[a])) == 0){ found = true; break; } } } #endif // signal error if required if (!found) { pm_strcpy(cmd, param->c_str()); strip_trailing_junk(cmd.c_str()); DMSG1(ctx, DERROR, "Unknown parameter %s in Plugin command.\n", cmd.c_str()); JMSG1(ctx, M_ERROR, "Unknown parameter %s in Plugin command.\n", cmd.c_str()); } rc = backend.ctx->write_command(ctx, *param); if (rc < 0) { /* error */ return bRC_Error; } } // now send accurate parameter if requested and available if (ACCURATEPLUGINPARAMETER && accurate_mode) { pm_strcpy(cmd, "Accurate=1\n"); rc = backend.ctx->write_command(ctx, cmd); if (rc < 0) { /* error */ return bRC_Error; } } // signal end of parameters block backend.ctx->signal_eod(ctx); /* ack Params command */ if (!backend.ctx->read_ack(ctx)){ DMSG0(ctx, DERROR, "Wrong backend response to Params command.\n"); JMSG0(ctx, backend.ctx->jmsg_err_level(), "Wrong backend response to Params command.\n"); return bRC_Error; } return bRC_OK; } /* * Send start job command pointed by command variable. 
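 *
 * For example, send_startbackup() below reduces to:
 *
 *    plugin -> backend:  BackupStart
 *    backend -> plugin:  <ack>
 *
 * and a missing or wrong acknowledgement is reported as a job error.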
* * in: * bpContext - for Bacula debug and jobinfo messages * command - the command string to send * out: * bRC_OK - when send command was successful * bRC_Error - on any error */ bRC PLUGINCLASS::send_startjob(bpContext *ctx, const char *command) { POOL_MEM cmd; pm_strcpy(cmd, command); if (backend.ctx->write_command(ctx, cmd) < 0){ /* error */ return bRC_Error; } if (!backend.ctx->read_ack(ctx)){ strip_trailing_newline(cmd.c_str()); DMSG(ctx, DERROR, "Wrong backend response to %s command.\n", cmd.c_str()); JMSG(ctx, backend.ctx->jmsg_err_level(), "Wrong backend response to %s command.\n", cmd.c_str()); return bRC_Error; } return bRC_OK; } /* * Send "BackupStart" protocol command. * more info at PLUGINCLASS::send_startjob * * in: * bpContext - for Bacula debug and jobinfo messages * out: * bRC_OK - when send command was successful * bRC_Error - on any error */ bRC PLUGINCLASS::send_startbackup(bpContext *ctx) { return send_startjob(ctx, "BackupStart\n"); } /* * Send "EstimateStart" protocol command. * more info at PLUGINCLASS::send_startjob * * in: * bpContext - for Bacula debug and jobinfo messages * out: * bRC_OK - when send command was successful * bRC_Error - on any error */ bRC PLUGINCLASS::send_startestimate(bpContext *ctx) { return send_startjob(ctx, "EstimateStart\n"); } /* * Send "ListingStart" protocol command. * more info at PLUGINCLASS::send_startjob * * in: * bpContext - for Bacula debug and jobinfo messages * out: * bRC_OK - when send command was successful * bRC_Error - on any error */ bRC PLUGINCLASS::send_startlisting(bpContext *ctx) { return send_startjob(ctx, "ListingStart\n"); } /* * Send "QueryStart" protocol command. * more info at PLUGIN::send_startjob * * in: * bpContext - for Bacula debug and jobinfo messages * out: * bRC_OK - when send command was successful * bRC_Error - on any error */ bRC PLUGINCLASS::send_startquery(bpContext *ctx) { return send_startjob(ctx, "QueryStart\n"); } /* * Send "RestoreStart" protocol command. * more info at PLUGINCLASS::send_startjob * * in: * bpContext - for Bacula debug and jobinfo messages * out: * bRC_OK - when send command was successful * bRC_Error - on any error */ bRC PLUGINCLASS::send_startrestore(bpContext *ctx) { int32_t rc; POOL_MEM cmd(PM_FNAME); const char * command = "RestoreStart\n"; POOL_MEM extpipename(PM_FNAME); pm_strcpy(cmd, command); rc = backend.ctx->write_command(ctx, cmd); if (rc < 0){ /* error */ return bRC_Error; } if (backend.ctx->read_command(ctx, cmd) < 0){ DMSG(ctx, DERROR, "Wrong backend response to %s command.\n", command); JMSG(ctx, backend.ctx->jmsg_err_level(), "Wrong backend response to %s command.\n", command); return bRC_Error; } if (backend.ctx->is_eod()){ /* got EOD so the backend is ready for restore */ return bRC_OK; } /* here we expect a PIPE: command only */ if (scan_parameter_str(cmd, "PIPE:", extpipename)){ /* got PIPE: */ DMSG(ctx, DINFO, "PIPE:%s\n", extpipename.c_str()); backend.ctx->set_extpipename(extpipename.c_str()); /* TODO: decide if plugin should verify if extpipe is available */ pm_strcpy(cmd, "OK\n"); rc = backend.ctx->write_command(ctx, cmd); if (rc < 0){ /* error */ return bRC_Error; } return bRC_OK; } return bRC_Error; } /* * Switches current backend context or executes new one when required. * The backend (application path) to execute is set in BACKEND_CMD compile * variable and handled by PLUGINCLASS::run_backend() method. 
Just before new * backend execution the method search for already spawned backends which * handles the same Plugin command and when found the current backend context * is switched to already available on list. * * in: * bpContext - for Bacula debug and jobinfo messages * command - the Plugin command for which we execute a backend * out: * bRC_OK - success and backendctx has a current backend context and Plugin * should send an initialization procedure * bRC_Max - when was already prepared and initialized, so no * reinitialization required * bRC_Error - error in switching or running the backend */ bRC PLUGINCLASS::switch_or_run_backend(bpContext *ctx, char *command) { DMSG0(ctx, DINFO, "Switch or run Backend.\n"); backend.switch_command(command); /* check if we have the backend with the same command already */ if (backend.ctx->is_open()) { /* and its open, so skip running the new */ DMSG0(ctx, DINFO, "Backend already prepared.\n"); return bRC_Max; } // now execute a backend if (run_backend(ctx) != bRC_OK){ return bRC_Error; } return bRC_OK; } /* * This is the main method for handling events generated by Bacula. * The behavior of the method depends on event type generated, but there are * some events which does nothing, just return with bRC_OK. Every event is * tracked in debug trace file to verify the event flow during development. * * in: * bpContext - for Bacula debug and jobinfo messages * event - a Bacula event structure * value - optional event value * out: * bRC_OK - in most cases signal success/no error * bRC_Error - in most cases signal error * - depend on Bacula Plugin API if applied */ bRC PLUGINCLASS::handlePluginEvent(bpContext *ctx, bEvent *event, void *value) { // extract original plugin context, basically it should be `this` PLUGINCLASS *pctx = (PLUGINCLASS *)ctx->pContext; // this ensures that handlePluginEvent is thread safe for extracted pContext // lock_guard lg(&pctx->mutex); - removed on request if (job_cancelled) { return bRC_Error; } switch (event->eventType) { case bEventJobStart: DMSG(ctx, D3, "bEventJobStart value=%s\n", NPRT((char *)value)); getBaculaVar(bVarJobId, (void *)&JobId); getBaculaVar(bVarJobName, (void *)&JobName); if (CUSTOMPREVJOBNAME){ getBaculaVar(bVarPrevJobName, (void *)&prevjobname); } break; case bEventJobEnd: DMSG(ctx, D3, "bEventJobEnd value=%s\n", NPRT((char *)value)); return terminate_all_backends(ctx); case bEventLevel: char lvl; lvl = (char)((intptr_t) value & 0xff); DMSG(ctx, D2, "bEventLevel='%c'\n", lvl); switch (lvl) { case 'F': DMSG0(ctx, D2, "backup level = Full\n"); mode = BACKUP_FULL; break; case 'I': DMSG0(ctx, D2, "backup level = Incr\n"); mode = BACKUP_INCR; break; case 'D': DMSG0(ctx, D2, "backup level = Diff\n"); mode = BACKUP_DIFF; break; default: DMSG0(ctx, D2, "unsupported backup level!\n"); return bRC_Error; } break; case bEventSince: since = (time_t) value; DMSG(ctx, D2, "bEventSince=%ld\n", (intptr_t) since); break; case bEventStartBackupJob: DMSG(ctx, D3, "bEventStartBackupJob value=%s\n", NPRT((char *)value)); break; case bEventEndBackupJob: DMSG(ctx, D2, "bEventEndBackupJob value=%s\n", NPRT((char *)value)); break; case bEventStartRestoreJob: DMSG(ctx, DINFO, "StartRestoreJob value=%s\n", NPRT((char *)value)); getBaculaVar(bVarWhere, &where); DMSG(ctx, DINFO, "Where=%s\n", NPRT(where)); getBaculaVar(bVarRegexWhere, ®exwhere); DMSG(ctx, DINFO, "RegexWhere=%s\n", NPRT(regexwhere)); getBaculaVar(bVarReplace, &replace); DMSG(ctx, DINFO, "Replace=%c\n", replace); mode = RESTORE; break; case bEventEndRestoreJob: DMSG(ctx, 
DINFO, "bEventEndRestoreJob value=%s\n", NPRT((char *)value)); return signal_finish_all_backends(ctx); /* Plugin command e.g. plugin = :parameters */ case bEventEstimateCommand: DMSG(ctx, D1, "bEventEstimateCommand value=%s\n", NPRT((char *)value)); estimate = true; return prepare_backend(ctx, BACKEND_JOB_INFO_ESTIMATE, (char*)value); /* Plugin command e.g. plugin = :parameters */ case bEventBackupCommand: DMSG(ctx, D2, "bEventBackupCommand value=%s\n", NPRT((char *)value)); pluginconfigsent = false; return prepare_backend(ctx, BACKEND_JOB_INFO_BACKUP, (char*)value); /* Plugin command e.g. plugin = :parameters */ case bEventRestoreCommand: DMSG(ctx, D2, "bEventRestoreCommand value=%s\n", NPRT((char *)value)); return prepare_backend(ctx, BACKEND_JOB_INFO_RESTORE, (char*)value); /* Plugin command e.g. plugin = :parameters */ case bEventPluginCommand: DMSG(ctx, D2, "bEventPluginCommand value=%s\n", NPRT((char *)value)); getBaculaVar(bVarAccurate, (void *)&accurate_mode); if (isourplugincommand(PLUGINPREFIX, (char*)value) && !backend_available) { DMSG2(ctx, DERROR, "Unable to use backend: %s Err=%s\n", backend_cmd.c_str(), backend_error.c_str()); JMSG2(ctx, M_FATAL, "Unable to use backend: %s Err=%s\n", backend_cmd.c_str(), backend_error.c_str()); return bRC_Error; } break; case bEventOptionPlugin: case bEventHandleBackupFile: if (isourplugincommand(PLUGINPREFIX, (char*)value)){ DMSG0(ctx, DERROR, "Invalid handle Option Plugin called!\n"); JMSG2(ctx, M_FATAL, "The %s plugin doesn't support the Option Plugin configuration.\n" "Please review your FileSet and move the Plugin=%s" "... command into the Include {} block.\n", PLUGINNAME, PLUGINPREFIX); return bRC_Error; } break; case bEventEndFileSet: DMSG(ctx, D3, "bEventEndFileSet value=%s\n", NPRT((char *)value)); break; case bEventRestoreObject: /* Restore Object handle - a plugin configuration for restore and user supplied parameters */ if (!value){ DMSG0(ctx, DINFO, "End restore objects\n"); break; } DMSG(ctx, D2, "bEventRestoreObject value=%p\n", value); return handle_plugin_restoreobj(ctx, (restore_object_pkt *) value); case bEventCancelCommand: DMSG2(ctx, D3, "bEventCancelCommand self = %p pctx = %p\n", this, pctx); // TODO: PETITION: Our plugin (RHV WhiteBearSolutions) search the packet E CANCEL. // TODO: If you modify this behaviour, please you notify us. // TODO: RPK[20210623]: The information about a new procedure was sent to Eric pctx->job_cancelled = true; return cancel_all_backends(ctx); default: // enabled only for Debug DMSG2(ctx, D2, "Unknown event: %s (%d) \n", eventtype2str(event), event->eventType); } return bRC_OK; } /* * Make a real data read from the backend and checks for EOD. * * in: * bpContext - for Bacula debug and jobinfo messages * io - Bacula Plugin API I/O structure for I/O operations * out: * bRC_OK - when successful * bRC_Error - on any error */ bRC PLUGINCLASS::perform_read_data(bpContext *ctx, struct io_pkt *io) { int rc; if (nodata){ io->status = 0; return bRC_OK; } rc = backend.ctx->read_data_fixed(ctx, io->buf, io->count); if (rc < 0){ io->status = rc; io->io_errno = EIO; return bRC_Error; } io->status = rc; if (backend.ctx->is_eod()){ // TODO: we signal EOD as rc=0, so no need to explicity check for EOD, right? io->status = 0; } return bRC_OK; } /* * Make a real write data to the backend. 
* * in: * bpContext - for Bacula debug and jobinfo messages * io - Bacula Plugin API I/O structure for I/O operations * out: * bRC_OK - when successful * bRC_Error - on any error */ bRC PLUGINCLASS::perform_write_data(bpContext *ctx, struct io_pkt *io) { int rc; POOL_MEM cmd(PM_FNAME); /* check if DATA was sent */ if (nodata){ pm_strcpy(cmd, "DATA\n"); rc = backend.ctx->write_command(ctx, cmd.c_str()); if (rc < 0){ /* error */ io->status = rc; io->io_errno = rc; return bRC_Error; } /* DATA command sent */ nodata = false; } DMSG1(ctx, DVDEBUG, "perform_write_data: %d\n", io->count); rc = backend.ctx->write_data(ctx, io->buf, io->count); io->status = rc; if (rc < 0){ io->io_errno = rc; return bRC_Error; } nodata = false; return bRC_OK; } /* * Handle protocol "DATA" command from backend during backup loop. * It handles a "no data" flag when saved file contains no data to * backup (empty file). * * in: * bpContext - for Bacula debug and jobinfo messages * io - Bacula Plugin API I/O structure for I/O operations * out: * bRC_OK - when successful * bRC_Error - on any error * io->status, io->io_errno - set to error on any error */ bRC PLUGINCLASS::perform_backup_open(bpContext *ctx, struct io_pkt *io) { int rc; POOL_MEM cmd(PM_FNAME); /* expecting DATA command */ nodata = false; rc = backend.ctx->read_command(ctx, cmd); if (backend.ctx->is_eod()){ /* no data for file */ nodata = true; } else // expect no error and 'DATA' starting packet if (rc < 0 || !bstrcmp(cmd.c_str(), "DATA")){ io->status = rc; io->io_errno = EIO; openerror = backend.ctx->is_fatal() ? false : true; return bRC_Error; } return bRC_OK; } /* * Signal the end of data to restore and verify acknowledge from backend. * * in: * bpContext - for Bacula debug and jobinfo messages * io - Bacula Plugin API I/O structure for I/O operations * out: * bRC_OK - when successful * bRC_Error - on any error */ bRC PLUGINCLASS::perform_write_end(bpContext *ctx, struct io_pkt *io) { if (!nodata){ /* signal end of data to restore and get ack */ if (!backend.ctx->send_ack(ctx)){ io->status = -1; io->io_errno = EPIPE; return bRC_Error; } } if (last_type == FT_DIREND) { struct xacl_pkt xacl; if (acldatalen > 0) { xacl.count = acldatalen; xacl.content = acldata.c_str(); bRC status = perform_write_acl(ctx, &xacl); if (status != bRC_OK){ return status; } } if (xattrdatalen > 0) { xacl.count = xattrdatalen; xacl.content = xattrdata.c_str(); bRC status = perform_write_xattr(ctx, &xacl); if (status != bRC_OK){ return status; } } } return bRC_OK; } /* * Reads ACL data from backend during backup. Save it for handleXACLdata from * Bacula. As we do not know if a backend will send the ACL data or not, we * cannot wait to read this data until handleXACLdata will be called. * TODO: The method has a limitation and accept up to PM_BSOCK acl data to save * which is about 64kB. It should be sufficient for virtually all acl data * we can imagine. But when a backend developer could expect a larger data * to save he should rewrite perform_read_acl() method. 
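 *
 * Expected backend sequence during the backup metadata phase (the "ACL"
 * keyword is recognized by perform_read_metacommands(), which then calls
 * this method):
 *
 *    backend -> plugin:  ACL
 *    backend -> plugin:  <acl data, a single packet of up to about 64kB>
 *    backend -> plugin:  <end of data>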
* * in: * bpContext - for Bacula debug and jobinfo messages * out: * bRC_OK - when ACL data was read successfully and this.readacl set to true * bRC_Error - on any error during acl data read */ bRC PLUGINCLASS::perform_read_acl(bpContext *ctx) { DMSG0(ctx, DINFO, "perform_read_acl\n"); acldatalen = backend.ctx->read_data(ctx, acldata); if (acldatalen < 0){ DMSG0(ctx, DERROR, "Cannot read ACL data from backend.\n"); return bRC_Error; } DMSG1(ctx, DINFO, "readACL: %i\n", acldatalen); if (!backend.ctx->read_ack(ctx)){ /* should get EOD */ DMSG0(ctx, DERROR, "Protocol error, should get EOD.\n"); return bRC_Error; } readacl = true; return bRC_OK; } /* * Reads XATTR data from backend during backup. Save it for handleXACLdata from * Bacula. As we do not know if a backend will send the XATTR data or not, we * cannot wait to read this data until handleXACLdata will be called. * TODO: The method has a limitation and accept up to PM_BSOCK xattr data to save * which is about 64kB. It should be sufficient for virtually all xattr data * we can imagine. But when a backend developer could expect a larger data * to save he should rewrite perform_read_xattr() method. * * in: * bpContext - for Bacula debug and jobinfo messages * out: * bRC_OK - when XATTR data was read successfully and this.readxattr set * to true * bRC_Error - on any error during acl data read */ bRC PLUGINCLASS::perform_read_xattr(bpContext *ctx) { DMSG0(ctx, DINFO, "perform_read_xattr\n"); xattrdatalen = backend.ctx->read_data(ctx, xattrdata); if (xattrdatalen < 0){ DMSG0(ctx, DERROR, "Cannot read XATTR data from backend.\n"); return bRC_Error; } DMSG1(ctx, DINFO, "readXATTR: %i\n", xattrdatalen); if (!backend.ctx->read_ack(ctx)){ /* should get EOD */ DMSG0(ctx, DERROR, "Protocol error, should get EOD.\n"); return bRC_Error; } readxattr = true; return bRC_OK; } /** * @brief Reads metadata info from backend and adds it as a metadata packet. * * @param ctx for Bacula debug and jobinfo messages * @param type detected Metadata type * @param sp save packet * @return bRC bRC_OK when success, bRC_Error when some error */ bRC PLUGINCLASS::perform_read_metadata_info(bpContext *ctx, metadata_type type, struct save_pkt *sp) { POOL_MEM data(PM_MESSAGE); DMSG0(ctx, DINFO, "perform_read_metadata_info\n"); int len = backend.ctx->read_data(ctx, data); if (len < 0){ DMSG1(ctx, DERROR, "Cannot read METADATA(%i) information from backend.\n", type); return bRC_Error; } DMSG1(ctx, DINFO, "read METADATA info len: %i\n", len); if (!backend.ctx->read_ack(ctx)){ /* should get EOD */ DMSG0(ctx, DERROR, "Protocol error, should get EOD.\n"); return bRC_Error; } // Bacula API for metadata requires that a plugin // handle metadata buffer allocation POOLMEM *ptr = (POOLMEM *)bmalloc(len); memcpy(ptr, data.addr(), len); // add it to the list for reference to not lot it metadatas_list.append(ptr); metadatas.add_packet(type, len, ptr); sp->plug_meta = &metadatas; return bRC_OK; } /** * @brief Does metadata command scan and map to metadata types. 
* * @param cmd a command string read from backend * @return metadata_type returned from map */ metadata_type PLUGINCLASS::scan_metadata_type(bpContext *ctx, const POOL_MEM &cmd) { DMSG1(ctx, DDEBUG, "scan_metadata_type checking: %s\n", cmd.c_str()); for (int i = 0; plugin_metadata_map[i].command != NULL; i++) { if (bstrcmp(cmd.c_str(), plugin_metadata_map[i].command)){ DMSG2(ctx, DDEBUG, "match: %s => %d\n", plugin_metadata_map[i].command, plugin_metadata_map[i].type); return plugin_metadata_map[i].type; } } return plugin_meta_invalid; } const char * PLUGINCLASS::prepare_metadata_type(metadata_type type) { for (int i = 0; plugin_metadata_map[i].command != NULL; i++){ if (plugin_metadata_map[i].type == type){ return plugin_metadata_map[i].command; } } return "METADATA_STREAM\n"; } /* * Sends ACL data from restore stream to backend. * TODO: The method has a limitation and accept a single xacl_pkt call for * a single file. As the restored acl stream and records are the same as * was saved during backup, you can expect no more then a single PM_BSOCK * and about 64kB of acl data send to backend. * * in: * bpContext - for Bacula debug and jobinfo messages * xacl_pkt - the restored ACL data for backend * out: * bRC_OK - when ACL data was restored successfully * bRC_Error - on any error during acl data restore */ bRC PLUGINCLASS::perform_write_acl(bpContext* ctx, const xacl_pkt* xacl) { if (xacl->count > 0) { POOL_MEM cmd(PM_FNAME); /* send command ACL */ pm_strcpy(cmd, "ACL\n"); backend.ctx->write_command(ctx, cmd.c_str()); /* send acls data */ DMSG1(ctx, DINFO, "writeACL: %i\n", xacl->count); int rc = backend.ctx->write_data(ctx, xacl->content, xacl->count); if (rc < 0) { /* got some error */ return bRC_Error; } /* signal end of acls data to restore and get ack */ if (!backend.ctx->send_ack(ctx)) { return bRC_Error; } } return bRC_OK; } /* * Sends XATTR data from restore stream to backend. * TODO: The method has a limitation and accept a single xacl_pkt call for * a single file. As the restored acl stream and records are the same as * was saved during backup, you can expect no more then a single PM_BSOCK * and about 64kB of acl data send to backend. * * in: * bpContext - for Bacula debug and jobinfo messages * xacl_pkt - the restored XATTR data for backend * out: * bRC_OK - when XATTR data was restored successfully * bRC_Error - on any error during acl data restore */ bRC PLUGINCLASS::perform_write_xattr(bpContext* ctx, const xacl_pkt* xacl) { if (xacl->count > 0) { POOL_MEM cmd(PM_FNAME); /* send command XATTR */ pm_strcpy(cmd, "XATTR\n"); backend.ctx->write_command(ctx, cmd.c_str()); /* send xattrs data */ DMSG1(ctx, DINFO, "writeXATTR: %i\n", xacl->count); int rc = backend.ctx->write_data(ctx, xacl->content, xacl->count); if (rc < 0) { /* got some error */ return bRC_Error; } /* signal end of xattrs data to restore and get ack */ if (!backend.ctx->send_ack(ctx)) { return bRC_Error; } } return bRC_OK; } /* * The method works as a dispatcher for expected commands received from backend. * It handles a three commands associated with file attributes/metadata: * - FNAME:... - the next file to backup * - ACL - next data will be acl data, so perform_read_acl() * - XATTR - next data will be xattr data, so perform_read_xattr() * and additionally when no more files to backup it handles EOD. 
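 *
 * Commands recognized from the backend at this point (anything else is
 * treated as a protocol error):
 *
 *    FNAME:<file name>      - the next file to backup
 *    PLUGINOBJ:<name>       - the next object is a Plugin Object
 *    RESTOREOBJ:<name>      - the next object is a Restore Object
 *    CHECK:<file name>      - accurate mode checkChanges() query
 *    CHECKGET:<file name>   - accurate mode attributes query
 *    ACL / XATTR            - metadata for the current file follows
 *    FileIndex              - backend asks for the current file index
 *    <end of data>          - no more files to backup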
* * in: * bpContext - for Bacula debug and jobinfo messages * out: * bRC_OK - when plugin read the command, dispatched a work and setup flags * bRC_Error - on any error during backup */ bRC PLUGINCLASS::perform_read_metacommands(bpContext *ctx) { POOL_MEM cmd(PM_FNAME); DMSG0(ctx, DDEBUG, "perform_read_metacommands()\n"); // setup flags nextfile = readacl = readxattr = false; objectsent = false; // loop on metadata from backend or EOD which means no more files to backup while (true) { if (backend.ctx->read_command(ctx, cmd) > 0){ /* yup, should read FNAME, ACL or XATTR from backend, check which one */ DMSG(ctx, DDEBUG, "read_command(1): %s\n", cmd.c_str()); if (scan_parameter_str(cmd, "FNAME:", fname)){ /* got FNAME: */ nextfile = true; object = FileObject; return bRC_OK; } if (scan_parameter_str(cmd, "PLUGINOBJ:", fname)){ /* got Plugin Object header */ nextfile = true; object = PluginObject; // pluginobject = true; return bRC_OK; } if (scan_parameter_str(cmd, "RESTOREOBJ:", fname)){ /* got Restore Object header */ nextfile = true; object = RestoreObject; // restoreobject = true; return bRC_OK; } if (scan_parameter_str(cmd, "CHECK:", fname)){ /* got accurate check query */ perform_accurate_check(ctx); continue; } if (scan_parameter_str(cmd, "CHECKGET:", fname)){ /* got accurate get query */ perform_accurate_check_get(ctx); continue; } if (bstrcmp(cmd.c_str(), "ACL")){ /* got ACL header */ perform_read_acl(ctx); continue; } if (bstrcmp(cmd.c_str(), "XATTR")){ /* got XATTR header */ perform_read_xattr(ctx); continue; } if (bstrcmp(cmd.c_str(), "FileIndex")){ /* got FileIndex query */ perform_file_index_query(ctx); continue; } /* error in protocol */ DMSG(ctx, DERROR, "Protocol error, got unknown command: %s\n", cmd.c_str()); JMSG(ctx, M_FATAL, "Protocol error, got unknown command: %s\n", cmd.c_str()); return bRC_Error; } else { if (backend.ctx->is_fatal()){ /* raise up error from backend */ return bRC_Error; } if (backend.ctx->is_eod()){ /* no more files to backup */ DMSG0(ctx, DDEBUG, "No more files to backup from backend.\n"); return bRC_OK; } } } return bRC_Error; } /** * @brief Respond to the file index query command from backend. 
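 *
 * The reply is a single decimal number terminated by a newline: a backend
 * sending "FileIndex" while, say, file number 42 is being processed gets
 * back "42\n" (the value is obtained from bVarFileIndex).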
* * @param ctx bpContext - for Bacula debug and jobinfo messages * @return bRC bRC_OK when success, bRC_Error if not */ bRC PLUGINCLASS::perform_file_index_query(bpContext *ctx) { POOL_MEM cmd(PM_FNAME); int32_t fileindex; getBaculaVar(bVarFileIndex, (void *)&fileindex); Mmsg(cmd, "%d\n", fileindex); if (backend.ctx->write_command(ctx, cmd) < 0){ /* error */ return bRC_Error; } return bRC_OK; } /** * @brief * * @param ctx bpContext - for Bacula debug and jobinfo messages * @return bRC bRC_OK when success, bRC_Error if not */ bRC PLUGINCLASS::perform_accurate_check(bpContext *ctx) { if (strlen(fname.c_str()) == 0){ // input variable is not valid return bRC_Error; } DMSG0(ctx, DDEBUG, "perform_accurate_check()\n"); POOL_MEM cmd(PM_FNAME); struct save_pkt sp; memset(&sp, 0, sizeof(sp)); // supported sequence is `STAT` followed by `TSTAMP` if (backend.ctx->read_command(ctx, cmd) < 0) { // error return bRC_Error; } metaplugin::attributes::Status status = metaplugin::attributes::read_scan_stat_command(ctx, cmd, &sp); if (status == metaplugin::attributes::Status_OK) { if (backend.ctx->read_command(ctx, cmd) < 0) { // error return bRC_Error; } status = metaplugin::attributes::read_scan_tstamp_command(ctx, cmd, &sp); if (status == metaplugin::attributes::Status_OK) { // success we can perform accurate check for stat packet bRC rc = bRC_OK; // return 'OK' as a default if (accurate_mode) { sp.fname = fname.c_str(); rc = checkChanges(&sp); } else { if (!accurate_mode_err) { DMSG0(ctx, DERROR, "Backend CHECK command require accurate mode on!\n"); JMSG0(ctx, M_ERROR, "Backend CHECK command require accurate mode on!\n"); accurate_mode_err = true; } } POOL_MEM checkstatus(PM_NAME); Mmsg(checkstatus, "%s\n", rc == bRC_Seen ? "SEEN" : "OK"); DMSG1(ctx, DINFO, "perform_accurate_check(): %s", checkstatus.c_str()); if (!backend.ctx->write_command(ctx, checkstatus)) { DMSG0(ctx, DERROR, "Cannot send checkChanges() response to backend\n"); JMSG0(ctx, backend.ctx->jmsg_err_level(), "Cannot send checkChanges() response to backend\n"); return bRC_Error; } return bRC_OK; } } else { // check possible errors switch (status) { case metaplugin::attributes::Invalid_File_Type: JMSG2(ctx, M_ERROR, "Invalid file type: %c for %s\n", sp.type, fname.c_str()); return bRC_Error; case metaplugin::attributes::Invalid_Stat_Packet: JMSG1(ctx, backend.ctx->jmsg_err_level(), "Invalid stat packet: %s\n", cmd.c_str()); return bRC_Error; default: break; } // future extension for `ATTR` command // ... } return bRC_Error; } /** * @brief Perform accurate query check and resturn accurate data to backend. 
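 *
 * Possible outcomes of a backend "CHECKGET:<file name>" query (taken from
 * the code below):
 *
 *    plugin -> backend:  NOACCJOB          (the job does not run in accurate mode)
 * or
 *    plugin -> backend:  STAT:...          (catalog attributes of the queried file)
 *    plugin -> backend:  TSTAMP:...
 * or
 *    plugin -> backend:  UNAVAIL           (no accurate data available for this file)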
* * @param ctx bpContext - for Bacula debug and jobinfo messages * @return bRC bRC_OK when success, bRC_Error if not */ bRC PLUGINCLASS::perform_accurate_check_get(bpContext *ctx) { POOL_MEM cmd(PM_FNAME); if (strlen(fname.c_str()) == 0){ // input variable is not valid return bRC_Error; } DMSG0(ctx, DDEBUG, "perform_accurate_check_get()\n"); if (!accurate_mode) { // the job is not accurate, so no accurate data will be available at all pm_strcpy(cmd, "NOACCJOB\n"); if (!backend.ctx->signal_error(ctx, cmd)) { DMSG0(ctx, DERROR, "Cannot send 'No Accurate Job' info to backend\n"); JMSG0(ctx, backend.ctx->jmsg_err_level(), "Cannot send 'No Accurate Job' info to backend\n"); return bRC_Error; } return bRC_OK; } accurate_attribs_pkt attribs; memset(&attribs, 0, sizeof(attribs)); attribs.fname = fname.c_str(); bRC rc = getAccurateAttribs(&attribs); struct restore_pkt rp; switch (rc) { case bRC_Seen: memcpy(&rp.statp, &attribs.statp, sizeof(rp.statp)); rp.type = FT_MASK; // This is a special metaplugin protocol hack // because the current Bacula accurate code does // not handle FileType on catalog attributes, yet. // STAT:... metaplugin::attributes::make_stat_command(ctx, cmd, &rp); backend.ctx->write_command(ctx, cmd); // TSTAMP:... if (metaplugin::attributes::make_tstamp_command(ctx, cmd, &rp) == metaplugin::attributes::Status_OK) { backend.ctx->write_command(ctx, cmd); DMSG(ctx, DINFO, "createFile:%s", cmd.c_str()); } break; default: pm_strcpy(cmd, "UNAVAIL\n"); if (!backend.ctx->write_command(ctx, cmd)) { DMSG0(ctx, DERROR, "Cannot send 'UNAVAIL' response to backend\n"); JMSG0(ctx, backend.ctx->jmsg_err_level(), "Cannot send 'UNAVAIL' response to backend\n"); return bRC_Error; } break; } return bRC_OK; } /** * @brief * * @param ctx bpContext - for Bacula debug and jobinfo messages * @param sp save_pkt from startBackupFile() * @return bRC bRC_OK when success, bRC_Error if not */ bRC PLUGINCLASS::perform_read_pluginobject(bpContext *ctx, struct save_pkt *sp) { POOL_MEM cmd(PM_FNAME); if (strlen(fname.c_str()) == 0){ // input variable is not valid return bRC_Error; } sp->plugin_obj.path = fname.c_str(); DMSG0(ctx, DDEBUG, "perform_read_pluginobject()\n"); // loop on plugin objects parameters from backend and EOD while (true){ if (backend.ctx->read_command(ctx, cmd) > 0){ DMSG(ctx, DDEBUG, "read_command(3): %s\n", cmd.c_str()); if (scan_parameter_str(cmd, "PLUGINOBJ_CAT:", plugin_obj_cat)){ DMSG1(ctx, DDEBUG, "category: %s\n", plugin_obj_cat.c_str()); sp->plugin_obj.object_category = plugin_obj_cat.c_str(); continue; } if (scan_parameter_str(cmd, "PLUGINOBJ_TYPE:", plugin_obj_type)){ DMSG1(ctx, DDEBUG, "type: %s\n", plugin_obj_type.c_str()); sp->plugin_obj.object_type = plugin_obj_type.c_str(); continue; } if (scan_parameter_str(cmd, "PLUGINOBJ_NAME:", plugin_obj_name)){ DMSG1(ctx, DDEBUG, "name: %s\n", plugin_obj_name.c_str()); sp->plugin_obj.object_name = plugin_obj_name.c_str(); continue; } if (scan_parameter_str(cmd, "PLUGINOBJ_SRC:", plugin_obj_src)){ DMSG1(ctx, DDEBUG, "src: %s\n", plugin_obj_src.c_str()); sp->plugin_obj.object_source = plugin_obj_src.c_str(); continue; } if (scan_parameter_str(cmd, "PLUGINOBJ_UUID:", plugin_obj_uuid)){ DMSG1(ctx, DDEBUG, "uuid: %s\n", plugin_obj_uuid.c_str()); sp->plugin_obj.object_uuid = plugin_obj_uuid.c_str(); continue; } POOL_MEM param(PM_NAME); if (scan_parameter_str(cmd, "PLUGINOBJ_SIZE:", param)){ if (!size_to_uint64(param.c_str(), strlen(param.c_str()), &plugin_obj_size)){ // error in convert DMSG1(ctx, DERROR, "Cannot convert Plugin Object Size to 
integer! p=%s\n", param.c_str()); JMSG1(ctx, M_ERROR, "Cannot convert Plugin Object Size to integer! p=%s\n", param.c_str()); return bRC_Error; } DMSG1(ctx, DDEBUG, "size: %llu\n", plugin_obj_size); sp->plugin_obj.object_size = plugin_obj_size; continue; } if (scan_parameter_str(cmd, "PLUGINOBJ_COUNT:", param)){ uint32_t count = str_to_int64(param.c_str()); DMSG1(ctx, DDEBUG, "count: %lu\n", count); sp->plugin_obj.count = count; continue; } /* error in protocol */ DMSG(ctx, DERROR, "Protocol error, got unknown command: %s\n", cmd.c_str()); JMSG(ctx, M_FATAL, "Protocol error, got unknown command: %s\n", cmd.c_str()); return bRC_Error; } else { if (backend.ctx->is_fatal()){ /* raise up error from backend */ return bRC_Error; } if (backend.ctx->is_eod()){ /* no more plugin object params to backup */ DMSG0(ctx, DINFO, "No more Plugin Object params from backend.\n"); // pluginobject = false; // pluginobjectsent = true; objectsent = true; return bRC_OK; } } } return bRC_Error; } /** * @brief Receives a Restore Object data and populates save_pkt. * * @param ctx bpContext - for Bacula debug and jobinfo messages * @param sp save_pkt from startBackupFile() * @return bRC bRC_OK when success, bRC_Error if not */ bRC PLUGINCLASS::perform_read_restoreobject(bpContext *ctx, struct save_pkt *sp) { POOL_MEM cmd(PM_FNAME); sp->restore_obj.object = NULL; if (strlen(fname.c_str()) == 0){ // input variable is not valid return bRC_Error; } DMSG0(ctx, DDEBUG, "perform_read_restoreobject()\n"); // read object length required param if (backend.ctx->read_command(ctx, cmd) > 0) { DMSG(ctx, DDEBUG, "read_command(4): %s\n", cmd.c_str()); POOL_MEM param(PM_NAME); uint64_t length; if (scan_parameter_str(cmd, "RESTOREOBJ_LEN:", param)) { if (!size_to_uint64(param.c_str(), strlen(param.c_str()), &length)){ // error in convert DMSG1(ctx, DERROR, "Cannot convert Restore Object length to integer! p=%s\n", param.c_str()); JMSG1(ctx, M_ERROR, "Cannot convert Restore Object length to integer! 
p=%s\n", param.c_str()); return bRC_Error; } DMSG1(ctx, DDEBUG, "size: %llu\n", length); sp->restore_obj.object_len = length; robjbuf.check_size(length + 1); } else { // no required param DMSG0(ctx, DERROR, "Cannot read Restore Object length!\n"); JMSG0(ctx, M_ERROR, "Cannot read Restore Object length!\n"); return bRC_Error; } } else { if (backend.ctx->is_fatal()){ /* raise up error from backend */ return bRC_Error; } } int32_t recv_len = 0; if (backend.ctx->recv_data(ctx, robjbuf, &recv_len) != bRC_OK) { DMSG0(ctx, DERROR, "Cannot read data from backend!\n"); return bRC_Error; } /* no more restore object data to backup */ DMSG0(ctx, DINFO, "No more Restore Object data from backend.\n"); objectsent = true; if (recv_len != sp->restore_obj.object_len) { DMSG2(ctx, DERROR, "Backend reported RO length:%ld read:%ld\n", sp->restore_obj.object_len, recv_len); JMSG2(ctx, M_ERROR, "Backend reported RO length:%ld read:%ld\n", sp->restore_obj.object_len, recv_len); sp->restore_obj.object_len = recv_len; } sp->restore_obj.object = robjbuf.c_str(); return bRC_OK; } /* * Handle Bacula Plugin I/O API for backend * * in: * bpContext - for Bacula debug and jobinfo messages * io - Bacula Plugin API I/O structure for I/O operations * out: * bRC_OK - when successful * bRC_Error - on any error * io->status, io->io_errno - correspond to a plugin io operation status */ bRC PLUGINCLASS::pluginIO(bpContext *ctx, struct io_pkt *io) { static int rw = 0; // this variable handles single debug message { // synchronie access to job_cancelled variable // lock_guard lg(&mutex); - removed on request if (job_cancelled) { return bRC_Error; } } /* assume no error from the very beginning */ io->status = 0; io->io_errno = 0; switch (io->func) { case IO_OPEN: DMSG(ctx, D2, "IO_OPEN: (%s)\n", io->fname); switch (mode){ case BACKUP_FULL: case BACKUP_INCR: case BACKUP_DIFF: return perform_backup_open(ctx, io); case RESTORE: nodata = true; break; default: return bRC_Error; } break; case IO_READ: if (!rw) { rw = 1; DMSG2(ctx, D2, "IO_READ buf=%p len=%d\n", io->buf, io->count); } switch (mode){ case BACKUP_FULL: case BACKUP_INCR: case BACKUP_DIFF: return perform_read_data(ctx, io); default: return bRC_Error; } break; case IO_WRITE: if (!rw) { rw = 1; DMSG2(ctx, D2, "IO_WRITE buf=%p len=%d\n", io->buf, io->count); } switch (mode){ case RESTORE: return perform_write_data(ctx, io); default: return bRC_Error; } break; case IO_CLOSE: DMSG0(ctx, D2, "IO_CLOSE\n"); rw = 0; if (!backend.ctx->close_extpipe(ctx)){ return bRC_Error; } switch (mode){ case RESTORE: return perform_write_end(ctx, io); case BACKUP_FULL: case BACKUP_INCR: case BACKUP_DIFF: return perform_read_metacommands(ctx); default: return bRC_Error; } break; } return bRC_OK; } /* * Get all required information from backend to populate save_pkt for Bacula. * It handles a Restore Object (FT_PLUGIN_CONFIG) for every Full backup and * new Plugin Backup Command if setup in FileSet. It uses a help from * endBackupFile() handling the next FNAME command for the next file to * backup. The communication protocol requires some file attributes command * required it raise the error when insufficient parameters received from * backend. It assumes some parameters at save_pkt struct to be automatically * set like: sp->portable, sp->statp.st_blksize, sp->statp.st_blocks. 
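 *
 * A typical attribute exchange for a regular file, sketched with $...$
 * placeholders (the exact field layout of each command is defined by the
 * metaplugin::attributes helpers and the backend, not here):
 *   FNAME:$fname$      - already consumed by perform_read_metacommands()
 *   STAT:$stat$        - required file attributes
 *   TSTAMP:$tstamps$   - optional timestamps
 *   LSTAT:$link$       - link target, links only
 *   EOD                - ends the attribute list for this file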
* * in: * bpContext - for Bacula debug and jobinfo messages * save_pkt - Bacula Plugin API save packet structure * out: * bRC_OK - when save_pkt prepared successfully and we have file to backup * bRC_Max - when no more files to backup * bRC_Error - in any error */ bRC PLUGINCLASS::startBackupFile(bpContext *ctx, struct save_pkt *sp) { POOL_MEM cmd(PM_FNAME); int reqparams = 2; if (job_cancelled) { return bRC_Error; } /* The first file in Full backup, is the RestoreObject */ if (!estimate && mode == BACKUP_FULL && pluginconfigsent == false) { ConfigFile ini; ini.register_items(plugin_items_dump, sizeof(struct ini_items)); sp->restore_obj.object_name = (char *)INI_RESTORE_OBJECT_NAME; sp->restore_obj.object_len = ini.serialize(robjbuf.handle()); sp->restore_obj.object = robjbuf.c_str(); sp->type = FT_PLUGIN_CONFIG; DMSG2(ctx, DINFO, "Prepared RestoreObject/%s (%d) sent.\n", INI_RESTORE_OBJECT_NAME, FT_PLUGIN_CONFIG); return bRC_OK; } // check if this is the first file from backend to backup if (!nextfile){ // so read FNAME or EOD/Error if (perform_read_metacommands(ctx) != bRC_OK){ // signal error return bRC_Error; } if (!nextfile){ // got EOD, so no files to backup at all! // if we return a value different from bRC_OK then Bacula will finish // backup process, which at first call means no files to archive return bRC_Max; } } // setup required fname in save_pkt DMSG(ctx, DINFO, "fname:%s\n", fname.c_str()); sp->fname = fname.c_str(); switch (object) { case RestoreObject: // handle Restore Object parameters and data if (perform_read_restoreobject(ctx, sp) != bRC_OK) { // signal error return bRC_Error; } sp->restore_obj.object_name = fname.c_str(); sp->type = FT_RESTORE_FIRST; sp->statp.st_size = sp->restore_obj.object_len; sp->statp.st_mode = 0700 | S_IFREG; { time_t now = time(NULL); sp->statp.st_ctime = now; sp->statp.st_mtime = now; sp->statp.st_atime = now; } break; case PluginObject: // handle Plugin Object parameters if (perform_read_pluginobject(ctx, sp) != bRC_OK) { // signal error return bRC_Error; } sp->type = FT_PLUGIN_OBJECT; sp->statp.st_size = sp->plugin_obj.object_size; break; default: // here we handle standard file metadata information reqparams--; // ensure clear state for metadatas sp->plug_meta = NULL; metadatas.reset(); metadatas_list.destroy(); while (backend.ctx->read_command(ctx, cmd) > 0) { DMSG(ctx, DINFO, "read_command(2): %s\n", cmd.c_str()); metaplugin::attributes::Status status = metaplugin::attributes::read_scan_stat_command(ctx, cmd, sp); switch (status) { case metaplugin::attributes::Invalid_File_Type: JMSG2(ctx, M_ERROR, "Invalid file type: %c for %s\n", sp->type, fname.c_str()); return bRC_Error; case metaplugin::attributes::Invalid_Stat_Packet: JMSG1(ctx, backend.ctx->jmsg_err_level(), "Invalid stat packet: %s\n", cmd.c_str()); return bRC_Error; case metaplugin::attributes::Status_OK: if (sp->type != FT_LNK) { reqparams--; } continue; default: break; } status = metaplugin::attributes::read_scan_tstamp_command(ctx, cmd, sp); switch (status) { case metaplugin::attributes::Status_OK: continue; default: break; } if (scan_parameter_str(cmd, "LSTAT:", lname) == 1) { sp->link = lname.c_str(); reqparams--; DMSG(ctx, DINFO, "LSTAT:%s\n", lname.c_str()); continue; } POOL_MEM tmp(PM_FNAME); if (scan_parameter_str(cmd, "PIPE:", tmp)) { /* handle PIPE command */ DMSG(ctx, DINFO, "read pipe at: %s\n", tmp.c_str()); int extpipe = open(tmp.c_str(), O_RDONLY); if (extpipe > 0) { DMSG0(ctx, DINFO, "ExtPIPE file available.\n"); backend.ctx->set_extpipe(extpipe); 
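// External pipe opened successfully: acknowledge with "OK" so the backend
// starts feeding file data through it instead of the command channel.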
pm_strcpy(tmp, "OK\n"); backend.ctx->write_command(ctx, tmp.c_str()); } else { /* here are common error signaling */ berrno be; DMSG(ctx, DERROR, "ExtPIPE file open error! Err=%s\n", be.bstrerror()); JMSG(ctx, backend.ctx->jmsg_err_level(), "ExtPIPE file open error! Err=%s\n", be.bstrerror()); pm_strcpy(tmp, "Err\n"); backend.ctx->signal_error(ctx, tmp.c_str()); return bRC_Error; } continue; } metadata_type mtype = scan_metadata_type(ctx, cmd); if (mtype != plugin_meta_invalid) { DMSG1(ctx, DDEBUG, "metaData handling: %d\n", mtype); if (perform_read_metadata_info(ctx, mtype, sp) != bRC_OK) { DMSG0(ctx, DERROR, "Cannot perform_read_metadata_info!\n"); JMSG0(ctx, backend.ctx->jmsg_err_level(), "Cannot perform_read_metadata_info!\n"); return bRC_Error; } continue; } else { DMSG1(ctx, DERROR, "Invalid File Attributes command: %s\n", cmd.c_str()); JMSG1(ctx, backend.ctx->jmsg_err_level(), "Invalid File Attributes command: %s\n", cmd.c_str()); return bRC_Error; } } DMSG0(ctx, DINFO, "File attributes end.\n"); if (reqparams > 0) { DMSG0(ctx, DERROR, "Protocol error, not enough file attributes from backend.\n"); JMSG0(ctx, M_FATAL, "Protocol error, not enough file attributes from backend.\n"); return bRC_Error; } break; } if (backend.ctx->is_error()) { return bRC_Error; } sp->portable = true; sp->statp.st_blksize = 4096; sp->statp.st_blocks = sp->statp.st_size / 4096 + 1; DMSG3(ctx, DINFO, "TSDebug: %ld(at) %ld(mt) %ld(ct)\n", sp->statp.st_atime, sp->statp.st_mtime, sp->statp.st_ctime); return bRC_OK; } /* * Check for a next file to backup or the end of the backup loop. * The next file to backup is indicated by a FNAME command from backend and * no more files to backup as EOD. It helps startBackupFile handling FNAME * for next file. * * in: * bpContext - for Bacula debug and jobinfo messages * save_pkt - Bacula Plugin API save packet structure * out: * bRC_OK - when no more files to backup * bRC_More - when Bacula should expect a next file * bRC_Error - in any error */ bRC PLUGINCLASS::endBackupFile(bpContext *ctx) { POOL_MEM cmd(PM_FNAME); { // synchronie access to job_cancelled variable // lock_guard lg(&mutex); - removed on request if (job_cancelled) { return bRC_Error; } } if (!estimate){ /* The current file was the restore object, so just ask for the next file */ if (mode == BACKUP_FULL && pluginconfigsent == false) { pluginconfigsent = true; return bRC_More; } } // check for next file only when no previous error if (!openerror) { if (estimate || objectsent) { objectsent = false; if (perform_read_metacommands(ctx) != bRC_OK) { /* signal error */ return bRC_Error; } } if (nextfile) { DMSG1(ctx, DINFO, "nextfile %s backup!\n", fname.c_str()); return bRC_More; } } return bRC_OK; } /* * The PLUGIN is using this callback to handle Core restore. 
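 * For every restore object collected earlier that matches this plugin
 * command it sends, in order: "RESTOREOBJ:$name$", "RESTOREOBJ_LEN:$len$"
 * and finally the raw object data, so the backend can consume its saved
 * configuration up front.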
*/ bRC PLUGINCLASS::startRestoreFile(bpContext *ctx, const char *cmd) { if (restoreobject_list.size() > 0) { restore_object_class *ropclass; POOL_MEM backcmd(PM_FNAME); foreach_alist(ropclass, &restoreobject_list) { if (!ropclass->sent && strcmp(cmd, ropclass->plugin_name.c_str()) == 0) { Mmsg(backcmd, "RESTOREOBJ:%s\n", ropclass->object_name.c_str()); DMSG1(ctx, DINFO, "%s", backcmd.c_str()); ropclass->sent = true; if (!backend.ctx->write_command(ctx, backcmd.c_str())) { DMSG0(ctx, DERROR, "Error sending RESTOREOBJ command\n"); return bRC_Error; } Mmsg(backcmd, "RESTOREOBJ_LEN:%d\n", ropclass->length); if (!backend.ctx->write_command(ctx, backcmd.c_str())) { DMSG0(ctx, DERROR, "Error sending RESTOREOBJ_LEN command\n"); return bRC_Error; } /* send data */ if (backend.ctx->send_data(ctx, ropclass->data, ropclass->length) != bRC_OK) { DMSG0(ctx, DERROR, "Error sending RestoreObject data\n"); return bRC_Error; } } } } return bRC_OK; } /* * The PLUGIN is not using this callback to handle restore. */ bRC PLUGINCLASS::endRestoreFile(bpContext *ctx) { return bRC_OK; } /* * Prepares a file to restore attributes based on data from restore_pkt. * It handles a response from backend to show if * * in: * bpContext - bacula plugin context * restore_pkt - Bacula Plugin API restore packet structure * out: * bRC_OK - when success reported from backend * rp->create_status = CF_EXTRACT - the backend will restore the file * with pleasure * rp->create_status = CF_SKIP - the backend wants to skip restoration, i.e. * the file already exist and Replace=n was set * bRC_Error, rp->create_status = CF_ERROR - in any error */ bRC PLUGINCLASS::createFile(bpContext *ctx, struct restore_pkt *rp) { POOL_MEM cmd(PM_FNAME); // char type; { // synchronie access to job_cancelled variable // lock_guard lg(&mutex); - removed on request if (job_cancelled) { return bRC_Error; } } skipextract = false; acldatalen = 0; xattrdatalen = 0; if (CORELOCALRESTORE && islocalpath(where)) { DMSG0(ctx, DDEBUG, "createFile:Forwarding restore to Core\n"); rp->create_status = CF_CORE; } else { // FNAME:$fname$ Mmsg(cmd, "FNAME:%s\n", rp->ofname); backend.ctx->write_command(ctx, cmd); DMSG(ctx, DINFO, "createFile:%s", cmd.c_str()); // STAT:... metaplugin::attributes::make_stat_command(ctx, cmd, rp); backend.ctx->write_command(ctx, cmd); last_type = rp->type; DMSG(ctx, DINFO, "createFile:%s", cmd.c_str()); // TSTAMP:... 
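// The TSTAMP command is optional and is only forwarded when
// make_tstamp_command() reports Status_OK for the packet timestamps.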
if (metaplugin::attributes::make_tstamp_command(ctx, cmd, rp) == metaplugin::attributes::Status_OK) { backend.ctx->write_command(ctx, cmd); DMSG(ctx, DINFO, "createFile:%s", cmd.c_str()); } // LSTAT:$link$ if (rp->type == FT_LNK && rp->olname != NULL){ Mmsg(cmd, "LSTAT:%s\n", rp->olname); backend.ctx->write_command(ctx, cmd); DMSG(ctx, DINFO, "createFile:%s", cmd.c_str()); } backend.ctx->signal_eod(ctx); // check if backend accepted the file if (backend.ctx->read_command(ctx, cmd) > 0){ DMSG(ctx, DINFO, "createFile:resp: %s\n", cmd.c_str()); if (strcmp(cmd.c_str(), "OK") == 0){ rp->create_status = CF_EXTRACT; } else if (strcmp(cmd.c_str(), "SKIP") == 0){ rp->create_status = CF_SKIP; skipextract = true; } else if (strcmp(cmd.c_str(), "CORE") == 0){ rp->create_status = CF_CORE; } else { DMSG(ctx, DERROR, "Wrong backend response to create file, got: %s\n", cmd.c_str()); JMSG(ctx, backend.ctx->jmsg_err_level(), "Wrong backend response to create file, got: %s\n", cmd.c_str()); rp->create_status = CF_ERROR; return bRC_Error; } } else { if (backend.ctx->is_error()){ /* raise up error from backend */ rp->create_status = CF_ERROR; return bRC_Error; } } } return bRC_OK; } /* * Unimplemented, always return bRC_OK. */ bRC PLUGINCLASS::setFileAttributes(bpContext *ctx, struct restore_pkt *rp) { return bRC_OK; } /** * @brief * * @param ctx * @param exepath * @return bRC */ void PLUGINCLASS::setup_backend_command(bpContext *ctx, POOL_MEM &exepath) { DMSG(ctx, DINFO, "ExePath: %s\n", exepath.c_str()); Mmsg(backend_cmd, "%s/%s", exepath.c_str(), BACKEND_CMD); DMSG(ctx, DINFO, "BackendPath: %s\n", backend_cmd.c_str()); if (access(backend_cmd.c_str(), X_OK) < 0) { berrno be; DMSG2(ctx, DERROR, "Unable to use backend: %s Err=%s\n", backend_cmd.c_str(), be.bstrerror()); pm_strcpy(backend_error, be.bstrerror()); backend_available = false; } else { DMSG0(ctx, DINFO, "Backend available\n"); backend_available = true; } } /** * @brief * * @param ctx * @param xacl * @return bRC */ bRC PLUGINCLASS::handleXACLdata(bpContext *ctx, struct xacl_pkt *xacl) { { // synchronie access to job_cancelled variable // lock_guard lg(&mutex); - removed on request if (job_cancelled) { return bRC_Error; } } switch (xacl->func) { case BACL_BACKUP: if (readacl) { DMSG0(ctx, DINFO, "bacl_backup\n"); xacl->count = acldatalen; xacl->content = acldata.c_str(); readacl= false; } else { xacl->count = 0; } break; case BACL_RESTORE: DMSG1(ctx, DINFO, "bacl_restore: %d\n", last_type); if (!skipextract) { if (last_type != FT_DIREND) { return perform_write_acl(ctx, xacl); } else { DMSG0(ctx, DDEBUG, "delay ACL stream restore\n"); acldatalen = xacl->count; pm_memcpy(acldata, xacl->content, acldatalen); } } break; case BXATTR_BACKUP: if (readxattr){ DMSG0(ctx, DINFO, "bxattr_backup\n"); xacl->count = xattrdatalen; xacl->content = xattrdata.c_str(); readxattr= false; } else { xacl->count = 0; } break; case BXATTR_RESTORE: DMSG1(ctx, DINFO, "bxattr_restore: %d\n", last_type); if (!skipextract) { if (last_type != FT_DIREND) { return perform_write_xattr(ctx, xacl); } else { DMSG0(ctx, DDEBUG, "delay XATTR stream restore\n"); xattrdatalen = xacl->count; pm_memcpy(xattrdata, xacl->content, xattrdatalen); } } break; } return bRC_OK; } /* * QueryParameter interface */ bRC PLUGINCLASS::queryParameter(bpContext *ctx, struct query_pkt *qp) { DMSG0(ctx, D1, "PLUGINCLASS::queryParameter\n"); // check if it is our Plugin command if (!isourplugincommand(PLUGINPREFIX, qp->command) != 0){ // it is not our plugin prefix return bRC_OK; } { // synchronie access to 
job_cancelled variable // lock_guard lg(&mutex); - removed on request if (job_cancelled) { return bRC_Error; } } POOL_MEM cmd(PM_MESSAGE); if (listing == None) { listing = Query; Mmsg(cmd, "%s query=%s", qp->command, qp->parameter); if (prepare_backend(ctx, BACKEND_JOB_INFO_ESTIMATE, cmd.c_str()) == bRC_Error){ return bRC_Error; } } /* read backend response */ char pkt = 0; int32_t pktlen = backend.ctx->read_any(ctx, &pkt, cmd); if (pktlen < 0) { DMSG(ctx, DERROR, "Cannot read backend query response for %s command.\n", qp->parameter); JMSG(ctx, backend.ctx->jmsg_err_level(), "Cannot read backend query response for %s command.\n", qp->parameter); return bRC_Error; } bRC ret = bRC_More; /* check EOD */ if (backend.ctx->is_eod()){ /* got EOD so the backend finish response, so terminate the chat */ DMSG0(ctx, D1, "PLUGINCLASS::queryParameter: got EOD\n"); backend.ctx->signal_term(ctx); backend.ctx->terminate(ctx); qp->result = NULL; ret = bRC_OK; } else { switch (pkt) { case 'C': { OutputWriter ow(qp->api_opts); char *p, *q, *t; alist values(10, not_owned_by_alist); key_pair *kp; /* * here we have: * key=value[,key2=value2[,...]] * parameters we should decompose */ p = cmd.c_str(); while (*p != '\0') { q = strchr(p, ','); if (q != NULL) { *q++ = '\0'; } // single key=value DMSG(ctx, D1, "PLUGINCLASS::queryParameter:scan %s\n", p); if ((t = strchr(p, '=')) != NULL) { *t++ = '\0'; } else { t = (char*)""; // pointer to empty string } DMSG2(ctx, D1, "PLUGINCLASS::queryParameter:pair '%s' = '%s'\n", p, t); if (strlen(p) > 0) { // push values only when we have key name kp = New(key_pair(p, t)); values.append(kp); } p = q != NULL ? q : (char*)""; } // if more values then one then it is a list if (values.size() > 1) { DMSG0(ctx, D1, "PLUGINCLASS::queryParameter: will render list\n") ow.start_list(qp->parameter); } // render all values foreach_alist(kp, &values) { ow.get_output(OT_STRING, kp->key.c_str(), kp->value.c_str(), OT_END); delete kp; } if (values.size() > 1) { ow.end_list(); } pm_strcpy(robjbuf, ow.get_output(OT_END)); qp->result = robjbuf.c_str(); } break; case 'D': pm_memcpy(robjbuf, cmd.c_str(), pktlen); qp->result = robjbuf.c_str(); break; default: DMSG(ctx, DERROR, "PLUGINCLASS::queryParameter: got invalid packet: %c\n", pkt); JMSG(ctx, M_ERROR, "PLUGINCLASS::queryParameter: got invalid packet: %c\n", pkt); backend.ctx->signal_term(ctx); backend.ctx->terminate(ctx); qp->result = NULL; ret = bRC_Error; break; } } return ret; } /** * @brief Sends metadata to backend for restore. * * @param ctx for Bacula debug and jobinfo messages * @param mp * @return bRC */ bRC PLUGINCLASS::metadataRestore(bpContext *ctx, struct meta_pkt *mp) { { // synchronie access to job_cancelled variable // lock_guard lg(&mutex); - removed on request if (job_cancelled) { return bRC_Error; } } if (!skipextract){ POOL_MEM cmd(PM_FNAME); if (mp->buf != NULL && mp->buf_len > 0){ /* send command METADATA */ pm_strcpy(cmd, prepare_metadata_type(mp->type)); backend.ctx->write_command(ctx, cmd.c_str()); /* send metadata stream data */ DMSG1(ctx, DINFO, "writeMetadata: %i\n", mp->buf_len); int rc = backend.ctx->write_data(ctx, (char*)mp->buf, mp->buf_len); if (rc < 0){ /* got some error */ return bRC_Error; } // signal end of metadata stream to restore and get ack backend.ctx->signal_eod(ctx); // check if backend accepted the file if (backend.ctx->read_command(ctx, cmd) > 0) { DMSG(ctx, DINFO, "metadataRestore:resp: %s\n", cmd.c_str()); if (bstrcmp(cmd.c_str(), "SKIP")) { // SKIP! 
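// Backend declined this object: remember it in skipextract so the
// remaining streams for this file are skipped as well.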
skipextract = true; return bRC_Skip; } if (!bstrcmp(cmd.c_str(), "OK")) { DMSG(ctx, DERROR, "Wrong backend response to metadataRestore, got: %s\n", cmd.c_str()); JMSG(ctx, backend.ctx->jmsg_err_level(), "Wrong backend response to metadataRestore, got: %s\n", cmd.c_str()); return bRC_Error; } } else { if (backend.ctx->is_error()) { // raise up error from backend return bRC_Error; } } } } return bRC_OK; } /** * @brief Implements default metaplugin checkFile() callback. * When fname match plugin configured namespace then it return bRC_Seen by default * or calls custom checkFile() callback defined by backend developer. * * @param ctx for Bacula debug and jobinfo messages * @param fname file name to check * @return bRC bRC_Seen or bRC_OK */ bRC PLUGINCLASS::checkFile(bpContext * ctx, char *fname) { if ((!CUSTOMNAMESPACE && isourpluginfname(PLUGINPREFIX, fname)) || (CUSTOMNAMESPACE && isourpluginfname(PLUGINNAMESPACE, fname))) { // synchronie access to job_cancelled variable // lock_guard lg(&mutex); - removed on request if (!job_cancelled) { if (::checkFile != NULL) { return ::checkFile(ctx, fname); } } return bRC_Seen; } return bRC_OK; } #endif } // namespace pluginlib bacula-15.0.3/src/plugins/fd/pluginlib/Makefile0000644000175000017500000001232414771010173021152 0ustar bsbuildbsbuild# # Makefile for building FD plugins PluginLibrary for Bacula # # Copyright (C) 2000-2020 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # # Author: Radoslaw Korzeniewski, radoslaw@korzeniewski.net # include ../Makefile.inc PLUGINLIBSSRC = pluginlib.cpp pluginlib.h PLUGINLIBSOBJ = $(filter %.lo,$(PLUGINLIBSSRC:.cpp=.lo)) ISO8601SRC = iso8601.cpp iso8601.h ISO8601OBJ = $(filter %.lo,$(ISO8601SRC:.cpp=.lo)) EXECPROGSRC = execprog.cpp execprog.h EXECPROGOBJ = $(filter %.lo,$(EXECPROGSRC:.cpp=.lo)) PTCOMMSRC = ptcomm.cpp ptcomm.h PTCOMMOBJ = $(filter %.lo,$(PTCOMMSRC:.cpp=.lo)) PLUGINBASESRC = pluginbase.h # pluginbase.cpp PLUGINBASEOBJ = $(filter %.lo,$(PLUGINBASESRC:.cpp=.lo)) PLUGINCLASSSRC = pluginclass.h # pluginclass.cpp PLUGINCLASSOBJ = $(filter %.lo,$(PLUGINCLASSSRC:.cpp=.lo)) PLUGINCTXSRC = pluginctx.h pluginctx.cpp PLUGINCTXOBJ = $(filter %.lo,$(PLUGINCTXSRC:.cpp=.lo)) COMMCTXSRC = commctx.h SMARTALISTSRC = smartalist.h SMARTPTRSRC = smartptr.h PLUGINLIBSTEST = pluginlib_test.cpp $(PLUGINLIBSSRC) PLUGINLIBSTESTOBJ = $(filter %.lo,$(PLUGINLIBSTEST:.cpp=.lo)) $(INIOBJ) $(UNITTESTSOBJ) ISO8601TEST = iso8601_test.cpp $(ISO8601SRC) ISO8601TESTOBJ = $(filter %.lo,$(ISO8601TEST:.cpp=.lo)) $(UNITTESTSOBJ) COMMCTXTEST = commctx_test.cpp $(COMMCTXSRC) $(SMARTALISTSRC) COMMCTXTESTOBJ = $(filter %.lo,$(COMMCTXTEST:.cpp=.lo)) $(INIOBJ) $(PLUGINLIBSOBJ) $(UNITTESTSOBJ) SMARTALISTTEST = smartalist_test.cpp $(SMARTALISTSRC) SMARTALISTTESTOBJ = $(filter %.lo,$(SMARTALISTTEST:.cpp=.lo)) $(INIOBJ) $(PLUGINLIBSOBJ) $(UNITTESTSOBJ) SMARTPTRTEST = smartptr_test.cpp $(SMARTPTRSRC) SMARTPTRTESTOBJ = $(filter %.lo,$(SMARTPTRTEST:.cpp=.lo)) $(INIOBJ) $(PLUGINLIBSOBJ) $(UNITTESTSOBJ) METAPLUGINTEST = metaplugin_test.cpp $(METAPLUGINSRC) METAPLUGINTESTOBJ = $(filter %.lo,$(METAPLUGINTEST:.cpp=.lo)) $(PTCOMMOBJ) $(PLUGINLIBSOBJ) $(METAPLUGINOBJ) $(UNITTESTSOBJ) $(LIBBACOBJ) $(LIBBACCFGOBJ) TESTMETAPLUGINBACKENDSRC = test_metaplugin_backend.c TESTMETAPLUGINBACKENDOBJ = $(TESTMETAPLUGINBACKENDSRC:.c=.lo) # COMMONPLUGINOBJ = $(PLUGINLIBSOBJ) $(ISO8601OBJ) $(EXECPROGOBJ) COMMONPLUGINOBJ = $(PLUGINLIBSOBJ) $(PTCOMMOBJ) $(METAPLUGINOBJ) $(PLUGINCLASSOBJ) $(PLUGINBASEOBJ) # COMMONPLUGINTESTS = iso8601_test COMMONPLUGINTESTS = 
pluginlib_test smartalist_test smartptr_test commctx_test iso8601_test .c.lo: @echo "Compiling $< ..." $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(CPPFLAGS) $(CFLAGS) -I$(SRCDIR) -I$(FDDIR) -I$(FDPLUGDIR) -I$(LIBDIR) -I$(FINDLIBDIR) -I. -c $< .cpp.lo: @echo "Compiling c++ $< ..." $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(CPPFLAGS) $(CFLAGS) -I$(SRCDIR) -I$(FDDIR) -I$(FDPLUGDIR) -I$(LIBDIR) -I$(FINDLIBDIR) -I. -c $< %.lo: %.cpp %.h @echo "Pattern compiling c++ $< ..." $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(CPPFLAGS) $(CFLAGS) -I$(SRCDIR) -I$(FDDIR) -I$(FDPLUGDIR) -I$(LIBDIR) -I$(FINDLIBDIR) -I. -c $< all: $(COMMONPLUGINOBJ) # $(TESTMETAPLUGINBACKENDOBJ) tests: $(COMMONPLUGINTESTS) test_metaplugin_backend.lo: $(TESTMETAPLUGINBACKENDSRC) @echo "Compiling backend $< ..." $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(CPPFLAGS) $(CFLAGS) -I${SRCDIR} -I${FDDIR} -DLOGDIR=\"$(DESTDIR)$(working_dir)\" -c $< test_metaplugin_backend_fd: $(TESTMETAPLUGINBACKENDOBJ) test_metaplugin_backend_fd.lo @echo "Building $@ ..." $(NO_ECHO)$(LIBTOOL_LINK) --silent $(CXX) $(LDFLAGS) test_metaplugin_backend_fd.lo $(TESTMETAPLUGINBACKENDOBJ) -o $@ test_metaplugin-fd.la: $(PTCOMMOBJ) $(PLUGINLIBSOBJ) $(METAPLUGINOBJ) test_metaplugin-fd.lo @echo "Linking $(@:.la=.so) ..." $(NO_ECHO)$(LIBTOOL_LINK) --silent $(CXX) $(LDFLAGS) -shared $(filter %.lo, $^) -o $@ -rpath $(plugindir) -module -export-dynamic -avoid-version pluginlib_test: $(PLUGINLIBSTESTOBJ) $(PLUGINLIBSTEST) $(UNITTESTSOBJ) $(LIBBACOBJ) @echo "Building $@ ..." $(NO_ECHO)$(LIBTOOL_LINK) --silent $(CXX) $(LDFLAGS) -L$(LIBDIR) -lbac $(filter %.lo, $^) -o $@ smartalist_test: $(SMARTALISTTESTOBJ) $(SMARTALISTTEST) $(UNITTESTSOBJ) $(LIBBACOBJ) @echo "Building $@ ..." $(NO_ECHO)$(LIBTOOL_LINK) --silent $(CXX) $(LDFLAGS) -L$(LIBDIR) -lbac $(filter %.lo, $^) -o $@ smartptr_test: $(SMARTPTRTESTOBJ) $(SMARTPTRTEST) $(UNITTESTSOBJ) $(LIBBACOBJ) @echo "Building $@ ..." $(NO_ECHO)$(LIBTOOL_LINK) --silent $(CXX) $(LDFLAGS) -L$(LIBDIR) -lbac $(filter %.lo, $^) -o $@ commctx_test: $(COMMCTXTESTOBJ) $(COMMCTXTEST) $(UNITTESTSOBJ) $(LIBBACOBJ) @echo "Building $@ ..." $(NO_ECHO)$(LIBTOOL_LINK) --silent $(CXX) $(LDFLAGS) -L$(LIBDIR) -lbac $(filter %.lo, $^) -o $@ iso8601_test: $(ISO8601TESTOBJ) $(ISO8601SRC) $(UNITTESTSOBJ) $(LIBBACOBJ) @echo "Building $@ ..." $(NO_ECHO)$(LIBTOOL_LINK) --silent $(CXX) $(LDFLAGS) -L$(LIBDIR) -lbac $(filter %.lo, $^) -o $@ metaplugin_test: $(METAPLUGINTESTOBJ) @echo "Building $@ ..." $(LIBTOOL_LINK) --silent $(CXX) $(LDFLAGS) -L$(LIBDIR) $(LIBBACCFGOBJ) $(LIBBACOBJ) $(filter %.lo, $^) -o $@ #$(NO_ECHO) install: all $(MKDIR) $(DESTDIR)$(plugindir) libtool-clean: @find . -name '*.lo' -print | xargs $(LIBTOOL_CLEAN) $(RMF) @find . -name '*.la' -print | xargs $(LIBTOOL_CLEAN) $(RMF) @$(RMF) -r .libs _libs clean: libtool-clean @rm -f main *.so *.o @rm -f $(COMMONPLUGINTESTS) test_metaplugin_backend_fd distclean: clean libtool-uninstall: depend: uninstall: $(LIBTOOL_UNINSTALL_TARGET) bacula-15.0.3/src/plugins/fd/pluginlib/smartalist.h0000644000175000017500000001033314771010173022044 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. 
You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /** * @file commctx.h * @author Radosław Korzeniewski (radoslaw@korzeniewski.net) * @brief This is a simple smart array list (alist) resource guard conceptually based on C++11 - RAII. * @version 1.1.0 * @date 2020-12-23 * * @copyright Copyright (c) 2020 All rights reserved. IP transferred to Bacula Systems according to agreement. */ #ifndef _SMARTALIST_H_ #define _SMARTALIST_H_ // This is for custom alist iterator // which is not working as Bacula can't use any of the modern C++ headers (yet?) // #if __cplusplus >= 201104 // #include // For std::forward_iterator_tag // #include // For std::ptrdiff_t // #endif /** * @brief This is a simple smart array list (alist) resource guard conceptually based on C++11 - RAII. * * @tparam T is a type for a pointer to allocate on smart_alist. */ template class smart_alist : public alist { private: void _destroy(bool _remove_items = true) { if (items) { for (int i = 0; i < max_items; i++) { if (items[i]) { T *item = (T*)items[i]; items[i] = NULL; delete item; } } if (_remove_items) { free(items); items = NULL; } } num_items = 0; last_item = 0; max_items = 0; } public: // This is a custom alist iteration for modern C++ // #if __cplusplus >= 201104 // struct Iterator // { // using iterator_category = std::forward_iterator_tag; // using difference_type = std::ptrdiff_t; // using value_type = T; // using pointer = T*; // or also value_type* // using reference = T&; // or also value_type& // private: // pointer m_ptr; // public: // Iterator(pointer ptr) : m_ptr(ptr) {} // reference operator*() const { return *m_ptr; } // pointer operator->() { return m_ptr; } // // Prefix increment // Iterator& operator++() { m_ptr++; return *this; } // // Postfix increment // Iterator operator++(int) { Iterator tmp = *this; ++(*this); return tmp; } // friend bool operator== (const Iterator& a, const Iterator& b) { return a.m_ptr == b.m_ptr; }; // friend bool operator!= (const Iterator& a, const Iterator& b) { return a.m_ptr != b.m_ptr; }; // }; // // our iterator interface // inline Iterator begin() { return Iterator(&items[0]); } // inline Iterator end() { return Iterator(NULL); } // #endif smart_alist(int num = 10) : alist(num, not_owned_by_alist) {} ~smart_alist() { _destroy(); } inline void smart_destroy() { _destroy(); } // This is a simple copy operator // it requires a T(T*) constructor to work correctly inline smart_alist &operator=(const smart_alist &other) { if (items != other.items) { _destroy(false); if (other.items) { for (int i = 0; i < other.max_items; i++) { if (other.items[i]) { T *item = New(T((const T*)other.items[i])); this->append(item); } } } } return *this; } #if __cplusplus >= 201104 // This is a simple move operator inline smart_alist &operator=(smart_alist &&other) { if (items != other.items) { _destroy(); items = other.items; other.init(other.num_grow, own_items); } return *this; } #endif }; #endif /* _SMARTALIST_H_ */ bacula-15.0.3/src/plugins/fd/docker/0000755000175000017500000000000014771010173016772 5ustar bsbuildbsbuildbacula-15.0.3/src/plugins/fd/docker/dkcommctx.c0000644000175000017500000023432114771010173021134 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup 
Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /** * @file dkcommctx.c * @author Radosław Korzeniewski (radoslaw@korzeniewski.net) * @brief This is a Bacula plugin for backup/restore Docker using native tools. * @version 1.2.1 * @date 2020-01-05 * * @copyright Copyright (c) 2021 All rights reserved. IP transferred to Bacula Systems according to agreement. */ #include "dkcommctx.h" #define PLUGINPREFIX "dkcommctx:" /* * Constructor, does simple initialization. */ DKCOMMCTX::DKCOMMCTX(const char *cmd) : bpipe(NULL), param_include_container(NULL), param_include_image(NULL), param_exclude_container(NULL), param_exclude_image(NULL), param_container(NULL), param_image(NULL), param_volume(NULL), param_mode(DKPAUSE), /* default backup mode */ param_container_create(true), param_container_run(false), param_container_imageid(false), param_container_defaultnames(false), param_docker_host(PM_FNAME), param_timeout(0), abort_on_error(false), all_containers(NULL), all_images(NULL), all_volumes(NULL), objs_to_backup(NULL), all_to_backup(false), all_vols_to_backup(false), f_eod(false), f_error(false), f_fatal(false), ini(NULL), workingvolume(PM_FNAME), workingdir(PM_FNAME) { /* setup initial plugin command here */ command = bstrdup(cmd); /* prepare backup list */ objs_to_backup = New(alist(32, not_owned_by_alist)); param_timeout = 30; // this is a default you can overwrite }; /* * Destructor, releases all memory allocated. */ DKCOMMCTX::~DKCOMMCTX() { if (command){ free(command); } if (ini){ delete ini; } release_all_dkinfo_list(&all_containers); release_all_dkinfo_list(&all_images); release_all_dkinfo_list(&all_volumes); if (objs_to_backup){ delete objs_to_backup; } release_all_pm_list(¶m_include_container); release_all_pm_list(¶m_exclude_container); release_all_pm_list(¶m_include_image); release_all_pm_list(¶m_exclude_image); release_all_pm_list(¶m_container); release_all_pm_list(¶m_image); release_all_pm_list(¶m_volume); }; /* * sets runtime workingdir variable used in working volume creation. * * in: * workdir - the file daemon working directory parameter * out: * none */ void DKCOMMCTX::setworkingdir(char* workdir) { pm_strcpy(workingdir, workdir); DMSG1(NULL, DVDEBUG, "workingdir: %s\n", workingdir.c_str()); }; /* * Releases the memory allocated by all_* list. * * in: * alist - a list to release * out: * none */ void DKCOMMCTX::release_all_dkinfo_list(alist **list) { DKINFO *dkinfo; if (*list){ foreach_alist(dkinfo, *list){ if (dkinfo){ delete dkinfo; } } delete *list; } *list = NULL; } /* * Releases the memory allocated by param_* list. * * in: * alist - a list to release * out: * none */ void DKCOMMCTX::release_all_pm_list(alist **list) { POOLMEM *pm; if (*list){ foreach_alist(pm, *list){ free_and_null_pool_memory(pm); } delete *list; } *list = NULL; } /* * Prepare a docker volume directory for container execution. 
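 * The directory is created under the File Daemon working directory from the
 * template "%s/docker-%d-%d-XXXXXX" (workingdir, jobid, pid) via mkdtemp();
 * if mkdtemp() fails the code falls back to a plain "%s/docker-%d-%d" path
 * created with mkdir().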
* * in: * bpContext - required for debug/job messages * jobid - for volume directory distinguish and jobid tracking * out: * bRC_OK - on success * bRC_Error - on any error */ bRC DKCOMMCTX::prepare_working_volume(bpContext *ctx, int jobid) { char *dir; struct stat statp; pid_t pid = getpid(); DMSG0(ctx, DINFO, "prepare_working_volume called\n"); /* create dir template for mkdtemp function */ const char *wd = strlen(workingdir.c_str()) > 0 ? workingdir.c_str() : WORKDIR; Mmsg(workingvolume, "%s/docker-%d-%d-XXXXXX", wd, jobid, pid); dir = mkdtemp(workingvolume.c_str()); if (dir == NULL){ /* failback to standard method */ Mmsg(workingvolume, "%s/docker-%d-%d", wd, jobid, pid); if (stat(workingvolume.c_str(), &statp) != 0){ berrno be; /* if the path does not exist then create one */ if (be.code() != ENOENT || mkdir(workingvolume.c_str(), 0700) != 0){ /* creation or other error, set new errno and proceed to inform user */ be.set_errno(errno); DMSG2(ctx, DERROR, "working volume path (%s) creation Err=%s\n", workingvolume.c_str(), be.bstrerror()); JMSG2(ctx, abort_on_error ? M_FATAL : M_ERROR, "Working volume path (%s) creation Err=%s!\n", workingvolume.c_str(), be.bstrerror()); return bRC_Error; } } else if (!S_ISDIR(statp.st_mode)){ /* the expected working dir/volume is already available and it is not a directory, strange */ DMSG2(ctx, DERROR, "working volume path (%s) is not directory mode=%o\n", workingvolume.c_str(), statp.st_mode); JMSG2(ctx, abort_on_error ? M_FATAL : M_ERROR, "Working volume path (%s) is not directory mode=%o\n", workingvolume.c_str(), statp.st_mode); return bRC_Error; } } DMSG1(ctx, DINFO, "prepare_working_volume finish: %s\n", workingvolume.c_str()); return bRC_OK; }; /* * It removes files and a volume directory after a successful backup/restore * * in: * bpContext - required for debug/job messages * out * none */ void DKCOMMCTX::clean_working_volume(bpContext* ctx) { POOL_MEM fname(PM_FNAME); int status; int a; bool ferr = false; const char *ftab[] = { BACULACONTAINERERRLOG, BACULACONTAINERARCHLOG, BACULACONTAINERFIN, BACULACONTAINERFOUT, NULL, }; DMSG0(ctx, DDEBUG, "clean_working_volume called\n"); for (a = 0; ftab[a] != NULL; a++) { render_working_volume_filename(fname, ftab[a]); status = unlink(fname.c_str()); if (status < 0){ /* unlink error - report to user */ berrno be; if (be.code() == ENOENT){ continue; } ferr = true; DMSG2(ctx, DERROR, "unlink error: %s Err=%s\n", fname.c_str(), be.bstrerror()); JMSG2(ctx, M_ERROR, "Cannot unlink a file: %s Err=%s\n", fname.c_str(), be.bstrerror()); } DMSG1(ctx, DDEBUG, "removing: %s\n", fname.c_str()) } if (!ferr){ status = rmdir(workingvolume.c_str()); if (status < 0){ /* unlink error - report to user */ berrno be; DMSG2(ctx, DERROR, "rmdir error: %s Err=%s\n", workingvolume.c_str(), be.bstrerror()); JMSG2(ctx, M_ERROR, "Cannot remove directory: %s Err=%s\n", workingvolume.c_str(), be.bstrerror()); } } pm_strcpy(workingvolume, NULL); DMSG0(ctx, DDEBUG, "clean_working_volume finish.\n"); }; /* * Terminate the connection represented by BPIPE object. * it shows a debug and job messages when connection close * is unsuccessful and when ctx is available only. * * in: * bpContext - Bacula Plugin context required for debug/job * messages to show, it could be NULL in this * case no messages will be shown. 
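 * Note: after close_bpipe() returns, a still known backend PID is
 * additionally sent SIGTERM as a safety net.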
* out: * none */ void DKCOMMCTX::terminate(bpContext *ctx) { int status; if (is_closed()){ return; } DMSG(ctx, DDEBUG, "Terminating PID=%d\n", bpipe->worker_pid); status = close_bpipe(bpipe); if (status){ /* error during close */ berrno be; f_error = true; DMSG(ctx, DERROR, "Error closing backend. Err=%s\n", be.bstrerror(status)); JMSG(ctx, is_fatal() ? M_FATAL : M_ERROR, "Error closing backend. Err=%s\n", be.bstrerror(status)); } if (bpipe->worker_pid){ /* terminate the backend */ kill(bpipe->worker_pid, SIGTERM); } bpipe = NULL; }; /* * Run the command using *_CMD compile variable and prepared * parameters. * * in: * bpContext - for Bacula debug and jobinfo messages * cmd - the command type to execute * args - the command arguments * out: * True - when command execute successfully * False - when execution return error */ bool DKCOMMCTX::execute_command(bpContext *ctx, POOL_MEM &args) { return execute_command(ctx, args.c_str()); } /* * Run the command using *_CMD compile variable and prepared * parameters. * * in: * bpContext - for Bacula debug and jobinfo messages * cmd - the command type to execute * args - the command arguments * out: * True - when command execute successfully * False - when execution return error */ bool DKCOMMCTX::execute_command(bpContext *ctx, const char *args) { return execute_command(ctx, (char*)args); } /* * Run the command using *_CMD compile variable and prepared * parameters. * * in: * bpContext - for Bacula debug and jobinfo messages * cmd - the command type to execute * args - the command arguments * out: * True - when command execute successfully * False - when execution return error */ bool DKCOMMCTX::execute_command(bpContext *ctx, POOLMEM *args) { POOL_MEM exe_cmd(PM_FNAME); POOL_MEM DH(PM_NAME); const char *command = DOCKER_CMD; char *envp[3]; int a = 0; if (args == NULL){ /* cannot execute command with args NULL */ DMSG0(ctx, DERROR, "Logic error: Cannot execute empty command tool!\n"); JMSG0(ctx, M_FATAL, "Logic error: Cannot execute empty command tool!\n"); return false; } /* check if command is still available to Bacula */ if (access(command, X_OK) < 0){ berrno be; DMSG2(ctx, DERROR, "Unable to access %s command. Err=%s\n", command, be.bstrerror()); JMSG2(ctx, M_FATAL, "Unable to access %s command. Err=%s\n", command, be.bstrerror()); return false; } /* yes, we still have access to it. * the format of a command line to execute is: */ Mmsg(exe_cmd, "%s %s", command, args); DMSG(ctx, DINFO, "Executing: %s\n", exe_cmd.c_str()); /* preparing envinroment variables */ envp[a++] = bstrdup("LANG=C"); if (strlen(param_docker_host.c_str()) > 0) { Mmsg(DH, "DOCKER_HOST=%s", param_docker_host.c_str()); envp[a++] = bstrdup(DH.c_str()); } envp[a] = NULL; bpipe = open_bpipe(exe_cmd.c_str(), 0, "rw", envp); a = 0; while (envp[a] != NULL){ free(envp[a++]); } if (bpipe == NULL){ berrno be; DMSG(ctx, DERROR, "Unable to execute command. Err=%s\n", be.bstrerror()); JMSG(ctx, M_FATAL, "Unable to execute command. Err=%s\n", be.bstrerror()); return false; } DMSG(ctx, DINFO, "Command executed at PID=%d\n", get_backend_pid()); return true; } /* * Read all output from command tool - until eod and save it * in the out buffer. 
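 * The routine keeps calling read_data() and grows the output buffer with
 * check_size() until the backend signals EOD, so the caller always gets the
 * complete response in a single POOL_MEM.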
* * in: * bpContext - for Bacula debug jobinfo messages * out - the POOL_MEM buffer we will read data * out: * -1 - when we've got any error; the function will report * it to Bacula when ctx is not NULL * 0 - when no more data to read - EOD * - the size of received message */ int32_t DKCOMMCTX::read_output(bpContext *ctx, POOL_MEM &out) { int status; int rbytes; bool ndone; if (is_closed()){ f_error = true; DMSG0(ctx, DERROR, "BPIPE to command tool is closed, cannot get data.\n"); JMSG0(ctx, is_fatal() ? M_FATAL : M_ERROR, "BPIPE to command tool is closed, cannot get data.\n"); return -1; } /* set variables */ rbytes = 0; ndone = true; /* wait a bit for a command to execute */ bmicrosleep(0, 1000); // sleep 1mS /* read all output data */ while (ndone){ status = read_data(ctx, out.c_str() + rbytes, out.size() - rbytes); if (status < 0){ /* error */ return -1; } rbytes += status; if (is_eod()){ /* we read all data available */ ndone = false; continue; } /* it seems out buffer is too small for all data */ out.check_size(rbytes + 1024); } return rbytes; } /* * Reads a single data block from command tool. * It reads as more data as is available on the other size and will fit into * a memory buffer - buf. When EOD encountered during reading it will set * f_eod flag, so checking this flag is mandatory! * * in: * bpContext - for Bacula debug jobinfo messages * buf - a memory buffer for data * len - the length of the memory buffer - buf * out: * -1 - when we've got any error; the function reports it to Bacula when * ctx is not NULL * when no more data to read - EOD * - the size of received data */ int32_t DKCOMMCTX::read_data(bpContext *ctx, POOLMEM *buf, int32_t len) { int status; int nbytes; int rbytes; int timeout; if (buf == NULL || len < 1){ /* we have no space to read data */ f_error = true; DMSG0(ctx, DERROR, "No space to read data from command tool.\n"); JMSG0(ctx, is_fatal() ? M_FATAL : M_ERROR, "No space to read data from command tool.\n"); return -1; } if (is_closed()){ f_error = true; DMSG0(ctx, DERROR, "BPIPE to command tool is closed, cannot get data.\n"); JMSG0(ctx, is_fatal() ? M_FATAL : M_ERROR, "BPIPE to command tool is closed, cannot get data.\n"); return -1; } /* we will read no more then len bytes available in the buf */ nbytes = len; rbytes = 0; /* clear flags */ f_eod = f_error = f_fatal = false; timeout = 200; // timeout of 200ms while (nbytes){ status = fread(buf + rbytes, 1, nbytes, bpipe->rfd); if (status == 0){ berrno be; if (ferror(bpipe->rfd) != 0){ f_error = true; DMSG(ctx, DERROR, "BPIPE read error: ERR=%s\n", be.bstrerror()); JMSG(ctx, is_fatal() ? M_FATAL : M_ERROR, "BPIPE read error: ERR=%s\n", be.bstrerror()); return -1; } if (feof(bpipe->rfd) != 0){ f_eod = true; return rbytes; } bmicrosleep(0, 1000); // sleep 1mS if (!timeout--){ /* reach timeout*/ f_error = true; DMSG0(ctx, DERROR, "BPIPE read timeout.\n"); JMSG0(ctx, is_fatal() ? M_FATAL : M_ERROR, "BPIPE read timeout.\n"); return -1; } } else { timeout = 200; // reset timeout } nbytes -= status; rbytes += status; } return rbytes; } /* * Sends a raw data block to command tool. 
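 * Writes that make no progress are retried in 1ms steps for up to ~200
 * consecutive attempts; a longer stall is treated as a write timeout and
 * reported as an error.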
* * in: * bpContext - for Bacula debug and jobinfo messages * buf - a message buffer contains data to send * len - the length of the data to send * out: * -1 - when encountered any error * - the number of bytes sent, success */ int32_t DKCOMMCTX::write_data(bpContext *ctx, POOLMEM *buf, int32_t len) { int status; int nbytes; int wbytes; int timeout; if (buf == NULL){ /* we have no data to write */ f_error = true; DMSG0(ctx, DERROR, "No data to send to command tool.\n"); JMSG0(ctx, is_fatal() ? M_FATAL : M_ERROR, "No data to send to command tool.\n"); return -1; } if (is_closed()){ f_error = true; DMSG0(ctx, DERROR, "BPIPE to command tool is closed, cannot send data.\n"); JMSG0(ctx, is_fatal() ? M_FATAL : M_ERROR, "BPIPE to command tool is closed, cannot send data.\n"); return -1; } /* we will write len bytes available in the buf */ nbytes = len; wbytes = 0; /* clear flags */ f_eod = f_error = f_fatal = false; timeout = 200; // timeout of 200ms while (nbytes){ status = fwrite(buf + wbytes, 1, nbytes, bpipe->wfd); if (status == 0){ berrno be; if (ferror(bpipe->wfd) != 0){ f_error = true; DMSG(ctx, DERROR, "BPIPE write error: ERR=%s\n", be.bstrerror()); JMSG(ctx, is_fatal() ? M_FATAL : M_ERROR, "BPIPE write error: ERR=%s\n", be.bstrerror()); return -1; } bmicrosleep(0, 1000); // sleep 1mS if (!timeout--){ /* reached timeout*/ f_error = true; DMSG0(ctx, DERROR, "BPIPE write timeout.\n"); JMSG0(ctx, is_fatal() ? M_FATAL : M_ERROR, "BPIPE write timeout.\n"); return -1; } } else { timeout = 200; // reset timeout } nbytes -= status; wbytes += status; } return wbytes; } // /* // * Render a command tool parameter for string value. // * // * in: // * bpContext - for Bacula debug and jobinfo messages // * param - a pointer to the param variable where we will render a parameter // * pname - a name of the parameter to compare // * fmt - a low-level parameter name // * name - a name of the parameter from parameter list // * value - a value to render // * out: // * True if parameter was rendered // * False if it was not the parameter required // */ // bool DKCOMMCTX::render_param(bpContext *ctx, POOLMEM **param, const char *pname, const char *fmt, const char *name, char *value) // { // if (bstrcasecmp(name, pname)){ // if (!*param){ // *param = get_pool_memory(PM_NAME); // Mmsg(*param, " -%s '%s' ", fmt, value); // DMSG(ctx, DDEBUG, "render param:%s\n", *param); // } // return true; // } // return false; // } // /* // * Render a command tool parameter for integer value. // * // * in: // * bpContext - for Bacula debug and jobinfo messages // * param - a pointer to the param variable where we will render a parameter // * pname - a name of the parameter to compare // * fmt - a low-level parameter name // * name - a name of the parameter from parameter list // * value - a value to render // * out: // * True if parameter was rendered // * False if it was not the parameter required // */ // bool DKCOMMCTX::render_param(bpContext *ctx, POOLMEM **param, const char *pname, const char *fmt, const char *name, int value) // { // if (bstrcasecmp(name, pname)){ // if (!*param){ // *param = get_pool_memory(PM_NAME); // Mmsg(*param, " -%s %d ", value); // DMSG(ctx, DDEBUG, "render param:%s\n", *param); // } // return true; // } // return false; // } // /* // * Render a command tool parameter for string value. 
// * // * in: // * bpContext - for Bacula debug and jobinfo messages // * param - a pointer to the param variable where we will render a parameter // * pname - a name of the parameter to compare // * name - a name of the parameter from parameter list // * value - a value to render // * out: // * True if parameter was rendered // * False if it was not the parameter required // */ // bool DKCOMMCTX::render_param(bpContext *ctx, POOLMEM **param, const char *pname, const char *name, char *value) // { // if (bstrcasecmp(name, pname)){ // if (!*param){ // *param = get_pool_memory(PM_NAME); // Mmsg(*param, "%s", value); // DMSG(ctx, DDEBUG, "render param:%s\n", *param); // } // return true; // } // return false; // } // /* // * Setup DKCOMMCTX parameter for boolean value. // * // * in: // * bpContext - for Bacula debug and jobinfo messages // * param - a pointer to the param variable where we will render a parameter // * pname - a name of the parameter to compare // * name - a name of the parameter from parameter list // * value - a value to render // * out: // * True if parameter was rendered // * False if it was not the parameter required // */ // bool DKCOMMCTX::render_param(bpContext *ctx, bool *param, const char *pname, const char *name, bool value) // { // if (bstrcasecmp(name, pname)){ // if (param){ // *param = value; // DMSG2(ctx, DDEBUG, "render param: %s=%s\n", pname, *param ? "True" : "False"); // } // return true; // } // return false; // } // /* // * Setup DKCOMMCTX parameter for int32_t value. // * // * in: // * bpContext - for Bacula debug and jobinfo messages // * param - a pointer to the param variable where we will render a parameter // * pname - a name of the parameter to compare // * name - a name of the parameter from parameter list // * value - a value to render // * out: // * True if parameter was rendered // * False if it was not the parameter required // */ // bool DKCOMMCTX::render_param(bpContext *ctx, int32_t *param, const char *pname, const char *name, int32_t value) // { // if (bstrcasecmp(name, pname)){ // if (param){ // *param = value; // DMSG2(ctx, DDEBUG, "render param: %s=%d\n", pname, *param); // } // return true; // } // return false; // } // /* // * Setup DKCOMMCTX parameter for boolean from string value. // * The parameter value will be false if value start with '0' character and // * will be true in any other case. So, when a plugin will have a following: // * param // * param=... // * param=1 // * then a param will be set to true. // * // * in: // * bpContext - for Bacula debug and jobinfo messages // * param - a pointer to the param variable where we will render a parameter // * pname - a name of the parameter to compare // * name - a name of the parameter from parameter list // * value - a value to render // * out: // * True if parameter was rendered // * False if it was not the parameter required // */ // bool DKCOMMCTX::parse_param(bpContext *ctx, bool *param, const char *pname, const char *name, char *value) // { // if (bstrcasecmp(name, pname)){ // if (value && *value == '0'){ // *param = false; // } else { // *param = true; // } // DMSG2(ctx, DINFO, "%s parameter: %s\n", name, *param ? "True" : "False"); // return true; // } // return false; // } // /* // * Setup DKCOMMCTX parameter for integer from string value. 
// * // * in: // * bpContext - for Bacula debug and jobinfo messages // * param - a pointer to the param variable where we will render a parameter // * pname - a name of the parameter to compare // * name - a name of the parameter from parameter list // * value - a value to render // * out: // * True if parameter was rendered // * False if it was not the parameter required // */ // bool DKCOMMCTX::parse_param(bpContext *ctx, int32_t *param, const char *pname, const char *name, char *value) // { // if (value && bstrcasecmp(name, pname)){ // /* convert str to integer */ // *param = atoi(value); // if (*param == 0){ // /* error in conversion */ // f_error = true; // DMSG2(ctx, DERROR, "Invalid %s parameter: %s\n", name, value); // JMSG2(ctx, M_ERROR, "Invalid %s parameter: %s\n", name, value); // return false; // } // DMSG2(ctx, DINFO, "%s parameter: %d\n", name, *param); // return true; // } // return false; // } /* * Setup DKCOMMCTX parameter for DOCKER_BACKUP_MODE_T from string value. * supported values are: pause, nopause * any other will be ignored. * * in: * bpContext - for Bacula debug and jobinfo messages * param - a pointer to the param variable where we will render a parameter * pname - a name of the parameter to compare * name - a name of the parameter from parameter list * value - a value to render * out: * True if parameter was rendered * False if it was not the parameter required */ bool DKCOMMCTX::parse_param_mode(bpContext *ctx, DOCKER_BACKUP_MODE_T *param, const char *pname, const char *name, char *value) { if (bstrcasecmp(name, pname)){ if (value){ if (strcasecmp(value, "pause") == 0){ *param = DKPAUSE; } else if (strcasecmp(value, "nopause") == 0){ *param = DKNOPAUSE; } } switch (*param){ case DKPAUSE: DMSG(ctx, DINFO, "%s parameter: DKPAUSE\n", name); break; case DKNOPAUSE: DMSG(ctx, DINFO, "%s parameter: DKNOPAUSE\n", name); break; } return true; } return false; } // /* // * Render and add a parameter for string value to alist. // * When alist is NULL (uninitialized) then it creates a new list to use. // * // * in: // * bpContext - for Bacula debug and jobinfo messages // * list - pointer to alist class to use // * pname - a name of the parameter to compare // * name - a name of the parameter from parameter list // * value - a value to render // * out: // * True if parameter was rendered // * False if it was not the parameter required // */ // bool DKCOMMCTX::add_param_str(bpContext *ctx, alist **list, const char *pname, const char *name, char *value) // { // POOLMEM *param; // if (bstrcasecmp(name, pname)){ // if (!*list){ // *list = New(alist(8, not_owned_by_alist)); // } // param = get_pool_memory(PM_NAME); // Mmsg(param, "%s", value); // (*list)->append(param); // DMSG2(ctx, DDEBUG, "add param: %s=%s\n", name, value); // return true; // } // return false; // } // /* // * Render and set a parameter for string value. // * When param is NULL (uninitialized) then it allocates a new string. 
// * // * in: // * bpContext - for Bacula debug and jobinfo messages // * list - pointer to alist class to use // * pname - a name of the parameter to compare // * name - a name of the parameter from parameter list // * value - a value to render // * out: // * True if parameter was rendered // * False if it was not the parameter required // */ // bool DKCOMMCTX::parse_param(bpContext *ctx, POOLMEM **param, const char *pname, const char *name, char *value) // { // if (bstrcasecmp(name, pname)){ // if (!*param){ // *param = get_pool_memory(PM_NAME); // } // pm_strcpy(param, value); // DMSG2(ctx, DDEBUG, "add param: %s=%s\n", name, value); // return true; // } // return false; // } /* * Parse a restore plugin parameters for DKCOMMCTX class (single at a time). * * in: * bpContext - for Bacula debug and jobinfo messages * item - restore plugin parameter to parse * out: * if parameter found it will be rendered in class variable */ void DKCOMMCTX::parse_parameters(bpContext *ctx, ini_items &item) { /* container_create variable */ if (setup_param(param_container_create, "container_create", item.name, item.val.boolval)){ return; } /* container_create variable */ if (setup_param(param_container_run, "container_run", item.name, item.val.boolval)){ return; } /* container_create variable */ if (setup_param(param_container_imageid, "container_imageid", item.name, item.val.boolval)){ return; } /* container_create variable */ if (setup_param(param_container_defaultnames, "container_defaultnames", item.name, item.val.boolval)){ return; } /* docker_host variable */ if (setup_param(param_docker_host, "docker_host", item.name, item.val.strval)){ return; } /* timeout variable */ if (setup_param(param_timeout, "timeout", item.name, item.val.int32val)){ return; } f_error = true; DMSG(ctx, DERROR, "INI: Unknown parameter: %s\n", item.name); JMSG(ctx, M_ERROR, "INI: Unknown parameter: %s\n", item.name); } /* * Check and render DKCOMMCTX required parameters (single at a time). 
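 * Illustrative FileSet entry (plugin prefix and values are examples only):
 *   Plugin = "docker: container=web01 mode=pause timeout=60 allvolumes"
 * Each key[=value] pair arrives here as a separate argk/argv call.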
* * in: * bpContext - for Bacula debug and jobinfo messages * argk - the parameter name * argv - the parameter value (it could be null) * out: * bRC_OK - when parameter found and rendered successfully * bRC_Error - on any error * bRC_Max - when parameter not found and should be checked elsewhere */ bRC DKCOMMCTX::parse_parameters(bpContext *ctx, char *argk, char *argv) { /* check abort_on_error parameter */ if (parse_param(abort_on_error, "abort_on_error", argk, argv)){ return bRC_OK; } /* check allvolumes parameter */ if (parse_param(all_vols_to_backup, "allvolumes", argk, argv)){ return bRC_OK; } /* check and handle container list */ if (parse_param_add_str(¶m_container, "container", argk, argv)){ return bRC_OK; } /* check and handle include_container list */ if (parse_param_add_str(¶m_include_container, "include_container", argk, argv)){ return bRC_OK; } /* check and handle exclude_container list */ if (parse_param_add_str(¶m_exclude_container, "exclude_container", argk, argv)){ return bRC_OK; } /* check and handle image list */ if (parse_param_add_str(¶m_image, "image", argk, argv)){ return bRC_OK; } /* check and handle include_image list */ if (parse_param_add_str(¶m_include_image, "include_image", argk, argv)){ return bRC_OK; } /* check and handle exclude_image list */ if (parse_param_add_str(¶m_exclude_image, "exclude_image", argk, argv)){ return bRC_OK; } /* check and handle volume list */ if (parse_param_add_str(¶m_volume, "volume", argk, argv)){ return bRC_OK; } /* check and handle timeout parameter */ if (parse_param(param_timeout, "timeout", argk, argv)){ return bRC_OK; } /* check mode parameter */ if (parse_param_mode(ctx, ¶m_mode, "mode", argk, argv)){ return bRC_OK; } /* check docker_host parameter */ if (parse_param(param_docker_host, "docker_host", argk, argv)){ return bRC_OK; } /* parameter unknown */ return bRC_Max; } /* * Used for dumping current restore object contents for debugging. */ void DKCOMMCTX::dump_robjdebug(bpContext* ctx, restore_object_pkt* rop) { POOL_MEM out(PM_FNAME); if (rop){ out.check_size(rop->object_len + 1); pm_memcpy(out, rop->object, rop->object_len); DMSG1(ctx, DERROR, "failed restore object:\n%s\n", out.c_str()); } } /* * Parse a Restore Object saved during backup and modified by user during restore. * Every RO received will allocate a dedicated command context which is used * by bEventRestoreCommand to handle default parameters for restore. * * in: * bpContext - for Bacula debug and jobinfo messages * rop - a restore object structure to parse * out: * bRC_OK - on success * bRC_Error - on error */ bRC DKCOMMCTX::parse_restoreobj(bpContext *ctx, restore_object_pkt *rop) { int i; DMSG(ctx, DINFO, "INIcmd: %s\n", command); if (!ini){ ini = new ConfigFile(); } if (!ini->dump_string(rop->object, rop->object_len)){ DMSG0(ctx, DERROR, "ini->dump_string failed.\n"); dump_robjdebug(ctx, rop); return bRC_OK; } ini->register_items(plugin_items_dump, sizeof(struct ini_items)); if (!ini->parse(ini->out_fname)){ DMSG0(ctx, DERROR, "ini->parse failed.\n"); dump_robjdebug(ctx, rop); return bRC_OK; } for (i=0; ini->items[i].name; i++){ if (ini->items[i].found){ parse_parameters(ctx, ini->items[i]); } } return bRC_OK; } /* * Sets all to backup variables. 
* * in: * bpContext - for Bacula debug and jobinfo messages * out: * none, internal variables set */ void DKCOMMCTX::set_all_to_backup(bpContext* ctx) { set_all_containers_to_backup(ctx); set_all_images_to_backup(ctx); set_all_volumes_to_backup(ctx); all_to_backup = true; } /* * Sets objs_to_backup list for all containers backup. */ void DKCOMMCTX::set_all_containers_to_backup(bpContext *ctx) { DKINFO *container; if (all_containers){ foreach_alist(container, all_containers){ objs_to_backup->append(container); }; } all_to_backup = true; }; /* * Sets objs_to_backup list for all images backup. */ void DKCOMMCTX::set_all_images_to_backup(bpContext *ctx) { DKINFO *image; if (all_images){ foreach_alist(image, all_images){ objs_to_backup->append(image); }; } all_to_backup = true; }; /* * Sets objs_to_backup list for all volumes backup. */ void DKCOMMCTX::set_all_volumes_to_backup(bpContext *ctx) { DKINFO *volume; if (all_volumes){ foreach_alist(volume, all_volumes){ objs_to_backup->append(volume); }; } all_to_backup = true; }; /* * Sets objs_to_backup list for all containers or images which match the * container/image id or container/image names parameters from plugin * command parameters. * * in: * bpContext - for Bacula debug and jobinfo messages * params - a list of parameters to compare * dklist - a list of containers/images available * estimate - set to true when doing estimate * out: * this->objs_to_backup updated if required */ void DKCOMMCTX::filter_param_to_backup(bpContext *ctx, alist *params, alist *dklist, bool estimate) { DKID dkid; DKINFO *dkinfo; POOLMEM *pobj; bool found; if (params){ /* container parameter configured */ foreach_alist(pobj, params){ found = false; foreach_alist(dkinfo, dklist){ DMSG3(ctx, DDEBUG, "compare: %s/%s vs %s\n", (char*)*dkinfo->id(), dkinfo->name(), pobj); /* we have to check container id or container names */ dkid = pobj; if (bstrcmp(pobj, dkinfo->name()) || dkid == *(dkinfo->id()) || bstrcmp(pobj, dkinfo->get_image_repository())){ /* container or image to backup found */ objs_to_backup->append(dkinfo); found = true; DMSG3(ctx, DINFO, "adding %s to backup (1): %s (%s)\n", dkinfo->type_str(), dkinfo->name(), (char*)*dkinfo->id()); break; }; }; if (!found){ /* docker object not found */ f_error = true; if (!estimate){ DMSG1(ctx, DERROR, "Not found to backup: %s!\n", pobj); JMSG1(ctx, is_fatal() ? M_FATAL : M_ERROR, "Not found to backup: %s!\n", pobj); } else { DMSG1(ctx, DERROR, "Not found to estimate: %s!\n", pobj); JMSG1(ctx, is_fatal() ? M_FATAL : M_ERROR, "Not found to estimate: %s!\n", pobj); }; }; }; }; }; /* * It is called when 'allvolumes' parameter is set for backup and add * a volumes mounted in containers selected to backup by reading * a Mounts parameter list from docker container. 
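 *
 * Illustrative example (hypothetical names): if container "mcache1" is
 * selected for backup and its mounts string reads "logvol,etcvol", the
 * loop below splits that comma separated list and appends the matching
 * DKINFO volume entries from all_volumes to objs_to_backup, unless a
 * volume was already selected explicitly.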
* * in: * bpContext - required for debug/job messages * out: * none */ void DKCOMMCTX::add_container_volumes_to_backup(bpContext* ctx) { DKINFO *container; DKINFO *volume; DKINFO *obj; char *p, *q; POOL_MEM buf(PM_MESSAGE); int len; bool found; alist containerlist(16, not_owned_by_alist); DMSG0(ctx, DDEBUG, "add_container_volumes_to_backup called\n"); /* prepare containers to backup list */ foreach_alist(container, objs_to_backup){ if (container->type() == DOCKER_CONTAINER){ containerlist.append(container); } } /* proceed if any container to backup */ if (!containerlist.empty()){ foreach_alist(container, &containerlist){ DMSG1(ctx, DDEBUG, "processing container: %s\n", container->get_container_id()); p = container->get_container_mounts(); if (p != NULL && *p != 0){ /* the container has mounts, so iterate on them and check volumes to backup */ len = strlen(p); pm_strcpy(buf, p); p = buf.c_str(); while (*p != 0){ if ((q = strchr(p, ',')) != NULL){ *q = 0; // terminate comma as a string separator } else { q = buf.c_str() + len - 1; } DMSG1(ctx, DDEBUG, "volmount: %s\n", p); /* at 'p' we have mounted docker volume name as string * check if volume already selected for backup */ found = false; foreach_alist(obj, objs_to_backup){ if (obj->type() == DOCKER_VOLUME && bstrcmp(obj->get_volume_name(), p)){ found = true; DMSG0(ctx, DDEBUG, "volume found in objs_to_backup, good!\n"); break; } }; /* not? simple check in volume list and add it to backup */ if (!found){ foreach_alist(volume, all_volumes){ if (bstrcmp(volume->get_volume_name(), p)){ /* this volume we should add for backup */ objs_to_backup->append(volume); DMSG0(ctx, DDEBUG, "adding volume to backup!\n"); break; } }; } /* next in list */ p = q + 1; } } } } DMSG0(ctx, DDEBUG, "add_container_volumes_to_backup finish.\n"); }; /* * It creates a list of volumes to backup for a particular container * which are selected manually to backup and should be reflected in * catalog database as a volumes links. It is called after 'allvolumes' * parameter verification. * * in: * bpContext - required for debug/job messages * out: * none */ void DKCOMMCTX::select_container_vols(bpContext* ctx) { DKINFO *container; DKINFO *volume; DKVOLS *vols; char *p, *q; alist vollist(16, not_owned_by_alist); int len; POOL_MEM buf(PM_MESSAGE); DMSG0(ctx, DDEBUG, "select_container_vols called\n"); /* prepare volume to backup list */ foreach_alist(volume, objs_to_backup){ if (volume->type() == DOCKER_VOLUME){ vollist.append(volume); } } /* proceed if any volume to backup */ if (!vollist.empty()){ foreach_alist(container, objs_to_backup){ if (container->type() == DOCKER_CONTAINER){ DMSG1(ctx, DDEBUG, "processing container: %s\n", container->get_container_id()); p = container->get_container_mounts(); if (p != NULL && *p != 0){ /* the container has mounts, so iterate on them and check volumes to backup */ len = strlen(p); pm_strcpy(buf, p); p = buf.c_str(); while (*p != 0){ if ((q = strchr(p, ',')) != NULL){ *q = 0; // terminate comma as a string separator } else { q = buf.c_str() + len - 1; } DMSG1(ctx, DDEBUG, "volmount: %s\n", p); if (*p != '/'){ foreach_alist(volume, &vollist){ if (bstrcmp(volume->get_volume_name(), p)){ volume->inc_volume_linknr(); vols = New(DKVOLS(volume)); update_vols_mounts(ctx, container, vols); container->container_append_vols(vols); DMSG0(ctx, DDEBUG, "adding to vols\n"); break; } }; } /* next param */ p = q + 1; } } } }; } DMSG0(ctx, DDEBUG, "select_container_vols finish.\n"); }; /* * Checks for common Docker error strings. 
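 *
 * Only two well known message prefixes are matched below (using strncmp
 * against the start of buf): "Cannot connect to the Docker daemon" and
 * "Unable to find image '<BACULATARIMAGE>' locally"; any other docker
 * output is left for the caller to interpret.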
* * in: * bpContext - required for debug/job messages * buf - the string to scan * out: * true - when a common error found * false - no common errors found */ bool DKCOMMCTX::check_for_docker_errors(bpContext* ctx, char* buf) { const char *err1 = "Cannot connect to the Docker daemon"; const char *err2 = "Unable to find image '" BACULATARIMAGE "' locally"; int len; /* no docker running error */ len = strlen(err1); if (strncmp(buf, err1, len) == 0){ DMSG1(ctx, DERROR, "no docker running error! Err=%s\n", buf); JMSG0(ctx, abort_on_error ? M_FATAL : M_ERROR, "No Docker is running. Cannot continue!\n"); return true; } /* cannot find baculatar image */ len = strlen(err2); if (strncmp(buf, err2, len) == 0){ DMSG1(ctx, DERROR, "cannot find baculatar image! Err=%s\n", buf); JMSG0(ctx, abort_on_error ? M_FATAL : M_ERROR, "Docker is unable to find required Bacula container backup image. Cannot continue!\n"); return true; } return false; }; /* * Sets objs_to_backup list for all containers or images which match the * container/image names based on include_* / exclude_* regex parameters from plugin * command parameters. * * in: * bpContext - for Bacula debug and jobinfo messages * params_include - a list of include regex parameters to match * params_exclude - a list of exclude regex parameters to not match * dklist - a list of containers/images available * out: * this->objs_to_backup updated if required */ void DKCOMMCTX::filter_incex_to_backup(bpContext* ctx, alist* params_include, alist *params_exclude, alist* dklist) { alist inex_list(16, not_owned_by_alist); POOLMEM *expr; bool found; int options = REG_EXTENDED | REG_ICASE; int rc, indx; char prbuf[500]; DKINFO *dkinfo; /* prepare a list of objects from include regex */ if (params_include){ foreach_alist(expr, params_include){ DMSG(ctx, DDEBUG, "processing include: %s\n", expr); rc = regcomp(&preg, expr, options); if (rc != 0) { f_error = true; regerror(rc, &preg, prbuf, sizeof(prbuf)); DMSG(ctx, DERROR, "include regex compilation error: %s\n", prbuf); JMSG(ctx, is_fatal() ? M_FATAL : M_ERROR, "include_container regex compilation error: %s\n", prbuf); continue; } /* include regex compiled, so iterate through all_containers */ foreach_alist(dkinfo, dklist){ rc = regexec(&preg, dkinfo->name(), 0, NULL, 0); if (rc == 0){ /* found */ inex_list.append(dkinfo); DMSG2(ctx, DDEBUG, "include %s found: %s\n", dkinfo->type_str(), dkinfo->name()); } }; regfree(&preg); }; } /* exclude objects from include list using exclude regex */ if (params_exclude){ foreach_alist(expr, params_exclude){ DMSG(ctx, DDEBUG, "processing exclude: %s\n", expr); rc = regcomp(&preg, expr, options); if (rc != 0) { f_error = true; regerror(rc, &preg, prbuf, sizeof(prbuf)); DMSG(ctx, DERROR, "exclude regex compilation error: %s\n", prbuf); JMSG(ctx, is_fatal() ? 
M_FATAL : M_ERROR, "exclude regex compilation error: %s\n", prbuf); continue; } /* iterate through objects list found used params_include */ found = true; while (found){ foreach_alist(dkinfo, &inex_list){ DMSG2(ctx, DDEBUG, "exclude processing %s: %s\n", dkinfo->type_str(), dkinfo->name()); rc = regexec(&preg, dkinfo->name(), 0, NULL, 0); if (rc == 0){ /* found */ indx = inex_list.current() - 1; DMSG(ctx, DVDEBUG, "inex_list_indx: %d\n", indx); inex_list.remove(indx); /* we have to start again as inex_list->cur_item points to the wrong position */ DMSG2(ctx, DDEBUG, "exclude %s found: %s\n", dkinfo->type_str(), dkinfo->name()); break; } }; if (!dkinfo){ DMSG0(ctx, DDEBUG, "exclude no more objects to check\n"); found = false; } } regfree(&preg); }; } if (inex_list.size()){ /* move dkinfos to objs_to_backup list */ foreach_alist(dkinfo, &inex_list){ objs_to_backup->append(dkinfo); DMSG3(ctx, DINFO, "adding %s to backup (2): %s (%s)\n", dkinfo->type_str(), dkinfo->name(), (char*)*dkinfo->id()); }; } }; /* * Prepares a DKCOMMCTX class for a single Plugin parameters for backup and estimate jobs. * The main purpose is to set a objs_to_backup list for a list of vms to backup. * * in: * bpContext - for Bacula debug and jobinfo messages * estimate - if the preparation for estimate (true) or backup (false) job * out: * bRC_OK - when preparation was successful * bRC_Error - on any error */ bRC DKCOMMCTX::prepare_bejob(bpContext *ctx, bool estimate) { /* get list of all objects */ if (!get_all_containers(ctx) || !get_all_images(ctx)){ return bRC_Error; } /* when docker_host defined then skip all volumes */ if (strlen(param_docker_host.c_str()) == 0 && !get_all_volumes(ctx)){ return bRC_Error; } /* when no volume/container/image/include/exclude parameters found that all objects should be saved */ if (!param_container && !param_image && !param_include_container && !param_exclude_container && !param_include_image && !param_exclude_image && !param_volume){ set_all_to_backup(ctx); } else { all_to_backup = false; /* find all objects on param_* lists */ filter_param_to_backup(ctx, param_container, all_containers, estimate); filter_param_to_backup(ctx, param_image, all_images, estimate); if (param_volume && strlen(param_docker_host.c_str()) == 0){ filter_param_to_backup(ctx, param_volume, all_volumes, estimate); } /* handle include/exclude regex for containers and images only */ filter_incex_to_backup(ctx, param_include_container, param_exclude_container, all_containers); filter_incex_to_backup(ctx, param_include_image, param_exclude_image, all_images); /* handle allvolumes for containers backup */ if (all_vols_to_backup && strlen(param_docker_host.c_str()) == 0){ add_container_volumes_to_backup(ctx); } /* generate a warning message if required */ if ((param_volume || all_vols_to_backup) && strlen(param_docker_host.c_str()) > 0){ DMSG0(ctx, DINFO, "Docker Volume backup with docker_host is unsupported!\n"); JMSG0(ctx, M_WARNING, "Docker Volume backup with docker_host is unsupported!\n"); } } select_container_vols(ctx); return bRC_OK; } /* * Prepares a DKCOMMCTX class for a single Plugin parameters for restore job. * The main purpose is to handle storage_res restore parameter. 
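 *
 * Note: the body below is currently a placeholder that only logs a debug
 * message and returns bRC_OK; restore-time parameters are instead applied
 * through parse_restoreobj() and parse_parameters() above.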
* * in: * bpContext - for Bacula debug and jobinfo messages * out: * bRC_OK - when preparation was successful * bRC_Error - on any error (unimplemented) */ bRC DKCOMMCTX::prepare_restore(bpContext* ctx) { DMSG0(ctx, DDEBUG, "prepare_restore called\n"); return bRC_OK; } /* * Setup DKINFO class values based on object type and string parameters from * docker command output. * * in: * bpContext - for Bacula debug and jobinfo messages * type - a docker object type * paramtab - a table of docker command output values * dkinfo - a class to setup * out: * dkinfo updated */ void DKCOMMCTX::setup_dkinfo(bpContext* ctx, DKINFO_OBJ_t type, char *paramtab[], DKINFO *dkinfo) { switch (type){ case DOCKER_CONTAINER: setup_container_dkinfo(ctx, paramtab, dkinfo); break; case DOCKER_IMAGE: setup_image_dkinfo(ctx, paramtab, dkinfo); break; case DOCKER_VOLUME: setup_volume_dkinfo(ctx, paramtab, dkinfo); break; } }; /* * Setup DKINFO container class values based on string parameters from docker command output. * It is required to setup a following parameters in paramtab array * [0] - container id * [1] - container name * [2] - container size * other parameters will be ignored. * * in: * bpContext - for Bacula debug and jobinfo messages * paramtab - a table of docker command output values * dkinfo - a class to setup * out: * dkinfo updated */ void DKCOMMCTX::setup_container_dkinfo(bpContext* ctx, char *paramtab[], DKINFO *dkinfo) { dkinfo->set_container_id(paramtab[0]); dkinfo->set_container_names(paramtab[1]); dkinfo->scan_container_size(paramtab[2]); dkinfo->set_container_mounts(paramtab[3]); DMSG3(ctx, DINFO, "setup_container_dkinfo: %s %s %d\n", (char*)*dkinfo->get_container_id(), dkinfo->get_container_names(), dkinfo->get_container_size()); DMSG1(ctx, DINFO, "setup_container_dkinfo: %s\n", dkinfo->get_container_mounts()); }; /* * Setup DKINFO image class values based on string parameters from docker command output. * It is required to setup a following parameters in paramtab array * [0] - image id * [1] - image repository * [2] - image tag * [3] - image size * [4] - image creation date * other parameters will be ignored. * * in: * bpContext - for Bacula debug and jobinfo messages * paramtab - a table of docker command output values * dkinfo - a class to setup * out: * dkinfo updated */ void DKCOMMCTX::setup_image_dkinfo(bpContext* ctx, char *paramtab[], DKINFO *dkinfo) { dkinfo->set_image_id(paramtab[0]); dkinfo->set_image_repository(paramtab[1]); dkinfo->set_image_tag(paramtab[2]); dkinfo->scan_image_size(paramtab[3]); dkinfo->set_image_created(str_to_utime(paramtab[4])); DMSG3(ctx, DINFO, "setup_image_dkinfo: %s %s : %s\n", (char*)*dkinfo->get_image_id(), dkinfo->get_image_repository(), dkinfo->get_image_tag()); DMSG2(ctx, DINFO, "setup_image_dkinfo: %d %ld\n", dkinfo->get_image_size(), dkinfo->get_image_created()); }; /* * Setup DKINFO volume class values based on string parameters from docker command output. * It is required to setup a following parameters in paramtab array * [0] - volume name * [1] - volume size * other parameters will be ignored. 
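 *
 * Example input row (illustrative), in the tab separated form produced by
 * the "volume ls" format string used in get_all_volumes():
 *   0a5f9bf16602f2de6c9e701e6fb6d4ce1292ee336348c7c2624f4a08aaacebc4<TAB>N/A
 * which yields paramtab[0] = the volume name and paramtab[1] = "N/A" when
 * Docker does not report a size.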
* * To get a volume created time you have to inspect details of the particular volume: * $ docker volume inspect --format "{{.CreatedAt}}" 0a5f9bf16602f2de6c9e701e6fb6d4ce1292ee336348c7c2624f4a08aaacebc4 * 2019-06-21T17:11:33+02:00 * * in: * bpContext - for Bacula debug and jobinfo messages * paramtab - a table of docker command output values * dkinfo - a class to setup * out: * dkinfo updated */ void DKCOMMCTX::setup_volume_dkinfo(bpContext* ctx, char *paramtab[], DKINFO *dkinfo) { dkinfo->set_volume_name(paramtab[0]); dkinfo->scan_volume_size(paramtab[1]); DMSG2(ctx, DINFO, "setup_volume_dkinfo: %s %ld\n", dkinfo->get_volume_name(), dkinfo->get_volume_size()); }; /* * Setup a list of objects based on docker command execution. * The docker command should format its output into a number of columns separated by a tab character - '/t' * The function support no more then 10 columns to scan. In current implementation we are using no more then 6. * It will setup a list of all docker objects at 'dklist' list based on 'type' of docker objects. * * in: * bpContext - for Bacula debug and jobinfo messages * cmd - a docker command to execute * cols - a number of columns to scan * dklist - a pointer to dklist variable to update * type - a docker object type to setup * out: * dklist - a new list allocated and populated */ alist *DKCOMMCTX::get_all_list_from_docker(bpContext* ctx, const char *cmd, int cols, alist **dklist, DKINFO_OBJ_t type) { POOL_MEM out(PM_MESSAGE); int a; char *paramtab[10]; int status; DKINFO *dkinfo; char *p, *q, *t; if (cols > 10){ DMSG1(ctx, DERROR, "BUG! unsupported number of parameter columns: %d\n", cols); JMSG1(ctx, M_FATAL, "Unsupported number of parameter columns: %d You should call a support!\n", cols); return NULL; } /* invalid pointer to list*/ if (!dklist){ DMSG0(ctx, DERROR, "BUG! 
invalid pointer to dklist\n"); return NULL; } if (!*dklist){ DMSG0(ctx, DINFO, "get_all_list_from_docker called\n"); /* first get all containers list */ if (!execute_command(ctx, cmd)){ /* some error executing command */ DMSG0(ctx, DERROR, "get_all_list_from_docker execution error\n"); return NULL; } /* allocate a new list */ *dklist = New(alist(32, not_owned_by_alist)); memset(out.c_str(), 0, out.size()); if ((status = read_output(ctx, out)) > 0){ /* we read a string, so terminate it with nul char */ p = out.c_str(); p[status] = 0; while (*p != 0 && (q = strchr(p, '\n')) != NULL){ /* p is the start of the string and q is the end of line */ *q = 0; // q+1 will be the next line DMSG(ctx, DVDEBUG, "get_all_list_from_docker scanning: %s\n", p); if (check_for_docker_errors(ctx, p)){ goto bailout; // TODO: remove this goto } t = p; /* expect 5 tabs-separators for 6 parameters handled in-place */ for (a = 0; a < cols; a++){ paramtab[a] = t; t = strchr(t, '\t'); if (t != NULL){ *t = 0; t++; // next param } else { break; // finish scanning } } for (a = 0; a < cols; a++){ DMSG2(ctx, DDEBUG, "get_all_list_from_docker paramtab[%d]: %s\n", a, paramtab[a]); } /* so, the single line is between ( p ; q ) and consist of 6 nul terminated string parameters */ dkinfo = New(DKINFO(type)); setup_dkinfo(ctx, type, paramtab, dkinfo); (*dklist)->append(dkinfo); if (dkinfo->type() != DOCKER_VOLUME){ DMSG3(ctx, DDEBUG, "found %s: %s -> %s\n", dkinfo->type_str(), (char*)*dkinfo->id(), dkinfo->name()); } else { DMSG2(ctx, DDEBUG, "found %s: %s\n", dkinfo->type_str(), dkinfo->name()); } /* next line */ DMSG0(ctx, DVDEBUG, "get_all_list_from_docker next line\n"); p = q + 1; } } else { DMSG0(ctx, DINFO, "get_all_list_from_docker no container found.\n"); } terminate(ctx); } else { DMSG1(ctx, DINFO, "get_all_list_from_docker used cached data: %p\n", *dklist); } bailout: DMSG0(ctx, DINFO, "get_all_list_from_docker finish.\n"); return *dklist; } /* * Updates a docker volume mount point in docker list of mounted vols * for proper volume support file (link) name rendering as: -> * * in: * bpContext - for Bacula debug and jobinfo messages * dkinfo - the container to scan * dkvols - the volume mount information to update * out: * none */ void DKCOMMCTX::update_vols_mounts(bpContext* ctx, DKINFO *container, DKVOLS *volume) { POOL_MEM out(PM_MESSAGE); POOL_MEM cmd(PM_MESSAGE); int status; char *p, *q, *t; DMSG0(ctx, DINFO, "update_volume_mounts called\n"); if (container && volume){ /* get details about container mounts */ Mmsg(cmd, "container inspect --format '{{range .Mounts}}{{.Name}}{{print \"\\t\"}}{{println .Destination}}{{end}}' %s", container->get_container_id()); if (!execute_command(ctx, cmd)){ /* some error executing command */ DMSG0(ctx, DERROR, "update_volume_mounts execution error\n"); return; } /* process data: * aa9d3074f8c65a5afafddc6eaaf9827e99bb51f676aafaacc05cfca0188e65bf /var/log\n * 9194f415cafcf8d234673478f3358728d43e0203e58d0338b4ee18a4dca6646b /etc/logrotate.d\n * (...) 
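 * i.e. one "<volume name>\t<mount destination>" pair per output line; the
 * loop below splits each line on the tab character and, when the volume
 * name matches volume->vol, copies the destination into volume->destination.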
*/ if ((status = read_output(ctx, out)) > 0){ /* we read a string, so terminate it with nul char */ p = out.c_str(); p[status] = 0; while (*p != 0 && (q = strchr(p, '\n')) != NULL){ /* p is the start of the string and q is the end of line */ *q = 0; // q+1 will be the next line DMSG(ctx, DVDEBUG, "update_volume_mounts scanning: %s\n", p); if (check_for_docker_errors(ctx, p)){ return; } /* expect 1 tab-separator for 2 parameters handled in-place */ t = strchr(p, '\t'); if (t != NULL){ *t++ = 0; } else { /* scan error */ return; } DMSG2(ctx, DDEBUG, "update_volume_mounts volname: %s dest: %s\n", p, t); if (bstrcmp(volume->vol->get_volume_name(), p)){ /* this is the volume we are looking for */ pm_strcpy(volume->destination, t); return; } /* next line */ DMSG0(ctx, DVDEBUG, "get_all_list_from_docker next line\n"); p = q + 1; } } else { DMSG0(ctx, DINFO, "get_all_list_from_docker no container found.\n"); } terminate(ctx); } else { DMSG2(ctx, DERROR, "invalid parameters: c:%p v:%p\n", container, volume); return; } DMSG0(ctx, DINFO, "update_volume_mounts finish.\n"); } /* * Return a list of all containers available on Docker. * * the container list is described as the following text block: # docker ps -a --no-trunc=true --format "{{.ID}}\t{{.Names}}\t{{.Size}}\t{{.Mounts}}\t{{.Labels}}\t{{.Image}}" 66f45d8601bae26a6b2ffeb46922318534d3b3905377b3a224693bd78601cb3b brave_edison 0B (virtual 228MB) c0a478d317195ba27dda1370b73e5cb94a7773f2a611142d7dff690abdcfdcbf postgres\n * * in: * bpContext - for Bacula debug jobinfo messages * out: * NULL - on any error * empty alist - when no docker comntainers found * alist - a list of DKINFO class which describe a single docker container */ alist *DKCOMMCTX::get_all_containers(bpContext* ctx) { return get_all_list_from_docker(ctx, "ps -a --no-trunc=true --format \"{{.ID}}\\t{{.Names}}\\t{{.Size}}\\t{{.Mounts}}\\t{{.Labels}}\\t{{.Image}}\"", 6, &all_containers, DOCKER_CONTAINER); }; /* * Return a list of all images available on Docker. * * the images list is described as the following text block: # # docker image ls --no-trunc=true --format "{{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.Size}}" restore/a6ba1cb597d5 latest sha256:44c34a8a510dc08b7b0a8f6961257e89120152739e61611f564353e8feb95e68 319MB\n * * in: * bpContext - for Bacula debug jobinfo messages * out: * NULL - on any error * empty alist - when no vms found * alist - a list of DKINFO class which describe a single VM */ alist *DKCOMMCTX::get_all_images(bpContext* ctx) { return get_all_list_from_docker(ctx, "image ls --no-trunc=true --format \"{{.ID}}\\t{{.Repository}}\\t{{.Tag}}\\t{{.Size}}\\t{{.CreatedAt}}\"", 5, &all_images, DOCKER_IMAGE); } /* * Return a list of all volumes available on Docker. * * the volumes list is described as the following text block: # # docker volume ls --format "{{.Name}}\t{{.Size}}" 0a5f9bf16602f2de6c9e701e6fb6d4ce1292ee336348c7c2624f4a08aaacebc4 N/A\n * * in: * bpContext - for Bacula debug jobinfo messages * out: * NULL - on any error * empty alist - when no vms found * alist - a list of DKINFO class which describe a single VM */ alist *DKCOMMCTX::get_all_volumes(bpContext* ctx) { return get_all_list_from_docker(ctx, "volume ls --format \"{{.Name}}\\t{{.Size}}\"", 2, &all_volumes, DOCKER_VOLUME); } /* * Rename/set the docker image tag on selected image. 
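 *
 * Illustrative example (hypothetical tag value): docker_tag(ctx, id,
 * "mcache1/66f45d8601ba:backup") builds the command
 * "image tag <id> mcache1/66f45d8601ba:backup"; no output is expected on
 * success, and any output produced is run through check_for_docker_errors().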
*/ bRC DKCOMMCTX::docker_tag(bpContext* ctx, DKID &dkid, POOLMEM *tag) { bRC rc = bRC_OK; POOL_MEM cmd(PM_FNAME); POOL_MEM out(PM_BSOCK); int status; DMSG0(ctx, DINFO, "docker_tag called.\n"); if (!tag){ DMSG0(ctx, DERROR, "docker_tag tag is NULL!\n"); return bRC_Error; } Mmsg(cmd, "image tag %s %s", (char*)dkid, tag); DMSG1(ctx, DDEBUG, "%s\n", cmd.c_str()); if (!execute_command(ctx, cmd)){ /* some error executing command */ DMSG0(ctx, DERROR, "docker_tag execution error\n"); JMSG0(ctx, abort_on_error ? M_FATAL : M_ERROR, "docker_tag execution error\n"); return bRC_Error; } memset(out.c_str(), 0, out.size()); status = read_output(ctx, out); if (status < 0){ /* error reading data from docker command */ DMSG0(ctx, DERROR, "docker_tag error reading data from docker command\n"); JMSG0(ctx, abort_on_error ? M_FATAL : M_ERROR, "docker_tag error reading data from docker command\n"); rc = bRC_Error; } else if (status > 0 && check_for_docker_errors(ctx, out.c_str())){ rc = bRC_Error; } /* we do not expect any output here */ terminate(ctx); DMSG0(ctx, DINFO, "docker_tag finish.\n"); return rc; }; /* * Waits for restore commands to finish. * Closes a BPIPE write descriptor which means EOF to command tool. Then try * to read from tools output and checks if restore was successful or not. * * in: * bpContext - for Bacula debug jobinfo messages * out: * bRC_OK - restore was successful and vmuuid is filled with VM UUID restored. * bRC_Error - on any error */ bRC DKCOMMCTX::wait_for_restore(bpContext *ctx, DKID &dkid) { POOL_MEM out(PM_BSOCK); POOL_MEM buf(PM_BSOCK); int status; char *p; bRC rc = bRC_OK; DMSG0(ctx, DINFO, "wait_for_restore called.\n"); /* first flush any outstanding restore data and close write descriptor */ close_wpipe(bpipe); /* now read the status from command */ while ((status = read_output(ctx, out)) != 0){ if (status < 0){ /* error reading data from command tool */ DMSG0(ctx, DERROR, "error reading data from command tool\n"); rc = bRC_Error; goto bailout; } pm_strcat(buf, out); p = buf.c_str(); p[status] = 0; } /* check for errors */ DMSG1(ctx, DVDEBUG, "bufout: %s\n", buf.c_str()); p = buf.c_str(); if (strstr(p, "Loaded image ID: ") == NULL){ /* error, missing confirmation*/ DMSG0(ctx, DERROR, "wait_for_restore confirmation error!\n"); JMSG1(ctx, abort_on_error ? M_FATAL : M_ERROR, "Image restore commit error: %s\n", p); rc = bRC_Error; } else { dkid = (char*)(p+17); DMSG1(ctx, DDEBUG, "scanned dkid: %s\n", (char*)dkid); } bailout: terminate(ctx); DMSG0(ctx, DINFO, "wait_for_restore finish.\n"); return rc; } /* * Executes docker command tool to make a container image commit. * Setup dkinfo->data.container.imagesave with image id of newly created image * which should be used at image save method. 
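 *
 * Note: when the plugin "mode" parameter is DKPAUSE the commit is issued
 * with "-p", pausing the container for the duration of the commit. The
 * temporary image is tagged by render_imagesave_name(), e.g.
 * "mcache1/66f45d8601ba:backup" as in the example below, and is later
 * removed by delete_container_commit().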
* * in: * bpContext - for Bacula debug jobinfo messages * dkinfo - container for commit * out: * bRC_OK - when command execution was successful * bRC_Error - on any error */ bRC DKCOMMCTX::container_commit(bpContext* ctx, DKINFO *dkinfo, int jobid) { POOL_MEM cmd(PM_FNAME); POOL_MEM imagename(PM_FNAME); POOL_MEM out(PM_MESSAGE); const char *mode = ""; const char *PAUSE = "-p"; DKID imagesave; bRC rc = bRC_OK; int status; char *p; DMSG0(ctx, DINFO, "container_commit called.\n"); if (dkinfo->type() != DOCKER_CONTAINER){ /* commit works on containers only */ return bRC_Error; } if (param_mode == DKPAUSE){ mode = PAUSE; } // commit -p 66f45d8601bae26a6b2ffeb46922318534d3b3905377b3a224693bd78601cb3b mcache1/66f45d8601ba:backup render_imagesave_name(imagename, dkinfo, jobid); Mmsg(cmd, "commit %s %s %s", mode, (char*)*dkinfo->get_container_id(), imagename.c_str()); if (!execute_command(ctx, cmd)){ /* some error executing command */ DMSG0(ctx, DERROR, "container_commit execution error\n"); JMSG0(ctx, abort_on_error ? M_FATAL : M_ERROR, "container_commit execution error\n"); return bRC_Error; } memset(out.c_str(), 0, out.size()); status = read_output(ctx, out); if (status < 0){ /* error reading data from docker command */ DMSG0(ctx, DERROR, "container_commit error reading data from docker command\n"); JMSG0(ctx, abort_on_error ? M_FATAL : M_ERROR, "container_commit error reading data from docker command\n"); rc = bRC_Error; } else { /* terminate committed image id string */ p = out.c_str(); p[status] = 0; strip_trailing_junk(out.c_str()); /* check a known output error */ if (status > 0 && check_for_docker_errors(ctx, out.c_str())){ rc = bRC_Error; } else { // should return a string: sha256:290835d692069c376072061362cb11b1e99efd555f6fb83b7be3e524ba6067fc imagesave = p; if (imagesave.id() < 0){ /* error scanning image id */ DMSG1(ctx, DERROR, "container_commit cannot scan commit image id. Err=%s\n", p); JMSG1(ctx, abort_on_error ? M_FATAL : M_ERROR, "container_commit cannot scan commit image id. Err=%s\n", p); rc = bRC_Error; } else { dkinfo->set_container_imagesave(imagesave); dkinfo->set_container_imagesave_tag(imagename); DMSG(ctx, DINFO, "Commit created: %s\n", dkinfo->get_container_imagesave_tag()); JMSG(ctx, M_INFO, "Commit created: %s\n", dkinfo->get_container_imagesave_tag()); } } } terminate(ctx); DMSG0(ctx, DINFO, "container_commit finish.\n"); return rc; } /* * Executes docker command tool to make a container image commit. * Setup dkinfo->data.container.imagesave with image id of newly created image * which should be used at image save method. 
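 *
 * Note: this is the cleanup counterpart of container_commit(): it runs
 * "rmi" on the image recorded in dkinfo->get_container_imagesave() and
 * treats the removal as successful only when the expected "Untagged:" and
 * "Deleted:" lines referring to that image are found in the output.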
* * in: * bpContext - for Bacula debug jobinfo messages * dkinfo - container for commit * out: * bRC_OK - when command execution was successful * bRC_Error - on any error */ bRC DKCOMMCTX::delete_container_commit(bpContext* ctx, DKINFO *dkinfo, int jobid) { POOL_MEM cmd(PM_FNAME); POOL_MEM imagename(PM_FNAME); POOL_MEM out(PM_MESSAGE); DKID imagesave; bRC rc = bRC_OK; int status; char *p, *q; int noerror = 0; DMSG0(ctx, DINFO, "delete_container_commit called.\n"); if (dkinfo->type() != DOCKER_CONTAINER){ /* commit works on containers only, silently ignore images */ return bRC_OK; } if (dkinfo->get_container_imagesave()->id() > 0){ /* container has commit image */ /* # docker rmi e7cd2a7f1c52a1fa8d88ab812abdcd814064e4884a12bd1f9acde16133023a69 Untagged: mcache1/66f45d8601ba/123:backup Deleted: sha256:e7cd2a7f1c52a1fa8d88ab812abdcd814064e4884a12bd1f9acde16133023a69 */ Mmsg(cmd, "rmi %s", (char*)*dkinfo->get_container_imagesave()); if (!execute_command(ctx, cmd)){ /* some error executing command */ DMSG0(ctx, DERROR, "delete_container_commit execution error\n"); JMSG0(ctx, abort_on_error ? M_FATAL : M_ERROR, "delete_container_commit execution error\n"); return bRC_Error; } memset(out.c_str(), 0, out.size()); status = read_output(ctx, out); if (status < 0){ /* error reading data from docker command */ DMSG0(ctx, DERROR, "delete_container_commit error reading data from docker command\n"); JMSG0(ctx, abort_on_error ? M_FATAL : M_ERROR, "delete_container_commit error reading data from docker command\n"); rc = bRC_Error; goto bailout; } /* terminate output string */ p = out.c_str(); p[status] = 0; /* check a known output error */ if (status > 0 && (strncmp(out.c_str(), "Cannot connect to the Docker daemon", 35) == 0)){ DMSG1(ctx, DERROR, "No Docker is running. Cannot continue! Err=%s\n", out.c_str()); JMSG1(ctx, abort_on_error ? M_FATAL : M_ERROR, "No Docker is running. Err=%s\n", out.c_str()); rc = bRC_Error; goto bailout; } render_imagesave_name(imagename, dkinfo, jobid); /* it should return the following string: Untagged: mcache1/66f45d8601ba:backup\n Deleted: sha256:e7cd2a7f1c52a1fa8d88ab812abdcd814064e4884a12bd1f9acde16133023a69\n */ while (*p != 0 && (q = strchr(p, '\n')) != NULL){ /* p is the start of the string and q is the end of line */ *q = 0; DMSG(ctx, DVDEBUG, "delete_container_commit scanning: %s\n", p); if (strstr(p, "Untagged: ") == p && strstr(p, imagename.c_str()) != NULL){ /* above message means 1/3 of success */ noerror++; } if (strstr(p, "Deleted: ") == p){ /* check if it deleted what we requested for */ noerror++; imagesave = (char*)(p + 9); if (imagesave == *dkinfo->get_container_imagesave()){ /* yes it deleted exactly what we are requesting for */ noerror++; } } /* next line */ DMSG0(ctx, DVDEBUG, "delete_snapshot next line\n"); p = q + 1; } if (noerror < 3){ /* error deleting snapshot */ strip_trailing_junk(out.c_str()); DMSG(ctx, DERROR, "Error deleting commit image. Err=%s\n", out.c_str()); JMSG(ctx, abort_on_error ? M_FATAL : M_ERROR, "Error deleting commit image. Err=%s\n", out.c_str()); rc = bRC_Error; goto bailout; } DMSG(ctx, DINFO, "Commit removed: %s\n", dkinfo->get_container_imagesave_tag()); JMSG(ctx, M_INFO, "Commit removed: %s\n", dkinfo->get_container_imagesave_tag()); bailout: terminate(ctx); } else { DMSG0(ctx, DINFO, "container_commit no imagesave available.\n"); } DMSG0(ctx, DINFO, "container_commit finish.\n"); return rc; } /* * Executes docker command tool to save docker image. 
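 *
 * Illustrative example: for an image whose full id is
 * 44c34a8a510dc08b7b0a8f6961257e89120152739e61611f564353e8feb95e68 (see the
 * get_all_images() sample output above) the command built here is simply
 * "save <that id>".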
* The data to backup is generated on docker stdout channel and will be saved * on pluginIO calls. * * in: * bpContext - for Bacula debug jobinfo messages * dkid - docker image to save information * out: * bRC_OK - when command execution was successful * bRC_Error - on any error */ bRC DKCOMMCTX::image_save(bpContext* ctx, DKID *dkid) { POOL_MEM cmd(PM_FNAME); DMSG0(ctx, DINFO, "image_save called.\n"); Mmsg(cmd, "save %s", (char*)*dkid); if (!execute_command(ctx, cmd)){ /* some error executing command */ DMSG0(ctx, DERROR, "image_save execution error\n"); JMSG0(ctx, abort_on_error ? M_FATAL : M_ERROR, "image_save execution error\n"); return bRC_Error; } DMSG0(ctx, DINFO, "image_save finish, now we can read all the data.\n"); return bRC_OK; } /* * It runs a Bacula Archive container for Docker Volume files data backup. * * in: * bpContext - for Bacula debug jobinfo messages * cmd - the command for Bacula Archive container execution ('backup' and 'restore' are supported) * volname - a volume name to backup from or restore to * jobid - required for proper support volume creation * out: * bRC_OK - when command execution was successful * bRC_Error - on any error */ bRC DKCOMMCTX::run_container_volume_cmd(bpContext* ctx, const char *cmd, POOLMEM *volname, int jobid) { POOL_MEM bactarcmd(PM_FNAME); POOL_MEM out(PM_MESSAGE); int status; char *p; DMSG1(ctx, DINFO, "run_container_volume_cmd called: %s.\n", cmd); if (strlen(workingvolume.c_str()) == 0 && prepare_working_volume(ctx, jobid) != bRC_OK){ return bRC_Error; } /* Here we will run archive container for volume backup */ Mmsg(bactarcmd, "run -d --rm -v %s:/%s -v %s:/logs %s %s", volname, cmd, workingvolume.c_str(), BACULATARIMAGE, cmd); if (!execute_command(ctx, bactarcmd)){ /* some error executing command */ DMSG0(ctx, DERROR, "run_container_volume_cmd execution error\n"); JMSG0(ctx, abort_on_error ? M_FATAL : M_ERROR, "run_container_volume_cmd execution error\n"); return bRC_Error; } /* setup output buffer */ memset(out.c_str(), 0, out.size()); status = read_output(ctx, out); if (status < 0){ /* error reading data from docker command */ DMSG0(ctx, DERROR, "run_container_volume_cmd error reading data from docker command\n"); JMSG0(ctx, abort_on_error ? M_FATAL : M_ERROR, "run_container_volume_cmd error reading data from docker command\n"); return bRC_Error; } /* terminate container id string */ p = out.c_str(); p[status] = 0; strip_trailing_junk(out.c_str()); /* check a known output error */ if (status > 0 && check_for_docker_errors(ctx, out.c_str())){ return bRC_Error; } DMSG2(ctx, DINFO, "run_container_volume_cmd finish - acc: %s, now we can %s all the data.\n", out.c_str(), cmd); return bRC_OK; } /* * Execute a Bacula Archive Container for volume backup. * * in: * bpContext - required for debug/job messages * volname - a volume name to backup from * jobid - required for proper support volume creation * out: * bRC_OK - when command execution was successful * bRC_Error - on any error */ bRC DKCOMMCTX::run_container_volume_save(bpContext* ctx, POOLMEM* volname, int jobid) { return run_container_volume_cmd(ctx, "backup", volname, jobid); }; /* * Execute a Bacula Archive Container for volume restore. 
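 *
 * Illustrative example (hypothetical volume name "myvol"): this delegates
 * to run_container_volume_cmd() with cmd="restore", which builds roughly
 *   run -d --rm -v myvol:/restore -v <working volume>:/logs <BACULATARIMAGE> restore
 * so that the Bacula archive container extracts the restored data into the
 * target volume.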
* * in: * bpContext - required for debug/job messages * volname - a volume name to restore to * jobid - required for proper support volume creation * out: * bRC_OK - when command execution was successful * bRC_Error - on any error */ bRC DKCOMMCTX::run_container_volume_load(bpContext* ctx, POOLMEM* volname, int jobid) { return run_container_volume_cmd(ctx, "restore", volname, jobid); }; /* * Execute Docker commands to perform backup procedure. * Commit container then save committed image for container backup * or simply save docker image for image backup. * * in: * bpContext - for Bacula debug jobinfo messages * dkinfo - the docker object to backup * jobid - bacula jobid number * out: * bRC_OK - when command execution was successful * bRC_Error - on any error */ bRC DKCOMMCTX::backup_docker(bpContext *ctx, DKINFO *dkinfo, int jobid) { DMSG0(ctx, DINFO, "backup_docker called.\n"); switch (dkinfo->type()){ case DOCKER_CONTAINER: /* create container commit */ if (container_commit(ctx, dkinfo, jobid) == bRC_OK){ if (dkinfo->get_container_imagesave()->id() > 0){ return image_save(ctx, dkinfo->get_container_imagesave()); } } break; case DOCKER_IMAGE: return image_save(ctx, dkinfo->get_image_id()); case DOCKER_VOLUME: return run_container_volume_save(ctx, dkinfo->get_volume_name(), jobid); default: break; } DMSG0(ctx, DINFO, "backup_docker finish with error.\n"); return bRC_Error; }; /* * Executes Docker commands to perform restore. * The data to restore is gathered on command stdin channel and will be sent * on pluginIO calls. * * in: * bpContext - for Bacula debug jobinfo messages * out: * bRC_OK - when command execution was successful * bRC_Error - on any error */ bRC DKCOMMCTX::restore_docker(bpContext *ctx, DKINFO *dkinfo, int jobid) { DMSG0(ctx, DINFO, "restore_docker called.\n"); if (dkinfo != NULL && dkinfo->type() == DOCKER_VOLUME){ return run_container_volume_load(ctx, dkinfo->get_volume_name(), jobid); } else { if (!execute_command(ctx, "load")){ /* some error executing command */ DMSG0(ctx, DERROR, "restore_docker execution error\n"); return bRC_Error; } } DMSG0(ctx, DINFO, "restore_docker finish, now we can write the data.\n"); return bRC_OK; }; /* * Create or run docker container based on restored image. * * # docker container create --name mcache1_59 mcache1/b97d4dd88063/59:restore * * in: * bpContext - for Bacula debug jobinfo messages * dkinfo - restored image for container creation or run * out: * bRC_OK - when creation/run was successful * bRC_Error - on any error */ bRC DKCOMMCTX::docker_create_run_container(bpContext* ctx, DKINFO *dkinfo) { POOL_MEM cmd(PM_FNAME); POOL_MEM out(PM_BSOCK); bRC rc = bRC_OK; int status; char *p; char *imagelabel; const char *namepar; const char *nameval; DKID containerid; if (!param_container_create && !param_container_run){ DMSG0(ctx, DINFO, "docker_create_container skipped on request.\n"); return bRC_OK; } DMSG0(ctx, DINFO, "docker_create_container called.\n"); if (dkinfo){ imagelabel = param_container_imageid ? (char*)*dkinfo->get_container_imagesave() : dkinfo->get_container_imagesave_tag(); namepar = param_container_defaultnames ? "" : "--name "; nameval = param_container_defaultnames ? 
"" : dkinfo->get_container_names(); if (param_container_run){ // create and run the container Mmsg(cmd, "run -d %s%s %s", namepar, nameval, imagelabel); } else { // create only Mmsg(cmd, "container create %s%s %s", namepar, nameval, imagelabel); } if (!execute_command(ctx, cmd.c_str())){ /* some error executing command */ DMSG0(ctx, DERROR, "docker_create_container execution error\n"); return bRC_Error; } memset(out.c_str(), 0, out.size()); status = read_output(ctx, out); if (status < 0){ /* error reading data from docker command */ DMSG0(ctx, DERROR, "docker_create_container error reading data from docker command\n"); JMSG0(ctx, abort_on_error ? M_FATAL : M_ERROR, "docker_create_container error reading data from docker command\n"); rc = bRC_Error; goto bailout; } /* terminate committed image id string */ p = out.c_str(); p[status] = 0; strip_trailing_junk(out.c_str()); /* check a known output error */ if (status > 0 && (strncmp(out.c_str(), "Cannot connect to the Docker daemon", 35) == 0)){ DMSG1(ctx, DERROR, "No Docker is running. Cannot continue! Err=%s\n", out.c_str()); JMSG1(ctx, abort_on_error ? M_FATAL : M_ERROR, "No Docker is running. Err=%s\n", out.c_str()); rc = bRC_Error; goto bailout; } // should return a string like: 5dd2e72fd9981184ddb8b04aaea06003617fd3f09ad0764921694e20be680c54 containerid = p; if (containerid.id() < 0){ /* error scanning container id */ DMSG1(ctx, DERROR, "docker_create_container cannot scan commit image id. Err=%s\n", p); JMSG1(ctx, abort_on_error ? M_FATAL : M_ERROR, "docker_create_container cannot scan commit image id. Err=%s\n", p); rc = bRC_Error; goto bailout; } else { dkinfo->set_container_id(containerid); if (param_container_run){ DMSG1(ctx, DINFO, "docker_create_container successfully run container as: %s\n", (char*)containerid); JMSG1(ctx, M_INFO, "Successfully run container as: (%s)\n", containerid.digest_short()); } } } bailout: terminate(ctx); DMSG0(ctx, DINFO, "docker_create_container finish.\n"); return rc; }; bacula-15.0.3/src/plugins/fd/docker/baculatar/0000755000175000017500000000000014771010173020730 5ustar bsbuildbsbuildbacula-15.0.3/src/plugins/fd/docker/baculatar/baculatar0000755000175000017500000000205114771010173022612 0ustar bsbuildbsbuild#!/bin/sh # # Bacula® - The Network Backup Solution # # Copyright (C) 2000-2020 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # # Bacula(R) is a registered trademark of Kern Sibbald. # # This is a Bacula archive tool for backup/restore files on Docker volumes. # Author: Radosław Korzeniewski, radekk@inteos.pl, Inteos Sp. z o.o. # #echo "EXEC:" $0 #echo "ARGV:" $* if [ "x$0" == "x$1" ] then shift 1 fi ARGV="backup" if [ "x$1" != "x" ] then ARGV=$1 fi #echo "params:" $ARGV if [ $ARGV == "gimmetheshell" ] then exec /bin/sh fi rm -f /logs/docker.err /logs/docker.log RC=0 # this is an ack to plugin to confirm proper execution echo "200:OK" case $ARGV in "backup") /tar -cvvf /logs/fout -C /backup . 2> /logs/docker.err > /logs/docker.log RC=$? ;; "restore") /tar -xvvf /logs/fin -C /restore 2> /logs/docker.err > /logs/docker.log RC=$? ;; "*") echo "404:Invalid option!" > /logs/docker.err esac if [ $RC -ne 0 ] then echo "500:exit status: $RC" >> /logs/docker.err fi exit 0 bacula-15.0.3/src/plugins/fd/docker/baculatar/Dockerfile0000644000175000017500000000141414771010173022722 0ustar bsbuildbsbuild#!/bin/sh # # Bacula® - The Network Backup Solution # # Copyright (C) 2000-2020 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # # Bacula(R) is a registered trademark of Kern Sibbald. 
# # This is a Bacula archive tool for backup/restore files on Docker volumes. # Author: Radosław Korzeniewski, radekk@inteos.pl, Inteos Sp. z o.o. # FROM busybox:latest LABEL maintainer="Radosław Korzeniewski " LABEL org.label-schema.schema-version="1.0" LABEL org.label-schema.description="This is a Bacula container for backup/restore Docker volumes using archive tools." LABEL org.label-schema.vendor="Bacula Systems S.A." LABEL org.label-schema.version="1.2" COPY baculatar /baculatar COPY tar /tar CMD ["/baculatar", "backup"] ENTRYPOINT ["/baculatar"] bacula-15.0.3/src/plugins/fd/docker/baculatar/createimage.sh0000755000175000017500000000212414771010173023534 0ustar bsbuildbsbuild#!/bin/sh # # Bacula® - The Network Backup Solution # # Copyright (C) 2000-2020 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # # Bacula(R) is a registered trademark of Kern Sibbald. # # This is a Bacula archive tool for backup/restore files on Docker volumes. # Author: Radosław Korzeniewski, radekk@inteos.pl, Inteos Sp. z o.o. # echo "This script will build a custom static BaculaTar archive!" echo echo "To doing so it needs a full C building toolchain, upx executable compression library" echo "and full Internet access for downloading required source code and dependencies." echo "You should provide it for successful build." echo echo "When ready - hit enter -" if [ "x$1" != "xyes" ] then read a fi rm -rf archbuild git clone https://github.com/ebl/tar-static.git archbuild cd archbuild ./build.sh cd .. cp archbuild/releases/tar . rm -rf archbuild #D=`date +%d%b%y` D=`grep DOCKER_TAR_IMAGE ../../../../version.h | awk '{print $3}' | sed 's/"//g'` docker build -t baculatar:$D . docker tag baculatar:$D baculatar:latest docker save -o baculatar-$D.docker.tar baculatar:latest bacula-15.0.3/src/plugins/fd/docker/baculatar/README0000644000175000017500000000047314771010173021614 0ustar bsbuildbsbuildThe Docker Plugin uses a static version of tar to backup/restore Docker external volumes. To doing so it needs - a full C building toolchain - the upx executable compression library A full Internet access for downloading required source code and dependencies. You should provide them for a successful build. bacula-15.0.3/src/plugins/fd/docker/dkid.c0000644000175000017500000001174014771010173020054 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /** * @file dkid.c * @author Radoslaw Korzeniewski (radoslaw@korzeniewski.net) * @brief This is a Bacula plugin for backup/restore Docker using native tools. * @version 1.2.1 * @date 2020-01-05 * * @copyright Copyright (c) 2021 All rights reserved. IP transferred to Bacula Systems according to agreement. */ #include "dkid.h" #include #include /* * DKID class constructor, does default initialization. */ DKID::DKID() : ShortD(DKIDInvalid), #if __cplusplus > 201103L Digest {0}, DigestShort {0}, #endif shortonly(false) { #if ! 
__cplusplus > 201103L bmemzero(Digest, DKIDDIGESTSIZE_Len); bmemzero(DigestShort, DKIDDIGESTShortSIZE_Len); #endif }; /* * DKID initialization from string. * as the usable area of short sha256 version used in Docker is 6bytes/48bits * and we are using a 64bit (signed) integer then we have a plenty of space to mark * invalid sha256 conversion with a negative ShortD value. */ void DKID::init(const char* data) { char *dig = (char*)data; if (dig != NULL){ /* check for sha256: prefix*/ if (strstr(dig, "sha256:") == dig){ dig += 7; } int len = strlen(dig); /* check for invalid input data */ bool valid = true; for (int a = 0; a < (len > DKIDDIGESTShortSIZE ? DKIDDIGESTShortSIZE : len); a++){ // we are checking for ASCII codes, a subset of UTF-8 for short digest only unsigned char c = (unsigned char)dig[a]; if (c > 'f' || (c > '9' && c < 'A') || (c > 'F' && c < 'a')){ valid = false; break; } } if (valid){ if (len > DKIDDIGESTShortSIZE){ /* initialize from full data */ memcpy(Digest, dig, DKIDDIGESTSIZE); Digest[DKIDDIGESTSIZE] = 0; shortonly = false; } else { /* handle short data */ memcpy(Digest, dig, len); memcpy(Digest + len, "(...)\0", 6); shortonly = true; } memcpy(DigestShort, dig, DKIDDIGESTShortSIZE); DigestShort[DKIDDIGESTShortSIZE] = 0; ShortD = strtol(DigestShort, NULL, 16); } else { ShortD = DKIDInvalid; shortonly = false; } } }; /* * Basic assignment operator overloading for string. * * in: * data - the null terminated string where up to 64 chars will be used * out: * reinitialized DKID class */ DKID& DKID::operator= (char* data) { init(data); return *this; }; /* * Basic assignment operator overloading for POOL_MEM class. * * in: * data - a reference to POOL_MEM class instance which is used as a source * of null terminated string for initialization * out: * reinitialized DKID class */ DKID& DKID::operator =(POOL_MEM &data) { init(data.c_str()); return *this; } /* * Basic assignment operator overloading for DKID class. * * in: * other - a reference to another DKID class instance which will be used for * assignment * out: * reinitialized DKID class */ DKID& DKID::operator =(DKID &other) { memcpy(Digest, other.Digest, DKIDDIGESTSIZE); memcpy(DigestShort, other.DigestShort, DKIDDIGESTShortSIZE); Digest[DKIDDIGESTSIZE] = 0; DigestShort[DKIDDIGESTShortSIZE] = 0; ShortD = other.ShortD; shortonly = other.shortonly; return *this; } /* * Equal to operator overloading for DKID class. * * in: * other - a reference to another DKID class instance which will be used for * comparison * out: * true - if both ShortD are the same * false - if ShortD variables differ or any DKID is invalid */ bool DKID::operator ==(DKID &other) { if (ShortD >= 0 && other.ShortD >= 0 && ShortD == other.ShortD && (shortonly || other.shortonly || bstrcmp(Digest, other.Digest))){ return true; } return false; } /* * Not-Equal to operator overloading for DKID class. 
* * in: * other - a reference to another DKID class instance which will be used for * comparison * out: * true - if ShortD variables differ and none of them are invalid * false - if both ShortD are the same or any DKID is invalid */ bool DKID::operator !=(DKID &other) { if (ShortD >= 0 && other.ShortD >= 0 && ShortD != other.ShortD){ return true; } if (!shortonly && !other.shortonly && !bstrcmp(Digest, other.Digest)){ return true; } return false; } bacula-15.0.3/src/plugins/fd/docker/dkid.h0000644000175000017500000000517414771010173020065 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /** * @file dkid.h * @author Radoslaw Korzeniewski (radoslaw@korzeniewski.net) * @brief This is a Bacula plugin for backup/restore Docker using native tools. * @version 1.2.1 * @date 2020-01-05 * * @copyright Copyright (c) 2021 All rights reserved. IP transferred to Bacula Systems according to agreement. */ #ifndef _DKID_H_ #define _DKID_H_ #include "bacula.h" #define DKIDDIGESTSIZE 64 // the size of string array for hex chars, without trailing nul #define DKIDDIGESTSIZE_Len (DKIDDIGESTSIZE + 1) // the size of string array for hex chars, with trailing nul #define DKIDDIGESTShortSIZE 12 // the number of hex characters in short digest, without trailing nul #define DKIDDIGESTShortSIZE_Len (DKIDDIGESTShortSIZE + 1) // the number of hex characters in short digest, without trailing nul #define DKIDInvalid -256 // the non-trivial negative value :) /* * This is a simple storage class to handle Docker Container IDs */ class DKID: public SMARTALLOC { public: DKID(); DKID(const char *data) { init(data); } DKID(POOL_MEM &data) { init(data.c_str()); } #if __cplusplus > 201103L ~DKID() = default; #else ~DKID() {}; #endif inline int64_t id() { return ShortD; } inline char *digest() { return Digest; } inline char *digest_short() { return DigestShort; } inline operator int64_t () { return ShortD; } inline operator char* () { return Digest; } DKID& operator= (char *data); DKID& operator= (DKID &other); DKID& operator= (POOL_MEM &data); bool operator== (DKID &other); bool operator!= (DKID &other); private: int64_t ShortD; // default short digest on Docker is 48bits/6bytes/12hex chars // https://github.com/docker/cli/blob/master/vendor/github.com/docker/docker/pkg/stringid/stringid.go char Digest[DKIDDIGESTSIZE_Len]; char DigestShort[DKIDDIGESTShortSIZE_Len]; bool shortonly; void init(const char* d); }; #endif /* _DKID_H_ */ bacula-15.0.3/src/plugins/fd/docker/docker-fd.c0000644000175000017500000020626214771010173021004 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. 
You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /** * @file docker-fd.c * @author Radoslaw Korzeniewski (radoslaw@korzeniewski.net) * @brief This is a Bacula plugin for backup/restore Docker using native tools. * @version 1.2.1 * @date 2020-01-05 * @copyright Copyright (c) 2021 All rights reserved. IP transferred to Bacula Systems according to agreement. */ #include "docker-fd.h" #include #include #include #include /* * libbac uses its own sscanf implementation which is not compatible with * libc implementation, unfortunately. * use bsscanf for Bacula sscanf flavor */ #ifdef sscanf #undef sscanf #endif extern DLL_IMP_EXP int64_t debug_level; /* Forward referenced functions */ static bRC newPlugin(bpContext *ctx); static bRC freePlugin(bpContext *ctx); static bRC getPluginValue(bpContext *ctx, pVariable var, void *value); static bRC setPluginValue(bpContext *ctx, pVariable var, void *value); static bRC handlePluginEvent(bpContext *ctx, bEvent *event, void *value); static bRC startBackupFile(bpContext *ctx, struct save_pkt *sp); static bRC endBackupFile(bpContext *ctx); static bRC pluginIO(bpContext *ctx, struct io_pkt *io); static bRC startRestoreFile(bpContext *ctx, const char *cmd); static bRC endRestoreFile(bpContext *ctx); static bRC createFile(bpContext *ctx, struct restore_pkt *rp); static bRC setFileAttributes(bpContext *ctx, struct restore_pkt *rp); // Not used! static bRC checkFile(bpContext *ctx, char *fname); static bRC handleXACLdata(bpContext *ctx, struct xacl_pkt *xacl); /* Pointers to Bacula functions */ bFuncs *bfuncs = NULL; bInfo *binfo = NULL; static pFuncs pluginFuncs = { sizeof(pluginFuncs), FD_PLUGIN_INTERFACE_VERSION, /* Entry points into plugin */ newPlugin, freePlugin, getPluginValue, setPluginValue, handlePluginEvent, startBackupFile, endBackupFile, startRestoreFile, endRestoreFile, pluginIO, createFile, setFileAttributes, NULL, /* No checkFile */ handleXACLdata }; #ifdef __cplusplus extern "C" { #endif /* Plugin Information structure */ static pInfo pluginInfo = { sizeof(pluginInfo), FD_PLUGIN_INTERFACE_VERSION, FD_PLUGIN_MAGIC, DOCKER_LICENSE, DOCKER_AUTHOR, DOCKER_DATE, DOCKER_VERSION, DOCKER_DESCRIPTION, }; /* * Plugin called here when it is first loaded. */ bRC DLL_IMP_EXP loadPlugin(bInfo *lbinfo, bFuncs *lbfuncs, pInfo ** pinfo, pFuncs ** pfuncs) { bfuncs = lbfuncs; /* set Bacula function pointers */ binfo = lbinfo; /* we are very paranoid here to double check it */ if (access(DOCKER_CMD, X_OK) < 0){ berrno be; Dmsg2(DERROR, "Unable to use command tool: %s Err=%s\n", DOCKER_CMD, be.bstrerror()); return bRC_Error; } Dmsg3(DINFO, "%s Plugin version %s %s (c) 2020 by Inteos\n", PLUGINNAME, DOCKER_VERSION, DOCKER_DATE); *pinfo = &pluginInfo; /* return pointer to our info */ *pfuncs = &pluginFuncs; /* return pointer to our functions */ return bRC_OK; } /* * Plugin called here when it is unloaded, normally when Bacula is going to exit. */ bRC DLL_IMP_EXP unloadPlugin() { return bRC_OK; } #ifdef __cplusplus } #endif /* * Main DOCKER Plugin class constructor. * Initializes all variables required. 
*/ DOCKER::DOCKER(bpContext *bpctx) : mode(DOCKER_NONE), JobId(0), JobName(NULL), since(0), where(NULL), regexwhere(NULL), replace(0), robjsent(false), estimate(false), accurate_warning(false), local_restore(false), backup_finish(false), unsupportedlevel(false), param_notrunc(false), errortar(false), volumewarning(false), dockerworkclear(0), dkcommctx(NULL), commandlist(NULL), fname(NULL), lname(NULL), dkfd(0), robjbuf(NULL), currdkinfo(NULL), restoredkinfo(NULL), currvols(NULL), listing_mode(DOCKER_LISTING_NONE), listing_objnr(0), parser(NULL), workingdir(NULL) { /* TODO: we have a ctx variable stored internally, decide if we use it * for every method or rip it off as not required in our code */ ctx = bpctx; } /* * Main DOCKER Plugin class destructor, handles variable release on delete. * * in: none * out: freed internal variables and class allocated during job execution */ DOCKER::~DOCKER() { /* free standard variables */ free_and_null_pool_memory(fname); free_and_null_pool_memory(lname); free_and_null_pool_memory(robjbuf); free_and_null_pool_memory(workingdir); /* free backend contexts */ if (commandlist){ /* free all backend contexts */ foreach_alist(dkcommctx, commandlist){ delete dkcommctx; } delete commandlist; } if (parser){ delete parser; } if (restoredkinfo){ delete restoredkinfo; } } /* * sets runtime workingdir variable used in working volume creation. * * in: * workdir - the file daemon working directory parameter * out: * none */ void DOCKER::setworkingdir(char* workdir) { if (workingdir == NULL){ /* not allocated yet */ workingdir = get_pool_memory(PM_FNAME); } pm_strcpy(&workingdir, workdir); DMSG1(NULL, DVDEBUG, "workingdir: %s\n", workingdir); }; /* * Parse a Restore Object saved during backup and modified by user during restore. * Every RO received will allocate a dedicated command context which is used * by bEventRestoreCommand to handle default parameters for restore. * * in: * bpContext - Bacula Plugin context structure * rop - a restore object structure to parse * out: * bRC_OK - on success * bRC_Error - on error */ bRC DOCKER::parse_plugin_restoreobj(bpContext *ctx, restore_object_pkt *rop) { if (!rop){ return bRC_OK; /* end of rop list */ } if (bstrcmp(rop->object_name, INI_RESTORE_OBJECT_NAME)){ /* we have a single RO for every command */ switch_commandctx(ctx, rop->plugin_name); /* all restore parameters are DKCOMMCTX specific, so forward parsing to it */ return dkcommctx->parse_restoreobj(ctx, rop); } return bRC_OK; } /* * Parsing a plugin command. * Plugin command e.g. plugin = :[parameters [parameters]...] 
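 *
 * Concrete (illustrative) example of such a command string:
 *   docker: container=mcache1 timeout=60
 * parser->argk[0]/argv[0] hold the plugin name and are skipped below; the
 * estimate-only "listing" and "notrunc" switches are consumed here and all
 * remaining pairs are forwarded to dkcommctx->parse_parameters().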
* * in: * bpContext - Bacula Plugin context structure * command - plugin command string to parse * out: * bRC_OK - on success * bRC_Error - on error */ bRC DOCKER::parse_plugin_command(bpContext *ctx, const char *command) { int i, a; bRC status; DMSG(ctx, DINFO, "Parse command: %s\n", command); /* allocate a new parser if required */ if (parser == NULL){ parser = new cmd_parser(); } /* and parse command */ if (parser->parse_cmd(command) != bRC_OK) { DMSG0(ctx, DERROR, "Unable to parse Plugin command line.\n"); JMSG0(ctx, M_FATAL, "Unable to parse Plugin command line.\n"); return bRC_Error; } /* switch dkcommctx to the required context or allocate a new context */ switch_commandctx(ctx, command); /* the first (zero) parameter is a plugin name, we should skip it */ for (i = 1; i < parser->argc; i++) { /* loop over all parsed parameters */ if (estimate && bstrcmp(parser->argk[i], "listing")){ /* we have a listing parameter which for estimate means .ls command */ listing_objnr = 1; listing_mode = DOCKER_LISTING_TOP; a = 0; while (docker_objects[a].name){ if (bstrcmp(parser->argv[i], docker_objects[a].name) || (*parser->argv[i] == '/' && bstrcmp(parser->argv[i]+1, docker_objects[a].name))){ listing_mode = docker_objects[a].mode; break; } a++; } continue; } if (estimate && bstrcmp(parser->argk[i], "notrunc")){ /* we are doing estimate and user requested notrunc in display */ param_notrunc = true; continue; } /* handle it with dkcommctx */ status = dkcommctx->parse_parameters(ctx, parser->argk[i], parser->argv[i]); switch (status){ case bRC_OK: /* the parameter was handled by dkcommctx, proceed to the next */ continue; case bRC_Error: /* parsing returned error, raise it up */ return bRC_Error; default: break; } DMSG(ctx, DERROR, "Unknown parameter: %s\n", parser->argk[i]); JMSG(ctx, M_ERROR, "Unknown parameter: %s\n", parser->argk[i]); } return bRC_OK; } /* * Allocate and initialize new command context list at commandlist. * * in: * bpContext - for Bacula debug and jobinfo messages * command - a Plugin command for a job as a first backend context * out: * New backend contexts list at commandlist allocated and initialized. * The only error we can get here is out of memory error, handled internally * by Bacula itself. */ void DOCKER::new_commandctx(bpContext *ctx, const char *command) { /* our new command context */ dkcommctx = New(DKCOMMCTX(command)); /* add command context to our list */ commandlist->append(dkcommctx); DMSG(ctx, DINFO, "Command context allocated for: %s\n", command); /* setup runtime workingdir */ dkcommctx->setworkingdir(workingdir); } /* * The function manages the command contexts list. 
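 * If no list exists yet it is allocated together with the first context;
 * otherwise the list is searched by the plugin command string and a matching
 * DKCOMMCTX is reused, so every distinct Plugin command line owns exactly one
 * backend context for the whole job.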
* * in: * bpContext - for Bacula debug and jobinfo messages * command - a Plugin command for a job as a first backend context * out: * this.dkcommctx - the DKCOMMCTX allocated/switched for command */ void DOCKER::switch_commandctx(bpContext *ctx, const char *command) { DKCOMMCTX *dkctx; if (commandlist == NULL){ /* new command list required, we assumed 8 command contexts at start, should be sufficient */ commandlist = New(alist(8, not_owned_by_alist)); /* our first command context */ new_commandctx(ctx, command); } else { /* command list available, so search for already allocated context */ foreach_alist(dkctx, commandlist){ if (bstrcmp(dkctx->command, command)){ /* found, set dkcommctx to it and return */ dkcommctx = dkctx; DMSG(ctx, DINFO, "Command context switched to: %s\n", command); return; } } /* well, command context not found, so allocate a new one */ new_commandctx(ctx, command); } } /* * Prepares a single Plugin command for backup or estimate job. * Make a preparation by parse a plugin command and check the * backup/estimate/listing mode and make a proper dkcommctx initialization. * * in: * bpContext - for Bacula debug and jobinfo messages * command - a Plugin command to prepare * out: * bRC_OK - when preparation was successful * bRC_Error - on any error */ bRC DOCKER::prepare_bejob(bpContext* ctx, char *command) { /* first, parse backup command */ if (parse_plugin_command(ctx, command) != bRC_OK){ return bRC_Error; } switch (listing_mode){ case DOCKER_LISTING_NONE: /* other will prepare backup job in dkcommctx context */ return dkcommctx->prepare_bejob(ctx, estimate); case DOCKER_LISTING_CONTAINER: /* listing require all */ if (!dkcommctx->get_all_containers(ctx)){ return bRC_Error; } dkcommctx->set_all_containers_to_backup(ctx); break; case DOCKER_LISTING_IMAGE: if (!dkcommctx->get_all_images(ctx)){ return bRC_Error; } dkcommctx->set_all_images_to_backup(ctx); break; case DOCKER_LISTING_VOLUME: if (!dkcommctx->get_all_volumes(ctx)){ return bRC_Error; } dkcommctx->set_all_volumes_to_backup(ctx); break; default: break; } return bRC_OK; } /* * Prepares a single Plugin command for backup. * * in: * bpContext - for Bacula debug and jobinfo messages * command - a Plugin command to prepare * out: * bRC_OK - when preparation was successful * bRC_Error - on any error */ bRC DOCKER::prepare_backup(bpContext* ctx, char *command) { estimate = false; if (prepare_bejob(ctx, command) != bRC_OK){ return bRC_Error; } return bRC_OK; } /* * Prepares a single Plugin command for estimate/listing. * * in: * bpContext - for Bacula debug and jobinfo messages * command - a Plugin command to prepare * out: * bRC_OK - when preparation was successful * bRC_Error - on any error */ bRC DOCKER::prepare_estimate(bpContext* ctx, char *command) { estimate = true; if (prepare_bejob(ctx, command) != bRC_OK){ return bRC_Error; } dkcommctx->clear_abort_on_error(); return bRC_OK; } /* * Prepares a single Plugin command for restore. * Make a preparation by parse a plugin command and make a proper dkcommctx * initialization. * in: * bpContext - for Bacula debug and jobinfo messages * command - a Plugin command to prepare * out: * bRC_OK - when preparation was successful * bRC_Error - on any error */ bRC DOCKER::prepare_restore(bpContext* ctx, char *command) { /* first, parse backup command */ if (parse_plugin_command(ctx, command) != bRC_OK){ return bRC_Error; } /* prepare restore */ return dkcommctx->prepare_restore(ctx); } /* * This is the main method for handling events generated by Bacula. 
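 * (Job start/end, backup level, since time, the estimate/backup/restore
 * command events and incoming restore objects are all dispatched from the
 * single switch statement below.)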
* The behavior of the method depends on event type generated, but there are * some events which does nothing, just return with bRC_OK. Every event is * tracked in debug trace file to verify the event flow during development. * * in: * bpContext - for Bacula debug and jobinfo messages * event - a Bacula event structure * value - optional event value * out: * bRC_OK - in most cases signal success/no error * bRC_Error - in most cases signal error * - depend on Bacula Plugin API if applied */ bRC DOCKER::handlePluginEvent(bpContext *ctx, bEvent *event, void *value) { switch (event->eventType) { case bEventJobStart: DMSG_EVENT_STR(event, value); getBaculaVar(bVarJobId, (void *)&JobId); getBaculaVar(bVarJobName, (void *)&JobName); break; case bEventJobEnd: DMSG_EVENT_STR(event, value); if (dockerworkclear == 1){ dkcommctx->clean_working_volume(ctx); dockerworkclear = 0; } break; case bEventLevel: char lvl; lvl = (char)((intptr_t) value & 0xff); DMSG_EVENT_CHAR(event, lvl); /* the plugin support a FULL backup only as Docker does not support other levels */ mode = DOCKER_BACKUP_FULL; if (lvl != 'F'){ unsupportedlevel = true; } break; case bEventSince: since = (time_t) value; DMSG_EVENT_LONG(event, since); break; case bEventStartBackupJob: DMSG_EVENT_STR(event, value); break; case bEventEndBackupJob: DMSG_EVENT_STR(event, value); break; case bEventStartRestoreJob: DMSG_EVENT_STR(event, value); getBaculaVar(bVarWhere, &where); DMSG(ctx, DINFO, "Where=%s\n", NPRT(where)); getBaculaVar(bVarReplace, &replace); DMSG(ctx, DINFO, "Replace=%c\n", replace); mode = DOCKER_RESTORE; break; case bEventEndRestoreJob: DMSG_EVENT_STR(event, value); break; /* Plugin command e.g. plugin = :parameters */ case bEventEstimateCommand: DMSG_EVENT_STR(event, value); estimate = true; free_and_null_pool_memory(fname); return prepare_estimate(ctx, (char*) value); /* Plugin command e.g. plugin = :parameters */ case bEventBackupCommand: DMSG_EVENT_STR(event, value); robjsent = false; free_and_null_pool_memory(fname); return prepare_backup(ctx, (char*)value); /* Plugin command e.g. plugin = :parameters */ case bEventRestoreCommand: DMSG_EVENT_STR(event, value); getBaculaVar(bVarRegexWhere, ®exwhere); DMSG(ctx, DINFO, "RegexWhere=%s\n", NPRT(regexwhere)); if (regexwhere){ /* the plugin cannot support regexwhere, so raise the error */ DMSG0(ctx, DERROR, "Cannot support RegexWhere restore parameter. Aborting Job.\n"); JMSG0(ctx, M_FATAL, "Cannot support RegexWhere restore parameter. Aborting Job.\n"); return bRC_Error; } return prepare_restore(ctx, (char*)value); /* Plugin command e.g. plugin = :parameters */ case bEventPluginCommand: DMSG_EVENT_STR(event, value); if (unsupportedlevel){ DMSG0(ctx, DERROR, "Unsupported backup level. Doing FULL backup.\n"); JMSG0(ctx, M_ERROR, "Unsupported backup level. Doing FULL backup.\n"); /* single error message is enough */ unsupportedlevel = false; } // check accurate mode backup int accurate; getBaculaVar(bVarAccurate, &accurate); DMSG(ctx, DINFO, "Accurate=%d\n", accurate); if (accurate > 0 && !accurate_warning){ DMSG0(ctx, DERROR, "Accurate mode is not supported. Please disable Accurate mode for this job.\n"); JMSG0(ctx, M_WARNING, "Accurate mode is not supported. 
Please disable Accurate mode for this job.\n"); /* single error message is enough */ accurate_warning = true; } break; case bEventOptionPlugin: case bEventHandleBackupFile: DMSG0(ctx, DERROR, "Invalid handle Option Plugin called!\n"); JMSG2(ctx, M_FATAL, "The %s plugin doesn't support the Option Plugin configuration.\n" "Please review your FileSet and move the Plugin=%s" "... command into the Include {} block.\n", PLUGINNAME, PLUGINPREFIX); return bRC_Error; break; case bEventEndFileSet: DMSG_EVENT_STR(event, value); break; case bEventRestoreObject: /* Restore Object handle - a plugin configuration for restore and user supplied parameters */ if (!value){ DMSG0(ctx, DINFO, "End restore objects.\n"); break; } DMSG_EVENT_PTR(event, value); return parse_plugin_restoreobj(ctx, (restore_object_pkt *) value); default: // enabled only for Debug DMSG2(ctx, D2, "Unknown event: %s (%d) \n", eventtype2str(event), event->eventType); } return bRC_OK; } /* * Reads a data from command tool on backup. * * in: * bpContext - for Bacula debug and jobinfo messages * io - Bacula Plugin API I/O structure for I/O operations * out: * bRC_OK - when successful * bRC_Error - on any error */ bRC DOCKER::perform_read_data(bpContext *ctx, struct io_pkt *io) { int rc; if (dkcommctx->is_eod()){ /* TODO: we signal EOD as rc=0, so no need to explicity check for EOD, right? */ io->status = 0; } else { rc = dkcommctx->read_data(ctx, io->buf, io->count); io->status = rc; if (rc < 0){ io->io_errno = EIO; return bRC_Error; } } return bRC_OK; } /* * Reads a data from command tool on backup. * * in: * bpContext - for Bacula debug and jobinfo messages * io - Bacula Plugin API I/O structure for I/O operations * out: * bRC_OK - when successful * bRC_Error - on any error */ bRC DOCKER::perform_read_volume_data(bpContext *ctx, struct io_pkt *io) { io->status = read(dkfd, io->buf, io->count); if (io->status < 0){ io->io_errno = errno; return bRC_Error; } return bRC_OK; } /* * Writes data to command tool on restore. * * in: * bpContext - for Bacula debug and jobinfo messages * io - Bacula Plugin API I/O structure for I/O operations * out: * bRC_OK - when successful * bRC_Error - on any error */ bRC DOCKER::perform_write_data(bpContext *ctx, struct io_pkt *io) { int rc = 0; if (dkfd){ rc = write(dkfd, io->buf, io->count); } else { rc = dkcommctx->write_data(ctx, io->buf, io->count); } io->status = rc; if (rc < 0){ io->io_errno = EIO; return bRC_Error; } return bRC_OK; } /* * Execute a backup command. 
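 * For DOCKER_VOLUME objects the data path is a named pipe
 * (BACULACONTAINERFOUT) created in the job working volume: the helper
 * container writes the tar stream into the fifo and the plugin reads it back
 * through dkfd, with a thread timer around open() so a stuck container cannot
 * block the job indefinitely.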
* * in: * bpContext - for Bacula debug and jobinfo messages * io - Bacula Plugin API I/O structure for I/O operations * out: * bRC_OK - when successful * bRC_Error - on any error * io->status, io->io_errno - set to error on any error */ bRC DOCKER::perform_backup_open(bpContext *ctx, struct io_pkt *io) { POOL_MEM wname(PM_FNAME); struct stat statp; DMSG1(ctx, DDEBUG, "perform_backup_open called: %s\n", io->fname); /* prepare backup for DOCKER_VOLUME */ if (currdkinfo->type() == DOCKER_VOLUME){ if (dkcommctx->prepare_working_volume(ctx, JobId) != bRC_OK){ io->status = -1; io->io_errno = EIO; return bRC_Error; } dkcommctx->render_working_volume_filename(wname, BACULACONTAINERFOUT); if (stat(wname.c_str(), &statp) != 0){ berrno be; /* if the path does not exist then create one */ if (be.code() != ENOENT || mkfifo(wname.c_str(), 0600) != 0){ /* error creating named pipe */ be.set_errno(errno); io->status = -1; io->io_errno = be.code(); dkcommctx->set_error(); DMSG2(ctx, DERROR, "cannot create file: %s Err=%s\n", wname.c_str(), be.bstrerror()); JMSG2(ctx, dkcommctx->is_abort_on_error() ? M_FATAL : M_ERROR, "Cannot create file: %s Err=%s\n", wname.c_str(), be.bstrerror()); return bRC_Error; } } else { /* check if it is a proper file */ if (!S_ISFIFO(statp.st_mode)){ /* not fifo, not good */ DMSG2(ctx, DERROR, "file is not fifo: %s [%o]\n", wname.c_str(), statp.st_mode); JMSG2(ctx, dkcommctx->is_abort_on_error() ? M_FATAL : M_ERROR, "Improper file type: %s [%o]\n", wname.c_str(), statp.st_mode); return bRC_Error; } } } /* execute backup docker */ if (dkcommctx->backup_docker(ctx, currdkinfo, JobId) != bRC_OK){ io->status = -1; io->io_errno = EIO; if (dkcommctx->is_abort_on_error()){ /* abort_on_error set, so terminate other backup for other container */ dkcommctx->finish_backup_list(ctx); } return bRC_Error; } /* finish preparation for DOCKER_VOLUME */ if (currdkinfo->type() == DOCKER_VOLUME){ btimer_t *timer = start_thread_timer(NULL, pthread_self(), dkcommctx->timeout()); dkfd = open(wname.c_str(), O_RDONLY); stop_thread_timer(timer); if (dkfd < 0){ /* error opening file to read */ berrno be; io->status = -1; io->io_errno = be.code(); dkcommctx->set_error(); DMSG2(ctx, DERROR, "cannot open archive file: %s Err=%s\n", wname.c_str(), be.bstrerror()); JMSG2(ctx, dkcommctx->is_abort_on_error() ? M_FATAL : M_ERROR, "Cannot open archive file: %s Err=%s\n", wname.c_str(), be.bstrerror()); return bRC_Error; } mode = DOCKER_BACKUP_VOLUME_FULL; } dkcommctx->clear_eod(); return bRC_OK; } /* * Perform a restore file creation and open when restore to local server or * restore command execution when restore to Docker. 
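 * A local restore simply creates the destination file (mode 0640) and writes
 * into it; a restore to Docker mirrors perform_backup_open(): volumes get a
 * BACULACONTAINERFIN fifo opened write-only under a thread timer, while
 * images and containers are handed to DKCOMMCTX::restore_docker().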
* * in: * bpContext - for Bacula debug and jobinfo messages * io - Bacula Plugin API I/O structure for I/O operations * out: * bRC_OK - when successful * bRC_Error - on any error * io->status, io->io_errno - set to error on any error */ bRC DOCKER::perform_restore_open(bpContext* ctx, io_pkt* io) { POOL_MEM wname(PM_FNAME); int status; btimer_t *timer; /* first local restore as a simpler case */ if (local_restore){ /* restore local */ dkfd = open(fname, O_CREAT|O_WRONLY, 0640); if (dkfd < 0){ /* error opening file to write */ io->status = -1; io->io_errno = errno; return bRC_Error; } } else { /* prepare restore for DOCKER_VOLUME */ if (restoredkinfo->type() == DOCKER_VOLUME){ if (dkcommctx->prepare_working_volume(ctx, JobId) != bRC_OK){ io->status = -1; io->io_errno = EIO; return bRC_Error; } dkcommctx->render_working_volume_filename(wname, BACULACONTAINERFIN); status = mkfifo(wname.c_str(), 0600); if (status < 0){ /* error creating named pipe */ berrno be; io->status = -1; io->io_errno = be.code(); dkcommctx->set_error(); DMSG2(ctx, DERROR, "cannot create file: %s Err=%s\n", wname.c_str(), be.bstrerror()); JMSG2(ctx, dkcommctx->is_abort_on_error() ? M_FATAL : M_ERROR, "Cannot create file: %s Err=%s\n", wname.c_str(), be.bstrerror()); return bRC_Error; } } /* execute backup docker */ if (dkcommctx->restore_docker(ctx, restoredkinfo, JobId) != bRC_OK){ io->status = -1; io->io_errno = EIO; return bRC_Error; } /* finish preparation for DOCKER_VOLUME */ if (restoredkinfo->type() == DOCKER_VOLUME){ timer = start_thread_timer(NULL, pthread_self(), dkcommctx->timeout()); dkfd = open(wname.c_str(), O_WRONLY); stop_thread_timer(timer); if (dkfd < 0){ /* error opening file to write */ berrno be; io->status = -1; io->io_errno = be.code(); dkcommctx->set_error(); DMSG2(ctx, DERROR, "cannot open archive file: %s Err=%s\n", wname.c_str(), be.bstrerror()); JMSG2(ctx, dkcommctx->is_abort_on_error() ? M_FATAL : M_ERROR, "Cannot open archive file: %s Err=%s\n", wname.c_str(), be.bstrerror()); return bRC_Error; } mode = DOCKER_RESTORE_VOLUME; } dkcommctx->clear_eod(); } return bRC_OK; } /* * Perform command tool termination when backup finish. * * in: * bpContext - for Bacula debug and jobinfo messages * io - Bacula Plugin API I/O structure for I/O operations * out: * bRC_OK - when successful * bRC_Error - on any error */ bRC DOCKER::perform_backup_close(bpContext *ctx, struct io_pkt *io) { bRC status = bRC_OK; dkcommctx->terminate(ctx); if (currdkinfo->type() == DOCKER_VOLUME){ if (close(dkfd) < 0){ io->status = -1; io->io_errno = errno; status = bRC_Error; } mode = DOCKER_BACKUP_FULL; errortar = check_container_tar_error(ctx, currdkinfo->get_volume_name()); } return status; } /* * Perform a restore file close when restore to local server or wait for restore * command finish execution when restore to Docker. 
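 * For a container the freshly restored image is tagged as
 * name/digest/jobid:restore, the saved-image reference on the DKINFO is
 * updated, the container name is changed to originalname_jobid and finally
 * DKCOMMCTX::docker_create_run_container() recreates (and optionally starts)
 * the container.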
* * in: * bpContext - for Bacula debug and jobinfo messages * io - Bacula Plugin API I/O structure for I/O operations * out: * bRC_OK - when successful * bRC_Error - on any error */ bRC DOCKER::perform_restore_close(bpContext *ctx, struct io_pkt *io) { bRC status = bRC_OK; DKID dkid; POOL_MEM buf(PM_NAME); POOL_MEM names(PM_NAME); /* both local_restore and volume restore uses dkfd */ if (dkfd > 0){ if (close(dkfd) < 0){ io->status = -1; io->io_errno = errno; status = bRC_Error; } dkfd = 0; if (mode == DOCKER_RESTORE_VOLUME && restoredkinfo && restoredkinfo->type() == DOCKER_VOLUME){ mode = DOCKER_RESTORE; errortar = check_container_tar_error(ctx, restoredkinfo->get_volume_name()); } } else { status = dkcommctx->wait_for_restore(ctx, dkid); if (status != bRC_OK){ io->status = -1; io->io_errno = EIO; } else { switch (restoredkinfo->type()){ case DOCKER_IMAGE: /* when restore image then rename it only */ status = dkcommctx->docker_tag(ctx, dkid, restoredkinfo->get_image_repository_tag()); break; case DOCKER_CONTAINER: /* on container image we need to create a container itself, first tag the restored image */ Mmsg(buf, "%s/%s/%d:restore", restoredkinfo->name(), restoredkinfo->id()->digest_short(), JobId); status = dkcommctx->docker_tag(ctx, dkid, buf.c_str()); if (status != bRC_OK){ DMSG1(ctx, DERROR, "perform_restore_close cannot tag restored image: %s\n", buf.c_str()); JMSG1(ctx, M_ERROR, "perform_restore_close cannot tag restored image: %s\n", buf.c_str()); break; } /* update image information on restoring container */ restoredkinfo->set_container_imagesave(dkid); restoredkinfo->set_container_imagesave_tag(buf); /* update a container name */ pm_strcpy(names, restoredkinfo->get_container_names()); Mmsg(buf, "%s_%d", names.c_str(), JobId); restoredkinfo->set_container_names(buf); status = dkcommctx->docker_create_run_container(ctx, restoredkinfo); if (status != bRC_OK){ DMSG1(ctx, DERROR, "perform_restore_close cannot create container: %s\n", restoredkinfo->get_container_names()); JMSG1(ctx, M_ERROR, "perform_restore_close cannot create container: %s\n", restoredkinfo->get_container_names()); break; } break; case DOCKER_VOLUME: /* XXX */ break; } } } return status; } /* * This is to check how Bacula archive container finish its job. * We are doing this by examining docker.err file contents. * * in: * bpContext - for Bacula debug and jobinfo messages * out: * false - when no errors found * true - errors found and reported to user */ bool DOCKER::check_container_tar_error(bpContext* ctx, char *volname) { struct stat statp; POOL_MEM flog(PM_FNAME); int rc; if (dockerworkclear == 0){ dockerworkclear = 1; } dkcommctx->render_working_volume_filename(flog, BACULACONTAINERERRLOG); if (stat(flog.c_str(), &statp) == 0){ if (statp.st_size > 0){ /* the error file has some content, so archive command was unsuccessful, report it */ POOL_MEM errlog(PM_MESSAGE); int fd; char *p; fd = open(flog.c_str(), O_RDONLY); if (fd < 0){ /* error opening errorlog, strange */ berrno be; DMSG2(ctx, DERROR, "error opening archive errorlog file: %s Err=%s\n", flog.c_str(), be.bstrerror()); JMSG2(ctx, dkcommctx->is_abort_on_error() ? M_FATAL : M_ERROR, "Error opening archive errorlog file: %s Err=%s\n", flog.c_str(), be.bstrerror()); return true; } rc = read(fd, errlog.c_str(), errlog.size() - 1); close(fd); if (rc < 0){ /* we should read some data, right? */ berrno be; DMSG2(ctx, DERROR, "error reading archive errorlog file: %s Err=%s\n", flog.c_str(), be.bstrerror()); JMSG2(ctx, dkcommctx->is_abort_on_error() ? 
M_FATAL : M_ERROR, "Error reading archive errorlog file: %s Err=%s\n", flog.c_str(), be.bstrerror()); return true; } /* clear last newline */ p = errlog.c_str(); if (p[rc-1] == '\n') p[rc-1] = 0; /* display error to user */ DMSG1(ctx, DERROR, "errorlog: %s\n", errlog.c_str()); JMSG1(ctx, dkcommctx->is_abort_on_error() ? M_FATAL : M_ERROR, "Archive error: %s\n", errlog.c_str()); /* rename log files for future use */ if (debug_level > 200){ POOL_MEM nflog(PM_FNAME); dockerworkclear = 2; Mmsg(nflog, "%s.%s", flog.c_str(), volname); rc = rename(flog.c_str(), nflog.c_str()); if (rc < 0){ /* error renaming, report */ berrno be; DMSG2(ctx, DERROR, "error renaming archive errorlog to: %s Err=%s\n", nflog.c_str(), be.bstrerror()); JMSG2(ctx, M_ERROR, "Error renaming archive errorlog file to: %s Err=%s\n", nflog.c_str(), be.bstrerror()); } dkcommctx->render_working_volume_filename(flog, BACULACONTAINERARCHLOG); Mmsg(nflog, "%s.%s", flog.c_str(), volname); rc = rename(flog.c_str(), nflog.c_str()); if (rc < 0){ /* error renaming, report */ berrno be; DMSG2(ctx, DERROR, "error renaming archive log to: %s Err=%s\n", nflog.c_str(), be.bstrerror()); JMSG2(ctx, M_ERROR, "Error renaming archive log file to: %s Err=%s\n", nflog.c_str(), be.bstrerror()); } } return true; } } else { /* error access to BACULACONTAINERERRLOG, strange, report it */ berrno be; DMSG2(ctx, DERROR, "error access archive errorlog file: %s Err=%s\n", flog.c_str(), be.bstrerror()); JMSG2(ctx, M_ERROR, "Error access archive errorlog file: %s Err=%s\n", flog.c_str(), be.bstrerror()); } return false; }; /* * Handle Bacula Plugin I/O API for backend * * in: * bpContext - for Bacula debug and jobinfo messages * io - Bacula Plugin API I/O structure for I/O operations * out: * bRC_OK - when successful * bRC_Error - on any error * io->status, io->io_errno - correspond to a plugin io operation status */ bRC DOCKER::pluginIO(bpContext *ctx, struct io_pkt *io) { static int rw = 0; // this variable handles single debug message /* assume no error from the very beginning */ io->status = 0; io->io_errno = 0; switch (io->func) { case IO_OPEN: DMSG(ctx, D2, "IO_OPEN: (%s)\n", io->fname); switch (mode){ case DOCKER_BACKUP_FULL: case DOCKER_BACKUP_INCR: case DOCKER_BACKUP_DIFF: case DOCKER_BACKUP_VOLUME_FULL: return perform_backup_open(ctx, io); case DOCKER_RESTORE: case DOCKER_RESTORE_VOLUME: return perform_restore_open(ctx, io); default: return bRC_Error; } break; case IO_READ: if (!rw) { rw = 1; DMSG2(ctx, D2, "IO_READ buf=%p len=%d\n", io->buf, io->count); } switch (mode){ case DOCKER_BACKUP_FULL: case DOCKER_BACKUP_INCR: case DOCKER_BACKUP_DIFF: return perform_read_data(ctx, io); case DOCKER_BACKUP_VOLUME_FULL: return perform_read_volume_data(ctx, io); default: return bRC_Error; } break; case IO_WRITE: if (!rw) { rw = 1; DMSG2(ctx, D2, "IO_WRITE buf=%p len=%d\n", io->buf, io->count); } switch (mode){ case DOCKER_RESTORE: case DOCKER_RESTORE_VOLUME: return perform_write_data(ctx, io); default: return bRC_Error; } break; case IO_CLOSE: DMSG0(ctx, D2, "IO_CLOSE\n"); rw = 0; switch (mode){ case DOCKER_RESTORE: case DOCKER_RESTORE_VOLUME: return perform_restore_close(ctx, io); case DOCKER_BACKUP_FULL: case DOCKER_BACKUP_VOLUME_FULL: case DOCKER_BACKUP_INCR: case DOCKER_BACKUP_DIFF: return perform_backup_close(ctx, io); default: return bRC_Error; } break; } return bRC_OK; } /* * Unimplemented, always return bRC_OK. */ bRC DOCKER::getPluginValue(bpContext *ctx, pVariable var, void *value) { return bRC_OK; } /* * Unimplemented, always return bRC_OK. 
*/ bRC DOCKER::setPluginValue(bpContext *ctx, pVariable var, void *value) { return bRC_OK; } /* * Get all required information from Docker to populate save_pkt for Bacula. * It handles a Restore Object (FT_PLUGIN_CONFIG) for every backup and * new Plugin Backup Command if setup in FileSet. It handles * backup/estimate/listing modes of operation. * * in: * bpContext - for Bacula debug and jobinfo messages * save_pkt - Bacula Plugin API save packet structure * out: * bRC_OK - when save_pkt prepared successfully and we have file to backup * bRC_Max - when no more files to backup * bRC_Error - in any error */ bRC DOCKER::startBackupFile(bpContext *ctx, struct save_pkt *sp) { /* handle listing mode if requested */ if (estimate && listing_mode == DOCKER_LISTING_TOP){ sp->fname = (char*)docker_objects[listing_objnr++].name; sp->type = FT_DIREND; sp->statp.st_size = 0; sp->statp.st_nlink = 1; sp->statp.st_uid = 0; sp->statp.st_gid = 0; sp->statp.st_mode = 040750; sp->statp.st_blksize = 4096; sp->statp.st_blocks = 1; sp->statp.st_atime = sp->statp.st_mtime = sp->statp.st_ctime = time(NULL); return bRC_OK; } /* The first file in Full backup, is the RestoreObject */ if (!estimate && mode == DOCKER_BACKUP_FULL && robjsent == false) { ConfigFile ini; /* robj for the first time, allocate the buffer */ if (!robjbuf){ robjbuf = get_pool_memory(PM_FNAME); } ini.register_items(plugin_items_dump, sizeof(struct ini_items)); sp->restore_obj.object_name = (char *)INI_RESTORE_OBJECT_NAME; sp->restore_obj.object_len = ini.serialize(&robjbuf); sp->restore_obj.object = robjbuf; sp->type = FT_PLUGIN_CONFIG; DMSG0(ctx, DINFO, "Prepared RestoreObject sent.\n"); return bRC_OK; } /* check for forced backup finish */ if (backup_finish){ DMSG0(ctx, DINFO, "forced backup finish!\n"); backup_finish = false; return bRC_Max; } /* check if this is the first container to backup/estimate/listing */ if (currdkinfo == NULL){ /* set all_to_backup list at first element */ currdkinfo = dkcommctx->get_first_to_backup(ctx); if (!currdkinfo){ /* no docker objects to backup at all */ DMSG0(ctx, DDEBUG, "No Docker containers or objects to backup found.\n"); JMSG0(ctx, dkcommctx->is_abort_on_error() ? 
M_FATAL : M_ERROR, "No Docker containers or objects to backup found.\n"); return bRC_Max; } } /* in currdkinfo we have all info about docker object to backup */ if (!estimate && mode != DOCKER_BACKUP_CONTAINER_VOLLIST){ if (currdkinfo->type() != DOCKER_VOLUME){ DMSG3(ctx, DINFO, "Start Backup %s: %s (%s)\n", currdkinfo->type_str(), currdkinfo->name(), currdkinfo->id()->digest_short()); JMSG3(ctx, M_INFO, "Start Backup %s: %s (%s)\n", currdkinfo->type_str(), currdkinfo->name(), currdkinfo->id()->digest_short()); } else { DMSG2(ctx, DINFO, "Start Backup %s: %s\n", currdkinfo->type_str(), currdkinfo->name()); JMSG2(ctx, M_INFO, "Start Backup %s: %s\n", currdkinfo->type_str(), currdkinfo->name()); } } /* generate the filename in backup/estimate */ if (!fname){ fname = get_pool_memory(PM_FNAME); } if (!lname){ lname = get_pool_memory(PM_FNAME); } /* populate common statp */ sp->statp.st_nlink = 1; sp->statp.st_uid = 0; sp->statp.st_gid = 0; sp->portable = true; sp->statp.st_blksize = 4096; // TODO: use created time of image and volume objects sp->statp.st_atime = sp->statp.st_mtime = sp->statp.st_ctime = time(NULL); sp->statp.st_mode = S_IFREG | 0640; // standard file with '-rw-r----' permissions if (mode == DOCKER_BACKUP_CONTAINER_VOLLIST && currvols){ sp->statp.st_size = currvols->vol->size(); sp->statp.st_blocks = sp->statp.st_size / 4096 + 1; sp->type = FT_LNK; if (!estimate){ Mmsg(fname, "%s%s/%s/volume: %s -> %s", PLUGINNAMESPACE, CONTAINERNAMESPACE, currdkinfo->name(), currvols->vol->get_volume_name(), currvols->destination); *lname = 0; } else { Mmsg(fname, "%s%s/%s/volume: %s", PLUGINNAMESPACE, CONTAINERNAMESPACE, currdkinfo->name(), currvols->vol->get_volume_name()); lname = currvols->destination; } sp->link = lname; sp->statp.st_mode = S_IFLNK | 0640; } else { sp->statp.st_size = currdkinfo->size(); sp->statp.st_blocks = sp->statp.st_size / 4096 + 1; sp->type = FT_REG; // exported archive is a standard file switch(listing_mode){ case DOCKER_LISTING_NONE: /* generate a backup/estimate filename */ switch (currdkinfo->type()){ case DOCKER_CONTAINER: Mmsg(fname, "%s%s/%s/%s.tar", PLUGINNAMESPACE, CONTAINERNAMESPACE, currdkinfo->name(), (char*)*currdkinfo->id()); break; case DOCKER_IMAGE: Mmsg(fname, "%s%s/%s/%s.tar", PLUGINNAMESPACE, IMAGENAMESPACE, currdkinfo->name(), (char*)*currdkinfo->id()); break; case DOCKER_VOLUME: Mmsg(fname, "%s%s/%s.tar", PLUGINNAMESPACE, VOLUMENAMESPACE, currdkinfo->name()); break; default: DMSG1(ctx, DERROR, "unknown object type to backup: %s\n", currdkinfo->type_str()); JMSG1(ctx, M_ERROR, "Unknown object type to backup: %s\n", currdkinfo->type_str()); return bRC_Error; } break; case DOCKER_LISTING_VOLUME: sp->statp.st_mode = S_IFBLK | 0640; // standard block device with 'brw-r----' permissions Mmsg(fname, "%s", currdkinfo->name()); break; case DOCKER_LISTING_IMAGE: sp->statp.st_mode = S_IFBLK | 0640; // standard block device with 'brw-r----' permissions case DOCKER_LISTING_CONTAINER: Mmsg(lname, "%s", param_notrunc?(char*)*currdkinfo->id():currdkinfo->id()->digest_short()); Mmsg(fname, "%s", currdkinfo->name()); sp->link = lname; sp->type = FT_LNK; break; default: /* error */ break; } } /* populate rest of statp */ sp->fname = fname; return bRC_OK; } /* * Finish the Docker backup and clean temporary objects. * For estimate/listing modes it handles next object to display. 
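 * For containers the temporary commit image created for the backup is removed
 * here, and when the container has volumes mounted the method switches to
 * DOCKER_BACKUP_CONTAINER_VOLLIST mode so every mount is emitted as an extra
 * symbolic-link entry before advancing to the next object to backup.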
* * in: * bpContext - for Bacula debug and jobinfo messages * save_pkt - Bacula Plugin API save packet structure * out: * bRC_OK - when no more files to backup * bRC_More - when Bacula should expect a next file * bRC_Error - in any error */ bRC DOCKER::endBackupFile(bpContext *ctx) { if (!estimate && mode != DOCKER_BACKUP_CONTAINER_VOLLIST){ /* If the current file was the restore object, so just ask for the next file */ if (mode == DOCKER_BACKUP_FULL && robjsent == false) { robjsent = true; return bRC_More; } switch (currdkinfo->type()){ case DOCKER_CONTAINER: /* delete backup commit image */ if (dkcommctx->delete_container_commit(ctx, currdkinfo, JobId) != bRC_OK){ /* TODO: report problem to the user but not abort backup */ return bRC_Error; } case DOCKER_IMAGE: DMSG4(ctx, DINFO, "Backup of %s: %s (%s) %s.\n", currdkinfo->type_str(), currdkinfo->name(), currdkinfo->id()->digest_short(), dkcommctx->is_error() ? "Failed" : "OK"); JMSG4(ctx, M_INFO, "Backup of %s: %s (%s) %s.\n", currdkinfo->type_str(), currdkinfo->name(), currdkinfo->id()->digest_short(), dkcommctx->is_error() ? "Failed" : "OK"); break; case DOCKER_VOLUME: /* check */ DMSG3(ctx, DINFO, "Backup of %s: %s %s.\n", currdkinfo->type_str(), currdkinfo->name(), dkcommctx->is_error() || errortar ? "Failed" : "OK"); JMSG3(ctx, M_INFO, "Backup of %s: %s %s.\n", currdkinfo->type_str(), currdkinfo->name(), dkcommctx->is_error() || errortar ? "Failed" : "OK"); break; }; } /* handle listing and next file to backup */ if (listing_mode == DOCKER_LISTING_TOP){ /* handle top-level listing mode */ if (docker_objects[listing_objnr].name){ /* next object available */ return bRC_More; } } else { /* check if container we just backup has any vols mounted */ if (currdkinfo->type() == DOCKER_CONTAINER && !currvols && currdkinfo->container_has_vols() && mode != DOCKER_BACKUP_CONTAINER_VOLLIST){ /* yes, so prepare the flow for symbolic link backup */ currvols = currdkinfo->container_first_vols(); mode = DOCKER_BACKUP_CONTAINER_VOLLIST; DMSG0(ctx, DDEBUG, "docker vols to backup found\n"); return bRC_More; } /* check if we already in symbolic link backup mode */ if (mode == DOCKER_BACKUP_CONTAINER_VOLLIST && currvols){ /* yes, so check for next symbolic link to backup */ currvols = currdkinfo->container_next_vols(); if (currvols){ DMSG0(ctx, DDEBUG, "docker next vols to backup found\n"); return bRC_More; } else { /* it was the last symbolic link, so finish this mode */ mode = DOCKER_BACKUP_FULL; currvols = NULL; } } /* check if next object to backup/estimate/listing */ currdkinfo = dkcommctx->get_next_to_backup(ctx); if (currdkinfo){ DMSG0(ctx, DDEBUG, "next docker object to backup found\n"); return bRC_More; } } return bRC_OK; } /* * Start Restore File. */ bRC DOCKER::startRestoreFile(bpContext *ctx, const char *cmd) { return bRC_OK; } /* * End Restore File. * Handles the next vm state. */ bRC DOCKER::endRestoreFile(bpContext *ctx) { /* release restore dkinfo */ if (restoredkinfo){ delete restoredkinfo; restoredkinfo = NULL; } return bRC_OK; } /* * Search in Docker all available images if image we are restoring already exist. 
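 * The match is made on the image id (digest) rather than on repository:tag,
 * so a retagged copy of the same image still counts as already present.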
* * in: * bpContext - bacula plugin context * this->restoredkinfo - current image to restore * out: * *DKINFO from Docker all_images if image to restore found * NULL when not found */ DKINFO *DOCKER::search_docker_image(bpContext *ctx) { alist *allimages; DKINFO *image = NULL; allimages = dkcommctx->get_all_images(ctx); if (allimages){ DMSG1(ctx, DDEBUG, "search allimages for: %s\n", (char*)restoredkinfo->get_image_id()); /* check if image which we are restoring exist on Docker already */ foreach_alist(image, allimages){ DMSG1(ctx, DDEBUG, "compare: %s\n", (char*)image->get_image_id()); if (image && *image->get_image_id() == *restoredkinfo->get_image_id()){ DMSG0(ctx, DINFO, "image to restore found available\n"); break; } }; } return image; }; /* * Search in Docker all available volumes if volume we are restoring already exist. * * in: * bpContext - bacula plugin context * this->restoredkinfo - current volume to restore * out: * *DKINFO from Docker all_volumes if volume to restore found * NULL when not found */ DKINFO *DOCKER::search_docker_volume(bpContext *ctx) { alist *allvolumes; DKINFO *volume = NULL; allvolumes = dkcommctx->get_all_volumes(ctx); if (allvolumes){ DMSG1(ctx, DDEBUG, "search allvolumes for: %s\n", restoredkinfo->get_volume_name()); /* check if image which we are restoring exist on Docker already */ foreach_alist(volume, allvolumes){ DMSG1(ctx, DDEBUG, "compare: %s\n", volume->get_volume_name()); if (volume && bstrcmp(volume->get_volume_name(), restoredkinfo->get_volume_name())){ DMSG0(ctx, DINFO, "volume to restore found available\n"); break; } }; } return volume; }; /* * When restore to local server then handle restored file creation else * inform user about starting a restore. * * in: * bpContext - bacula plugin context * restore_pkt - Bacula Plugin API restore packet structure * out: * bRC_OK - when success reported from backend * rp->create_status = CF_EXTRACT - the backend will restore the file * with pleasure * rp->create_status = CF_SKIP - the backend wants to skip restoration, i.e. * the file already exist and Replace=n was set * bRC_Error, rp->create_status = CF_ERROR - in any error */ bRC DOCKER::createFile(bpContext *ctx, struct restore_pkt *rp) { POOL_MEM fmt(PM_FNAME); POOL_MEM fmt2(PM_FNAME); POOL_MEM imageid(PM_FNAME); POOL_MEM label(PM_FNAME); struct stat statp; char *dir, *p; int len; int status; DKINFO *image; /* skip a support volume file link */ if (rp->type == FT_LNK && S_ISLNK(rp->statp.st_mode)){ DMSG1(ctx, DDEBUG, "skipping support file: %s\n", rp->ofname); rp->create_status = CF_SKIP; } else { /* it seems something to restore */ if (!fname){ fname = get_pool_memory(PM_FNAME); } /* where=/ then we'll restore to Docker else we'll restore local */ if (where && strlen(where) > 1 && *where == PathSeparator){ local_restore = true; len = strlen(where); DMSG(ctx, DINFO, "local restore to: %s\n", where); pm_strcpy(fmt, rp->ofname); dir = strrchr(fmt.c_str(), '.'); if (dir && bstrcmp(dir, ".tar")){ *dir = 0; JMSG(ctx, M_INFO, "Docker local restore: %s\n", fmt.c_str() + len + strlen(PLUGINNAMESPACE) + 1); } /* compose a destination fname */ pm_strcpy(fname, where); pm_strcat(fname, rp->ofname + len + strlen(PLUGINNAMESPACE)); DMSG(ctx, DDEBUG, "composed fname: %s\n", fname); /* prepare a destination directory */ pm_strcpy(fmt, fname); dir = dirname(fmt.c_str()); if (!dir){ berrno be; DMSG2(ctx, DERROR, "dirname error for %s Err=%s\n", fmt.c_str(), be.bstrerror()); JMSG2(ctx, dkcommctx->is_fatal() ? 
M_FATAL : M_ERROR, "dirname error for %s Err=%s\n", fmt.c_str(), be.bstrerror()); rp->create_status = CF_ERROR; return bRC_Error; } DMSG(ctx, DDEBUG, "dirname: %s\n", fmt.c_str()); if (pluglib_mkpath(ctx, dir, dkcommctx->is_fatal()) != bRC_OK){ rp->create_status = CF_ERROR; return bRC_Error; } switch (replace){ case REPLACE_ALWAYS: rp->create_status = CF_EXTRACT; break; case REPLACE_NEVER: /* check if file exist locally */ if (stat(fname, &statp) == 0){ /* exist, so skip restore */ rp->create_status = CF_SKIP; break; } rp->create_status = CF_EXTRACT; break; case REPLACE_IFNEWER: if (stat(fname, &statp) == 0){ /* exist, so check if newer */ if (statp.st_mtime < rp->statp.st_mtime){ rp->create_status = CF_SKIP; break; } } rp->create_status = CF_EXTRACT; break; case REPLACE_IFOLDER: if (stat(fname, &statp) == 0){ /* exist, so check if newer */ if (statp.st_mtime > rp->statp.st_mtime){ rp->create_status = CF_SKIP; break; } } rp->create_status = CF_EXTRACT; break; } } else { /* TODO: report docker restore start */ local_restore = false; pm_strcpy(fname, rp->ofname + strlen(PLUGINNAMESPACE)); DMSG(ctx, DINFO, "scanning fname to restore: %s\n", fname); /* * first scan for Container backup file * the dirtmp variable has a sscanf format to scan which is dynamically generated * based on the size of label and imageid variables. this limits the size of the scan * and prevents any memory overflow. the destination scan format is something like this: * "/container/%256[^/]/%256[^.]", so it will scan two string variables up to * 256 characters long */ Mmsg(fmt, "%s/%%%d[^/]/%%%d[^.]", CONTAINERNAMESPACE, label.size(), imageid.size()); // DMSG(ctx, DVDEBUG, "container scan str: %s\n", dirtmp.c_str()); status = sscanf(fname, fmt.c_str(), label.c_str(), imageid.c_str()); if (status == 2){ /* insanity check for memleak */ if (restoredkinfo != NULL){ delete restoredkinfo; } restoredkinfo = New(DKINFO(DOCKER_CONTAINER)); restoredkinfo->set_container_id(imageid); restoredkinfo->set_container_names(label); pm_strcpy(fmt, label.c_str()); // Well there is no a pm_strcpy(POOL_MEM&, POOL_MEM&), strange Mmsg(label, "%s/%s", fmt.c_str(), restoredkinfo->get_container_id()->digest_short()); DMSG2(ctx, DINFO, "scanned: %s %s\n", restoredkinfo->get_container_names(), (char*)restoredkinfo->get_container_id()); /* we replace container always? */ rp->create_status = CF_EXTRACT; } else { /* * scan for Volume backup file * the dirtmp variable has a sscanf format to scan which is dynamically generated * based on the size of label variable. this limits the size of the scan * and prevents any memory overflow. the destination scan format is something like this: * "/volume/%256s", so it will scan a single string variable up to * 256 characters long */ Mmsg(fmt, "%s/%%%ds", VOLUMENAMESPACE, label.size()); // DMSG(ctx, DVDEBUG, "volume scan str: %s\n", dirtmp.c_str()); status = sscanf(fname, fmt.c_str(), label.c_str()); if (status == 1){ /* terminate volume name, so fname without '.tar. 
*/ p = strstr(label.c_str(), ".tar"); *p = 0; /* insanity check for memleak */ if (restoredkinfo != NULL){ delete restoredkinfo; } restoredkinfo = New(DKINFO(DOCKER_VOLUME)); restoredkinfo->set_volume_name(label); DMSG1(ctx, DINFO, "scanned: %s\n", restoredkinfo->get_volume_name()); /* check for remote docker operations as this is not supported currently */ if (dkcommctx->is_remote_docker()){ DMSG1(ctx, DINFO, "volume %s restore with docker_host skipped.\n", restoredkinfo->get_volume_name()); if (!volumewarning){ JMSG0(ctx, M_WARNING, "Docker Volume restore with docker_host is unsupported! All volumes restore skipped.\n"); volumewarning = true; } rp->create_status = CF_SKIP; return bRC_OK; } switch (replace){ case REPLACE_ALWAYS: rp->create_status = CF_EXTRACT; break; case REPLACE_NEVER: case REPLACE_IFNEWER: case REPLACE_IFOLDER: default: /* * check if volume exist on docker, * as we cannot check if the volume was modified * then we will treat it the same as REPLACE_NEVER flag */ if ((image = search_docker_volume(ctx)) != NULL){ /* exist, so skip restore */ DMSG1(ctx, DINFO, "volume exist, skipping restore of: %s\n", restoredkinfo->get_volume_name()); JMSG1(ctx, M_INFO, "Volume exist, skipping restore of: %s\n", restoredkinfo->get_volume_name()); rp->create_status = CF_SKIP; break; } rp->create_status = CF_EXTRACT; break; } } else { /* now scan for Image backup */ p = strrchr(fname, '/'); if (p){ /* found the last (first in reverse) path_separator, * so before $p we have a path and after $p we have digest to scan */ *p++ = 0; } /* * scan path to separate image repository:tag data from filename * the dirtmp and tmp2 variables have a sscanf format to scan which is dynamically * generated based on the size of label and imageid variables. this limits the size * of the scan and prevents any memory overflow. 
the destination scan format is * something like this: * "/image/%256s", for image repository:tag encoded in filename and * "%256[^.]", for imageid part of the encoded filename, so it will scan * two string variables in two sscanf up to 256 characters long each */ Mmsg(fmt, "%s/%%%ds", IMAGENAMESPACE, label.size()); Mmsg(fmt2, "%%%d[^.]", imageid.size()); // DMSG(ctx, DVDEBUG, "image scan str: %s\n", dirtmp.c_str()); if (sscanf(fname, fmt.c_str(), label.c_str()) == 1 && sscanf(p, fmt2.c_str(), imageid.c_str()) == 1){ /* insanity check for memleak */ if (restoredkinfo != NULL){ delete restoredkinfo; } /* we will restore the Docker Image */ restoredkinfo = New(DKINFO(DOCKER_IMAGE)); restoredkinfo->set_image_id(imageid); restoredkinfo->scan_image_repository_tag(label); DMSG2(ctx, DINFO, "scanned: %s %s\n", restoredkinfo->get_image_repository_tag(), (char*)restoredkinfo->get_image_id()); switch (replace){ case REPLACE_ALWAYS: rp->create_status = CF_EXTRACT; break; case REPLACE_NEVER: /* check if image exist on docker */ if ((image = search_docker_image(ctx)) != NULL){ /* exist, so skip restore */ DMSG1(ctx, DINFO, "image exist, skipping restore of: %s\n", restoredkinfo->get_image_repository_tag()); JMSG1(ctx, M_INFO, "Image exist, skipping restore of: %s\n", restoredkinfo->get_image_repository_tag()); rp->create_status = CF_SKIP; break; } rp->create_status = CF_EXTRACT; break; case REPLACE_IFNEWER: if ((image = search_docker_image(ctx)) != NULL){ /* exist, so check if newer */ if (image->get_image_created() < rp->statp.st_mtime){ DMSG1(ctx, DINFO, "image exist and is newer, skipping restore of: %s\n", restoredkinfo->get_image_repository_tag()); JMSG1(ctx, M_INFO, "Image exist and is newer, skipping restore of: %s\n", restoredkinfo->get_image_repository_tag()); rp->create_status = CF_SKIP; break; } } rp->create_status = CF_EXTRACT; break; case REPLACE_IFOLDER: if ((image = search_docker_image(ctx)) != NULL){ /* exist, so check if newer */ if (image->get_image_created() > rp->statp.st_mtime){ rp->create_status = CF_SKIP; DMSG1(ctx, DINFO, "image exist and is older, skipping restore of: %s\n", restoredkinfo->get_image_repository_tag()); JMSG1(ctx, M_INFO, "Image exist and is older, skipping restore of: %s\n", restoredkinfo->get_image_repository_tag()); break; } } rp->create_status = CF_EXTRACT; break; } } else { // fname scanning error DMSG1(ctx, DERROR, "Filename scan error on: %s\n", fmt.c_str()); JMSG1(ctx, dkcommctx->is_abort_on_error() ? M_FATAL : M_ERROR, "Filename scan error on: %s\n", fmt.c_str()); rp->create_status = CF_ERROR; return bRC_Error; } } } if (rp->create_status == CF_EXTRACT){ /* display info about a restore to the user */ DMSG2(ctx, DINFO, "%s restore: %s\n", restoredkinfo-> type_str(), label.c_str()); JMSG2(ctx, M_INFO, "%s restore: %s\n", restoredkinfo->type_str(), label.c_str()); } } } return bRC_OK; } /* * Unimplemented, always return bRC_OK. */ bRC DOCKER::setFileAttributes(bpContext *ctx, struct restore_pkt *rp) { return bRC_OK; } #if 0 /* * Unimplemented, always return bRC_Seen. */ bRC DOCKER::checkFile(bpContext *ctx, char *fname) { if (!accurate_warning){ accurate_warning = true; JMSG0(ctx, M_WARNING, "Accurate mode is not supported. Please disable Accurate mode for this job.\n"); } return bRC_Seen; } #endif /* * We will not generate any acl/xattr data, always return bRC_OK. */ bRC DOCKER::handleXACLdata(bpContext *ctx, struct xacl_pkt *xacl) { return bRC_OK; } /* * Called here to make a new instance of the plugin -- i.e. when * a new Job is started. 
There can be multiple instances of * each plugin that are running at the same time. Your * plugin instance must be thread safe and keep its own * local data. */ static bRC newPlugin(bpContext *ctx) { int JobId; DOCKER *self = New(DOCKER(ctx)); char *workdir; if (!self){ return bRC_Error; } ctx->pContext = (void*) self; getBaculaVar(bVarJobId, (void *)&JobId); DMSG(ctx, DINFO, "newPlugin JobId=%d\n", JobId); /* get dynamic working directory from file daemon */ getBaculaVar(bVarWorkingDir, (void *)&workdir); self->setworkingdir(workdir); return bRC_OK; } /* * Release everything concerning a particular instance of * a plugin. Normally called when the Job terminates. */ static bRC freePlugin(bpContext *ctx) { if (!ctx){ return bRC_Error; } DOCKER *self = pluginclass(ctx); DMSG(ctx, D1, "freePlugin this=%p\n", self); if (!self){ return bRC_Error; } delete self; return bRC_OK; } /* * Called by core code to get a variable from the plugin. * Not currently used. */ static bRC getPluginValue(bpContext *ctx, pVariable var, void *value) { ASSERT_CTX; DMSG0(ctx, D3, "getPluginValue called.\n"); DOCKER *self = pluginclass(ctx); return self->getPluginValue(ctx,var, value); } /* * Called by core code to set a plugin variable. * Not currently used. */ static bRC setPluginValue(bpContext *ctx, pVariable var, void *value) { ASSERT_CTX; DMSG0(ctx, D3, "setPluginValue called.\n"); DOCKER *self = pluginclass(ctx); return self->setPluginValue(ctx, var, value); } /* * Called by Bacula when there are certain events that the * plugin might want to know. The value depends on the * event. */ static bRC handlePluginEvent(bpContext *ctx, bEvent *event, void *value) { ASSERT_CTX; DMSG(ctx, D1, "handlePluginEvent (%i)\n", event->eventType); DOCKER *self = pluginclass(ctx); return self->handlePluginEvent(ctx, event, value); } /* * Called when starting to backup a file. Here the plugin must * return the "stat" packet for the directory/file and provide * certain information so that Bacula knows what the file is. * The plugin can create "Virtual" files by giving them * a name that is not normally found on the file system. */ static bRC startBackupFile(bpContext *ctx, struct save_pkt *sp) { ASSERT_CTX; if (!sp){ return bRC_Error; } DMSG0(ctx, D1, "startBackupFile.\n"); DOCKER *self = pluginclass(ctx); return self->startBackupFile(ctx, sp); } /* * Done backing up a file. */ static bRC endBackupFile(bpContext *ctx) { ASSERT_CTX; DMSG0(ctx, D1, "endBackupFile.\n"); DOCKER *self = pluginclass(ctx); return self->endBackupFile(ctx); } /* * Called when starting restore the file, right after a createFile(). */ static bRC startRestoreFile(bpContext *ctx, const char *cmd) { ASSERT_CTX; DMSG0(ctx, D1, "startRestoreFile.\n"); DOCKER *self = pluginclass(ctx); return self->startRestoreFile(ctx, cmd); } /* * Done restore the file. */ static bRC endRestoreFile(bpContext *ctx) { ASSERT_CTX; DMSG0(ctx, D1, "endRestoreFile.\n"); DOCKER *self = pluginclass(ctx); return self->endRestoreFile(ctx); } /* * Do actual I/O. Bacula calls this after startBackupFile * or after startRestoreFile to do the actual file * input or output. */ static bRC pluginIO(bpContext *ctx, struct io_pkt *io) { ASSERT_CTX; DMSG0(ctx, DVDEBUG, "pluginIO.\n"); DOCKER *self = pluginclass(ctx); return self->pluginIO(ctx, io); } /* * Called here to give the plugin the information needed to * re-create the file on a restore. It basically gets the * stat packet that was created during the backup phase. 
* This data is what is needed to create the file, but does * not contain actual file data. */ static bRC createFile(bpContext *ctx, struct restore_pkt *rp) { ASSERT_CTX; DMSG0(ctx, D1, "createFile.\n"); DOCKER *self = pluginclass(ctx); return self->createFile(ctx, rp); } #if 0 /* * checkFile used for accurate mode backup */ static bRC checkFile(bpContext *ctx, char *fname) { ASSERT_CTX; DMSG(ctx, D1, "checkFile: %s\n", fname); DOCKER *self = pluginclass(ctx); return self->checkFile(ctx, fname); } #endif /* * Called after the file has been restored. This can be used to * set directory permissions, ... */ static bRC setFileAttributes(bpContext *ctx, struct restore_pkt *rp) { ASSERT_CTX; DMSG0(ctx, D1, "setFileAttributes.\n"); DOCKER *self = pluginclass(ctx); return self->setFileAttributes(ctx, rp); } /* * handleXACLdata used for ACL/XATTR backup and restore */ static bRC handleXACLdata(bpContext *ctx, struct xacl_pkt *xacl) { ASSERT_CTX; DMSG(ctx, D1, "handleXACLdata: %i\n", xacl->func); DOCKER *self = pluginclass(ctx); return self->handleXACLdata(ctx, xacl); } bacula-15.0.3/src/plugins/fd/docker/dkcommctx.h0000644000175000017500000002655714771010173021153 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /** * @file dkcommctx.h * @author Radosław Korzeniewski (radoslaw@korzeniewski.net) * @brief This is a Bacula plugin for backup/restore Docker using native tools. * @version 1.2.1 * @date 2020-01-05 * * @copyright Copyright (c) 2021 All rights reserved. IP transferred to Bacula Systems according to agreement. */ #ifndef _DKCOMMCTX_H_ #define _DKCOMMCTX_H_ #include "pluginlib/pluginlib.h" #include "lib/ini.h" #include "lib/bregex.h" #define USE_CMD_PARSER #include "fd_common.h" #include "dkinfo.h" /* Plugin compile time variables */ #ifndef DOCKER_CMD #ifndef HAVE_WIN32 #define DOCKER_CMD "/usr/bin/docker" #else #define DOCKER_CMD "C:/Program Files/Docker/Docker/resources/bin/docker.exe" #endif #endif #ifndef WORKDIR #define WORKDIR "/opt/bacula/working" #endif #define BACULATARIMAGE "baculatar:" DOCKER_TAR_IMAGE #define BACULACONTAINERFOUT "fout" #define BACULACONTAINERFIN "fin" #define BACULACONTAINERERRLOG "docker.err" #define BACULACONTAINERARCHLOG "docker.log" /* * Supported backup modes */ typedef enum { DKPAUSE, DKNOPAUSE, } DOCKER_BACKUP_MODE_T; /* * The list of restore options saved to the RestoreObject. 
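 * They are serialized into the FT_PLUGIN_CONFIG restore object during a Full
 * backup and parsed back by DKCOMMCTX::parse_restoreobj(), which lets the
 * user override them at restore time (for example container_run=yes) without
 * touching the FileSet definition.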
*/ static struct ini_items plugin_items_dump[] = { // name handler comment required default {"container_create", ini_store_bool, "Create container on restore", 0, "*Yes*"}, {"container_run", ini_store_bool, "Run container on restore", 0, "*No*"}, {"container_imageid", ini_store_bool, "Use Image Id for container creation/start", 0, "*No*"}, {"container_defaultnames", ini_store_bool, "Use default docker Names on container creation", 0, "*No*"}, {"docker_host", ini_store_str, "Use defined docker host to restore", 0, "*local*"}, {"timeout", ini_store_int32, "Timeout connecting to volume container", 0, "*30*"}, {NULL, NULL, NULL, 0, NULL} }; /* * This is a low-level communication class which handles command tools execution. */ class DKCOMMCTX: public SMARTALLOC { public: char *command; alist *get_all_containers(bpContext *ctx); alist *get_all_images(bpContext *ctx); alist *get_all_volumes(bpContext *ctx); void release_all_dkinfo_list(alist **list); void release_all_pm_list(alist **list); void set_all_to_backup(bpContext *ctx); void set_all_containers_to_backup(bpContext *ctx); void set_all_images_to_backup(bpContext *ctx); void set_all_volumes_to_backup(bpContext *ctx); inline DKINFO *get_first_to_backup(bpContext *ctx) { return (DKINFO*)objs_to_backup->first(); } inline DKINFO *get_next_to_backup(bpContext *ctx) { return (DKINFO*)objs_to_backup->next(); } inline void finish_backup_list(bpContext *ctx) { objs_to_backup->last(); } bRC container_commit(bpContext *ctx, DKINFO *dkinfo, int jobid); bRC delete_container_commit(bpContext *ctx, DKINFO *dkinfo, int jobid); bRC image_save(bpContext *ctx, DKID *dkid); bRC backup_docker(bpContext *ctx, DKINFO *dkinfo, int jobid); bRC restore_docker(bpContext *ctx, DKINFO *dkinfo, int jobid); bRC docker_tag(bpContext* ctx, DKID &dkid, POOLMEM *tag); bRC docker_create_run_container(bpContext* ctx, DKINFO *dkinfo); bRC wait_for_restore(bpContext *ctx, DKID &dkid); void update_vols_mounts(bpContext* ctx, DKINFO *container, DKVOLS *volume); int32_t read_data(bpContext *ctx, POOLMEM *buf, int32_t len); int32_t read_output(bpContext *ctx, POOL_MEM &out); int32_t write_data(bpContext *ctx, POOLMEM *buf, int32_t len); void terminate(bpContext *ctx); inline int get_backend_pid() { if (bpipe){ return bpipe->worker_pid; } return -1; } bRC parse_parameters(bpContext *ctx, char *argk, char *argv); bRC parse_restoreobj(bpContext *ctx, restore_object_pkt *rop); bRC prepare_bejob(bpContext *ctx, bool estimate); bRC prepare_restore(bpContext *ctx); bRC prepare_working_volume(bpContext* ctx, int jobid); void clean_working_volume(bpContext* ctx); inline void render_working_volume_filename(POOL_MEM &buf, const char *fname) { Mmsg(buf, "%s/%s", workingvolume.c_str(), fname); } void setworkingdir(char *workdir); inline bool is_open() { return bpipe != NULL; } inline bool is_closed() { return bpipe == NULL; } inline bool is_error() { return f_error || f_fatal; } inline void set_error() { f_error = true; } inline bool is_fatal() { return f_fatal || (f_error && abort_on_error); } inline bool is_eod() { return f_eod; } inline void clear_eod() { f_eod = false; } inline void set_eod() { f_eod = true; } inline void set_abort_on_error() { abort_on_error = true; } inline void clear_abort_on_error() { abort_on_error = false; } inline bool is_abort_on_error() { return abort_on_error; } inline bool is_all_vols_to_backup() { return all_vols_to_backup; } inline bool is_remote_docker() { return strlen(param_docker_host.c_str()) > 0; } inline int32_t timeout() { return param_timeout; }; 
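   /* Typical call sequence from docker-fd.c (a sketch, not enforced here):
    * parse_parameters() for every key=value of the Plugin command line,
    * prepare_bejob()/prepare_restore() once per command, then
    * backup_docker()/restore_docker() followed by read_data()/write_data()
    * loops, and terminate() from the IO_CLOSE handler. */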
DKCOMMCTX(const char *cmd); ~DKCOMMCTX(); private: BPIPE *bpipe; /* this is our bpipe to communicate with command tools */ alist *param_include_container; /* the include parameter list which filter what container name to backup as regex */ alist *param_include_image; /* the include parameter list which filter what image name to backup as regex */ alist *param_exclude_container; /* the exclude parameter list which filter what container name to exclude from backup */ alist *param_exclude_image; /* the exclude parameter list which filter what image name to exclude from backup */ alist *param_container; /* the container parameter list which filter what container name or id to backup */ alist *param_image; /* the image parameter list which filter what image name or id to backup */ alist *param_volume; /* the volume parameter list which filter what volume name to backup */ DOCKER_BACKUP_MODE_T param_mode; /* the mode parameter which is used with docker commit, default is pause */ bool param_container_create; /* the restore parameter for container creation */ bool param_container_run; /* the restore parameter for container creation and execution */ bool param_container_imageid; /* the restore parameter for setting imageid during container creation/run */ bool param_container_defaultnames; /* the restore parameter for setting default docker names on container creation */ POOL_MEM param_docker_host; /* use defined docker host to docker operations */ int32_t param_timeout; /* a timeout opening container communication pipe, the default is 30 */ regex_t preg; /* this is a regex context for include/exclude */ bool abort_on_error; /* abort on error flag */ alist *all_containers; /* the list of all containers defined on Docker */ alist *all_images; /* the list of all docker images defined on Docker */ alist *all_volumes; /* the list of all docker volumes defined on Docker */ alist *objs_to_backup; /* the list of all docker objects selected to backup or filtered */ bool all_to_backup; /* if true use all_containers list to backup or containers_to_backup list when false */ bool all_vols_to_backup; /* if true use all volumes for container to backup */ bool f_eod; /* the command tool signaled EOD */ bool f_error; /* the plugin signaled an error */ bool f_fatal; /* the plugin signaled a fatal error */ ConfigFile *ini; /* restore object config parser */ POOL_MEM workingvolume; /* */ POOL_MEM workingdir; /* runtime working directory from file daemon */ bool execute_command(bpContext *ctx, POOLMEM *args); bool execute_command(bpContext *ctx, const char *args); bool execute_command(bpContext *ctx, POOL_MEM &args); void parse_parameters(bpContext *ctx, ini_items &item); // bool render_param(bpContext *ctx, POOLMEM **param, const char *pname, const char *fmt, const char *name, char *value); // bool render_param(bpContext *ctx, POOLMEM **param, const char *pname, const char *fmt, const char *name, int value); // bool render_param(bpContext *ctx, POOLMEM **param, const char *pname, const char *name, char *value); // bool render_param(bpContext *ctx, bool *param, const char *pname, const char *name, bool value); // bool render_param(bpContext *ctx, int32_t *param, const char *pname, const char *name, int32_t value); // bool add_param_str(bpContext *ctx, alist **list, const char *pname, const char *name, char *value); // bool parse_param(bpContext *ctx, POOLMEM **param, const char *pname, const char *name, char *value); // bool parse_param(bpContext *ctx, bool *param, const char *pname, const char *name, char *value); 
// bool parse_param(bpContext *ctx, int32_t *param, const char *pname, const char *name, char *value); bool parse_param_mode(bpContext *ctx, DOCKER_BACKUP_MODE_T *param, const char *pname, const char *name, char *value); void filter_param_to_backup(bpContext *ctx, alist *params, alist *dklist, bool estimate); void filter_incex_to_backup(bpContext *ctx, alist *params_include, alist *params_exclude, alist *dklist); void add_container_volumes_to_backup(bpContext *ctx); void select_container_vols(bpContext *ctx); alist *get_all_list_from_docker(bpContext* ctx, const char *cmd, int cols, alist **dklist, DKINFO_OBJ_t type); void setup_dkinfo(bpContext* ctx, DKINFO_OBJ_t type, char *paramtab[], DKINFO *dkinfo); void setup_container_dkinfo(bpContext* ctx, char *paramtab[], DKINFO *dkinfo); void setup_image_dkinfo(bpContext* ctx, char *paramtab[], DKINFO *dkinfo); void setup_volume_dkinfo(bpContext* ctx, char *paramtab[], DKINFO *dkinfo); bRC run_container_volume_cmd(bpContext* ctx, const char *cmd, POOLMEM *volname, int jobid); bRC run_container_volume_save(bpContext* ctx, POOLMEM *volname, int jobid); bRC run_container_volume_load(bpContext* ctx, POOLMEM *volname, int jobid); bool check_for_docker_errors(bpContext* ctx, char *buf); inline void render_imagesave_name(POOL_MEM &out, DKINFO *dkinfo, int jobid) { Mmsg(out, "%s/%s/%d:backup", dkinfo->get_container_names(), dkinfo->get_container_id()->digest_short(), jobid); } void dump_robjdebug(bpContext *ctx, restore_object_pkt *rop); }; #endif /* _DKCOMMCTX_H_ */ bacula-15.0.3/src/plugins/fd/docker/dkinfo.c0000644000175000017500000002322114771010173020410 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /** * @file dkinfo.c * @author Radoslaw Korzeniewski (radoslaw@korzeniewski.net) * @brief This is a Bacula plugin for backup/restore Docker using native tools. * @version 1.2.1 * @date 2020-01-05 * @copyright Copyright (c) 2021 All rights reserved. IP transferred to Bacula Systems according to agreement. */ #include "dkinfo.h" /* * libbac uses its own sscanf implementation which is not compatible with * libc implementation, unfortunately. * use bsscanf for Bacula sscanf flavor */ #ifdef sscanf #undef sscanf #endif /* * The base constructor */ DKVOLS::DKVOLS(DKINFO *dk) { vol = dk; destination = get_pool_memory(PM_FNAME); }; /* * Default class destructor */ DKVOLS::~DKVOLS() { // WARINIG: The this->destination is freed outside the class // TODO: It should be verified why and where the destination variable is freed! // free_and_null_pool_memory(destination); }; /* * DKINFO class constructor, does initialization for unknown. 
*/ DKINFO::DKINFO(DKINFO_OBJ_t t) { init(t); }; /* * DKINFO base copy constructor by simple copy variables by value */ DKINFO::DKINFO(const DKINFO& dkinfo) { init(dkinfo.Type); switch (Type){ case DOCKER_CONTAINER: set_container_id(*dkinfo.data.container.containerid); set_container_names(dkinfo.data.container.names); set_container_size(dkinfo.data.container.size); set_container_mounts(dkinfo.data.container.mounts); set_container_status(dkinfo.data.container.status); set_container_imagesave(*dkinfo.data.container.imagesave); set_container_imagesave_tag(dkinfo.data.container.imagesave_tag); break; case DOCKER_IMAGE: set_image_id(*dkinfo.data.image.imageid); set_image_repository(dkinfo.data.image.repository); set_image_tag(dkinfo.data.image.tag); set_image_size(dkinfo.data.image.size); set_image_created(dkinfo.data.image.created); break; case DOCKER_VOLUME: set_volume_name(dkinfo.data.volume.name); set_volume_created(dkinfo.data.volume.created); set_volume_size(dkinfo.data.volume.size); set_volume_linknr(dkinfo.data.volume.linknr); break; } }; /* * DKINFO destructor, releases all memory allocated. */ DKINFO::~DKINFO() { DKVOLS *v; switch(Type){ case DOCKER_CONTAINER: if (data.container.containerid){ delete data.container.containerid; } if (data.container.imagesave){ delete data.container.imagesave; } if (data.container.vols){ foreach_alist(v, data.container.vols){ delete v; } delete data.container.vols; } free_and_null_pool_memory(data.container.names); free_and_null_pool_memory(data.container.mounts); free_and_null_pool_memory(data.container.imagesave_tag); break; case DOCKER_IMAGE: if (data.image.imageid){ delete data.image.imageid; } free_and_null_pool_memory(data.image.repository); free_and_null_pool_memory(data.image.tag); free_and_null_pool_memory(data.image.repository_tag); break; case DOCKER_VOLUME: free_and_null_pool_memory(data.volume.name); break; default: break; } }; /* * initialization of the DKINFO class. 
*/ void DKINFO::init(DKINFO_OBJ_t t) { Type = t; switch(Type){ case DOCKER_CONTAINER: data.container.containerid = New(DKID); data.container.names = get_pool_memory(PM_NAME); data.container.size = 0; data.container.mounts = get_pool_memory(PM_MESSAGE); data.container.status = DKUNKNOWN; data.container.imagesave = New(DKID); data.container.imagesave_tag = get_pool_memory(PM_NAME); data.container.vols = New(alist(10, not_owned_by_alist)); break; case DOCKER_IMAGE: data.image.imageid = New(DKID); data.image.repository = get_pool_memory(PM_NAME); data.image.size = 0; data.image.tag = get_pool_memory(PM_NAME); data.image.repository_tag = get_pool_memory(PM_NAME); data.image.created = 0; break; case DOCKER_VOLUME: data.volume.name = get_pool_memory(PM_NAME); data.volume.created = 0; data.volume.linknr = 1; break; default: bmemzero(&data, sizeof(data)); } }; /* * Sets the container size based on the docker size string: * "123B (virtual 319MB)" * * in: * s - the string which represents the Docker container size * out: * none */ void DKINFO::scan_container_size(POOLMEM* str) { int status; float srw; char suff[2]; float sv; uint64_t srwsize, svsize; if (Type == DOCKER_CONTAINER && str){ status = sscanf(str, "%f%c%*c%*s%f%c", &srw, &suff[0], &sv, &suff[1]); if (status == 4){ /* scan successful */ srwsize = pluglib_size_suffix(srw, suff[0]); svsize = pluglib_size_suffix(sv, suff[1]); data.container.size = srwsize + svsize; } } }; /* * Sets the image size based on the docker size string: * "319MB" * * in: * s - the string which represents the Docker image size * out: * none */ void DKINFO::scan_image_size(POOLMEM* str) { int status; float fsize; char suff; if (Type == DOCKER_IMAGE && str){ status = sscanf(str, "%f%c", &fsize, &suff); if (status == 2){ /* scan successful */ data.image.size = pluglib_size_suffix(fsize, suff); } } }; /* * Sets the volume size based on the docker volume size string: * "319MB" * * in: * s - the string which represents the Docker volume size * out: * none */ void DKINFO::scan_volume_size(POOLMEM* str) { int status; float fsize; char suff; if (Type == DOCKER_VOLUME && str){ if (bstrcmp(str, "N/A")){ data.volume.size = 0; } else { status = sscanf(str, "%f%c", &fsize, &suff); if (status == 2){ /* scan successful */ data.volume.size = pluglib_size_suffix(fsize, suff); } } } }; /* * Setup an image repository/tag variables from a single image repository:tag string. * The class uses three variables to store repository:tag data. * - data.image.repository_tag is used for full info string * - data.image.repository is used to store repository part * - data.image.tag is used to store a tag part * TODO: optimize repository_tag storage * * in: * rt - a repository:tag string * out: * none */ void DKINFO::scan_image_repository_tag(POOL_MEM& rt) { char *colon; if (Type == DOCKER_IMAGE){ pm_strcpy(data.image.repository_tag, rt); colon = strchr(data.image.repository_tag, ':'); if (colon){ /* have a colon in string, so split it */ pm_strcpy(data.image.tag, colon); *colon = 0; // temporary usage pm_strcpy(data.image.repository, data.image.repository_tag); *colon = ':'; // restore } else { pm_strcpy(data.image.repository, rt); pm_strcpy(data.image.tag, NULL); } } }; /* * Sets the container status based on the status string. 
* * in: * s - the string which represents the status * out: * none */ void DKINFO::set_container_status(POOL_MEM &s) { if (Type == DOCKER_CONTAINER){ /* scan a container state and save it */ if (bstrcmp(s.c_str(), "exited")){ /* container exited */ data.container.status = DKEXITED; } else if (bstrcmp(s.c_str(), "running")){ /* vm is running */ data.container.status = DKRUNNING; } else if (bstrcmp(s.c_str(), "paused")){ /* container paused */ data.container.status = DKPAUSED; } else { data.container.status = DKUNKNOWN; } } } /* fake dkid for volumes */ static DKID volfakeid; /* * Return object ID based on object type. */ DKID *DKINFO::id() { switch(Type){ case DOCKER_CONTAINER: return data.container.containerid; case DOCKER_IMAGE: return data.image.imageid; case DOCKER_VOLUME: return &volfakeid; } return NULL; }; /* * Return object name based on object type. */ POOLMEM *DKINFO::name() { switch(Type){ case DOCKER_CONTAINER: return data.container.names; case DOCKER_IMAGE: return data.image.repository_tag; case DOCKER_VOLUME: return data.volume.name; } return NULL; }; /* * Return object type string constant. */ const char *DKINFO::type_str() { switch(Type){ case DOCKER_CONTAINER: return "Docker Container"; case DOCKER_IMAGE: return "Docker Image"; case DOCKER_VOLUME: return "Docker Volume"; } return "Unknown"; }; /* * Return object size info. */ uint64_t DKINFO::size() { switch(Type){ case DOCKER_CONTAINER: return data.container.size; case DOCKER_IMAGE: return data.image.size; default: break; } return 0; }; bacula-15.0.3/src/plugins/fd/docker/docker-fd.h0000644000175000017500000001647314771010173021014 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /** * @file docker-fd.h * @author Radoslaw Korzeniewski (radoslaw@korzeniewski.net) * @brief This is a Bacula plugin for backup/restore Docker using native tools. * @version 1.2.1 * @date 2020-01-05 * * @copyright Copyright (c) 2021 All rights reserved. IP transferred to Bacula Systems according to agreement. 
*/ #ifndef _DOCKER_FD_H_ #define _DOCKER_FD_H_ #include "dkcommctx.h" /* Plugin Info definitions */ #define DOCKER_LICENSE "Bacula AGPLv3" #define DOCKER_AUTHOR "Radoslaw Korzeniewski" #define DOCKER_DATE "Jan 2020" #define DOCKER_VERSION "1.2.1" #define DOCKER_DESCRIPTION "Bacula Docker Plugin" /* Plugin compile time variables */ const char *PLUGINPREFIX = "docker:"; const char *PLUGINNAME = "Docker"; #define PLUGINNAMESPACE "/@docker" #define CONTAINERNAMESPACE "/container" #define IMAGENAMESPACE "/image" #define VOLUMENAMESPACE "/volume" /* types used by Plugin */ typedef enum { DOCKER_NONE = 0, DOCKER_BACKUP_FULL, DOCKER_BACKUP_INCR, DOCKER_BACKUP_DIFF, DOCKER_BACKUP_VOLUME_FULL, DOCKER_BACKUP_CONTAINER_VOLLIST, DOCKER_RESTORE, DOCKER_RESTORE_VOLUME, } DOCKER_MODE_T; #define pluginclass(ctx) (DOCKER*)ctx->pContext; /* listing mode */ typedef enum { DOCKER_LISTING_NONE = 0, DOCKER_LISTING_TOP, DOCKER_LISTING_IMAGE, DOCKER_LISTING_CONTAINER, DOCKER_LISTING_VOLUME, } DOCKER_LISTING_MODE; /* listing objects for plugin */ typedef struct { const char *name; DOCKER_LISTING_MODE mode; } DOCKER_LISTING_T; static DOCKER_LISTING_T docker_objects[] = { {"/", DOCKER_LISTING_TOP}, {"image", DOCKER_LISTING_IMAGE}, {"container", DOCKER_LISTING_CONTAINER}, {"volume", DOCKER_LISTING_VOLUME}, {NULL, DOCKER_LISTING_NONE}, }; /* * This is a main plugin API class. It manages a plugin context. * All the public methods correspond to a public Bacula API calls, even if * a callback is not implemented. */ class DOCKER: public SMARTALLOC { public: bRC getPluginValue(bpContext *ctx, pVariable var, void *value); bRC setPluginValue(bpContext *ctx, pVariable var, void *value); bRC handlePluginEvent(bpContext *ctx, bEvent *event, void *value); bRC startBackupFile(bpContext *ctx, struct save_pkt *sp); bRC endBackupFile(bpContext *ctx); bRC startRestoreFile(bpContext *ctx, const char *cmd); bRC endRestoreFile(bpContext *ctx); bRC pluginIO(bpContext *ctx, struct io_pkt *io); bRC createFile(bpContext *ctx, struct restore_pkt *rp); bRC setFileAttributes(bpContext *ctx, struct restore_pkt *rp); // Not used! 
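   /*
    * Illustration only - not part of the original source.  A hedged sketch of
    * how the static docker_objects[] table above can be scanned to translate
    * a path component into a DOCKER_LISTING_MODE while listing.  Written as
    * an extra inline member so it remains valid inside this class
    * declaration; the helper name is hypothetical and bstrcmp() is the Bacula
    * string-equality helper used elsewhere in this plugin.
    */
   inline DOCKER_LISTING_MODE example_listing_mode_for(const char *component)
   {
      for (int i = 0; docker_objects[i].name != NULL; i++) {
         if (bstrcmp(component, docker_objects[i].name)) {
            return docker_objects[i].mode;
         }
      }
      return DOCKER_LISTING_NONE;
   }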
bRC checkFile(bpContext *ctx, char *fname); bRC handleXACLdata(bpContext *ctx, struct xacl_pkt *xacl); void setworkingdir(char *workdir); DOCKER(bpContext *bpctx); ~DOCKER(); private: bpContext *ctx; /* Bacula Plugin Context */ DOCKER_MODE_T mode; /* Plugin mode of operation */ int JobId; /* Job ID */ char *JobName; /* Job name */ time_t since; /* Job since parameter */ char *where; /* the Where variable for restore job if set by user */ char *regexwhere; /* the RegexWhere variable for restore job if set by user */ char replace; /* the replace variable for restore job */ bool robjsent; /* set when RestoreObject was sent during Full backup */ bool estimate; /* used when mode is DOCKER_BACKUP_* but we are doing estimate only */ bool accurate_warning; /* for sending accurate mode warning once */ bool local_restore; /* if where parameter is set to local path then make a local restore */ bool backup_finish; /* the hack to force finish backup list */ bool unsupportedlevel; /* this flag show if plugin should report unsupported backup level*/ bool param_notrunc; /* when "notrunc" option specified, used in listing mode only */ bool errortar; /* show if container tar for volume archive had errors */ bool volumewarning; /* when set then a warning about remote docker volume restore was sent to user */ int dockerworkclear; /* set to 1 when docker working dir should be cleaned */ /* TODO: define a variable which will signal job cancel */ DKCOMMCTX *dkcommctx; /* the current command tool execution context */ alist *commandlist; /* the command context list for multiple config execution for a single job */ POOLMEM *fname; /* current file name to backup or restore */ POOLMEM *lname; /* current link name to estimate or listing */ int dkfd; /* the file descriptor for local restore and volume backup */ POOLMEM *robjbuf; /* the buffer for restore object data */ DKINFO *currdkinfo; /* current docker object - container or image to backup */ DKINFO *restoredkinfo; /* the current docker object - container or image to restore */ DKVOLS *currvols; /* current docker volume object for backup or restore */ DOCKER_LISTING_MODE listing_mode; /* the listing mode */ int listing_objnr; /* when at listing top mode iterate through docker_objects */ cmd_parser *parser; /* the plugin params parser */ POOLMEM *workingdir; /* runtime working directory from file daemon */ bRC parse_plugin_command(bpContext *ctx, const char *command); bRC parse_plugin_restoreobj(bpContext *ctx, restore_object_pkt *rop); bRC prepare_bejob(bpContext* ctx, char *command); bRC prepare_estimate(bpContext *ctx, char *command); bRC prepare_backup(bpContext *ctx, char *command); bRC prepare_restore(bpContext *ctx, char *command); bRC perform_backup_open(bpContext *ctx, struct io_pkt *io); bRC perform_restore_open(bpContext *ctx, struct io_pkt *io); bRC perform_read_data(bpContext *ctx, struct io_pkt *io); bRC perform_read_volume_data(bpContext *ctx, struct io_pkt *io); bRC perform_write_data(bpContext *ctx, struct io_pkt *io); bRC perform_restore_close(bpContext *ctx, struct io_pkt *io); bRC perform_backup_close(bpContext *ctx, struct io_pkt *io); void new_commandctx(bpContext *ctx, const char *command); void switch_commandctx(bpContext *ctx, const char *command); DKINFO *search_docker_image(bpContext *ctx); DKINFO *search_docker_volume(bpContext *ctx); bool check_container_tar_error(bpContext *ctx, char *volname); }; #endif /* _DOCKER_FD_H_ */ bacula-15.0.3/src/plugins/fd/docker/dkid_test.c0000644000175000017500000001417414771010173021117 0ustar 
bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * This is a Bacula plugin for backup/restore Docker using native tools. */ /** * @file dkid_test.c * @author Radoslaw Korzeniewski (radoslaw@korzeniewski.net) * @brief This is a Bacula plugin for backup/restore Docker using native tools - unittest. * @version 1.2.1 * @date 2020-01-05 * This is a Bacula plugin for backup/restore Docker using native tools. * * @copyright Copyright (c) 2021 All rights reserved. IP transferred to Bacula Systems according to agreement. */ #include "bacula.h" #include "unittests.h" #include "dkid.h" #include #include #include const char *dig1 = "66f45d8601bae26a6b2ffeb46922318534d3b3905377b3a224693bd78601cb3b"; const char *sdig1 = "66f45d8601ba"; const int64_t vdig1 = 0x66f45d8601ba; const char *dig2 = "B546087C43F75A2C1484B4AEE0737499AA69A09067B04237907FCCD4BDE938C7"; const char *sdig2 = "B546087C43F7"; const int64_t vdig2 = 0xb546087c43f7; const char *sdig3 = "0f601bcb1ef5"; const int64_t vdig3 = 0x0f601bcb1ef5; const char *sdig4 = "00571da76d"; const int64_t vdig4 = 0x00571da76d; const char *dig5 = "sha256:daabf4372f900cb1ad0db17d26abf3acce55224275d1850f02459180e4dacf1d"; const char *tdig5 = "daabf4372f900cb1ad0db17d26abf3acce55224275d1850f02459180e4dacf1d"; const char *sdig5 = "daabf4372f90"; const int64_t vdig5 = 0xdaabf4372f90; const char *sinv1 = "Invalid initialization string"; const char *sinv2 = "brave_edison"; const char *sinv3 = "0xDEADBEEF"; const char *sinv4 = "c0a478d317195b…"; const char *sinv5 = "a478d317195b…"; const char *sinv6 = "78d317195b…"; int main() { Unittests dkid_test("dkid_test"); char *p; int64_t v; POOL_MEM m(PM_FNAME); Pmsg0(0, "Initialize tests ...\n"); DKID id1; ok(id1.id() == DKIDInvalid, "Check default initialization short"); ok(strlen(id1.digest()) == 0, "Check default initialization full"); ok(strlen(id1.digest_short()) == 0, "Check short default initialization full"); DKID id2(dig1); ok(id2.id() == vdig1, "Check param initialization short"); ok(bstrcmp(id2.digest(), dig1), "Check param initialization full"); ok(bstrcmp(id2.digest_short(), sdig1), "Check short param initialization"); DKID id3(dig2); ok(id3.id() == vdig2, "Check param initialization short upper"); ok(bstrcmp(id3.digest(), dig2), "Check param initialization full upper"); ok(bstrcmp(id3.digest_short(), sdig2), "Check short param initialization full upper"); pm_strcpy(m, dig1); DKID id4(m); ok(id4.id() == vdig1, "Check pool_mem initialization short"); ok(bstrcmp(id4.digest(), dig1), "Check pool_mem initialization full"); ok(bstrcmp(id4.digest_short(), sdig1), "Check short pool_mem initialization full"); DKID id5(sdig3); ok(id5.id() == vdig3, "Check short digest initialization"); Mmsg(m, "%s(...)", sdig3); ok(bstrcmp(id5.digest(), m.c_str()), "Check short digest initialization full str"); ok(bstrcmp(id5.digest_short(), sdig3), "Check short for short digest initialization"); DKID id6(sdig4); ok(id6.id() == vdig4, "Check shorter digest initialization"); Mmsg(m, 
"%s(...)", sdig4); ok(bstrcmp(id6.digest(), m.c_str()), "Check shorter digest initialization full str"); ok(bstrcmp(id6.digest_short(), sdig4), "Check short for shorter digest initialization"); DKID id7(dig5); ok(id7.id() == vdig5, "Check param initialization with sha256: prefix"); ok(bstrcmp(id7.digest(), tdig5), "Check param initialization full with sha256: prefix"); ok(bstrcmp(id7.digest_short(), sdig5), "Check short param initialization with sha256: prefix"); Pmsg0(0, "Invalid initialization tests ...\n"); DKID id8(sinv1); ok(id8.id() < 0, "Checking invalid digest string long"); DKID id9(sinv2); ok(id9.id() < 0, "Checking invalid digest string short"); DKID id10 (sinv3); ok(id10.id() < 0, "Checking invalid digest string hex"); DKID id11(sinv4); ok(id11.id() >= 0, "Checking digest string with ellipsis"); DKID id12(sinv5); ok(id12.id() >= 0, "Checking digest string with ellipsis short"); DKID id13(sinv6); ok(id13.id() < 0, "Checking invalid digest string with ellipsis short"); Pmsg0(0, "Operators tests ...\n"); DKID id14(dig1); p = (char*)id14; ok(bstrcmp(p, dig1), "Checking operator char* ()"); v = id14; ok(v == vdig1, "Checking operator int64_t ()"); DKID *id14P = New(DKID(dig1)); p = (char*)*id14P; ok(bstrcmp(p, dig1), "Checking operator char* () on ptr"); v = *id14P; ok(v == vdig1, "Checking operator int64_t () on ptr"); delete(id14P); DKID id15(dig2); DKID id15A(dig1); id15 = id15A; ok(id15A.id() == vdig1, "Checking operator= (DKID&)"); ok(id15A == id15, "Checking operator== on the same"); nok(id15A != id15, "Checking operator!= on the same"); DKID id15B(dig1); id15A = (char*)dig2; ok(id15A.id() == vdig2, "Checking operator= (char*)"); nok(id15A == id15B, "Checking operator== on different"); ok(id15A != id15B, "Checking operator!= on different"); Mmsg(m, "%s", sdig1); id15A = m; ok(id15A.id() == vdig1, "Checking operator= (POOL_MEM&)"); id15A = (char*)dig2; ok(id15A.id() == vdig2, "Checking operator= (char*)"); DKID id16(sinv1); id15A = id16; nok (id15A == id16, "Checking operator== on invalid digest"); nok (id15A != id16, "Checking operator!= on invalid digest"); DKID id17(sdig1); id15A = (char*)dig1; ok (id15A == id17, "Checking operator== on full and short digest"); nok (id15A != id17, "Checking operator!= on full and short digest"); return report(); } bacula-15.0.3/src/plugins/fd/docker/dkinfo.h0000644000175000017500000002156014771010173020421 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /** * @file dkinfo.h * @author Radoslaw Korzeniewski (radoslaw@korzeniewski.net) * @brief This is a Bacula plugin for backup/restore Docker using native tools. * @version 1.2.1 * @date 2020-01-05 * * @copyright Copyright (c) 2021 All rights reserved. IP transferred to Bacula Systems according to agreement. 
*/ #ifndef _DKINFO_H_ #define _DKINFO_H_ #include "dkid.h" #include "pluginlib/pluginlib.h" /* The Docker container Power states */ typedef enum { DKUNKNOWN = 0, DKCREATED, DKEXITED, DKRUNNING, DKPAUSED, } DOCKER_POWER_T; /* Docker object types */ typedef enum { DOCKER_CONTAINER, DOCKER_IMAGE, DOCKER_VOLUME, } DKINFO_OBJ_t; /* * Docker objects data variables. */ typedef union { struct { DKID *containerid; POOLMEM *names; uint64_t size; DOCKER_POWER_T status; DKID *imagesave; POOLMEM *imagesave_tag; POOLMEM *mounts; alist *vols; } container; struct { DKID *imageid; POOLMEM *repository; uint64_t size; POOLMEM *tag; POOLMEM *repository_tag; utime_t created; } image; struct { POOLMEM *name; utime_t created; uint64_t size; int linknr; } volume; } DOCKER_OBJ; /* forward reference */ class DKINFO; /* * This is a simple special struct for handling docker mounts destination mappings */ class DKVOLS : public SMARTALLOC { public: DKVOLS(DKINFO *dk); ~DKVOLS(); DKINFO *vol; POOLMEM *destination; }; /* * The class which handles DKINFO operations */ class DKINFO : public SMARTALLOC { public: DKINFO(DKINFO_OBJ_t t); DKINFO(const DKINFO &dkinfo); #if __cplusplus > 201103L DKINFO() = delete; DKINFO(DKINFO&) = delete; DKINFO(DKINFO&&) = delete; #endif ~DKINFO(); /* set methods dedicated to container */ inline void set_container_id(DKID &id) { if (Type == DOCKER_CONTAINER){ *data.container.containerid = id; }} inline void set_container_id(POOL_MEM &id) { if (Type == DOCKER_CONTAINER){ *data.container.containerid = id; }} inline void set_container_id(POOLMEM *id) { if (Type == DOCKER_CONTAINER){ *data.container.containerid = (char*)id; }} inline void set_container_names(POOLMEM *n) { if (Type == DOCKER_CONTAINER){ pm_strcpy(data.container.names, n); }} inline void set_container_names(POOL_MEM &n) { if (Type == DOCKER_CONTAINER){ pm_strcpy(data.container.names, n); }} inline void set_container_size(uint64_t s) { if (Type == DOCKER_CONTAINER){ data.container.size = s; }} inline void set_container_imagesave(DKID &id) { if (Type == DOCKER_CONTAINER){ *data.container.imagesave = id; }} inline void set_container_imagesave(POOL_MEM &id) { if (Type == DOCKER_CONTAINER){ *data.container.imagesave = id; }} inline void set_container_imagesave(POOLMEM *id) { if (Type == DOCKER_CONTAINER){ *data.container.imagesave = (char*)id; }} inline void set_container_imagesave_tag(POOL_MEM &n) { if (Type == DOCKER_CONTAINER){ pm_strcpy(data.container.imagesave_tag, n); }} inline void set_container_imagesave_tag(POOLMEM *n) { if (Type == DOCKER_CONTAINER){ pm_strcpy(data.container.imagesave_tag, n); }} inline void set_container_status(DOCKER_POWER_T s) { if (Type == DOCKER_CONTAINER){ data.container.status = s; }} void set_container_status(POOL_MEM &s); inline void set_container_mounts(POOLMEM *n) { if (Type == DOCKER_CONTAINER){ pm_strcpy(data.container.mounts, n); }} inline void set_container_mounts(POOL_MEM &n) { if (Type == DOCKER_CONTAINER){ pm_strcpy(data.container.mounts, n); }} /* special vols handling */ inline void container_append_vols(DKVOLS *dk) { if (Type == DOCKER_CONTAINER){ data.container.vols->append(dk); }} inline bool container_has_vols() { return Type == DOCKER_CONTAINER ? !data.container.vols->empty() : false; } inline DKVOLS *container_first_vols() { return Type == DOCKER_CONTAINER ? (DKVOLS*)data.container.vols->first() : NULL; } inline DKVOLS *container_next_vols() { return Type == DOCKER_CONTAINER ? 
(DKVOLS*)data.container.vols->next() : NULL; } /* set methods dedicated to image */ inline void set_image_id(DKID &id) { if (Type == DOCKER_IMAGE){ *data.image.imageid = id; }} inline void set_image_id(POOLMEM *id) { if (Type == DOCKER_IMAGE){ *data.image.imageid = (char*)id; }} inline void set_image_id(POOL_MEM &id) { if (Type == DOCKER_IMAGE){ *data.image.imageid = id; }} inline void set_image_repository(POOLMEM *n) { if (Type == DOCKER_IMAGE){ pm_strcpy(data.image.repository, n); render_image_repository_tag(); }} inline void set_image_repository(POOL_MEM &n) { if (Type == DOCKER_IMAGE){ pm_strcpy(data.image.repository, n); render_image_repository_tag(); }} inline void set_image_tag(POOLMEM *n) { if (Type == DOCKER_IMAGE){ pm_strcpy(data.image.tag, n); render_image_repository_tag(); }} inline void set_image_tag(POOL_MEM &n) { if (Type == DOCKER_IMAGE){ pm_strcpy(data.image.tag, n); render_image_repository_tag(); }} inline void set_image_size(uint64_t s) { if (Type == DOCKER_IMAGE){ data.image.size = s; }} inline void set_image_created(utime_t t) { if (Type == DOCKER_IMAGE){ data.image.created = t; }} /* set methods dedicated to volume */ inline void set_volume_name(POOLMEM *n) { if (Type == DOCKER_VOLUME){ pm_strcpy(data.volume.name, n); }} inline void set_volume_name(POOL_MEM &n) { if (Type == DOCKER_VOLUME){ pm_strcpy(data.volume.name, n); }} inline void set_volume_created(utime_t t) { if (Type == DOCKER_VOLUME){ data.volume.created = t; }} inline void set_volume_size(uint64_t s) { if (Type == DOCKER_VOLUME){ data.volume.size = s; }} inline void set_volume_linknr(int l) { if (Type == DOCKER_VOLUME){ data.volume.linknr = l; }} inline int inc_volume_linknr() { return Type == DOCKER_VOLUME ? ++data.volume.linknr : 0; } /* scanning methods */ void scan_container_size(POOLMEM *str); void scan_image_size(POOLMEM *str); void scan_image_repository_tag(POOL_MEM &rt); void scan_volume_size(POOLMEM *str); /* get methods dedicated to container */ inline DKID *get_container_id() { return Type == DOCKER_CONTAINER ? data.container.containerid : NULL; } inline DKID *get_container_imagesave() { return Type == DOCKER_CONTAINER ? data.container.imagesave : NULL; } inline POOLMEM *get_container_names() { return Type == DOCKER_CONTAINER ? data.container.names : NULL; } inline DOCKER_POWER_T get_container_status() { return Type == DOCKER_CONTAINER ? data.container.status : DKUNKNOWN; } inline uint64_t get_container_size() { return Type == DOCKER_CONTAINER ? data.container.size : 0; } inline POOLMEM *get_container_imagesave_tag() { return Type == DOCKER_CONTAINER ? data.container.imagesave_tag : NULL; } inline POOLMEM *get_container_mounts() { return Type == DOCKER_CONTAINER ? data.container.mounts : NULL; } /* get methods dedicated to image */ inline DKID *get_image_id() { return Type == DOCKER_IMAGE ? data.image.imageid : NULL; } inline POOLMEM *get_image_repository() { return Type == DOCKER_IMAGE ? data.image.repository : NULL; } inline POOLMEM *get_image_tag() { return Type == DOCKER_IMAGE ? data.image.tag : NULL; } inline POOLMEM *get_image_repository_tag() { return Type == DOCKER_IMAGE ? data.image.repository_tag : NULL; } inline uint64_t get_image_size() { return Type == DOCKER_IMAGE ? data.image.size : 0; } inline utime_t get_image_created() { return Type == DOCKER_IMAGE ? data.image.created : 0; } /* get methods dedicated to volume */ inline POOLMEM *get_volume_name() { return Type == DOCKER_VOLUME ? data.volume.name : NULL; } inline utime_t get_volume_created() { return Type == DOCKER_VOLUME ? 
data.volume.created : 0; } inline uint64_t get_volume_size() { return Type == DOCKER_VOLUME ? data.volume.size : 0; } inline int get_volume_linknr() { return Type == DOCKER_VOLUME ? data.volume.linknr : 0; } /* generic get methods which check dkinfo type */ DKID *id(); POOLMEM *name(); uint64_t size(); inline DKINFO_OBJ_t type() { return Type; } const char *type_str(); private: DKINFO_OBJ_t Type; DOCKER_OBJ data; void init(DKINFO_OBJ_t t); inline void render_image_repository_tag() { pm_strcpy(data.image.repository_tag, data.image.repository); pm_strcat(data.image.repository_tag, ":"); pm_strcat(data.image.repository_tag, data.image.tag); } }; #endif /* _DKINFO_H_ */ bacula-15.0.3/src/plugins/fd/docker/Makefile0000644000175000017500000000410314771010173020430 0ustar bsbuildbsbuild# # Makefile for building FD plugins PluginLibrary for Bacula # # Copyright (C) 2000-2020 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # # Author: Radoslaw Korzeniewski, radoslaw@korzeniewski.net # include ../Makefile.inc thisdir = $(FDPLUGDIR)/docker UNITTESTSOBJ = $(LIBDIR)/unittests.lo LIBBACOBJ = $(LIBDIR)/libbac.la DOCKERSRC = dkid.c dkinfo.c dkcommctx.c docker-fd.c DOCKERSRCH = dkid.h dkinfo.h dkcommctx.h docker-fd.h DOCKEROBJ = $(DOCKERSRC:.c=.lo) DOCKERTESTS = dkid_test all: docker-fd.la tests: $(DOCKERTESTS) .c.lo: @echo "Compiling $< ..." $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(CPPFLAGS) $(CFLAGS) -I${SRCDIR} -I${FDDIR} -I${FDPLUGDIR} -I${LIBDIR} -DWORKDIR=\"$(DESTDIR)$(working_dir)\" -c $< %.lo: %.c %.h @echo "Pattern compiling $< ..." $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(CPPFLAGS) $(CFLAGS) -I${SRCDIR} -I${FDDIR} -I${FDPLUGDIR} -I${LIBDIR} -DWORKDIR=\"$(DESTDIR)$(working_dir)\" -c $(@:.lo=.c) docker-fd.la: Makefile $(DOCKEROBJ) $(PLUGINLIBDIR)/pluginlib.lo $(DOCKERSRCH) @echo "Linking $(@:.la=.so) ..." $(NO_ECHO)$(LIBTOOL_LINK) --silent $(CXX) $(LDFLAGS) -shared $^ -o $@ -rpath $(plugindir) -module -export-dynamic -avoid-version dkid_test: Makefile dkid.lo dkid_test.lo $(UNITTESTSOBJ) $(LIBBACOBJ) @echo "Building $@ ..." $(NO_ECHO)$(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L$(LIBDIR) -o $@ dkid.lo dkid_test.lo $(UNITTESTSOBJ) $(DLIB) -lbac -lm $(LIBS) $(OPENSSL_LIBS) install: all install-docker install-docker: docker-fd.la @echo "Installing plugin $(^:.la=.so) ..." $(MKDIR) $(DESTDIR)$(plugindir) $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) docker-fd.la $(DESTDIR)$(plugindir) $(NO_ECHO)$(RMF) $(DESTDIR)$(plugindir)/docker-fd.la libtool-clean: @find . -name '*.lo' -print | xargs $(LIBTOOL_CLEAN) $(RMF) @find . -name '*.la' -print | xargs $(LIBTOOL_CLEAN) $(RMF) @$(RMF) -r .libs _libs clean: libtool-clean @$(RMF) -f main *.so *.o @$(RMF) -f $(DOCKERTESTS) distclean: clean libtool-uninstall: $(LIBTOOL_UNINSTALL) $(RMF) $(DESTDIR)$(plugindir)/docker-fd.so depend: uninstall: $(LIBTOOL_UNINSTALL_TARGET) bacula-15.0.3/src/plugins/fd/journal.h0000644000175000017500000000431214771010173017346 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. 
Bacula(R) is a registered trademark of Kern Sibbald. */ #ifndef journal_H #define journal_H #include "settings-record.h" #include "folder-record.h" #include "file-record.h" #ifndef HAVE_WIN32 #include #endif #ifdef HAVE_WIN32 #define JOURNAL_CLI_FNAME "bcdp-cli.journal" #else #define JOURNAL_CLI_FNAME ".bcdp-cli.journal" #endif #define JOURNAL_VERSION 1 /** * @brief The Journal persists and retrieves @class FileRecord objects. * * Used by: * * 1-) The CDP Client, to store information about files that * should backed up. * * 2-) The CDP FD Plugin, to decide which file should be backed * up on a specific Job. * * The current implementation uses a plain text file to store the records. */ class Journal { private: FILE * _fp; int _fd; public: char *_jPath; bool hasTransaction; Journal(): _fp(NULL), _fd(-1), _jPath(NULL), hasTransaction(false) {} ~Journal() {} bool setJournalPath(const char *path); bool setJournalPath(const char *path, const char *spoolDir); bool migrateTo(const char* newPath); bool beginTransaction(const char *mode); void endTransaction(); bool writeSettings(SettingsRecord &record); SettingsRecord *readSettings(); bool writeFileRecord(const FileRecord &record); FileRecord *readFileRecord(); bool removeFolderRecord(const char *folder); bool writeFolderRecord(const FolderRecord &record); FolderRecord *readFolderRecord(); /** Public only because it's used by Unit Tests */ char *extract_val(const char *key_val); //TODO: warnSizeFull(); //TODO: removeRecord() //TODO: pruneRecords() }; #endif bacula-15.0.3/src/plugins/fd/Makefile0000644000175000017500000002042714771010173017170 0ustar bsbuildbsbuild# # Simple Makefile for building test FD plugins for Bacula # # Copyright (C) 2000-2023 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # # include Makefile.inc # No optimization for now for easy debugging KUBESRC = kubernetes-fd.c kubernetes-fd.h TESTKUBESRC = test_kubernetes_backend.c KUBEOBJ = $(KUBESRC:.c=.lo) $(METAPLUGINOBJ) TESTKUBEOBJ = $(TESTKUBESRC:.c=.lo) .SUFFIXES: .c .lo .c.lo: $(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(CPPFLAGS) -I${SRCDIR} -I${FDDIR} -I${LIBDIR} -I. -c $< all: bpipe-fd.la test-plugins: test-plugin-fd.la test-deltaseq-fd.la test-handlexacl-plugin-fd.la test_kubernetes_backend.lo: $(TESTKUBESRC) @echo "Compiling $< ..." $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(CPPFLAGS) $(CFLAGS) -I${SRCDIR} -I${FDDIR} -DLOGDIR=\"$(DESTDIR)$(working_dir)\" -c $< kubernetes-fd.la: Makefile $(KUBEOBJ) @echo "Linking $(@:.la=.so) ..." $(NO_ECHO)$(LIBTOOL_LINK) --silent $(CXX) $(LDFLAGS) -shared $(KUBEOBJ) -o $@ -rpath $(plugindir) -module -export-dynamic -avoid-version install-kubernetes: kubernetes-fd.la $(MAKE) -C kubernetes-backend install-kubernetes $(MKDIR) $(DESTDIR)$(plugindir) $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) kubernetes-fd.la $(DESTDIR)$(plugindir) $(NO_ECHO)$(RMF) $(DESTDIR)$(plugindir)/kubernetes-fd.la install-test-kubernetes: test_kubernetes_backend kubernetes-fd.la $(MKDIR) $(DESTDIR)$(plugindir) $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) kubernetes-fd.la $(DESTDIR)$(plugindir) $(NO_ECHO)$(RMF) $(DESTDIR)$(plugindir)/kubernetes-fd.la $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) test_kubernetes_backend $(DESTDIR)$(sbindir)/k8s_backend cdp: cdp-fd.la cdp-fd.lo: cdp-fd.c journal.h folder-record.h file-record.h settings-record.h ${FDDIR}/fd_plugins.h $(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(CPPFLAGS) $(CFLAGS) -Wno-unused-function -I../.. 
-I${FDDIR} -c cdp-fd.c journal.lo: journal.c journal.h folder-record.h file-record.h settings-record.h ${FDDIR}/fd_plugins.h $(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(CPPFLAGS) $(CFLAGS) -Wno-unused-function -I../.. -I${FDDIR} -c journal.c cdp-fd.la: Makefile cdp-fd$(DEFAULT_OBJECT_TYPE) journal.lo $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -shared cdp-fd.lo journal.lo -o $@ -rpath $(plugindir) -module -export-dynamic -avoid-version install-cdp: cdp-fd.la $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) cdp-fd.la $(DESTDIR)$(plugindir) $(RMF) $(DESTDIR)$(plugindir)/cdp-fd.la rot13: rot13-fd.la all: bpipe-fd.la test-plugin-fd.la test-deltaseq-fd.la example-plugin-fd.lo: example-plugin-fd.c ${FDDIR}/fd_plugins.h $(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(CPPFLAGS) $(CFLAGS) -I../.. -I${FDDIR} -c example-plugin-fd.c example-plugin-fd.la: Makefile example-plugin-fd$(DEFAULT_OBJECT_TYPE) $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -shared example-plugin-fd.lo -o $@ -rpath $(plugindir) -module -export-dynamic -avoid-version bpipe-fd.lo: bpipe-fd.c ${FDDIR}/fd_plugins.h $(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(CPPFLAGS) $(CFLAGS) -I../.. -I${FDDIR} -c bpipe-fd.c bpipe-fd.la: Makefile bpipe-fd$(DEFAULT_OBJECT_TYPE) $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -shared bpipe-fd.lo -o $@ -rpath $(plugindir) -module -export-dynamic -avoid-version test-bpipe-fd.c: bpipe-fd.c cp -f bpipe-fd.c test-bpipe-fd.c test-bpipe-fd.lo: test-bpipe-fd.c ${FDDIR}/fd_plugins.h $(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(CPPFLAGS) $(CFLAGS) -DTEST_BPIPE_OFFSET -I../.. -I${FDDIR} -c test-bpipe-fd.c test-bpipe-fd.la: Makefile test-bpipe-fd$(DEFAULT_OBJECT_TYPE) $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -shared test-bpipe-fd.lo -o $@ -rpath $(plugindir) -module -export-dynamic -avoid-version rot13-fd.lo: rot13-fd.c ${FDDIR}/fd_plugins.h $(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(CPPFLAGS) $(CFLAGS) -I../.. -I${FDDIR} -c rot13-fd.c rot13-fd.la: Makefile rot13-fd$(DEFAULT_OBJECT_TYPE) $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -shared rot13-fd.lo -o $@ -rpath $(plugindir) -module -export-dynamic -avoid-version namedpipe.lo: namedpipe.c namedpipe.h $(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(CPPFLAGS) $(CFLAGS) -c namedpipe.c test-deltaseq-fd.lo: test-deltaseq-fd.c ${FDDIR}/fd_plugins.h fd_common.h $(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(CPPFLAGS) $(CFLAGS) -I../.. -I${FDDIR} -c test-deltaseq-fd.c test-deltaseq-fd.la: Makefile test-deltaseq-fd$(DEFAULT_OBJECT_TYPE) $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -shared test-deltaseq-fd.lo -o $@ -rpath $(plugindir) -module -export-dynamic -avoid-version delta-fd.lo: delta-fd.c ${FDDIR}/fd_plugins.h fd_common.h $(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(CPPFLAGS) $(RSYNC_INC) $(CFLAGS) -I../.. -I${FDDIR} -c delta-fd.c delta-fd.la: Makefile delta-fd$(DEFAULT_OBJECT_TYPE) $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) $(RSYNC_LIBS) -shared delta-fd.lo -o $@ -rpath $(plugindir) -module -export-dynamic -avoid-version test-verify-fd.lo: test-verify-fd.c ${FDDIR}/fd_plugins.h fd_common.h $(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(CPPFLAGS) $(RSYNC_INC) $(CFLAGS) -I../.. 
-I${FDDIR} -c test-verify-fd.c test-verify-fd.la: Makefile test-verify-fd$(DEFAULT_OBJECT_TYPE) $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -shared test-verify-fd.lo -o $@ -rpath $(plugindir) -module -export-dynamic -avoid-version install-antivirus: antivirus-fd.la $(MKDIR) $(DESTDIR)$(plugindir) $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) antivirus-fd.la $(DESTDIR)$(plugindir) $(RMF) $(DESTDIR)$(plugindir)/antivirus-fd.la antivirus-fd.lo: antivirus-fd.c ${FDDIR}/fd_plugins.h fd_common.h $(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(CPPFLAGS) $(CFLAGS) -I../.. -I${FDDIR} -c antivirus-fd.c antivirus-fd.la: Makefile antivirus-fd$(DEFAULT_OBJECT_TYPE) $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -shared antivirus-fd.lo -o $@ -rpath $(plugindir) -module -export-dynamic -avoid-version test-plugin-fd.lo: test-plugin-fd.c ${FDDIR}/fd_plugins.h $(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(CPPFLAGS) $(CFLAGS) -I../.. -I${FDDIR} -c test-plugin-fd.c test-plugin-fd.la: Makefile test-plugin-fd$(DEFAULT_OBJECT_TYPE) $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -shared test-plugin-fd.lo -o $@ -rpath $(plugindir) -module -export-dynamic -avoid-version test-handlexacl-plugin-fd.lo: test-handlexacl-plugin-fd.c ${FDDIR}/fd_plugins.h $(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(CPPFLAGS) $(CFLAGS) -I../.. -I${FDDIR} -c test-handlexacl-plugin-fd.c test-handlexacl-plugin-fd.la: Makefile test-handlexacl-plugin-fd$(DEFAULT_OBJECT_TYPE) $(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -shared test-handlexacl-plugin-fd.lo -o $@ -rpath $(plugindir) -module -export-dynamic -avoid-version test_kubernetes_backend: Makefile $(TESTKUBEOBJ) $(PLUGINLIBDIR)/test_metaplugin_backend.lo @echo "Building backend $@ ..." $(NO_ECHO)$(LIBTOOL_LINK) --silent $(CXX) $(LDFLAGS) test_kubernetes_backend.lo $(PLUGINLIBDIR)/test_metaplugin_backend.lo -o $@ install: all $(EXTRA_INSTALL_TARGET) $(MKDIR) $(DESTDIR)$(plugindir) $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) bpipe-fd.la $(DESTDIR)$(plugindir) $(RMF) $(DESTDIR)$(plugindir)/bpipe-fd.la install-docker: $(MAKE) -C docker install-docker install-test-plugin: all test-plugins test-bpipe-fd.la rot13-fd.la $(MKDIR) $(DESTDIR)$(plugindir) $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) test-plugin-fd.la $(DESTDIR)$(plugindir) $(RMF) $(DESTDIR)$(plugindir)/test-plugin-fd.la $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) test-deltaseq-fd.la $(DESTDIR)$(plugindir) $(RMF) $(DESTDIR)$(plugindir)/test-deltaseq-fd.la $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) test-handlexacl-plugin-fd.la $(DESTDIR)$(plugindir) $(RMF) $(DESTDIR)$(plugindir)/test-handlexacl-plugin-fd.la $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) test-bpipe-fd.la $(DESTDIR)$(plugindir) $(RMF) $(DESTDIR)$(plugindir)/test-bpipe-fd.la $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) rot13-fd.la $(DESTDIR)$(plugindir) $(RMF) $(DESTDIR)$(plugindir)/rot13-fd.la install-test-dedup: test-dedup-fd.la $(MKDIR) $(DESTDIR)$(plugindir) $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) test-dedup-fd.la $(DESTDIR)$(plugindir) $(RMF) $(DESTDIR)$(plugindir)/test-dedup-fd.la install-delta-plugin: all delta-fd.la $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) delta-fd.la $(DESTDIR)$(plugindir) $(RMF) $(DESTDIR)$(plugindir)/delta-fd.la libtool-clean: @find . 
-name '*.lo' -print | xargs $(LIBTOOL_CLEAN) $(RMF) @$(RMF) *.la @$(RMF) -r .libs _libs clean: libtool-clean @rm -f main *.so *.o 1 2 3 *_test ndmp_idx_dump distclean: clean @rm -f Makefile libtool-uninstall: $(LIBTOOL_UNINSTALL) $(RMF) $(DESTDIR)$(plugindir)/bpipe-fd.so uninstall: $(LIBTOOL_UNINSTALL_TARGET) depend: bacula-15.0.3/src/plugins/Makefile0000644000175000017500000000104514771010173016572 0ustar bsbuildbsbuild# # Simple Makefile for building test plugins for Bacula # # Copyright (C) 2000-2023 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # first_rule: all dummy: all: dir fd sd dir: dummy (cd dir && make DESTDIR=$(DESTDIR) || exit 1) @echo fd: dummy (cd fd && make DESTDIR=$(DESTDIR) || exit 1) @echo sd: dummy (cd sd && make DESTDIR=$(DESTDIR) || exit 1) @echo clean: (cd dir && make DESTDIR=$(DESTDIR) clean || exit 1) (cd fd && make DESTDIR=$(DESTDIR) clean || exit 1) (cd sd && make DESTDIR=$(DESTDIR) clean || exit 1) bacula-15.0.3/src/bacula.h0000644000175000017500000001071214771010173015052 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * bacula.h -- main header file to include in all Bacula source * */ #ifndef _BACULA_H #define _BACULA_H 1 /* Disable FORTIFY_SOURCE, because bacula uses is own memory * manager */ #ifdef _FORTIFY_SOURCE #undef _FORTIFY_SOURCE #endif #ifdef __cplusplus /* Workaround for SGI IRIX 6.5 */ #define _LANGUAGE_C_PLUS_PLUS 1 #endif #if defined(HAVE_WIN32) #if defined(HAVE_MINGW) #include "winhdrs.h" #else #error "Only MINGW is supported" #include "winconfig.h" #endif #else #include "config.h" #endif #define __CONFIG_H #define _REENTRANT 1 #define _THREAD_SAFE 1 #define _POSIX_PTHREAD_SEMANTICS 1 /* System includes */ #if defined(HAVE_STDINT_H) #ifndef __sgi #include #endif #elif defined(HAVE_INTTYPES_H) #include #endif #if defined(HAVE_STDARG_H) #include #endif #include #if defined(HAVE_STDLIB_H) #include #endif #if HAVE_UNISTD_H # ifdef HAVE_HPUX_OS # undef _INCLUDE_POSIX1C_SOURCE # endif #include #endif #if HAVE_ALLOCA_H #include #endif #if defined(_MSC_VER) #include #include #include #endif #include #include /* O_NOATIME is defined at fcntl.h when supported */ #ifndef O_NOATIME #define O_NOATIME 0 #endif #if defined(_MSC_VER) extern "C" { #include "getopt.h" } #endif #ifdef xxxxx #ifdef HAVE_GETOPT_LONG #include #else #include "lib/getopt.h" #endif #endif #include #include #include #include #ifndef _SPLINT_ #include #endif #if HAVE_LIMITS_H #include #endif #include #include #include #include #include #ifdef HAVE_SYS_BITYPES_H #include #endif #include #ifdef HAVE_SYS_SOCKET_H #include #endif #if defined(HAVE_WIN32) & !defined(HAVE_MINGW) #include #endif #if !defined(HAVE_WIN32) & !defined(HAVE_MINGW) #include #endif #include #if HAVE_SYS_WAIT_H #include #endif #include #include #include #ifdef HAVE_OPENSSL /* fight OpenSSL namespace pollution */ #define STORE OSSL_STORE #include #include #include #include #include #include #undef STORE #endif /* Local Bacula includes. 
Be sure to put all the system * includes before these. */ #if defined(HAVE_WIN32) //#include #include "compat.h" #endif #include "version.h" #include "bc_types.h" #include "streams.h" #include "filetypes.h" #include "baconfig.h" #include "lib/lib.h" /* disable the buffereing of the network packet at restore time, that is required * to do async rehydration and speedup the restore. * #define DEDUP_UNBUFERED_REHYDRATION */ #define DEDUP_CLIENT_REC_BUF_SIZE 64 /* manually enable feature that you want to test in DEVELOPER mode*/ #ifdef DEVELOPER /* this force the local cache to randomly fail and ask the block to the SD */ //# define DEDUP_CLIENT_RANDOM_CACHE 1 /* this force some extra check in the DDE/DEDUP, like check if the hash match */ //# define DDE_EXTRA_CHECKS 1 #endif #ifdef DDE_EXTRA_CHECKS const bool have_dde_extra_check = true; #else const bool have_dde_extra_check = false; #endif /* * For wx-console compiles, we undo some Bacula defines. * This prevents conflicts between wx-Widgets and Bacula. * In wx-console files that malloc or free() Bacula structures * config/resources and interface to the Bacula libraries, * you must use bmalloc() and bfree(). */ #ifdef HAVE_WXCONSOLE #undef New #undef _ #undef free #undef malloc #endif #if defined(HAVE_WIN32) #include "winapi.h" #include "winhost.h" #else #include "host.h" #endif #ifndef HAVE_ZLIB_H #undef HAVE_LIBZ /* no good without headers */ #endif #endif bacula-15.0.3/src/jcr.h0000644000175000017500000010256414771010173014410 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Bacula JCR Structure definition for Daemons and the Library * This definition consists of a "Global" definition common * to all daemons and used by the library routines, and a * daemon specific part that is enabled with #defines. * * Written by Kern Sibbald, Nov MM */ #ifndef __JCR_H_ #define __JCR_H_ 1 /* Backup/Verify level code. These are stored in the DB */ #define L_FULL 'F' /* Full backup */ #define L_INCREMENTAL 'I' /* since last backup */ #define L_DIFFERENTIAL 'D' /* since last full backup */ #define L_SINCE 'S' #define L_VERIFY_CATALOG 'C' /* verify from catalog */ #define L_VERIFY_INIT 'V' /* verify save (init DB) */ #define L_VERIFY_VOLUME_TO_CATALOG 'O' /* verify Volume to catalog entries */ #define L_VERIFY_DISK_TO_CATALOG 'd' /* verify Disk attributes to catalog */ #define L_VERIFY_DATA 'A' /* verify data on volume */ #define L_BASE 'B' /* Base level job */ #define L_NONE ' ' /* None, for Restore and Admin */ #define L_VIRTUAL_FULL 'f' /* Virtual full backup */ /* Job Types. 
These are stored in the DB */ #define JT_BACKUP 'B' /* Backup Job */ #define JT_MIGRATED_JOB 'M' /* A previous backup job that was migrated */ #define JT_VERIFY 'V' /* Verify Job */ #define JT_RESTORE 'R' /* Restore Job */ #define JT_CONSOLE 'U' /* console program */ #define JT_SYSTEM 'I' /* internal system "job" */ #define JT_ADMIN 'D' /* admin job */ #define JT_ARCHIVE 'A' /* Archive Job */ #define JT_JOB_COPY 'C' /* Copy of a Job */ #define JT_COPY 'c' /* Copy Job */ #define JT_MIGRATE 'g' /* Migration Job */ #define JT_SCAN 'S' /* Scan Job */ /* Used to handle ClientAcl in various commands, not stored in the DB */ #define JT_BACKUP_RESTORE '*' /* Backup or Restore Job */ /* Job Status. Some of these are stored in the DB */ #define JS_Canceled 'A' /* canceled by user */ #define JS_Blocked 'B' /* blocked */ #define JS_Created 'C' /* created but not yet running */ #define JS_Differences 'D' /* Verify differences */ #define JS_ErrorTerminated 'E' /* Job terminated in error */ #define JS_WaitFD 'F' /* waiting on File daemon */ #define JS_Incomplete 'I' /* Incomplete Job */ #define JS_DataCommitting 'L' /* Committing data (last despool) */ #define JS_WaitMount 'M' /* waiting for Mount */ #define JS_Running 'R' /* running */ #define JS_WaitSD 'S' /* waiting on the Storage daemon */ #define JS_Terminated 'T' /* terminated normally */ #define JS_Warnings 'W' /* Terminated normally with warnings */ #define JS_AttrDespooling 'a' /* SD despooling attributes */ #define JS_WaitClientRes 'c' /* Waiting for Client resource */ #define JS_WaitMaxJobs 'd' /* Waiting for maximum jobs */ #define JS_Error 'e' /* Non-fatal error */ #define JS_FatalError 'f' /* Fatal error */ #define JS_AttrInserting 'i' /* Doing batch insert file records */ #define JS_WaitJobRes 'j' /* Waiting for job resource */ #define JS_DataDespooling 'l' /* Doing data despooling */ #define JS_WaitMedia 'm' /* waiting for new media */ #define JS_WaitPriority 'p' /* Waiting for higher priority jobs to finish */ #define JS_WaitDevice 'q' /* Queued waiting for device */ #define JS_WaitStoreRes 's' /* Waiting for storage resource */ #define JS_WaitStartTime 't' /* Waiting for start time */ #define JS_CloudUpload 'u' /* Cloud upload */ #define JS_WaitUser 'v' /* Waiting for User */ #define JS_CloudDownload 'w' /* Cloud download */ /* Helper for more descriptive job status * Edit jcr.c as well for the string mapping */ enum { JOB_TASK_ZERO = 0, JOB_TASK_QUEUED = 100, JOB_TASK_BEFORE_SCRIPT, JOB_TASK_ENDJOB_SCRIPT, JOB_TASK_AFTER_SCRIPT }; /* JCR->Encrypt bit field, can be encrypted by FD and/or SD */ enum { JOB_ENCRYPTED_BY_FD = 1, JOB_ENCRYPTED_BY_SD = 2 }; struct job_task { const uint32_t op_code; const char *op_message; }; const char *get_job_task(uint32_t op_code); /* Migration selection types. Do not change the order. 
*/ enum { MT_SMALLEST_VOL = 1, MT_OLDEST_VOL, MT_POOL_OCCUPANCY, MT_POOL_TIME, MT_POOL_UNCOPIED_JOBS, MT_CLIENT, MT_VOLUME, MT_JOB, MT_SQLQUERY }; #define job_canceled(jcr) \ (jcr->JobStatus == JS_Canceled || \ jcr->JobStatus == JS_ErrorTerminated || \ jcr->JobStatus == JS_FatalError \ ) #define job_waiting(jcr) \ (jcr->job_started && \ (jcr->JobStatus == JS_WaitFD || \ jcr->JobStatus == JS_WaitSD || \ jcr->JobStatus == JS_WaitMedia || \ jcr->JobStatus == JS_WaitMount || \ jcr->JobStatus == JS_WaitStoreRes || \ jcr->JobStatus == JS_WaitJobRes || \ jcr->JobStatus == JS_WaitClientRes || \ jcr->JobStatus == JS_WaitMaxJobs || \ jcr->JobStatus == JS_WaitPriority || \ jcr->SDJobStatus == JS_WaitMedia || \ jcr->SDJobStatus == JS_WaitMount || \ jcr->SDJobStatus == JS_WaitDevice || \ jcr->SDJobStatus == JS_CloudUpload || \ jcr->SDJobStatus == JS_CloudDownload || \ jcr->SDJobStatus == JS_WaitMaxJobs)) #define foreach_jcr(jcr) \ for (jcr=jcr_walk_start(); jcr; (jcr=jcr_walk_next(jcr)) ) #define endeach_jcr(jcr) jcr_walk_end(jcr) #define SD_APPEND true #define SD_READ false /* Forward referenced structures */ class JCR; class BSOCK; struct FF_PKT; class BDB; struct ATTR_DBR; class Plugin; struct save_pkt; struct bpContext; class HashBlockDict; class HashList; class DedupFiledInterface; class DedupStoredInterfaceBase; #ifdef FILE_DAEMON class VSSClient; class htable; class BACL; class BXATTR; class snapshot_manager; class bnet_poll_manager; struct CRYPTO_CTX { bool pki_sign; /* Enable PKI Signatures? */ bool pki_encrypt; /* Enable PKI Encryption? */ DIGEST *digest; /* Last file's digest context */ X509_KEYPAIR *pki_keypair; /* Encryption key pair */ alist *pki_signers; /* Trusted Signers */ alist *pki_recipients; /* Trusted Recipients */ CRYPTO_SESSION *pki_session; /* PKE Public Keys + Symmetric Session Keys */ POOLMEM *pki_session_encoded; /* Cached DER-encoded copy of pki_session */ int32_t pki_session_encoded_size; /* Size of DER-encoded pki_session */ POOLMEM *crypto_buf; /* Encryption/Decryption buffer */ }; #endif typedef void (JCR_free_HANDLER)(JCR *jcr); /* Job Control Record (JCR) */ class JCR { private: pthread_mutex_t mutex; /* jcr mutex */ pthread_mutex_t mutex_auth; /* used during authentication */ volatile int32_t _use_count; /* use count */ int32_t m_JobType; /* backup, restore, verify ... */ int32_t m_JobLevel; /* Job level */ bool my_thread_killable; /* can we kill the thread? 
*/ public: void lock() {P(mutex); }; void unlock() {V(mutex); }; void lock_auth() {P(mutex_auth);}; void unlock_auth() {V(mutex_auth);}; void inc_use_count(void) {lock(); _use_count++; unlock(); }; void dec_use_count(void) {lock(); _use_count--; unlock(); }; int32_t use_count() const { return _use_count; }; void init_mutex(void) { pthread_mutex_init(&mutex, NULL); pthread_mutex_init(&mutex_auth, NULL); }; void destroy_mutex(void) { pthread_mutex_destroy(&mutex_auth); pthread_mutex_destroy(&mutex); }; bool is_internal_job() {return (JobId == 0 || m_JobType == JT_SYSTEM || m_JobType == JT_CONSOLE); }; bool is_job_canceled() {return job_canceled(this); }; bool is_canceled() {return job_canceled(this); }; bool is_incomplete() { return JobStatus == JS_Incomplete; }; bool is_JobLevel(int32_t JobLevel) { return JobLevel == m_JobLevel; }; bool is_JobType(int32_t JobType) { return JobType == m_JobType; }; bool is_JobStatus(int32_t aJobStatus) { return aJobStatus == JobStatus; }; void setJobLevel(int32_t JobLevel) { m_JobLevel = JobLevel; }; void setJobType(int32_t JobType) { m_JobType = JobType; }; void forceJobStatus(int32_t aJobStatus) { JobStatus = aJobStatus; }; void setJobStarted(); int32_t getJobType() const { return m_JobType; }; int32_t getJobLevel() const { return m_JobLevel; }; int32_t getJobStatus() const { return JobStatus; }; bool use_client() const { switch (m_JobType) { case JT_BACKUP: case JT_RESTORE: case JT_VERIFY: return true; default: return false; } }; bool no_client_used() const { return (m_JobLevel == L_VIRTUAL_FULL); }; bool can_be_stopped(); /* in lib/jcr.c */ const char *get_OperationName(); /* in lib/jcr.c */ const char *get_ActionName(bool past); /* in lib/jcr.c */ void setJobStatus(int JobStatus); /* in lib/jcr.c */ bool sendJobStatus(); /* in lib/jcr.c */ bool sendJobStatus(int JobStatus); /* in lib/jcr.c */ bool JobReads(); /* in lib/jcr.c */ void my_thread_send_signal(int sig); /* in lib/jcr.c */ void set_killable(bool killable); /* in lib/jcr.c */ bool is_killable() const { return my_thread_killable; }; job_code_callback_t job_code_callback; /* Callback sending messages defined by the user */ /* Global part of JCR common to all daemons */ dlink link; /* JCR chain link */ pthread_t my_thread_id; /* id of thread controlling jcr */ BSOCK *dir_bsock; /* Director bsock or NULL if we are him */ BSOCK *store_bsock; /* Storage connection socket */ BSOCK *file_bsock; /* File daemon connection socket */ JCR_free_HANDLER *daemon_free_jcr; /* Local free routine */ dlist *msg_queue; /* Queued messages */ pthread_mutex_t msg_queue_mutex; /* message queue mutex */ bool dequeuing_msgs; /* Set when dequeuing messages */ alist job_end_push; /* Job end pushed calls */ alist *allowed_script_dirs; /* Daemon-specific Allowed directory list to run scripts/programs from */ POOLMEM *VolumeName; /* Volume name desired -- pool_memory */ POOLMEM *errmsg; /* edited error message */ char Job[MAX_NAME_LENGTH]; /* Unique name of this Job */ char event[MAX_NAME_LENGTH]; /* Current event (python) */ uint32_t eventType; /* Current event type (plugin) */ uint32_t job_task; /* Used to add description of current opearion being executed (e.g. 'running AfterJob script'). If not 0, it can be used to make current job status more descriptive. 
*/ uint32_t JobId; /* Director's JobId */ uint32_t VolSessionId; uint32_t VolSessionTime; uint32_t JobFiles; /* Number of files written, this job */ uint32_t JobErrors; /* Number of non-fatal errors this job */ uint32_t SDErrors; /* Number of non-fatal SD errors */ uint32_t JobWarnings; /* Number of warning messages */ uint32_t LastRate; /* Last sample bytes/sec */ uint64_t JobBytes; /* Number of bytes processed this job */ uint64_t LastJobBytes; /* Last sample number bytes */ uint64_t ReadBytes; /* Bytes read -- before compression */ uint64_t CommBytes; /* FD comm line bytes sent to SD */ uint64_t CommCompressedBytes; /* FD comm line compressed bytes sent to SD */ uint32_t num_dirs_skipped; /* directories skipped by this job (see 'Allowed Backup Dir' directive */ FileId_t FileId; /* Last FileId used */ volatile int32_t JobStatus; /* ready, running, blocked, terminated */ int32_t JobPriority; /* Job priority */ time_t sched_time; /* job schedule time, i.e. when it should start */ time_t initial_sched_time; /* original sched time before any reschedules are done */ time_t start_time; /* when job actually started */ time_t run_time; /* used for computing speed */ time_t last_time; /* Last sample time */ time_t end_time; /* job end time */ time_t wait_time_sum; /* cumulative wait time since job start */ time_t wait_time; /* timestamp when job have started to wait */ time_t job_started_time; /* Time when the MaxRunTime start to count */ POOLMEM *client_name; /* client name */ POOLMEM *JobIds; /* User entered string of JobIds */ POOLMEM *stime; /* start time for incremental/differential */ char *RestoreBootstrap; /* Bootstrap file to restore */ char *sd_auth_key; /* SD auth key */ MSGS *jcr_msgs; /* Copy of message resource -- actually used */ uint32_t ClientId; /* Client associated with Job */ char *where; /* prefix to restore files to */ char *RegexWhere; /* file relocation in restore */ alist *where_bregexp; /* BREGEXP alist for path manipulation */ int32_t cached_pnl; /* cached path length */ POOLMEM *cached_path; /* cached path */ bool prefix_links; /* Prefix links with Where path */ bool gui; /* set if gui using console */ bool authenticated; /* set when client authenticated */ bool cached_attribute; /* set if attribute is cached */ bool batch_started; /* is batch mode already started ? 
*/ bool cmd_plugin; /* Set when processing a command Plugin = */ bool opt_plugin; /* Set when processing an option Plugin = */ bool keep_path_list; /* Keep newly created path in a hash */ bool accurate; /* true if job is accurate */ bool HasBase; /* True if job use base jobs */ bool rerunning; /* rerunning an incomplete job */ bool job_started; /* Set when the job is actually started */ bool sd_calls_client; /* Set for SD to call client (FD/SD) */ bool exiting; /* Set when exiting */ void *Python_job; /* Python Job Object */ void *Python_events; /* Python Events Object */ POOLMEM *attr; /* Attribute string from SD */ BDB *db; /* database pointer */ BDB *db_batch; /* database pointer for batch and accurate */ uint64_t nb_base_files; /* Number of base files */ uint64_t nb_base_files_used; /* Number of useful files in base */ ATTR_DBR *ar; /* DB attribute record */ guid_list *id_list; /* User/group id to name list */ bpContext *plugin_ctx_list; /* list of contexts for plugins */ bpContext *plugin_ctx; /* current plugin context */ Plugin *plugin; /* plugin instance */ save_pkt *plugin_sp; /* plugin save packet */ char *plugin_options; /* user set options for plugin */ POOLMEM *comment; /* Comment for this Job */ int64_t max_bandwidth; /* Bandwidth limit for this Job */ htable *path_list; /* Directory list (used by findlib) */ int job_uid; /* UID used during job session */ char *job_user; /* Specific permission for a job */ char *job_group; /* Specific permission for a job */ POOLMEM *StatusErrMsg; /* Error message displayed in the job report */ char StatusInfo[MAX_NAME_LENGTH]; /* Additionnal information about the error status */ time_t last_stat_time; /* Last time stats sent to Dir */ time_t stat_interval; /* Stats send interval default 30s, set to -1 to disable */ uint32_t estimate_limit; /* Number of files to display in estimate */ uint32_t getErrors() { return JobErrors + SDErrors; }; /* Get error count */ bool sendProgressStatus(); /* Send the progress status to the dir (SD/FD) */ /* Daemon specific part of JCR */ /* This should be empty in the library */ #ifdef DIRECTOR_DAEMON /* Director Daemon specific data part of JCR */ bool SD_set_worm; /* Initiate the procedure to set the WORM attribute */ bool SD_msg_chan_started; /* True if the msg thread is started */ pthread_t SD_msg_chan; /* Message channel thread id */ pthread_cond_t term_wait; /* Wait for job termination */ workq_ele_t *work_item; /* Work queue item if scheduled */ BSOCK *ua; /* User agent */ JOB *job; /* Job resource */ JOB *verify_job; /* Job resource of verify previous job */ alist *plugin_config; /* List of ConfigFile needed for restore */ StorageManager *store_mngr; /* Storage manager object */ POOLMEM *write_dev; /* Write device (set by start_storage_dameon_job() */ POOLMEM *read_dev; /* Read device (set by start_storage_dameon_job() */ CLIENT *client; /* Client resource */ POOL *pool; /* Pool resource = write for migration */ POOL *next_pool; /* Next pool override */ POOL *rpool; /* Read pool. 
Used only in migration */ POOL *full_pool; /* Full backup pool resource */ POOL *vfull_pool; /* Virtual Full backup pool resource */ POOL *inc_pool; /* Incremental backup pool resource */ POOL *diff_pool; /* Differential backup pool resource */ FILESET *fileset; /* FileSet resource */ CAT *catalog; /* Catalog resource */ MSGS *messages; /* Default message handler */ uint32_t SDJobFiles; /* Number of files written, this job */ uint64_t SDJobBytes; /* Number of bytes processed this job */ volatile int32_t SDJobStatus; /* Storage Job Status */ volatile int32_t FDJobStatus; /* File daemon Job Status */ uint32_t ExpectedFiles; /* Expected restore files */ uint32_t MediaId; /* DB record IDs associated with this job */ btime_t next_qrunscript_execution;/* Next time we can execute the Queued runscript */ int32_t FileIndex; /* Last FileIndex processed */ utime_t MaxRunSchedTime; /* max run time in seconds from Initial Scheduled time */ POOLMEM *fname; /* name to put into catalog */ POOLMEM *component_fname; /* Component info file name */ POOLMEM *media_type; /* Set if user supplied Storage */ FILE *component_fd; /* Component info file desc */ JOB_DBR jr; /* Job DB record for current job */ JOB_DBR previous_jr; /* previous job database record */ JOB *previous_job; /* Job resource of migration previous job */ JCR *wjcr; /* JCR for migration/copy write job */ char FSCreateTime[MAX_TIME_LENGTH]; /* FileSet CreateTime as returned from DB */ char since[MAX_NAME_LENGTH]; /* since time */ char PrevJob[MAX_NAME_LENGTH]; /* Previous job name assiciated with since time */ union { JobId_t RestoreJobId; /* Id specified by UA */ JobId_t MigrateJobId; }; POOLMEM *client_uname; /* client uname */ POOLMEM *pool_source; /* Where pool came from */ POOLMEM *next_pool_source; /* Where next pool came from */ POOLMEM *rpool_source; /* Where migrate read pool came from */ POOLMEM *catalog_source; /* Where catalog came from */ POOLMEM *next_vol_list; /* Volumes previously requested */ rblist *bsr_list; /* Bootstrap that can be needed during restore */ uint32_t replace; /* Replace option */ int32_t NumVols; /* Number of Volume used in pool */ int32_t reschedule_count; /* Number of times rescheduled */ int32_t FDVersion; /* File daemon version number */ int32_t SDVersion; /* Storage daemon version number */ int64_t spool_size; /* Spool size for this job */ uint64_t client_version; /* Client version as a number */ utime_t snapshot_retention; /* Snapshot retention (from Client/Job resource) */ int32_t Encrypt; /* Encryption used by FD */ volatile bool sd_msg_thread_done; /* Set when Storage message thread done */ bool wasVirtualFull; /* set if job was VirtualFull */ bool IgnoreDuplicateJobChecking; /* set in migration jobs */ bool spool_data; /* Spool data in SD */ bool acquired_resource_locks; /* set if resource locks acquired */ bool term_wait_inited; /* Set when cond var inited */ bool fn_printed; /* printed filename */ bool write_part_after_job; /* Write part after job in SD */ bool needs_sd; /* set if SD needed by Job */ bool cloned; /* set if cloned */ bool unlink_bsr; /* Unlink bsr file created */ bool Snapshot; /* Snapshot used by FD (VSS on Windows) */ bool stats_enabled; /* Keep all job records in a table for long term statistics */ bool no_maxtime; /* Don't check Max*Time for this JCR */ bool keep_sd_auth_key; /* Clear or not the SD auth key after connection*/ bool use_accurate_chksum; /* Use or not checksum option in accurate code */ bool run_pool_override; bool cmdline_next_pool_override; /* Next pool is 
overridden */ bool run_next_pool_override; /* Next pool is overridden */ bool run_full_pool_override; bool run_vfull_pool_override; bool run_inc_pool_override; bool run_diff_pool_override; bool sd_canceled; /* set if SD canceled */ bool RescheduleIncompleteJobs; /* set if incomplete can be rescheduled */ bool use_all_JobIds; /* Use all jobids present in command line */ bool sd_client; /* This job runs as SD client */ bool dummy_jobmedia; /* Dummy JobMedia written */ bool estimate; /* if the job is an estimate instead of a run */ #endif /* DIRECTOR_DAEMON */ #ifdef FILE_DAEMON /* File Daemon specific part of JCR */ BSOCK *sd_calls_client_bsock; /* Socket used by SDCallsClient feature */ uint32_t num_files_examined; /* files examined this job */ POOLMEM *last_fname; /* last file saved/verified */ POOLMEM *job_metadata; /* VSS job metadata */ pthread_cond_t job_start_wait; /* Wait for SD to start Job */ BACL *bacl; /* ACLs for backup/restore */ BXATTR *bxattr; /* Extended Attributes for backup/restore */ int32_t last_type; /* type of last file saved/verified */ int incremental; /* set if incremental for SINCE */ utime_t mtime; /* begin time for SINCE */ int listing; /* job listing in estimate */ long Ticket; /* Ticket */ char *big_buf; /* I/O buffer */ POOLMEM *compress_buf; /* Compression buffer */ int32_t compress_buf_size; /* Length of compression buffer */ void *pZLIB_compress_workset; /* zlib compression session data */ void *LZO_compress_workset; /* lzo compression session data */ void *ZSTD_compress_workset; /* zstd compression session data */ void *ZSTD_decompress_workset; /* zstd decompression session data */ int32_t replace; /* Replace options */ int32_t buf_size; /* length of buffer */ FF_PKT *ff; /* Find Files packet */ char stored_addr[MAX_NAME_LENGTH]; /* storage daemon address */ char PrevJob[MAX_NAME_LENGTH]; /* Previous job name assiciated with since time */ uint32_t ExpectedFiles; /* Expected restore files */ uint32_t StartFile; uint32_t EndFile; uint32_t StartBlock; uint32_t EndBlock; bnet_poll_manager *sd_packet_mgr; /* Manage POLL requests to control the flow */ int32_t sd_dedup; /* set if SD has dedup */ int32_t sd_hash; /* SD hash type */ int32_t sd_hash_size; /* SD hash size */ uint32_t dedup_block_size; /* Default dedup block size */ uint32_t min_dedup_block_size; /* Minimum desired dedup block size */ uint32_t max_dedup_block_size; /* TODO, above this size send raw data */ pthread_t heartbeat_id; /* id of heartbeat thread */ volatile int hb_status; /* heartbeat running/stopped 0: not running, 1 running, -1 stopped */ BSOCK *hb_bsock; /* duped SD socket */ BSOCK *hb_dir_bsock; /* duped DIR socket */ alist *RunScripts; /* Commands to run before and after job */ CRYPTO_CTX crypto; /* Crypto ctx */ DIRRES* director; /* Director resource */ bool Snapshot; /* Snapshot used by FD (or VSS) */ bool got_metadata; /* set when found job_metatdata */ bool multi_restore; /* Dir can do multiple storage restore */ bool interactive_session; /* Use interactive session with the SD */ htable *file_list; /* Previous file list (accurate mode) */ uint64_t base_size; /* compute space saved with base job */ utime_t snapshot_retention; /* Snapshot retention (from director) */ snapshot_manager *snap_mgr; /* Snapshot manager */ DedupFiledInterface *dedup; /* help the FD to do deduplication */ bool dedup_use_cache; /* use client cache */ VSSClient *pVSSClient; /* VSS handler */ alist *plugin_verify; /* Registered plugins that need a copy of the data in verify job */ alist *plugin_options_list; 
/* list of the options to use in a job */ alist *fileevents; /* list of the current file events to record and send to the DIR */ bool estimate; /* if the job is an estimate instead of a run */ #endif /* FILE_DAEMON */ #ifdef STORAGE_DAEMON /* Storage Daemon specific part of JCR */ JCR *next_dev; /* next JCR attached to device */ JCR *prev_dev; /* previous JCR attached to device */ dlist *jobmedia_queue; /* JobMedia queue ***BEEF*** */ dlist *filemedia_queue; /* FileMedia queue ***BEEF*** */ char *dir_auth_key; /* Dir auth key */ pthread_cond_t job_start_wait; /* Wait for FD to start Job */ int32_t type; DCR *read_dcr; /* device context for reading */ DCR *dcr; /* device context record */ alist *dcrs; /* list of dcrs open */ POOLMEM *job_name; /* base Job name (not unique) */ POOLMEM *fileset_name; /* FileSet */ POOLMEM *fileset_md5; /* MD5 for FileSet */ char stored_addr[MAX_NAME_LENGTH]; /* storage daemon address */ char client_addr[MAX_NAME_LENGTH]; /* client daemon address */ int client_port; /* client port */ VOL_LIST *VolList; /* list to read, freed at the end of the job */ int32_t NumWriteVolumes; /* number of volumes written */ int32_t NumReadVolumes; /* total number of volumes to read */ int32_t CurReadVolume; /* current read volume number */ int32_t label_errors; /* count of label errors */ int32_t DIRVersion; /* Director version number */ int32_t FDVersion; /* File daemon version number */ int32_t SDVersion; /* Storage daemon version number */ bool session_opened; bool interactive_session; /* Interactive session with the FD */ bool is_ok_data_sent; /* the "3000 OK data" has been sent */ long Ticket; /* ticket for this job */ bool ignore_label_errors; /* ignore Volume label errors */ bool spool_attributes; /* set if spooling attributes */ bool no_attributes; /* set if no attributes wanted */ int64_t spool_size; /* Spool size for this job */ bool spool_data; /* set to spool data */ int32_t CurVol; /* Current Volume count */ DIRRES* director; /* Director resource */ alist *write_store; /* list of write storage devices sent by DIR */ alist *read_store; /* list of read devices sent by DIR */ alist *reserve_msgs; /* reserve fail messages */ bool write_part_after_job; /* Set to write part after job */ bool PreferMountedVols; /* Prefer mounted vols rather than new */ bool Resched; /* Job may be rescheduled */ bool bscan_insert_jobmedia_records; /*Bscan: needs to insert job media records */ bool bscan_created; /* Flag for bscan to know if this jcr was created by it or not */ bool bscan_files_purged; /* Flag for bscan to know if this jcr has purged files */ bool sd_client; /* Set if acting as client */ bool use_new_match_all; /* TODO: Remove when the match_bsr() will be well tested */ int32_t fd_dedup; /* fdcaps dedup */ int32_t fd_rehydration; /* fdcaps rehydration */ /* Parmaters for Open Read Session */ BSR *bsr; /* Bootstrap record -- has everything */ bool mount_next_volume; /* set to cause next volume mount */ uint32_t read_VolSessionId; uint32_t read_VolSessionTime; uint32_t read_StartFile; uint32_t read_EndFile; uint32_t read_StartBlock; uint32_t read_EndBlock; /* Device wait times */ int32_t min_wait; int32_t max_wait; int32_t max_num_wait; int32_t wait_sec; int32_t rem_wait_sec; int32_t num_wait; DedupStoredInterfaceBase *dedup; /*used by dedup for rehydration, not for backup*/ #endif /* STORAGE_DAEMON */ }; /* * Setting a NULL in tsd doesn't clear the tsd but instead tells * pthreads not to call the tsd destructor. 
Consequently, we * define this *invalid* jcr address and stuff it in the tsd * when the jcr is not valid. */ #define INVALID_JCR ((JCR *)(-1)) /* * Structure for all daemons that keeps some summary * info on the last job run. */ struct s_last_job { dlink link; int32_t Errors; /* FD/SD errors */ int32_t JobType; int32_t JobStatus; int32_t JobLevel; uint32_t JobId; uint32_t VolSessionId; uint32_t VolSessionTime; uint32_t JobFiles; uint64_t JobBytes; utime_t start_time; utime_t end_time; char Job[MAX_NAME_LENGTH]; }; union plugin_event_level { }; extern struct s_last_job last_job; extern DLL_IMP_EXP dlist *last_jobs; /* The following routines are found in lib/jcr.c */ extern int get_next_jobid_from_list(char **p, uint32_t *JobId); extern bool init_jcr_subsystem(void); extern JCR *new_jcr(int size, JCR_free_HANDLER *daemon_free_jcr); extern JCR *get_jcr_by_id(uint32_t JobId); extern JCR *get_jcr_by_session(uint32_t SessionId, uint32_t SessionTime); extern JCR *get_jcr_by_partial_name(char *Job); extern JCR *get_jcr_by_full_name(char *Job); extern JCR *get_next_jcr(JCR *jcr); extern void set_jcr_job_status(JCR *jcr, int JobStatus); extern int DLL_IMP_EXP num_jobs_run; #ifdef DEBUG extern void b_free_jcr(const char *file, int line, JCR *jcr); #define free_jcr(jcr) b_free_jcr(__FILE__, __LINE__, (jcr)) #else extern void free_jcr(JCR *jcr); #endif /* Used to display specific job information after a fatal signal */ typedef void (dbg_jcr_hook_t)(JCR *jcr, FILE *fp); extern void dbg_jcr_add_hook(dbg_jcr_hook_t *fct); #endif /* __JCR_H_ */ bacula-15.0.3/src/filetypes.h0000644000175000017500000000765014771010173015636 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /** * Stream definitions. Split from baconfig.h Nov 2010 * * Kern Sibbald, MM * */ #ifndef __BFILETYPES_H #define __BFILETYPES_H 1 /** * File type (Bacula defined). * NOTE!!! These are saved in the Attributes record on the tape, so * do not change them. If need be, add to them. * * This is stored as 32 bits on the Volume, but only FT_MASK (16) bits are * used for the file type. The upper bits are used to indicate * additional optional fields in the attribute record. 
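   A minimal sketch of the decomposition described above, using the FT_MASK and
   AR_DATA_STREAM definitions that follow (variable names here are illustrative
   only, not taken from the sources):

      uint32_t packed = attr_type_word;                 // 32-bit value stored in the attribute record
      int ftype       = packed & FT_MASK;               // low 16 bits: one of the FT_xxx codes below
      bool has_stream = (packed & AR_DATA_STREAM) != 0; // bit 16: a data stream id is also present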
*/ #define FT_MASK 0xFFFF /* Bits used by FT (type) */ #define FT_LNKSAVED 1 /* hard link to file already saved */ #define FT_REGE 2 /* Regular file but empty */ #define FT_REG 3 /* Regular file */ #define FT_LNK 4 /* Soft Link */ #define FT_DIREND 5 /* Directory at end (saved) */ #define FT_SPEC 6 /* Special file -- chr, blk, fifo, sock */ #define FT_NOACCESS 7 /* Not able to access */ #define FT_NOFOLLOW 8 /* Could not follow link */ #define FT_NOSTAT 9 /* Could not stat file */ #define FT_NOCHG 10 /* Incremental option, file not changed */ #define FT_DIRNOCHG 11 /* Incremental option, directory not changed */ #define FT_ISARCH 12 /* Trying to save archive file */ #define FT_NORECURSE 13 /* No recursion into directory */ #define FT_NOFSCHG 14 /* Different file system, prohibited */ #define FT_NOOPEN 15 /* Could not open directory */ #define FT_RAW 16 /* Raw block device */ #define FT_FIFO 17 /* Raw fifo device */ /** * The DIRBEGIN packet is sent to the FD file processing routine so * that it can filter packets, but otherwise, it is not used * or saved */ #define FT_DIRBEGIN 18 /* Directory at beginning (not saved) */ #define FT_INVALIDFS 19 /* File system not allowed for */ #define FT_INVALIDDT 20 /* Drive type not allowed for */ #define FT_REPARSE 21 /* Win NTFS reparse point */ /* A FILE Symlink is a reparse point but it is handle as a FT_REG file */ #define FT_PLUGIN 22 /* Plugin generated filename */ #define FT_DELETED 23 /* Deleted file entry */ #define FT_BASE 24 /* Duplicate base file entry */ #define FT_RESTORE_FIRST 25 /* Restore this "object" first */ #define FT_JUNCTION 26 /* Win32 Junction point or Symlink to a dir */ #define FT_PLUGIN_CONFIG 27 /* Object for Plugin configuration */ #define FT_PLUGIN_CONFIG_FILLED 28 /* Object for Plugin configuration filled by Director */ #define FT_PLUGIN_OBJECT 29 /* Opaque Plugin Object used for Object Management*/ #define FT_SECURITY_OBJECT 30 /* Security report */ /* Definitions for upper part of type word (see above). */ #define AR_DATA_STREAM (1<<16) /* Data stream id present */ /* Quick way to know if a Filetype is about a plugin "Object" */ #define IS_FT_OBJECT(x) (((x) == FT_RESTORE_FIRST) || ((x) == FT_PLUGIN_CONFIG_FILLED) || ((x) == FT_PLUGIN_CONFIG) \ || ((x) == FT_PLUGIN_OBJECT) || ((x) == FT_SECURITY_OBJECT)) #endif /* __BFILETYPES_H */ bacula-15.0.3/src/streams.h0000644000175000017500000002540014771010173015301 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /** * Stream definitions. 
Split from baconfig.h Nov 2010 * * Kern Sibbald, MM * */ #ifndef __BSTREAMS_H #define __BSTREAMS_H 1 /* Stream bits -- these bits are new as of 24Nov10 */ #define STREAM_BIT_64 (1<<30) /* 64 bit stream (not yet implemented) */ #define STREAM_BIT_BITS (1<<29) /* Following bits may be set */ #define STREAM_BIT_PLUGIN (1<<28) /* Item written by a plugin */ #define STREAM_BIT_DELTA (1<<27) /* Stream contains delta data */ #define STREAM_BIT_OFFSETS (1<<26) /* Stream has data offset */ #define STREAM_BIT_PORTABLE_DATA (1<<25) /* Data is portable */ #define STREAM_BIT_DEDUPLICATION_DATA (1<<24) /* Stream contains dedup refs */ #define STREAM_BIT_NO_DEDUPLICATION (1<<23) /* Stream that must no be deduped */ #define STREAM_BIT_PREPARE (1<<22) /* Percona SD prepare */ /* TYPE represents our current (old) stream types -- e.g. values 0 - 2047 */ #define STREAMBASE_TYPE 0 /* base for types */ #define STREAMBITS_TYPE 11 /* type bit size */ #define STREAMMASK_TYPE (~((~0uL)<< STREAMBITS_TYPE) << STREAMBASE_TYPE) /* * Note additional base, bits, and masks can be defined for new * ranges or subranges of stream attributes. */ /** * Old, but currently used Stream definitions. Once defined these must NEVER * change as they go on the storage media. * Note, the following streams are passed from the SD to the DIR * so that they may be put into the catalog (actually only the * stat packet part of the attr record is put in the catalog. * * STREAM_UNIX_ATTRIBUTES * STREAM_UNIX_ATTRIBUTES_EX * STREAM_MD5_DIGEST * STREAM_SHA1_DIGEST * STREAM_SHA256_DIGEST * STREAM_SHA512_DIGEST */ #define STREAM_NONE 0 /* Reserved Non-Stream */ #define STREAM_UNIX_ATTRIBUTES 1 /* Generic Unix attributes */ #define STREAM_FILE_DATA 2 /* Standard uncompressed data */ #define STREAM_MD5_DIGEST 3 /* MD5 digest for the file */ #define STREAM_GZIP_DATA 4 /* GZip compressed file data */ #define STREAM_UNIX_ATTRIBUTES_EX 5 /* Extended Unix attr for Win32 EX - Deprecated */ #define STREAM_SPARSE_DATA 6 /* Sparse data stream */ #define STREAM_SPARSE_GZIP_DATA 7 /* Sparse gzipped data stream */ #define STREAM_PROGRAM_NAMES 8 /* program names for program data */ #define STREAM_PROGRAM_DATA 9 /* Data needing program */ #define STREAM_SHA1_SIGNATURE 10 /* deprecated */ #define STREAM_SHA1_DIGEST 10 /* SHA1 digest for the file */ #define STREAM_WIN32_DATA 11 /* Win32 BackupRead data */ #define STREAM_WIN32_GZIP_DATA 12 /* Gzipped Win32 BackupRead data */ #define STREAM_MACOS_FORK_DATA 13 /* Mac resource fork */ #define STREAM_HFSPLUS_ATTRIBUTES 14 /* Mac OS extra attributes */ #define STREAM_UNIX_ACCESS_ACL 15 /* Standard ACL attributes on UNIX - Deprecated */ #define STREAM_UNIX_DEFAULT_ACL 16 /* Default ACL attributes on UNIX - Deprecated */ #define STREAM_SHA256_DIGEST 17 /* SHA-256 digest for the file */ #define STREAM_SHA512_DIGEST 18 /* SHA-512 digest for the file */ #define STREAM_SIGNED_DIGEST 19 /* Signed File Digest, ASN.1, DER Encoded */ #define STREAM_ENCRYPTED_FILE_DATA 20 /* Encrypted, uncompressed data */ #define STREAM_ENCRYPTED_WIN32_DATA 21 /* Encrypted, uncompressed Win32 BackupRead data */ #define STREAM_ENCRYPTED_SESSION_DATA 22 /* Encrypted Session Data, ASN.1, DER Encoded */ #define STREAM_ENCRYPTED_FILE_GZIP_DATA 23 /* Encrypted, compressed data */ #define STREAM_ENCRYPTED_WIN32_GZIP_DATA 24 /* Encrypted, compressed Win32 BackupRead data */ #define STREAM_ENCRYPTED_MACOS_FORK_DATA 25 /* Encrypted, uncompressed Mac resource fork */ #define STREAM_PLUGIN_NAME 26 /* Plugin "file" string */ #define STREAM_PLUGIN_DATA 27 /* 
Plugin specific data */ #define STREAM_RESTORE_OBJECT 28 /* Plugin restore object */ /* * Non-gzip compressed streams. Those streams can handle arbitrary * compression algorithm data as an additional header is stored * at the beginning of the stream. See comp_stream_header definition * in ch.h for more details. */ #define STREAM_COMPRESSED_DATA 29 /* Compressed file data */ #define STREAM_SPARSE_COMPRESSED_DATA 30 /* Sparse compressed data stream */ #define STREAM_WIN32_COMPRESSED_DATA 31 /* Compressed Win32 BackupRead data */ #define STREAM_ENCRYPTED_FILE_COMPRESSED_DATA 32 /* Encrypted, compressed data */ #define STREAM_ENCRYPTED_WIN32_COMPRESSED_DATA 33 /* Encrypted, compressed Win32 BackupRead data */ #define STREAM_PLUGIN_OBJECT 34 /* Plugin object */ #define STREAM_PLUGIN_META_BLOB 35 /* Plugin metadata (blob) for file being backed up */ #define STREAM_PLUGIN_META_CATALOG 36 /* Plugin metadata (to be stored in catalog) for file being backed up */ #define STREAM_UNIX_ATTRIBUTE_UPDATE 37 /* File's updated metadata */ #define STREAM_FILEEVENT 38 /* FileEvent associated with the current object */ #define STREAM_XXHASH64_DIGEST 39 /* XXHASH64 digest for the file */ #define STREAM_XXH3_64_DIGEST 40 /* XXH3_64 digest for the file */ #define STREAM_XXH3_128_DIGEST 41 /* XXH3_128 digest for the file */ #define STREAM_ADATA_BLOCK_HEADER 200 /* Adata block header */ #define STREAM_ADATA_RECORD_HEADER 201 /* Adata record header */ /* * Additional Stream definitions. Once defined these must NEVER * change as they go on the storage media. * * The Stream numbers from 1000-1999 are reserved for ACL and extended attribute streams. * Each different platform has its own stream id(s), if a platform supports multiple stream types * it should supply different handlers for each type it supports and this should be called * from the stream dispatch function. Currently in this reserved space we allocate the * different acl streams from 1000 on and the different extended attributes streams from * 1999 down. So the two naming spaces grows towards each other. 
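   As a short illustration of the layout defined at the top of this file (a sketch
   assuming the usual way the bits are combined; is_win32_stream() in findlib/bfile.c
   applies the same STREAMMASK_TYPE mask): the reserved 1000-1999 ids described above
   are ordinary TYPE values inside the 0-2047 space, while the STREAM_BIT_xxx flags
   ride in the upper bits.

      int full_stream = STREAM_BIT_BITS | STREAM_BIT_DELTA | STREAM_FILE_DATA;
      int base_type   = full_stream & STREAMMASK_TYPE;         // recovers STREAM_FILE_DATA (2)
      bool is_delta   = (full_stream & STREAM_BIT_DELTA) != 0; // delta attribute bit is set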
* * Rationalize names a bit to correspond to the new XACL classes * */ #define STREAM_XACL_AIX_TEXT 1000 /* AIX string of acl_get */ #define STREAM_XACL_DARWIN_ACCESS 1001 /* Darwin (OSX) acl_t string of acl_to_text (POSIX acl) */ #define STREAM_XACL_FREEBSD_DEFAULT 1002 /* FreeBSD acl_t string of acl_to_text (POSIX acl) for default acls */ #define STREAM_XACL_FREEBSD_ACCESS 1003 /* FreeBSD acl_t string of acl_to_text (POSIX acl) for access acls */ #define STREAM_XACL_HPUX_ACL_ENTRY 1004 /* HPUX acl_entry string of acltostr (POSIX acl) */ #define STREAM_XACL_IRIX_DEFAULT 1005 /* IRIX acl_t string of acl_to_text (POSIX acl) for default acls */ #define STREAM_XACL_IRIX_ACCESS 1006 /* IRIX acl_t string of acl_to_text (POSIX acl) for access acls */ #define STREAM_XACL_LINUX_DEFAULT 1007 /* Linux acl_t string of acl_to_text (POSIX acl) for default acls */ #define STREAM_XACL_LINUX_ACCESS 1008 /* Linux acl_t string of acl_to_text (POSIX acl) for access acls */ #define STREAM_XACL_TRU64_DEFAULT 1009 /* Tru64 acl_t string of acl_to_text (POSIX acl) for default acls */ #define STREAM_XACL_TRU64_DEFAULT_DIR 1010 /* Tru64 acl_t string of acl_to_text (POSIX acl) for default acls */ #define STREAM_XACL_TRU64_ACCESS 1011 /* Tru64 acl_t string of acl_to_text (POSIX acl) for access acls */ #define STREAM_XACL_SOLARIS_POSIX 1012 /* Solaris aclent_t string of acltotext or acl_totext (POSIX acl) */ #define STREAM_XACL_SOLARIS_NFS4 1013 /* Solaris ace_t string of of acl_totext (NFSv4 or ZFS acl) */ #define STREAM_XACL_AFS_TEXT 1014 /* AFS string of pioctl */ #define STREAM_XACL_AIX_AIXC 1015 /* AIX string of aclx_printStr (POSIX acl) */ #define STREAM_XACL_AIX_NFS4 1016 /* AIX string of aclx_printStr (NFSv4 acl) */ #define STREAM_XACL_FREEBSD_NFS4 1017 /* FreeBSD acl_t string of acl_to_text (NFSv4 or ZFS acl) */ #define STREAM_XACL_HURD_DEFAULT 1018 /* GNU HURD acl_t string of acl_to_text (POSIX acl) for default acls */ #define STREAM_XACL_HURD_ACCESS 1019 /* GNU HURD acl_t string of acl_to_text (POSIX acl) for access acls */ #define STREAM_XACL_PLUGIN_ACL 1020 /* Plugin ACL data for plugin specific acls */ #define STREAM_XACL_GPFS_ACL_DEFAULT 1021 /* GPFS Default ACL data using gpfs_getacl()/gpfs_putacl() */ #define STREAM_XACL_GPFS_ACL_ACCESS 1022 /* GPFS Access ACL data using gpfs_getacl()/gpfs_putacl() */ #define STREAM_XACL_GPFS_XATTR 1987 /* GPFS XATTR data using gpfs_fgetattrs()/gpfs_fputattrs() */ #define STREAM_XACL_PLUGIN_XATTR 1988 /* Plugin XATTR data for plugin specific xattrs */ #define STREAM_XACL_HURD_XATTR 1989 /* GNU HURD extended attributes */ #define STREAM_XACL_IRIX_XATTR 1990 /* IRIX extended attributes */ #define STREAM_XACL_TRU64_XATTR 1991 /* TRU64 extended attributes */ #define STREAM_XACL_AIX_XATTR 1992 /* AIX extended attributes */ #define STREAM_XACL_OPENBSD_XATTR 1993 /* OpenBSD extended attributes */ #define STREAM_XACL_SOLARIS_SYS_XATTR 1994 /* Solaris extensible attributes or * otherwise named extended system attributes. */ #define STREAM_XACL_SOLARIS_XATTR 1995 /* Solaris extented attributes */ #define STREAM_XACL_DARWIN_XATTR 1996 /* Darwin (OSX) extended attributes */ #define STREAM_XACL_FREEBSD_XATTR 1997 /* FreeBSD extended attributes */ #define STREAM_XACL_LINUX_XATTR 1998 /* Linux specific attributes */ #define STREAM_XACL_NETBSD_XATTR 1999 /* NetBSD extended attributes */ /* WARNING!!! 
do not define more than 2047 of these old types */ #endif /* __BSTREAMS_H */ bacula-15.0.3/src/findlib/0000755000175000017500000000000014771010173015060 5ustar bsbuildbsbuildbacula-15.0.3/src/findlib/find.h0000644000175000017500000002250614771010173016156 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * File types as returned by find_files() * * Kern Sibbald MMI */ #ifndef __FILES_H #define __FILES_H #include "jcr.h" #include "fileopts.h" #include "bfile.h" #include "../filed/fd_plugins.h" #ifdef HAVE_DIRENT_H #include #endif #include #if !defined(HAVE_WIN32) || defined(HAVE_MINGW) #include #endif #if HAVE_UTIME_H #include #else struct utimbuf { long actime; long modtime; }; #endif #define MODE_RALL (S_IRUSR|S_IRGRP|S_IROTH) #include "lib/fnmatch.h" // #include "lib/enh_fnmatch.h" #ifndef HAVE_REGEX_H #include "lib/bregex.h" #else #include #endif /* For options FO_xxx values see src/fileopts.h */ struct s_included_file { struct s_included_file *next; uint64_t options; /* backup options */ uint32_t algo; /* compression algorithm. 4 letters stored as an interger */ int Dedup_level; /* Dedup level */ int Compress_level; /* compression level */ int len; /* length of fname */ int pattern; /* set if wild card pattern */ char VerifyOpts[20]; /* Options for verify */ char fname[1]; }; struct s_excluded_file { struct s_excluded_file *next; int len; char fname[1]; }; enum { state_none, state_options, state_include, state_error }; /* File options structure */ struct findFOPTS { uint64_t flags; /* options in bits */ uint32_t Compress_algo; /* compression algorithm. 
4 letters stored as an interger */ int Compress_level; /* compression level */ int Dedup_level; /* dedup level 0=None, 1=Global, 2=Client */ int strip_path; /* strip path count */ char VerifyOpts[MAX_FOPTS]; /* verify options */ char AccurateOpts[MAX_FOPTS]; /* accurate mode options */ char BaseJobOpts[MAX_FOPTS]; /* basejob mode options */ char *plugin; /* Plugin that handle this section */ alist regex; /* regex string(s) */ alist regexdir; /* regex string(s) for directories */ alist regexfile; /* regex string(s) for files */ alist wild; /* wild card strings */ alist wilddir; /* wild card strings for directories */ alist wildfile; /* wild card strings for files */ alist wildbase; /* wild card strings for basenames */ alist base; /* list of base names */ alist fstype; /* file system type limitation */ alist drivetype; /* drive type limitation */ }; /* This is either an include item or an exclude item */ struct findINCEXE { findFOPTS *current_opts; /* points to current options structure */ alist opts_list; /* options list */ dlist name_list; /* filename list -- holds dlistString */ dlist plugin_list; /* plugin list -- holds dlistString */ char *ignoredir; /* ignore directories with this file */ bool list_drives; /* list drives on win32 (File=/) */ }; /* * FileSet Resource * */ struct findFILESET { int state; findINCEXE *incexe; /* current item */ alist include_list; alist exclude_list; }; struct HFSPLUS_INFO { unsigned long length; /* Mandatory field */ char fndrinfo[32]; /* Finder Info */ off_t rsrclength; /* Size of resource fork */ }; /* * Definition of the find_files packet passed as the * first argument to the find_files callback subroutine. */ struct FF_PKT { char *top_fname; /* full filename before descending */ char *fname; /* full filename */ char *snap_fname; /* the path on the snapshot or fname */ char *link; /* link if file linked */ char *plugin; /* Current Options{Plugin=} name */ char *accurate_chksum; /* keep the checksum from the accurate database */ /* Specific snapshot part */ char *volume_path; /* volume path */ char *snapshot_path; /* snapshot path */ char *top_fname_save; POOLMEM *snap_top_fname; bool (*snapshot_convert_fct)(JCR *jcr, FF_PKT *ff, dlist *filelist, dlistString *node); bool root_of_volume; /* the root of a volume, like C:\ or C:\mount_point\ssd */ POOLMEM *fname_save; /* save when stripping path */ POOLMEM *link_save; /* save when stripping path */ POOLMEM *ignoredir_fname; /* used to ignore directories */ alist *allowed_backup_dirs; /* List of allowed directories with absolute paths */ alist *excluded_backup_dirs; /* List of excluded directories with absolute paths */ char *digest; /* set to file digest when the file is a hardlink */ struct stat statp; /* stat packet */ bool stat_update; /* Only file's metada needds to be updated */ uint32_t digest_len; /* set to the digest len when the file is a hardlink*/ int32_t digest_stream; /* set to digest type when the file is hardlink */ int32_t FileIndex; /* FileIndex of this file */ int32_t LinkFI; /* FileIndex of main hard linked file */ int32_t delta_seq; /* Delta Sequence number */ struct restore_object restore_obj; /* Info about restore object */ struct plugin_object plugin_obj; /* Plugin Object */ const plugin_metadata *plug_meta; /* Metadata packet provided by plugin */ struct f_link *linked; /* Set if this file is hard linked */ int type; /* FT_ type from above */ int ff_errno; /* errno */ BFILE bfd; /* Bacula file descriptor */ time_t save_time; /* start of incremental time */ bool accurate_found; /* 
Found in the accurate hash (valid after check_changes()) */ bool dereference; /* follow links (not implemented) */ bool null_output_device; /* using null output device */ bool incremental; /* incremental save */ bool no_read; /* Do not read this file when using Plugin */ char VerifyOpts[20]; char AccurateOpts[20]; char BaseJobOpts[20]; struct s_included_file *included_files_list; struct s_excluded_file *excluded_files_list; struct s_excluded_file *excluded_paths_list; findFILESET *fileset; int (*file_save)(JCR *, FF_PKT *, bool); /* User's callback */ int (*plugin_save)(JCR *, FF_PKT *, bool); /* User's callback */ bool (*check_fct)(JCR *, FF_PKT *); /* optionnal user fct to check file changes */ /* Values set by accept_file while processing Options */ uint64_t flags; /* backup options */ uint32_t Compress_algo; /* compression algorithm. 4 letters stored as an interger */ int Compress_level; /* compression level */ int Dedup_level; /* dedup level 0=None, 1=Global, 2=Client */ int strip_path; /* strip path count */ bool cmd_plugin; /* set if we have a command plugin */ bool opt_plugin; /* set if we have an option plugin */ rblist *mtab_list; /* List of mtab entries */ uint64_t last_fstype; /* cache last file system type */ char last_fstypename[32]; /* cache last file system type name */ alist fstypes; /* allowed file system types */ alist drivetypes; /* allowed drive types */ alist mount_points; /* Possible mount points to be snapshotted */ /* List of all hard linked files found */ struct f_link **linkhash; /* hard linked files */ /* Darwin specific things. * To avoid clutter, we always include rsrc_bfd and volhas_attrlist */ BFILE rsrc_bfd; /* fd for resource forks */ bool volhas_attrlist; /* Volume supports getattrlist() */ struct HFSPLUS_INFO hfsinfo; /* Finder Info and resource fork size */ }; typedef void (mtab_handler_t)(void *user_ctx, struct stat *st, const char *fstype, const char *mountpoint, const char *mntopts, const char *fsname); bool read_mtab(mtab_handler_t *mtab_handler, void *user_ctx); void dump_name_list(const char* file, int lineno, int lvl, const char *prefix, findFILESET *fileset); #include "protos.h" #endif /* __FILES_H */ bacula-15.0.3/src/findlib/protos.h0000644000175000017500000000743014771010173016563 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
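   An illustrative sketch of how the find.c entry points declared just below are
   typically strung together (an assumption about the call order; jcr, since_time,
   save_cb and plugin_cb are hypothetical caller-side names, while check_changes()
   is declared below among the find_one.c prototypes):

      FF_PKT *ff = init_find_files();
      set_find_options(ff, 1, since_time);              // incremental save since since_time
      set_find_changed_function(ff, check_changes);
      find_files(jcr, ff, save_cb, plugin_cb);          // walks the FileSet, calling save_cb per file
      term_find_files(ff);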
*/ /* * Prototypes for finlib directory of Bacula * */ /* from attribs.c */ bool check_directory_acl(char **last_dir, alist *dir_acl, const char *path); bool has_access(alist *uid, alist *gid, struct stat *statp); void encode_stat (char *buf, struct stat *statp, int stat_size, int32_t LinkFI, int data_stream); int decode_stat (char *buf, struct stat *statp, int stat_size, int32_t *LinkFI); int32_t decode_LinkFI (char *buf, struct stat *statp, int stat_size); int encode_attribsEx (JCR *jcr, char *attribsEx, FF_PKT *ff_pkt); bool set_attributes (JCR *jcr, ATTR *attr, BFILE *ofd); int select_data_stream(FF_PKT *ff_pkt); /* from create_file.c */ int create_file (JCR *jcr, ATTR *attr, BFILE *ofd, int replace); /* From find.c */ FF_PKT *init_find_files(); void set_find_snapshot_function(FF_PKT *ff, bool convert_path(JCR *jcr, FF_PKT *ff, dlist *filelist, dlistString *node)); void set_find_options(FF_PKT *ff, int incremental, time_t mtime); void set_find_changed_function(FF_PKT *ff, bool check_fct(JCR *jcr, FF_PKT *ff)); int find_files(JCR *jcr, FF_PKT *ff, int file_sub(JCR *, FF_PKT *ff_pkt, bool), int plugin_sub(JCR *, FF_PKT *ff_pkt, bool)); int match_files(JCR *jcr, FF_PKT *ff, int sub(JCR *, FF_PKT *ff_pkt, bool)); int term_find_files(FF_PKT *ff); bool is_in_fileset(FF_PKT *ff); bool accept_file(JCR *jcr, FF_PKT *ff); /* From match.c */ void init_include_exclude_files(FF_PKT *ff); void term_include_exclude_files(FF_PKT *ff); void add_fname_to_include_list(FF_PKT *ff, int prefixed, const char *fname); void add_fname_to_exclude_list(FF_PKT *ff, const char *fname); int file_is_excluded(FF_PKT *ff, const char *file); int file_is_included(FF_PKT *ff, const char *file); struct s_included_file *get_next_included_file(FF_PKT *ff, struct s_included_file *inc); /* From find_one.c */ int find_one_file(JCR *jcr, FF_PKT *ff, int handle_file(JCR *jcr, FF_PKT *ff_pkt, bool top_level), char *fname, char *snap_fname, dev_t parent_device, bool top_level); int term_find_one(FF_PKT *ff); bool has_file_changed(JCR *jcr, FF_PKT *ff_pkt); bool check_changes(JCR *jcr, FF_PKT *ff_pkt); void ff_pkt_set_link_digest(FF_PKT *ff_pkt, int32_t digest_stream, const char *digest, uint32_t len); /* From get_priv.c */ int enable_backup_privileges(JCR *jcr, int ignore_errors); /* from makepath.c */ bool makepath(ATTR *attr, const char *path, mode_t mode, mode_t parent_mode, uid_t owner, gid_t group, int keep_dir_modes); void free_path_list(JCR *jcr); bool path_list_lookup(JCR *jcr, char *fname); bool path_list_add(JCR *jcr, uint32_t len, char *fname); /* from fstype.c */ bool fstype(FF_PKT *ff_pkt, char *fs, int fslen); bool fstype(char *fname, FF_PKT *ff_pkt, char *fs, int fslen); bool check_current_fs(char *fname, FF_PKT *ff, const char *fstype_name); bool check_current_fs(char *fname, FF_PKT *ff, uint64_t fstype_magic); /* from drivetype.c */ bool drivetype(const char *fname, char *fs, int fslen); /* from bfile.c -- see bfile.h */ /* from namedpipe.c -- see namedpipe.h */ bacula-15.0.3/src/findlib/bfile.c0000644000175000017500000007777614771010173016335 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. 
This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Bacula low level File I/O routines. This routine simulates * open(), read(), write(), and close(), but using native routines. * I.e. on Windows, we use Windows APIs. * * Kern Sibbald, April MMIII * */ #include "bacula.h" #include "find.h" const int dbglvl = 200; int (*plugin_bopen)(BFILE *bfd, const char *fname, uint64_t flags, mode_t mode) = NULL; int (*plugin_bclose)(BFILE *bfd) = NULL; ssize_t (*plugin_bread)(BFILE *bfd, void *buf, size_t count) = NULL; ssize_t (*plugin_bwrite)(BFILE *bfd, void *buf, size_t count) = NULL; boffset_t (*plugin_blseek)(BFILE *bfd, boffset_t offset, int whence) = NULL; #ifdef HAVE_DARWIN_OS #include #endif #if !defined(HAVE_FDATASYNC) #define fdatasync(fd) #endif #ifdef HAVE_WIN32 void pause_msg(const char *file, const char *func, int line, const char *msg) { char buf[1000]; if (msg) { bsnprintf(buf, sizeof(buf), "%s:%s:%d %s", file, func, line, msg); } else { bsnprintf(buf, sizeof(buf), "%s:%s:%d", file, func, line); } MessageBox(NULL, buf, "Pause", MB_OK); } #endif /* =============================================================== * * U N I X AND W I N D O W S * * =============================================================== */ bool is_win32_stream(int stream) { switch (stream & STREAMMASK_TYPE) { case STREAM_WIN32_DATA: case STREAM_WIN32_GZIP_DATA: case STREAM_WIN32_COMPRESSED_DATA: case STREAM_ENCRYPTED_WIN32_DATA: case STREAM_ENCRYPTED_WIN32_GZIP_DATA: case STREAM_ENCRYPTED_WIN32_COMPRESSED_DATA: return true; } return false; } const char *stream_to_ascii(int stream) { static char buf[20]; switch (stream & STREAMMASK_TYPE) { case STREAM_UNIX_ATTRIBUTES: return _("Unix attributes"); case STREAM_UNIX_ATTRIBUTE_UPDATE: return _("Unix attributes update"); case STREAM_FILE_DATA: return _("File data"); case STREAM_MD5_DIGEST: return _("MD5 digest"); case STREAM_GZIP_DATA: return _("GZIP data"); case STREAM_COMPRESSED_DATA: return _("Compressed data"); case STREAM_UNIX_ATTRIBUTES_EX: return _("Extended attributes"); case STREAM_SPARSE_DATA: return _("Sparse data"); case STREAM_SPARSE_GZIP_DATA: return _("GZIP sparse data"); case STREAM_SPARSE_COMPRESSED_DATA: return _("Compressed sparse data"); case STREAM_PROGRAM_NAMES: return _("Program names"); case STREAM_PROGRAM_DATA: return _("Program data"); case STREAM_SHA1_DIGEST: return _("SHA1 digest"); case STREAM_WIN32_DATA: return _("Win32 data"); case STREAM_WIN32_GZIP_DATA: return _("Win32 GZIP data"); case STREAM_WIN32_COMPRESSED_DATA: return _("Win32 compressed data"); case STREAM_MACOS_FORK_DATA: return _("MacOS Fork data"); case STREAM_HFSPLUS_ATTRIBUTES: return _("HFS+ attribs"); case STREAM_UNIX_ACCESS_ACL: return _("Standard Unix ACL attribs"); case STREAM_UNIX_DEFAULT_ACL: return _("Default Unix ACL attribs"); case STREAM_SHA256_DIGEST: return _("SHA256 digest"); case STREAM_SHA512_DIGEST: return _("SHA512 digest"); case STREAM_XXHASH64_DIGEST: return _("XXHASH64 digest"); case STREAM_XXH3_64_DIGEST: return _("XXH3_64 digest"); case STREAM_XXH3_128_DIGEST: return _("XXH3_128 digest"); case STREAM_SIGNED_DIGEST: return _("Signed digest"); case STREAM_ENCRYPTED_FILE_DATA: return _("Encrypted File data"); case STREAM_ENCRYPTED_WIN32_DATA: return _("Encrypted Win32 data"); case STREAM_ENCRYPTED_SESSION_DATA: return _("Encrypted session data"); case STREAM_ENCRYPTED_FILE_GZIP_DATA: return _("Encrypted GZIP data"); case STREAM_ENCRYPTED_FILE_COMPRESSED_DATA: return 
_("Encrypted compressed data"); case STREAM_ENCRYPTED_WIN32_GZIP_DATA: return _("Encrypted Win32 GZIP data"); case STREAM_ENCRYPTED_WIN32_COMPRESSED_DATA: return _("Encrypted Win32 Compressed data"); case STREAM_ENCRYPTED_MACOS_FORK_DATA: return _("Encrypted MacOS fork data"); case STREAM_PLUGIN_NAME: return _("Plugin Name"); case STREAM_PLUGIN_DATA: return _("Plugin Data"); case STREAM_RESTORE_OBJECT: return _("Restore Object"); case STREAM_PLUGIN_OBJECT: return _("Plugin Object"); case STREAM_XACL_AIX_TEXT: return _("AIX ACL attribs"); case STREAM_XACL_DARWIN_ACCESS: return _("Darwin ACL attribs"); case STREAM_XACL_FREEBSD_DEFAULT: return _("FreeBSD Default ACL attribs"); case STREAM_XACL_FREEBSD_ACCESS: return _("FreeBSD Access ACL attribs"); case STREAM_XACL_HPUX_ACL_ENTRY: return _("HPUX ACL attribs"); case STREAM_XACL_IRIX_DEFAULT: return _("Irix Default ACL attribs"); case STREAM_XACL_IRIX_ACCESS: return _("Irix Access ACL attribs"); case STREAM_XACL_LINUX_DEFAULT: return _("Linux Default ACL attribs"); case STREAM_XACL_LINUX_ACCESS: return _("Linux Access ACL attribs"); case STREAM_XACL_TRU64_DEFAULT: return _("TRU64 Default ACL attribs"); case STREAM_XACL_TRU64_ACCESS: return _("TRU64 Access ACL attribs"); case STREAM_XACL_SOLARIS_POSIX: return _("Solaris POSIX ACL attribs"); case STREAM_XACL_SOLARIS_NFS4: return _("Solaris NFSv4/ZFS ACL attribs"); case STREAM_XACL_AFS_TEXT: return _("AFS ACL attribs"); case STREAM_XACL_AIX_AIXC: return _("AIX POSIX ACL attribs"); case STREAM_XACL_AIX_NFS4: return _("AIX NFSv4 ACL attribs"); case STREAM_XACL_FREEBSD_NFS4: return _("FreeBSD NFSv4/ZFS ACL attribs"); case STREAM_XACL_HURD_DEFAULT: return _("GNU Hurd Default ACL attribs"); case STREAM_XACL_HURD_ACCESS: return _("GNU Hurd Access ACL attribs"); case STREAM_XACL_HURD_XATTR: return _("GNU Hurd Extended attribs"); case STREAM_XACL_IRIX_XATTR: return _("IRIX Extended attribs"); case STREAM_XACL_TRU64_XATTR: return _("TRU64 Extended attribs"); case STREAM_XACL_AIX_XATTR: return _("AIX Extended attribs"); case STREAM_XACL_OPENBSD_XATTR: return _("OpenBSD Extended attribs"); case STREAM_XACL_SOLARIS_SYS_XATTR: return _("Solaris Extensible attribs or System Extended attribs"); case STREAM_XACL_SOLARIS_XATTR: return _("Solaris Extended attribs"); case STREAM_XACL_DARWIN_XATTR: return _("Darwin Extended attribs"); case STREAM_XACL_FREEBSD_XATTR: return _("FreeBSD Extended attribs"); case STREAM_XACL_LINUX_XATTR: return _("Linux Extended attribs"); case STREAM_XACL_NETBSD_XATTR: return _("NetBSD Extended attribs"); case STREAM_PLUGIN_META_BLOB: return _("Plugin Metadata Blob"); case STREAM_PLUGIN_META_CATALOG: return _("Plugin Metadata Catalog"); case STREAM_FILEEVENT: return _("FileEvent"); default: sprintf(buf, "%d", stream); return (const char *)buf; } } /** * Convert a 64 bit little endian to a big endian */ void int64_LE2BE(int64_t* pBE, const int64_t v) { /* convert little endian to big endian */ if (htonl(1) != 1L) { /* no work if on little endian machine */ memcpy(pBE, &v, sizeof(int64_t)); } else { int i; uint8_t rv[sizeof(int64_t)]; uint8_t *pv = (uint8_t *) &v; for (i = 0; i < 8; i++) { rv[i] = pv[7 - i]; } memcpy(pBE, &rv, sizeof(int64_t)); } } /** * Convert a 32 bit little endian to a big endian */ void int32_LE2BE(int32_t* pBE, const int32_t v) { /* convert little endian to big endian */ if (htonl(1) != 1L) { /* no work if on little endian machine */ memcpy(pBE, &v, sizeof(int32_t)); } else { int i; uint8_t rv[sizeof(int32_t)]; uint8_t *pv = (uint8_t *) &v; for (i = 0; i < 4; i++) { 
rv[i] = pv[3 - i]; } memcpy(pBE, &rv, sizeof(int32_t)); } } /** * Read a BackupRead block and pull out the file data */ bool processWin32BackupAPIBlock (BFILE *bfd, void *pBuffer, ssize_t dwSize) { int64_t len = dwSize; char *dat=(char *)pBuffer; int64_t use_len; while (len>0 && bfd->win32filter.have_data(&dat, &len, &use_len)) { if (bwrite(bfd, dat, use_len) != (ssize_t)use_len) { return false; } dat+=use_len; } return true; } /* =============================================================== * * W I N D O W S * * =============================================================== */ #if defined(HAVE_WIN32) void unix_name_to_win32(POOLMEM **win32_name, const char *name); extern "C" HANDLE get_osfhandle(int fd); void binit(BFILE *bfd) { bmemset((void *)bfd, 0, sizeof(BFILE)); bfd->fid = -1; bfd->mode = BF_CLOSED; bfd->use_backup_api = have_win32_api(); bfd->cmd_plugin = false; } /* * Enables using the Backup API (win32_data). * Returns 1 if function worked * Returns 0 if failed (i.e. do not have Backup API on this machine) */ bool set_win32_backup(BFILE *bfd) { /* We enable if possible here */ bfd->use_backup_api = have_win32_api(); return bfd->use_backup_api; } void set_fattrs(BFILE *bfd, struct stat *statp) { bfd->fattrs = statp->st_fattrs; Dmsg1(200, "set_fattrs 0x%x\n", bfd->fattrs); } bool set_portable_backup(BFILE *bfd) { bfd->use_backup_api = false; return true; } bool set_cmd_plugin(BFILE *bfd, JCR *jcr) { bfd->cmd_plugin = true; bfd->jcr = jcr; return true; } /* * Return 1 if we are NOT using Win32 BackupWrite() * return 0 if are */ bool is_portable_backup(BFILE *bfd) { return !bfd->use_backup_api; } bool is_plugin_data(BFILE *bfd) { return bfd->cmd_plugin; } bool have_win32_api() { return p_BackupRead && p_BackupWrite; } /* * Return true if we support the stream * false if we do not support the stream * * This code is running under Win32, so we * do not need #ifdef on MACOS ... 
*/ bool is_restore_stream_supported(int stream) { switch (stream) { /* Streams known not to be supported */ #ifndef HAVE_LIBZ case STREAM_GZIP_DATA: case STREAM_SPARSE_GZIP_DATA: case STREAM_WIN32_GZIP_DATA: #endif #if !defined(HAVE_LZO) && !defined(HAVE_ZSTD) case STREAM_COMPRESSED_DATA: case STREAM_SPARSE_COMPRESSED_DATA: case STREAM_WIN32_COMPRESSED_DATA: case STREAM_ENCRYPTED_FILE_COMPRESSED_DATA: case STREAM_ENCRYPTED_WIN32_COMPRESSED_DATA: #endif case STREAM_MACOS_FORK_DATA: case STREAM_HFSPLUS_ATTRIBUTES: case STREAM_ENCRYPTED_MACOS_FORK_DATA: return false; /* Known streams */ #ifdef HAVE_LIBZ case STREAM_GZIP_DATA: case STREAM_SPARSE_GZIP_DATA: case STREAM_WIN32_GZIP_DATA: #endif #if defined(HAVE_LZO) || defined(HAVE_ZSTD) case STREAM_COMPRESSED_DATA: case STREAM_SPARSE_COMPRESSED_DATA: case STREAM_WIN32_COMPRESSED_DATA: #endif case STREAM_WIN32_DATA: case STREAM_UNIX_ATTRIBUTES: case STREAM_FILE_DATA: case STREAM_MD5_DIGEST: case STREAM_UNIX_ATTRIBUTES_EX: case STREAM_SPARSE_DATA: case STREAM_PROGRAM_NAMES: case STREAM_PROGRAM_DATA: case STREAM_SHA1_DIGEST: #ifdef HAVE_SHA2 case STREAM_SHA256_DIGEST: case STREAM_SHA512_DIGEST: #endif #ifdef HAVE_CRYPTO case STREAM_SIGNED_DIGEST: case STREAM_ENCRYPTED_FILE_DATA: case STREAM_ENCRYPTED_FILE_GZIP_DATA: case STREAM_ENCRYPTED_WIN32_DATA: case STREAM_ENCRYPTED_WIN32_GZIP_DATA: #if defined(HAVE_LZO) || defined(HAVE_ZSTD) case STREAM_ENCRYPTED_FILE_COMPRESSED_DATA: case STREAM_ENCRYPTED_WIN32_COMPRESSED_DATA: #endif #endif /* !HAVE_CRYPTO */ case 0: /* compatibility with old tapes */ return true; } return false; } HANDLE bget_handle(BFILE *bfd) { return bfd->fh; } /* * The following code was contributed by Graham Keeling from his * burp project. August 2014 */ static int encrypt_bopen(BFILE *bfd, const char *fname, uint64_t flags, mode_t mode) { ULONG ulFlags = 0; POOLMEM *win32_fname; POOLMEM *win32_fname_wchar; bfd->mode = BF_CLOSED; bfd->fid = -1; if (!(p_OpenEncryptedFileRawA || p_OpenEncryptedFileRawW)) { Dmsg0(50, "No OpenEncryptedFileRawA and no OpenEncryptedFileRawW APIs!!!\n"); return -1; } /* Convert to Windows path format */ win32_fname = get_pool_memory(PM_FNAME); win32_fname_wchar = get_pool_memory(PM_FNAME); unix_name_to_win32(&win32_fname, (char *)fname); if (p_CreateFileW && p_MultiByteToWideChar) { make_win32_path_UTF8_2_wchar(&win32_fname_wchar, fname); } if ((flags & O_CREAT) || (flags & O_WRONLY)) { ulFlags = CREATE_FOR_IMPORT | OVERWRITE_HIDDEN; if (bfd->fattrs & FILE_ATTRIBUTE_DIRECTORY) { mkdir(fname, 0777); ulFlags |= CREATE_FOR_DIR; } bfd->mode = BF_WRITE; Dmsg0(200, "encrypt_bopen for write.\n"); } else { /* Open existing for read */ ulFlags = CREATE_FOR_EXPORT; bfd->mode = BF_READ; Dmsg0(200, "encrypt_bopen for read.\n"); } if (p_OpenEncryptedFileRawW && p_MultiByteToWideChar) { /* unicode open */ if (p_OpenEncryptedFileRawW((LPCWSTR)win32_fname_wchar, ulFlags, &(bfd->pvContext))) { bfd->mode = BF_CLOSED; errno = b_errno_win32; bfd->berrno = b_errno_win32; } } else { /* ascii open */ if (p_OpenEncryptedFileRawA(win32_fname, ulFlags, &(bfd->pvContext))) { bfd->mode = BF_CLOSED; errno = b_errno_win32; bfd->berrno = b_errno_win32; } } free_pool_memory(win32_fname_wchar); free_pool_memory(win32_fname); bfd->fid = (bfd->mode == BF_CLOSED) ? -1 : 0; return bfd->mode==BF_CLOSED ? 
-1: 1; } static int encrypt_bclose(BFILE *bfd) { Dmsg0(200, "encrypt_bclose\n"); if (p_CloseEncryptedFileRaw) { p_CloseEncryptedFileRaw(bfd->pvContext); } bfd->pvContext = NULL; bfd->mode = BF_CLOSED; bfd->fattrs = 0; bfd->fid = -1; return 0; } /* Windows */ int bopen(BFILE *bfd, const char *fname, uint64_t flags, mode_t mode) { POOLMEM *win32_fname_wchar; DWORD dwaccess, dwflags, dwshare; /* Do not pass this descriptor to a sub process */ SECURITY_ATTRIBUTES sec; sec.nLength = sizeof(sec); sec.lpSecurityDescriptor = NULL; sec.bInheritHandle = false; if (bfd->fattrs & FILE_ATTRIBUTE_ENCRYPTED) { return encrypt_bopen(bfd, fname, flags, mode); } /* Convert to Windows path format */ win32_fname_wchar = get_pool_memory(PM_FNAME); if (bfd->cmd_plugin && plugin_bopen) { int rtnstat; Dmsg1(50, "call plugin_bopen fname=%s\n", fname); rtnstat = plugin_bopen(bfd, fname, flags, mode); Dmsg1(50, "return from plugin_bopen status=%d\n", rtnstat); if (rtnstat >= 0) { if (flags & O_CREAT || flags & O_WRONLY) { /* Open existing for write */ Dmsg1(50, "plugin_open for write OK file=%s.\n", fname); bfd->mode = BF_WRITE; } else { Dmsg1(50, "plugin_open for read OK file=%s.\n", fname); bfd->mode = BF_READ; } bfd->fid = -1; /* The file descriptor is invalid */ } else { bfd->mode = BF_CLOSED; bfd->fid = -1; Dmsg1(10, "==== plugin_bopen returned bad status=%d\n", rtnstat); } free_pool_memory(win32_fname_wchar); return bfd->mode == BF_CLOSED ? -1 : 1; } Dmsg0(100, "=== NO plugin\n"); if (!(p_CreateFileA || p_CreateFileW)) { Dmsg0(50, "No CreateFileA and no CreateFileW!!!!!\n"); return 0; } if (p_CreateFileW && p_MultiByteToWideChar) { make_win32_path_UTF8_2_wchar(&win32_fname_wchar, fname); } if (flags & O_CREAT) { /* Create */ if (bfd->use_backup_api) { dwaccess = GENERIC_WRITE|FILE_ALL_ACCESS|WRITE_OWNER|WRITE_DAC|ACCESS_SYSTEM_SECURITY; dwflags = FILE_FLAG_BACKUP_SEMANTICS; } else { dwaccess = GENERIC_WRITE; dwflags = 0; } // unicode open for create write Dmsg1(100, "Create CreateFileW=%ls\n", win32_fname_wchar); bfd->fh = p_CreateFileW((LPCWSTR)win32_fname_wchar, dwaccess, /* Requested access */ 0, /* Shared mode */ &sec, /* SecurityAttributes */ CREATE_ALWAYS, /* CreationDisposition */ dwflags, /* Flags and attributes */ NULL); /* TemplateFile */ bfd->mode = BF_WRITE; } else if (flags & O_WRONLY) { /* Open existing for write */ if (bfd->use_backup_api) { dwaccess = GENERIC_READ|GENERIC_WRITE|WRITE_OWNER|WRITE_DAC; if (bfd->fattrs & FILE_ATTRIBUTE_DIRECTORY) { /* Avoid issues with directories * ERR=The process cannot access the file because it is being used by another */ dwshare = FILE_SHARE_READ|FILE_SHARE_WRITE|FILE_SHARE_DELETE; } else { dwshare = 0; } /* If deduped, we do not want to open the reparse point */ if (bfd->fattrs & FILE_ATTRIBUTE_DEDUP) { /* Maybe check for bfd->reparse_point */ dwflags = FILE_FLAG_BACKUP_SEMANTICS; } else { dwflags = FILE_FLAG_BACKUP_SEMANTICS | FILE_FLAG_OPEN_REPARSE_POINT; } } else { dwaccess = GENERIC_READ|GENERIC_WRITE; dwflags = 0; dwshare = 0; } // unicode open for open existing write Dmsg1(100, "Write only CreateFileW=%ls\n", win32_fname_wchar); bfd->fh = p_CreateFileW((LPCWSTR)win32_fname_wchar, dwaccess, /* Requested access */ dwshare, /* Shared mode */ &sec, /* SecurityAttributes */ OPEN_EXISTING, /* CreationDisposition */ dwflags, /* Flags and attributes */ NULL); /* TemplateFile */ bfd->mode = BF_WRITE; } else { /* Read */ if (bfd->use_backup_api) { dwaccess = GENERIC_READ|READ_CONTROL|ACCESS_SYSTEM_SECURITY; dwflags = FILE_FLAG_BACKUP_SEMANTICS | 
FILE_FLAG_SEQUENTIAL_SCAN | FILE_FLAG_OPEN_REPARSE_POINT; dwshare = FILE_SHARE_READ|FILE_SHARE_WRITE|FILE_SHARE_DELETE; } else { dwaccess = GENERIC_READ; dwflags = 0; dwshare = FILE_SHARE_READ|FILE_SHARE_WRITE; } // unicode open for open existing read Dmsg1(100, "Read CreateFileW=%ls\n", win32_fname_wchar); bfd->fh = p_CreateFileW((LPCWSTR)win32_fname_wchar, dwaccess, /* Requested access */ dwshare, /* Share modes */ &sec, /* SecurityAttributes */ OPEN_EXISTING, /* CreationDisposition */ dwflags, /* Flags and attributes */ NULL); /* TemplateFile */ bfd->mode = BF_READ; } if (bfd->fh == INVALID_HANDLE_VALUE) { berrno be; bfd->lerror = GetLastError(); bfd->berrno = b_errno_win32; errno = b_errno_win32; bfd->mode = BF_CLOSED; Dmsg1(100, "Open failed: %s\n", be.bstrerror()); } bfd->block = 0; bfd->total_bytes = 0; bfd->errmsg = NULL; bfd->lpContext = NULL; bfd->win32filter.init(); free_pool_memory(win32_fname_wchar); bfd->fid = (bfd->mode == BF_CLOSED) ? -1 : 0; return bfd->mode == BF_CLOSED ? -1 : 1; } /* * Returns 0 on success * -1 on error */ /* Windows */ int bclose(BFILE *bfd) { int stat = 0; if (bfd->mode == BF_CLOSED) { Dmsg0(50, "=== BFD already closed.\n"); return 0; } if (bfd->cmd_plugin && plugin_bclose) { stat = plugin_bclose(bfd); Dmsg0(50, "==== BFD closed!!!\n"); goto all_done; } if (bfd->fattrs & FILE_ATTRIBUTE_ENCRYPTED) { return encrypt_bclose(bfd); } /* * We need to tell the API to release the buffer it * allocated in lpContext. We do so by calling the * API one more time, but with the Abort bit set. */ if (bfd->use_backup_api && bfd->mode == BF_READ) { BYTE buf[10]; if (bfd->lpContext && !p_BackupRead(bfd->fh, buf, /* buffer */ (DWORD)0, /* bytes to read */ &bfd->rw_bytes, /* bytes read */ 1, /* Abort */ 1, /* ProcessSecurity */ &bfd->lpContext)) { /* Read context */ errno = b_errno_win32; stat = -1; } } else if (bfd->use_backup_api && bfd->mode == BF_WRITE) { BYTE buf[10]; if (bfd->lpContext && !p_BackupWrite(bfd->fh, buf, /* buffer */ (DWORD)0, /* bytes to read */ &bfd->rw_bytes, /* bytes written */ 1, /* Abort */ 1, /* ProcessSecurity */ &bfd->lpContext)) { /* Write context */ errno = b_errno_win32; stat = -1; } } if (!CloseHandle(bfd->fh)) { stat = -1; errno = b_errno_win32; } all_done: if (bfd->errmsg) { free_pool_memory(bfd->errmsg); bfd->errmsg = NULL; } bfd->mode = BF_CLOSED; bfd->fattrs = 0; bfd->fid = -1; bfd->lpContext = NULL; bfd->cmd_plugin = false; return stat; } /* Returns: bytes read on success * 0 on EOF * -1 on error */ /* Windows */ ssize_t bread(BFILE *bfd, void *buf, size_t count) { bfd->rw_bytes = 0; if (bfd->cmd_plugin && plugin_bread) { return plugin_bread(bfd, buf, count); } if (bfd->use_backup_api) { if (!p_BackupRead(bfd->fh, (BYTE *)buf, count, &bfd->rw_bytes, 0, /* no Abort */ 1, /* Process Security */ &bfd->lpContext)) { /* Context */ berrno be; bfd->lerror = GetLastError(); bfd->berrno = b_errno_win32; errno = b_errno_win32; Dmsg1(100, "Read failed: %s\n", be.bstrerror()); return -1; } } else { if (!ReadFile(bfd->fh, buf, count, &bfd->rw_bytes, NULL)) { bfd->lerror = GetLastError(); bfd->berrno = b_errno_win32; errno = b_errno_win32; return -1; } } bfd->block++; if (bfd->rw_bytes > 0) { bfd->total_bytes += bfd->rw_bytes; } return (ssize_t)bfd->rw_bytes; } /* Windows */ ssize_t bwrite(BFILE *bfd, void *buf, size_t count) { bfd->rw_bytes = 0; if (bfd->cmd_plugin && plugin_bwrite) { return plugin_bwrite(bfd, buf, count); } if (bfd->use_backup_api) { if (!p_BackupWrite(bfd->fh, (BYTE *)buf, count, &bfd->rw_bytes, 0, /* No abort */ 1, /* Process 
Security */ &bfd->lpContext)) { /* Context */ berrno be; bfd->lerror = GetLastError(); bfd->berrno = b_errno_win32; errno = b_errno_win32; Dmsg1(100, "Write failed: %s\n", be.bstrerror()); return -1; } } else { if (!WriteFile(bfd->fh, buf, count, &bfd->rw_bytes, NULL)) { bfd->lerror = GetLastError(); bfd->berrno = b_errno_win32; errno = b_errno_win32; return -1; } } bfd->block++; if (bfd->rw_bytes > 0) { bfd->total_bytes += bfd->rw_bytes; } return (ssize_t)bfd->rw_bytes; } /* Windows */ bool is_bopen(BFILE *bfd) { return bfd->mode != BF_CLOSED; } /* Windows */ boffset_t blseek(BFILE *bfd, boffset_t offset, int whence) { LONG offset_low = (LONG)offset; LONG offset_high = (LONG)(offset >> 32); DWORD dwResult; if (bfd->cmd_plugin && plugin_blseek) { return plugin_blseek(bfd, offset, whence); } dwResult = SetFilePointer(bfd->fh, offset_low, &offset_high, whence); if (dwResult == INVALID_SET_FILE_POINTER && GetLastError() != NO_ERROR) { return (boffset_t)-1; } return ((boffset_t)offset_high << 32) | dwResult; } #else /* Unix systems */ /* =============================================================== * * U N I X * * =============================================================== */ /* Unix */ void binit(BFILE *bfd) { bmemset(bfd, 0, sizeof(BFILE)); bfd->fid = -1; } /* Unix */ bool have_win32_api() { return false; /* no can do */ } /* * Enables using the Backup API (win32_data). * Returns true if function worked * Returns false if failed (i.e. do not have Backup API on this machine) */ /* Unix */ bool set_win32_backup(BFILE *bfd) { return false; /* no can do */ } /* Unix */ bool set_portable_backup(BFILE *bfd) { return true; /* no problem */ } bool is_plugin_data(BFILE *bfd) { return bfd->cmd_plugin; } /* * Return true if we are writing in portable format * return false if not */ /* Unix */ bool is_portable_backup(BFILE *bfd) { return true; /* portable by definition */ } /* Unix */ bool set_prog(BFILE *bfd, char *prog, JCR *jcr) { return false; } /* Unix */ bool set_cmd_plugin(BFILE *bfd, JCR *jcr) { bfd->cmd_plugin = true; bfd->jcr = jcr; return true; } /* * This code is running on a non-Win32 machine */ /* Unix */ bool is_restore_stream_supported(int stream) { /* No Win32 backup on this machine */ switch (stream) { #ifndef HAVE_LIBZ case STREAM_GZIP_DATA: case STREAM_SPARSE_GZIP_DATA: case STREAM_WIN32_GZIP_DATA: #endif #if !defined(HAVE_LZO) && !defined(HAVE_ZSTD) case STREAM_COMPRESSED_DATA: case STREAM_SPARSE_COMPRESSED_DATA: case STREAM_WIN32_COMPRESSED_DATA: case STREAM_ENCRYPTED_FILE_COMPRESSED_DATA: case STREAM_ENCRYPTED_WIN32_COMPRESSED_DATA: #endif #ifndef HAVE_DARWIN_OS case STREAM_MACOS_FORK_DATA: case STREAM_HFSPLUS_ATTRIBUTES: #endif return false; /* Known streams */ #ifdef HAVE_LIBZ case STREAM_GZIP_DATA: case STREAM_SPARSE_GZIP_DATA: case STREAM_WIN32_GZIP_DATA: #endif #if defined(HAVE_LZO) || defined(HAVE_ZSTD) case STREAM_COMPRESSED_DATA: case STREAM_SPARSE_COMPRESSED_DATA: case STREAM_WIN32_COMPRESSED_DATA: case STREAM_ENCRYPTED_FILE_COMPRESSED_DATA: case STREAM_ENCRYPTED_WIN32_COMPRESSED_DATA: #endif case STREAM_WIN32_DATA: case STREAM_UNIX_ATTRIBUTES: case STREAM_FILE_DATA: case STREAM_MD5_DIGEST: case STREAM_UNIX_ATTRIBUTES_EX: case STREAM_SPARSE_DATA: case STREAM_PROGRAM_NAMES: case STREAM_PROGRAM_DATA: case STREAM_SHA1_DIGEST: #ifdef HAVE_SHA2 case STREAM_SHA256_DIGEST: case STREAM_SHA512_DIGEST: #endif #ifdef HAVE_CRYPTO case STREAM_SIGNED_DIGEST: case STREAM_ENCRYPTED_FILE_DATA: case STREAM_ENCRYPTED_FILE_GZIP_DATA: case STREAM_ENCRYPTED_WIN32_DATA: case 
STREAM_ENCRYPTED_WIN32_GZIP_DATA: #endif #ifdef HAVE_DARWIN_OS case STREAM_MACOS_FORK_DATA: case STREAM_HFSPLUS_ATTRIBUTES: #ifdef HAVE_CRYPTO case STREAM_ENCRYPTED_MACOS_FORK_DATA: #endif /* HAVE_CRYPTO */ #endif /* HAVE_DARWIN_OS */ case 0: /* compatibility with old tapes */ return true; } return false; } /* Unix */ int bopen(BFILE *bfd, const char *fname, uint64_t flags, mode_t mode) { if (bfd->cmd_plugin && plugin_bopen) { Dmsg1(400, "call plugin_bopen fname=%s\n", fname); bfd->fid = plugin_bopen(bfd, fname, flags, mode); Dmsg2(400, "Plugin bopen fid=%d file=%s\n", bfd->fid, fname); return bfd->fid; } /* Normal file open */ Dmsg1(dbglvl, "open file %s\n", fname); /* We use fnctl to set O_NOATIME if requested to avoid open error */ bfd->fid = open(fname, (flags | O_CLOEXEC) & ~O_NOATIME, mode); /* Set O_NOATIME if possible */ if (bfd->fid != -1 && flags & O_NOATIME) { int oldflags = fcntl(bfd->fid, F_GETFL, 0); if (oldflags == -1) { bfd->berrno = errno; close(bfd->fid); bfd->fid = -1; } else { int ret = fcntl(bfd->fid, F_SETFL, oldflags | O_NOATIME); /* EPERM means setting O_NOATIME was not allowed */ if (ret == -1 && errno != EPERM) { bfd->berrno = errno; close(bfd->fid); bfd->fid = -1; } } } bfd->berrno = errno; bfd->m_flags = flags; bfd->block = 0; bfd->total_bytes = 0; Dmsg1(400, "Open file %d\n", bfd->fid); errno = bfd->berrno; bfd->win32filter.init(); #if defined(HAVE_POSIX_FADVISE) && defined(POSIX_FADV_WILLNEED) /* If not RDWR or WRONLY must be Read Only */ if (bfd->fid != -1 && !(flags & (O_RDWR|O_WRONLY))) { int stat = posix_fadvise(bfd->fid, 0, 0, POSIX_FADV_WILLNEED); Dmsg3(400, "Did posix_fadvise WILLNEED on %s fid=%d stat=%d\n", fname, bfd->fid, stat); } #endif return bfd->fid; } #ifdef HAVE_DARWIN_OS /* Open the resource fork of a file. 
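 * On Darwin the fork is reached by appending the suffix in
 * _PATH_RSRCFORKSPEC (normally "/..namedfork/rsrc") to the regular file
 * name, so opening "/Users/joe/file" really opens
 * "/Users/joe/file/..namedfork/rsrc" through the ordinary bopen() above;
 * the example path is illustrative only.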
*/ int bopen_rsrc(BFILE *bfd, const char *fname, uint64_t flags, mode_t mode) { POOLMEM *rsrc_fname; rsrc_fname = get_pool_memory(PM_FNAME); pm_strcpy(rsrc_fname, fname); pm_strcat(rsrc_fname, _PATH_RSRCFORKSPEC); bopen(bfd, rsrc_fname, flags, mode); free_pool_memory(rsrc_fname); return bfd->fid; } #else /* Unix */ /* Unix */ int bopen_rsrc(BFILE *bfd, const char *fname, uint64_t flags, mode_t mode) { return -1; } #endif /* Unix */ int bclose(BFILE *bfd) { int stat; Dmsg2(400, "Close bfd=%p file %d\n", bfd, bfd->fid); if (bfd->fid == -1) { return 0; } if (bfd->cmd_plugin && plugin_bclose) { stat = plugin_bclose(bfd); bfd->fid = -1; bfd->cmd_plugin = false; } #if defined(HAVE_POSIX_FADVISE) && defined(POSIX_FADV_DONTNEED) /* If not RDWR or WRONLY must be Read Only */ if (!(bfd->m_flags & (O_RDWR|O_WRONLY))) { fdatasync(bfd->fid); /* sync the file */ /* Tell OS we don't need it any more */ posix_fadvise(bfd->fid, 0, 0, POSIX_FADV_DONTNEED); Dmsg1(400, "Did posix_fadvise DONTNEED on fid=%d\n", bfd->fid); } #endif /* Close normal file */ stat = close(bfd->fid); bfd->berrno = errno; bfd->fid = -1; bfd->cmd_plugin = false; return stat; } /* Unix */ ssize_t bread(BFILE *bfd, void *buf, size_t count) { ssize_t stat; if (bfd->cmd_plugin && plugin_bread) { return plugin_bread(bfd, buf, count); } stat = read(bfd->fid, buf, count); bfd->berrno = errno; bfd->block++; if (stat > 0) { bfd->total_bytes += stat; } return stat; } /* Unix */ ssize_t bwrite(BFILE *bfd, void *buf, size_t count) { ssize_t stat; if (bfd->cmd_plugin && plugin_bwrite) { return plugin_bwrite(bfd, buf, count); } stat = write(bfd->fid, buf, count); bfd->berrno = errno; bfd->block++; if (stat > 0) { bfd->total_bytes += stat; } return stat; } /* Unix */ bool is_bopen(BFILE *bfd) { return bfd->fid >= 0; } /* Unix */ boffset_t blseek(BFILE *bfd, boffset_t offset, int whence) { boffset_t pos; if (bfd->cmd_plugin && plugin_bwrite) { return plugin_blseek(bfd, offset, whence); } pos = (boffset_t)lseek(bfd->fid, offset, whence); bfd->berrno = errno; return pos; } #endif bacula-15.0.3/src/findlib/match.c0000644000175000017500000002466514771010173016335 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Old style * * Routines used to keep and match include and exclude * filename/pathname patterns. * * Note, this file is used for the old style include and * excludes, so is deprecated. The new style code is * found in find.c. * This code is still used for lists in testls and bextract. * * Kern E. 
Sibbald, December MMI * */ #include "bacula.h" #include "find.h" #include "ch.h" #include #ifndef FNM_LEADING_DIR #define FNM_LEADING_DIR 0 #endif /* Fold case in fnmatch() on Win32 */ #ifdef HAVE_WIN32 static const int fnmode = FNM_CASEFOLD; #else static const int fnmode = 0; #endif #undef bmalloc #define bmalloc(x) sm_malloc(__FILE__, __LINE__, x) int match_files(JCR *jcr, FF_PKT *ff, int file_save(JCR *, FF_PKT *ff_pkt, bool)) { ff->file_save = file_save; struct s_included_file *inc = NULL; /* This is the old deprecated way */ while (!job_canceled(jcr) && (inc = get_next_included_file(ff, inc))) { /* Copy options for this file */ bstrncat(ff->VerifyOpts, inc->VerifyOpts, sizeof(ff->VerifyOpts)); Dmsg1(100, "find_files: file=%s\n", inc->fname); if (!file_is_excluded(ff, inc->fname)) { if (find_one_file(jcr, ff, file_save, inc->fname, inc->fname, (dev_t)-1, true) ==0) { return 0; /* error return */ } } } return 1; } /* * Done doing filename matching, release all * resources used. */ void term_include_exclude_files(FF_PKT *ff) { struct s_included_file *inc, *next_inc; struct s_excluded_file *exc, *next_exc; for (inc=ff->included_files_list; inc; ) { next_inc = inc->next; free(inc); inc = next_inc; } ff->included_files_list = NULL; for (exc=ff->excluded_files_list; exc; ) { next_exc = exc->next; free(exc); exc = next_exc; } ff->excluded_files_list = NULL; for (exc=ff->excluded_paths_list; exc; ) { next_exc = exc->next; free(exc); exc = next_exc; } ff->excluded_paths_list = NULL; } /* * Add a filename to list of included files */ void add_fname_to_include_list(FF_PKT *ff, int prefixed, const char *fname) { int len, j; struct s_included_file *inc; char *p; const char *rp; len = strlen(fname); inc =(struct s_included_file *)bmalloc(sizeof(struct s_included_file) + len + 1); inc->options = 0; inc->VerifyOpts[0] = 'V'; inc->VerifyOpts[1] = ':'; inc->VerifyOpts[2] = 0; /* prefixed = preceded with options */ if (prefixed) { for (rp=fname; *rp && *rp != ' '; rp++) { switch (*rp) { case 'a': /* alway replace */ case '0': /* no option */ break; case 'f': inc->options |= FO_MULTIFS; break; case 'h': /* no recursion */ inc->options |= FO_NO_RECURSION; break; case 'M': /* MD5 */ inc->options |= FO_MD5; break; case 'n': inc->options |= FO_NOREPLACE; break; case 'p': /* use portable data format */ inc->options |= FO_PORTABLE; break; case 'r': /* read fifo */ inc->options |= FO_READFIFO; break; case 'S': inc->options |= FO_SHA1; break; case 's': inc->options |= FO_SPARSE; break; case 'm': inc->options |= FO_MTIMEONLY; break; case 'k': inc->options |= FO_KEEPATIME; break; case 'V': /* verify options */ /* Copy Verify Options */ for (j=0; *rp && *rp != ':'; rp++) { inc->VerifyOpts[j] = *rp; if (j < (int)sizeof(inc->VerifyOpts) - 1) { j++; } } inc->VerifyOpts[j] = 0; break; case 'w': inc->options |= FO_IF_NEWER; break; case 'A': inc->options |= FO_ACL; break; case 'Z': /* compression */ rp++; /* skip Z */ if (*rp >= '0' && *rp <= '9') { inc->options |= FO_COMPRESS; inc->algo = COMPRESS_GZIP; inc->Compress_level = *rp - '0'; } else if (*rp == 'o') { inc->options |= FO_COMPRESS; inc->algo = COMPRESS_LZO1X; inc->Compress_level = 1; /* not used with LZO */ } Dmsg2(200, "Compression alg=%d level=%d\n", inc->algo, inc->Compress_level); break; case 'd': /* Deduplication 0=none 1=Global 2=Local */ rp++; /* Skip z */ if (*rp >= '0' && *rp <= '2') { inc->Dedup_level = *rp - '0'; } break; case 'K': inc->options |= FO_NOATIME; break; case 'X': inc->options |= FO_XATTR; break; default: Emsg1(M_ERROR, 0, _("Unknown 
include/exclude option: %c\n"), *rp); break; } } /* Skip past space(s) */ for ( ; *rp == ' '; rp++) {} } else { rp = fname; } strcpy(inc->fname, rp); p = inc->fname; len = strlen(p); /* Zap trailing slashes. */ p += len - 1; while (p > inc->fname && IsPathSeparator(*p)) { *p-- = 0; len--; } inc->len = len; /* Check for wild cards */ inc->pattern = 0; for (p=inc->fname; *p; p++) { if (*p == '*' || *p == '[' || *p == '?') { inc->pattern = 1; break; } } #if defined(HAVE_WIN32) /* Convert any \'s into /'s */ for (p=inc->fname; *p; p++) { if (*p == '\\') { *p = '/'; } } #endif inc->next = NULL; /* Chain this one on the end of the list */ if (!ff->included_files_list) { /* First one, so set head */ ff->included_files_list = inc; } else { struct s_included_file *next; /* Walk to end of list */ for (next=ff->included_files_list; next->next; next=next->next) { } next->next = inc; } Dmsg4(100, "add_fname_to_include prefix=%d compres=%d alg= %d fname=%s\n", prefixed, !!(inc->options & FO_COMPRESS), inc->algo, inc->fname); } /* * We add an exclude name to either the exclude path * list or the exclude filename list. */ void add_fname_to_exclude_list(FF_PKT *ff, const char *fname) { int len; struct s_excluded_file *exc, **list; Dmsg1(20, "Add name to exclude: %s\n", fname); if (first_path_separator(fname) != NULL) { list = &ff->excluded_paths_list; } else { list = &ff->excluded_files_list; } len = strlen(fname); exc = (struct s_excluded_file *)bmalloc(sizeof(struct s_excluded_file) + len + 1); exc->next = *list; exc->len = len; strcpy(exc->fname, fname); #if defined(HAVE_WIN32) /* Convert any \'s into /'s */ for (char *p=exc->fname; *p; p++) { if (*p == '\\') { *p = '/'; } } #endif *list = exc; } /* * Get next included file */ struct s_included_file *get_next_included_file(FF_PKT *ff, struct s_included_file *ainc) { struct s_included_file *inc; if (ainc == NULL) { inc = ff->included_files_list; } else { inc = ainc->next; } /* * copy inc_options for this file into the ff packet */ if (inc) { ff->flags = inc->options; ff->Compress_algo = inc->algo; ff->Compress_level = inc->Compress_level; ff->Dedup_level = inc->Dedup_level; } return inc; } /* * Walk through the included list to see if this * file is included possibly with wild-cards. */ int file_is_included(FF_PKT *ff, const char *file) { struct s_included_file *inc = ff->included_files_list; int len; for ( ; inc; inc=inc->next ) { if (inc->pattern) { if (fnmatch(inc->fname, file, fnmode|FNM_LEADING_DIR) == 0) { return 1; } continue; } /* * No wild cards. We accept a match to the * end of any component. */ Dmsg2(900, "pat=%s file=%s\n", inc->fname, file); len = strlen(file); if (inc->len == len && strcmp(inc->fname, file) == 0) { return 1; } if (inc->len < len && IsPathSeparator(file[inc->len]) && strncmp(inc->fname, file, inc->len) == 0) { return 1; } if (inc->len == 1 && IsPathSeparator(inc->fname[0])) { return 1; } } return 0; } /* * This is the workhorse of excluded_file(). * Determine if the file is excluded or not. */ static int file_in_excluded_list(struct s_excluded_file *exc, const char *file) { if (exc == NULL) { Dmsg0(900, "exc is NULL\n"); } for ( ; exc; exc=exc->next ) { if (fnmatch(exc->fname, file, fnmode|FNM_PATHNAME) == 0) { Dmsg2(900, "Match exc pat=%s: file=%s:\n", exc->fname, file); return 1; } Dmsg2(900, "No match exc pat=%s: file=%s:\n", exc->fname, file); } return 0; } /* * Walk through the excluded lists to see if this * file is excluded, or if it matches a component * of an excluded directory. 
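 * Patterns that contain a path separator were stored on excluded_paths_list
 * and are matched with fnmatch(..., FNM_PATHNAME) against the whole name,
 * while bare patterns sit on excluded_files_list and are tried against the
 * start of every path component, so an entry such as "*.tmp" (example only)
 * excludes matching names at any depth.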
*/ int file_is_excluded(FF_PKT *ff, const char *file) { const char *p; #if defined(HAVE_WIN32) /* * ***NB*** this removes the drive from the exclude * rule. Why????? */ if (file[1] == ':') { file += 2; } #endif if (file_in_excluded_list(ff->excluded_paths_list, file)) { return 1; } /* Try each component */ for (p = file; *p; p++) { /* Match from the beginning of a component only */ if ((p == file || (!IsPathSeparator(*p) && IsPathSeparator(p[-1]))) && file_in_excluded_list(ff->excluded_files_list, p)) { return 1; } } return 0; } bacula-15.0.3/src/findlib/savecwd.h0000644000175000017500000000226514771010173016672 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Kern Sibbald, August MMVII * */ #ifndef _SAVECWD_H #define _SAVECWD_H 1 class saveCWD { bool m_saved; /* set if we should do chdir i.e. save_cwd worked */ int m_fd; /* fd of current dir before chdir */ char *m_cwd; /* cwd before chdir if fd fchdir() works */ public: saveCWD() { m_saved=false; m_fd=-1; m_cwd=NULL; }; ~saveCWD() { release(); }; bool save(JCR *jcr); bool restore(JCR *jcr); void release(); bool is_saved() { return m_saved; }; }; #endif /* _SAVECWD_H */ bacula-15.0.3/src/findlib/mkpath.c0000644000175000017500000002004714771010173016513 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Kern Sibbald, September MMVII * * This is tricky code, especially when writing from scratch. Fortunately, * a non-copyrighted version of mkdir was available to consult. * * ***FIXME*** the mkpath code could be significantly optimized by * walking up the path chain from the bottom until it either gets * to the top or finds an existing directory then walk back down * creating the path components. Currently, it always starts at * the top, which can be rather inefficient for long path names. 
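 * A sketch of that optimization (not implemented here): stat() the full
 * path, strip the last component and retry until an existing directory is
 * found, then mkdir() the stripped components back down in reverse order,
 * so deep trees whose parents already exist cost only a handful of calls.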
* */ #include "bacula.h" #include "jcr.h" #define dbglvl 50 /* * For old systems that don't have lchown() or lchmod() */ #ifndef HAVE_LCHOWN #define lchown chown #endif #ifndef HAVE_LCHMOD #define lchmod chmod #endif /* Defined in attribs.c */ void set_own_mod(ATTR *attr, char *path, uid_t owner, gid_t group, mode_t mode); typedef struct PrivateCurDir { hlink link; char fname[1]; } CurDir; /* Initialize the path hash table */ static bool path_list_init(JCR *jcr) { CurDir *elt = NULL; jcr->path_list = (htable *)malloc(sizeof(htable)); /* Hard to know in advance how many directories will * be stored in this hash */ jcr->path_list->init(elt, &elt->link, 10000); return true; } /* Add a path to the hash when we create a directory * with the replace=NEVER option */ bool path_list_add(JCR *jcr, uint32_t len, char *fname) { bool ret = true; CurDir *item; if (!jcr->path_list) { path_list_init(jcr); } /* we store CurDir, fname in the same chunk */ item = (CurDir *)jcr->path_list->hash_malloc(sizeof(CurDir)+len+1); memset(item, 0, sizeof(CurDir)); memcpy(item->fname, fname, len+1); jcr->path_list->insert(item->fname, item); Dmsg1(dbglvl, "add fname=<%s>\n", fname); return ret; } void free_path_list(JCR *jcr) { if (jcr->path_list) { jcr->path_list->destroy(); free(jcr->path_list); jcr->path_list = NULL; } } bool path_list_lookup(JCR *jcr, char *fname) { bool found=false; char bkp; if (!jcr->path_list) { return false; } /* Strip trailing / */ int len = strlen(fname); if (len == 0) { return false; } len--; bkp = fname[len]; if (fname[len] == '/') { /* strip any trailing slash */ fname[len] = 0; } CurDir *temp = (CurDir *)jcr->path_list->lookup(fname); if (temp) { found=true; } Dmsg2(dbglvl, "lookup <%s> %s\n", fname, found?"ok":"not ok"); fname[len] = bkp; /* restore last / */ return found; } static bool makedir(JCR *jcr, char *path, mode_t mode, int *created) { struct stat statp; if (mkdir(path, mode) != 0) { berrno be; *created = false; if (lstat(path, &statp) != 0) { Jmsg2(jcr, M_ERROR, 0, _("Cannot create directory %s: ERR=%s\n"), path, be.bstrerror()); return false; } else if (!S_ISDIR(statp.st_mode)) { Jmsg1(jcr, M_ERROR, 0, _("%s exists but is not a directory.\n"), path); return false; } return true; /* directory exists */ } #if 0 /* TODO: This code rely on statp that is not initialized, we need to do a stat() */ if (S_ISLNK(statp.st_mode)) { /* * Note, we created a directory, not a link, so if we find a * link, there is a security problem here. */ Jmsg1(jcr, M_FATAL, 0, _("Security problem!! We created directory %s, but it is a link.\n"), path); return false; } #endif if (jcr->keep_path_list) { /* When replace=NEVER, we keep track of all directories newly created */ path_list_add(jcr, strlen(path), path); } *created = true; return true; } /* * mode is the mode bits to use in creating a new directory * * parent_mode are the parent's modes if we need to create parent * directories. * * owner and group are to set on any created dirs * * keep_dir_modes if set means don't change mode bits if dir exists */ bool makepath(ATTR *attr, const char *apath, mode_t mode, mode_t parent_mode, uid_t owner, gid_t group, int keep_dir_modes) { struct stat statp; mode_t omask, tmode; char *path = (char *)apath; char *p; int len; bool ok = false; int created; char new_dir[5000]; int ndir = 0; int i = 0; int max_dirs = (int)sizeof(new_dir); JCR *jcr = attr->jcr; if (stat(path, &statp) == 0) { /* Does dir exist? 
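 * If it does not, the code below creates every missing component with a
 * permissive temporary mode (0777, still subject to the process umask),
 * records in new_dir[] which components were actually created, and then
 * walks the path a second time applying owner, group, parent_mode and the
 * final mode only to those newly created directories.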
*/ if (!S_ISDIR(statp.st_mode)) { Jmsg1(jcr, M_ERROR, 0, _("%s exists but is not a directory.\n"), path); return false; } /* Full path exists */ if (keep_dir_modes) { return true; } set_own_mod(attr, path, owner, group, mode); return true; } omask = umask(0); umask(omask); len = strlen(apath); path = (char *)alloca(len+1); bstrncpy(path, apath, len+1); strip_trailing_slashes(path); /* * Now for one of the complexities. If we are not running as root, * then if the parent_mode does not have wx user perms, or we are * setting the userid or group, and the parent_mode has setuid, setgid, * or sticky bits, we must create the dir with open permissions, then * go back and patch all the dirs up with the correct perms. * Solution, set everything to 0777, then go back and reset them at the * end. */ tmode = 0777; #if defined(HAVE_WIN32) /* Validate drive letter */ if (path[1] == ':') { char drive[4] = "X:\\"; drive[0] = path[0]; UINT drive_type = GetDriveType(drive); if (drive_type == DRIVE_UNKNOWN || drive_type == DRIVE_NO_ROOT_DIR) { Jmsg1(jcr, M_ERROR, 0, _("%c: is not a valid drive.\n"), path[0]); goto bail_out; } if (path[2] == '\0') { /* attempt to create a drive */ ok = true; goto bail_out; /* OK, it is already there */ } p = &path[3]; } else { p = path; } #else p = path; #endif /* Skip leading slash(es) */ while (IsPathSeparator(*p)) { p++; } while ((p = first_path_separator(p))) { char save_p; save_p = *p; *p = 0; if (!makedir(jcr, path, tmode, &created)) { goto bail_out; } if (ndir < max_dirs) { new_dir[ndir++] = created; } *p = save_p; while (IsPathSeparator(*p)) { p++; } } /* Create final component */ if (!makedir(jcr, path, tmode, &created)) { goto bail_out; } if (ndir < max_dirs) { new_dir[ndir++] = created; } if (ndir >= max_dirs) { Jmsg0(jcr, M_WARNING, 0, _("Too many subdirectories. Some permissions not reset.\n")); } /* Now set the proper owner and modes */ #if defined(HAVE_WIN32) /* Don't propagate the hidden or encrypted attributes to parent directories */ parent_mode &= ~S_ISVTX; parent_mode &= ~S_ISGID; if (path[1] == ':') { p = &path[3]; } else { p = path; } #else p = path; #endif /* Skip leading slash(es) */ while (IsPathSeparator(*p)) { p++; } while ((p = first_path_separator(p))) { char save_p; save_p = *p; *p = 0; if (i < ndir && new_dir[i++] && !keep_dir_modes) { set_own_mod(attr, path, owner, group, parent_mode); } *p = save_p; while (IsPathSeparator(*p)) { p++; } } /* Set for final component */ if (i < ndir && new_dir[i++]) { set_own_mod(attr, path, owner, group, mode); } ok = true; bail_out: umask(omask); return ok; } bacula-15.0.3/src/findlib/namedpipe.h0000644000175000017500000000246014771010173017175 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/ /* Basic abstraction for named pipe between windows and linux */ #ifndef NAMEDPIPE_H #define NAMEDPIPE_H #ifdef HAVE_WIN32 #include #endif #include typedef struct { #ifdef HAVE_WIN32 HANDLE fd; mode_t mode; int connected; #else char *name; int fd; #endif int ifd; int use_msg; } NamedPipe; void namedpipe_init(NamedPipe *self); void namedpipe_free(NamedPipe *self); int namedpipe_create(NamedPipe *self, const char *path, mode_t mode); void namedpipe_use_messages(NamedPipe *self); intptr_t namedpipe_open(NamedPipe *self, const char *path, mode_t mode); int namedpipe_get_fd(NamedPipe *self); #endif bacula-15.0.3/src/findlib/namedpipe.c0000644000175000017500000002336714771010173017201 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* Written by Eric Bollengier */ #ifdef HAVE_WIN32 # include # include # include # include #else /* !HAVE_WIN32 */ # include # include # include # include # include #endif /* HAVE_WIN32 */ /* Common include */ #include #include #include #include "namedpipe.h" #ifdef NEED_DMSG #define Dmsg(level, format, ...) d_msg(__FILE__, __LINE__, level, format, __VA_ARGS__) void d_msg(const char *file, int line, int level, const char *fmt,...); #endif #ifdef TEST_PROGRAM # define Dmsg(level, ...) printf(__VA_ARGS__ ) #endif void namedpipe_use_messages(NamedPipe *self) { self->use_msg = true; } #ifdef HAVE_WIN32 void namedpipe_init(NamedPipe *self) { self->fd = INVALID_HANDLE_VALUE; self->ifd = -1; self->use_msg = false; } void namedpipe_free(NamedPipe *self) { if (self->fd != INVALID_HANDLE_VALUE) { CloseHandle(self->fd); self->fd = INVALID_HANDLE_VALUE; self->ifd = -1; self->use_msg = false; } } #define BUFSIZE 8192 int namedpipe_create(NamedPipe *self, const char *path, mode_t mode) { DWORD flag = 0; if (self->use_msg) { flag = PIPE_READMODE_MESSAGE | PIPE_TYPE_MESSAGE; // message type pipe } /* On windows, */ self->fd = CreateNamedPipeA( path, // pipe name PIPE_ACCESS_DUPLEX, // read/write access flag | PIPE_WAIT, // blocking mode PIPE_UNLIMITED_INSTANCES, // max. instances BUFSIZE, // output buffer size BUFSIZE, // input buffer size 0, // client time-out NULL); // default security attribute if (self->fd == INVALID_HANDLE_VALUE) { Dmsg(10, "CreateNamedPipe failed, ERR=%d.\n", (int)GetLastError()); return -1; } return 0; } intptr_t namedpipe_open(NamedPipe *self, const char *path, mode_t mode) { int retry = 30; self->connected = false; self->mode = mode; /* Do not pass this descriptor to a sub process */ SECURITY_ATTRIBUTES sec; sec.nLength = sizeof(sec); sec.lpSecurityDescriptor = NULL; sec.bInheritHandle = false; if (self->fd != INVALID_HANDLE_VALUE) { /* server mode */ self->connected = ConnectNamedPipe(self->fd, NULL) ? 
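/*
 * Typical use of this small API (rough sketch, caller names are
 * illustrative; the real callers live in the FD and plugin code):
 *
 *     NamedPipe p;
 *     namedpipe_init(&p);
 *     if (server && namedpipe_create(&p, path, 0600) < 0) { ... }
 *     if (namedpipe_open(&p, path, mode) < 0)             { ... }
 *     int fd = namedpipe_get_fd(&p);   // plain fd usable with read()/write()
 *     ...
 *     namedpipe_free(&p);
 *
 * On Windows the server branch just below blocks in ConnectNamedPipe()
 * until a client shows up (or is already connected), while the client
 * branch retries CreateFileA() up to 30 times, sleeping or waiting on the
 * pipe while the server side is still being created.
 */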
TRUE : (GetLastError() == ERROR_PIPE_CONNECTED); } else { /* client mode */ /* Need to wait for creation */ while (retry-- > 0) { self->fd = CreateFileA( path, // pipe name GENERIC_WRITE | GENERIC_READ, 0, // no sharing &sec, // default security attributes OPEN_EXISTING, // opens existing pipe 0, // default attributes NULL); // no template file // Break if the pipe handle is valid. if (self->fd != INVALID_HANDLE_VALUE) { break; } /* Wait a little bit for the other side to create the fifo */ if (GetLastError() == ERROR_FILE_NOT_FOUND) { Dmsg(10, "File not found, ERR=%d.\n", (int)GetLastError()); Sleep(20000); continue; } // Exit if an error other than ERROR_PIPE_BUSY occurs. if (GetLastError() != ERROR_PIPE_BUSY) { Dmsg(10, "CreateFile failed, ERR=%d.\n", (int)GetLastError()); return -1; } // All pipe instances are busy, so wait for 20 seconds. if (!WaitNamedPipeA(path, 20000)) { Dmsg(10, "WaitNamedPipe failed, ERR=%d.\n", (int)GetLastError()); return -1; } } } DWORD dwMode = 0; if (self->use_msg) { dwMode = PIPE_READMODE_MESSAGE; } self->connected = SetNamedPipeHandleState( self->fd, // pipe handle &dwMode, // new pipe mode NULL, // don't set maximum bytes NULL); // don't set maximum time if (!self->connected) { Dmsg(10, "SetNamedPipeHandleState failed, ERR=%d.\n", (int)GetLastError()); } return (intptr_t)self->fd; } int namedpipe_get_fd(NamedPipe *self) { if (self->fd == INVALID_HANDLE_VALUE) { return -1; } if (self->connected) { int m = 0; if (self->mode & O_WRONLY || self->mode & O_APPEND) { m |= O_APPEND; } else if (self->mode & O_RDONLY) { m |= O_RDONLY; } self->ifd = _open_osfhandle((intptr_t)self->fd, m); self->fd = INVALID_HANDLE_VALUE; } return self->ifd; } #else /* !HAVE_WIN32 */ int namedpipe_get_fd(NamedPipe *self) { return self->ifd; } void namedpipe_init(NamedPipe *self) { self->fd = -1; self->ifd = -1; self->use_msg = 0; self->name = NULL; } void namedpipe_free(NamedPipe *self) { if (self->fd != -1) { close(self->fd); self->fd = -1; self->ifd = -1; self->use_msg = 0; } if (self->name) { unlink(self->name); free(self->name); self->name = NULL; } } int namedpipe_create(NamedPipe *self, const char *path, mode_t mode) { self->name = (char *)malloc(strlen(path) + 1); strcpy(self->name, path); if (mkfifo(path, mode) < 0 && errno != EEXIST) { return -1; } return 0; } intptr_t namedpipe_open(NamedPipe *self, const char *path, mode_t mode) { self->ifd = self->fd = open(path, mode); return (intptr_t)self->fd; } #endif /* HAVE_WIN32 */ #ifdef TEST_PROGRAM #include #include #define BUF "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" \ "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" \ "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" \ "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" \ "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" \ "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" \ "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" \ "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" \ "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n" int main(int argc, char **argv) { FILE *fp; NamedPipe p; char buf[65*1024], file[128]; int fd; int mode; int n, m, o; if (argc < 4) { printf("Usage: %s client|server pipe file\n", argv[0]); exit(3); } namedpipe_init(&p); if (strcmp(argv[1], "server") == 0) 
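/* Self-test protocol: the server writes the BUF string twice, then a
 * 10-digit ASCII length (plus newline and NUL), then the content of the
 * file given as argv[3]; the client reads the two BUF copies, the length,
 * and streams the payload into "<argv[3]>.out" so the files can be compared
 * afterwards, e.g. (program name and shell lines are illustrative only):
 *     ./namedpipe_test server /tmp/p data.bin &
 *     ./namedpipe_test client /tmp/p data.bin && cmp data.bin data.bin.out
 */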
{ mode = O_WRONLY; if (namedpipe_create(&p, argv[2], 0600) < 0) { exit(2); } } else { mode = O_RDONLY; } printf("Trying to open %s mode=%d\n", argv[2], mode); fd = namedpipe_open(&p, argv[2], mode); if (fd < 0) { printf("Unable to open pipe\n"); exit(1); } if (strcmp(argv[1], "server") == 0) { if (write(fd, BUF, strlen(BUF)+1) != strlen(BUF)+1) { printf("Unable to write data\n"); exit(4); } if (write(fd, BUF, strlen(BUF)+1) != strlen(BUF)+1) { printf("Unable to write data\n"); exit(4); } fp = bfopen(argv[3], "rb"); if (!fp) { printf("Unable to open %s for reading\n", argv[3]); exit(4); } fseek(fp, 0, SEEK_END); m = ftell(fp); fseek(fp, 0, SEEK_SET); snprintf(buf, sizeof(buf), "%.10d\n", m); write(fd, buf, strlen(buf)+1); while (m > 0 && !feof(fp)) { n = fread(buf, 1, sizeof(buf), fp); Dmsg(000, "read %d from file\n", n); if (write(fd, buf, n) != n) { printf("Unable to write data from file\n"); exit(5); } m -= n; } Dmsg(000, "EOF found\n"); fclose(fp); } else { if ((n = read(fd, buf, sizeof(buf))) != strlen(BUF)+1) { Dmsg(000, "read failed (%d != %d), ERR=%d.\n", n, (int)strlen(BUF)+1, errno); exit(4); } if (read(fd, buf, sizeof(buf)) != strlen(BUF)+1) { Dmsg(000, "read failed, ERR=%d.\n", errno); exit(4); } printf("buf=[%s]\n", buf); snprintf(file, sizeof(file), "%s.out", argv[3]); fp = bfopen(file, "wb"); if (!fp) { printf("Unable to open %s for writing\n", buf); exit(4); } if ((n = read(fd, buf, sizeof(buf))) != 12) { Dmsg(000, "read failed (%d != %d), ERR=%d.\n", n, 12, errno); exit(4); } m = atoi(buf); Dmsg(000, "will read %d from fifo\n", m); while (m > 0) { n = read(fd, buf, sizeof(buf)); Dmsg(000, "Got %d bytes\n", n); if ((o = fwrite(buf, n, 1, fp)) != 1) { Dmsg(000, "write to file failed (%d != %d) ERR=%d.\n", o, n, errno); exit(4); } m -= n; } fclose(fp); } namedpipe_free(&p); exit(0); } #endif bacula-15.0.3/src/findlib/fstype.c0000644000175000017500000004745314771010173016553 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Implement routines to determine file system types. 
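 * Each platform block below provides fstype(), returning a short
 * file-system name such as "ext2" or "zfs", and fstypeid(), returning the
 * raw statfs/statvfs magic or fsid.  Linux statfs() carries no name, so the
 * Linux variant first consults the mtab cache built by read_mtab() and only
 * then falls back to the hard-coded magic-number table.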
* * Written by Preben 'Peppe' Guldberg, December MMIV * Updated by Kern Sibbald, April MMXV */ #ifndef TEST_PROGRAM #include "bacula.h" #include "find.h" #include #include #if defined(HAVE_LINUX_OS) || defined(HAVE_FREEBSD_OS) || defined(HAVE_AIX_OS) #include #endif #ifdef HAVE_DARWIN_OS #include #endif #ifdef HAVE_SUN_OS #include #endif #else /* Set up for testing a stand alone program */ #include #include #include #define bstrncpy strncpy #define Dmsg0(n,s) fprintf(stderr, s) #define Dmsg1(n,s,a1) fprintf(stderr, s, a1) #define Dmsg2(n,s,a1,a2) fprintf(stderr, s, a1, a2) #endif #define is_rootfs(x) bstrcmp("rootfs", x) #if defined(HAVE_GETMNTINFO) || defined(HAVE_GETMNTENT) static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; #endif struct mtab_item { rblink link; uint64_t dev; char fstype[1]; }; /* Compare two device types */ static int compare_mtab_items(void *item1, void *item2) { mtab_item *mtab1, *mtab2; mtab1 = (mtab_item *)item1; mtab2 = (mtab_item *)item2; if (mtab1->dev < mtab2->dev) return -1; if (mtab1->dev > mtab2->dev) return 1; return 0; } void add_mtab_item(void *user_ctx, struct stat *st, const char *fstype, const char *mountpoint, const char *mntopts, const char *fsname) { rblist *mtab_list = (rblist *)user_ctx; mtab_item *item, *ritem; int len = strlen(fstype) + 1; item = (mtab_item *)malloc(sizeof(mtab_item) + len); item->dev = (uint64_t)st->st_dev; bstrncpy(item->fstype, fstype, len); ritem = (mtab_item *)mtab_list->insert((void *)item, compare_mtab_items); if (ritem != item) { /* Item already inserted, so we discard this one */ free(item); } } /* * These functions should be implemented for each OS * * bool fstype(FF_PKT *ff_pkt, char *fs, int fslen); */ #if defined(HAVE_DARWIN_OS) \ || defined(HAVE_FREEBSD_OS ) \ || defined(HAVE_KFREEBSD_OS ) \ || defined(HAVE_OPENBSD_OS) #include #include /* * simple return fs type magic number or zero when error */ static uint64_t fstypeid(char *fname, FF_PKT *ff_pkt) { struct statfs st; if (statfs(fname, &st) == 0) { #if defined (HAVE_OPENBSD_OS) return (((uint64_t)st.f_fsid.val[0])<<32) | st.f_fsid.val[1]; #else return st.f_type; #endif } return 0; } bool fstype(FF_PKT *ff_pkt, char *fs, int fslen) { char *fname = ff_pkt->fname; struct statfs st; if (!fname) { return false; } if (statfs(fname, &st) == 0) { bstrncpy(fs, st.f_fstypename, fslen); return true; } Dmsg1(50, "statfs() failed for \"%s\"\n", fname); return false; } bool fstype(char *fname, FF_PKT *ff_pkt, char *fs, int fslen) { return fstype(ff_pkt, fs, fslen); } #elif defined(HAVE_NETBSD_OS) #include #include #ifdef HAVE_SYS_STATVFS_H #include #else #define statvfs statfs #endif /* * simple return fs type magic number or zero when error */ static uint64_t fstypeid(char *fname, FF_PKT *ff_pkt) { struct statvfs st; if (statvfs(fname, &st) == 0) { return st.f_type; } return 0; } bool fstype(FF_PKT *ff_pkt, char *fs, int fslen) { char *fname = ff_pkt->fname; struct statvfs st; if (fname) { return false; } if (statvfs(fname, &st) == 0) { bstrncpy(fs, st.f_fstypename, fslen); return true; } Dmsg1(50, "statfs() failed for \"%s\"\n", fname); return false; } bool fstype(char *fname, FF_PKT *ff_pkt, char *fs, int fslen) { return fstype(ff_pkt, fs, fslen); } #elif defined(HAVE_HPUX_OS) \ || defined(HAVE_IRIX_OS) || defined(HAVE_AIX_OS) #include #include #ifdef HAVE_AIX_OS #ifndef NAME_MAX #define NAME_MAX 2048 #endif #endif /* * TODO: RPK: is HP-UX version really working as I cannot find required API in HP-UX docs. 
*/ /* * simple return fs type magic number or zero when error */ static uint64_t fstypeid(char *fname, FF_PKT *ff_pkt) { struct statfs st; if (statfs(fname, &st) == 0) { return st.f_fsid; } return 0; } bool fstype(FF_PKT *ff_pkt, char *fs, int fslen) { char *fname = ff_pkt->fname; struct statvfs st; if (!fname) { return false; } if (statvfs(fname, &st) == 0) { bstrncpy(fs, st.f_basetype, fslen); return true; } Dmsg1(50, "statfs() failed for \"%s\"\n", fname); return false; } bool fstype(char *fname, FF_PKT *ff_pkt, char *fs, int fslen) { return fstype(ff_pkt, fs, fslen); } #elif defined(HAVE_LINUX_OS) #include #include /* * simple return fs type magic number or zero when error */ static uint64_t fstypeid(char *fname, FF_PKT *ff_pkt) { struct statfs st; if (statfs(fname, &st) == 0) { return st.f_type; } return 0; } /* * Linux statfs() does not return the filesystem name type. It * only returns a binary fstype, so we must look up the type name * in mtab. */ bool fstype(char *fname, FF_PKT *ff_pkt, char *fs, int fslen) { struct statfs st; const char *fstype; if (!fname) { return false; } if (statfs(fname, &st) == 0) { mtab_item *item, search_item; if (*ff_pkt->last_fstypename && ff_pkt->last_fstype == (uint64_t)st.f_type) { bstrncpy(fs, ff_pkt->last_fstypename, fslen); return true; } if (!ff_pkt->mtab_list) { ff_pkt->mtab_list = New(rblist()); read_mtab(add_mtab_item, ff_pkt->mtab_list); } search_item.dev = st.f_type; item = (mtab_item *)ff_pkt->mtab_list->search((void *)&search_item, compare_mtab_items); if (item) { ff_pkt->last_fstype = st.f_type; bstrncpy(ff_pkt->last_fstypename, item->fstype, sizeof(ff_pkt->last_fstypename)); bstrncpy(fs, ff_pkt->last_fstypename, fslen); return true; } /* * Values obtained from statfs(2), testing and * * $ grep -r SUPER_MAGIC /usr/include/linux */ switch ((unsigned int)st.f_type) { /* Known good values */ /* ext2, ext3, and ext4 have the same code */ case 0xef53: fstype = "ext2"; break; /* EXT2_SUPER_MAGIC */ case 0x3153464a: fstype = "jfs"; break; /* JFS_SUPER_MAGIC */ case 0x5346544e: fstype = "ntfs"; break; /* NTFS_SB_MAGIC */ case 0x9fa0: fstype = "proc"; break; /* PROC_SUPER_MAGIC */ case 0x52654973: fstype = "reiserfs"; break; /* REISERFS_SUPER_MAGIC */ case 0x58465342: fstype = "xfs"; break; /* XFS_SB_MAGIC */ case 0x9fa2: fstype = "usbdevfs"; break; /* USBDEVICE_SUPER_MAGIC */ case 0x62656572: fstype = "sysfs"; break; /* SYSFS_MAGIC */ case 0x517B: fstype = "smbfs"; break; /* SMB_SUPER_MAGIC */ case 0x9660: fstype = "iso9660"; break; /* ISOFS_SUPER_MAGIC */ case 0xadf5: fstype = "adfs"; break; /* ADFS_SUPER_MAGIC */ case 0xadff: fstype = "affs"; break; /* AFFS_SUPER_MAGIC */ case 0x42465331: fstype = "befs"; break; /* BEFS_SUPER_MAGIC */ case 0xFF534D42: fstype = "cifs"; break; /* CIFS_MAGIC_NUMBER */ case 0xfe534d42: fstype = "cifs2"; break; /* CIFS2_MAGIC_NUMBER */ case 0x73757245: fstype = "coda"; break; /* CODA_SUPER_MAGIC */ case 0x012ff7b7: fstype = "coherent"; break; /* COH_SUPER_MAGIC */ case 0x28cd3d45: fstype = "cramfs"; break; /* CRAMFS_MAGIC */ case 0x1373: fstype = "devfs"; break; /* DEVFS_SUPER_MAGIC */ case 0x414A53: fstype = "efs"; break; /* EFS_SUPER_MAGIC */ case 0x137d: fstype = "ext"; break; /* EXT_SUPER_MAGIC */ case 0xef51: fstype = "oldext2"; break; /* EXT2_OLD_SUPER_MAGIC */ case 0x4244: fstype = "hfs"; break; /* EXT2_OLD_SUPER_MAGIC */ case 0xf995e849: fstype = "hpfs"; break; /* HPFS_SUPER_MAGIC */ case 0x958458f6: fstype = "hugetlbfs"; break; /* HUGETLBFS_MAGIC */ case 0x72b6: fstype = "jffs2"; break; /* JFFS2_SUPER_MAGIC 
*/ case 0x2468: fstype = "minix"; break; /* MINIX2_SUPER_MAGIC */ case 0x2478: fstype = "minix"; break; /* MINIX2_SUPER_MAGIC2 */ case 0x137f: fstype = "minix"; break; /* MINIX_SUPER_MAGIC */ case 0x138f: fstype = "minix"; break; /* MINIX_SUPER_MAGIC2 */ case 0x4d44: fstype = "msdos"; break; /* MSDOS_SUPER_MAGIC */ case 0x564c: fstype = "ncpfs"; break; /* NCP_SUPER_MAGIC */ case 0x6969: fstype = "nfs"; break; /* NFS_SUPER_MAGIC */ case 0x9fa1: fstype = "openpromfs"; break; /* OPENPROM_SUPER_MAGIC */ case 0x002f: fstype = "qnx4"; break; /* QNX4_SUPER_MAGIC */ case 0x7275: fstype = "romfs"; break; /* QNX4_SUPER_MAGIC */ case 0x012ff7b6: fstype = "sysv2"; break; case 0x012ff7b5: fstype = "sysv4"; break; case 0x01021994: fstype = "tmpfs"; break; case 0x15013346: fstype = "udf"; break; case 0x00011954: fstype = "ufs"; break; case 0xa501FCF5: fstype = "vxfs"; break; case 0x012FF7B4: fstype = "xenix"; break; case 0x012FD16D: fstype = "xiafs"; break; case 0x9123683e: fstype = "btrfs"; break; case 0x7461636f: fstype = "ocfs2"; break; /* OCFS2_SUPER_MAGIC */ case 0x2fc12fc1: fstype = "zfs"; break; #if 0 /* These need confirmation */ case 0x6B414653: fstype = "afs"; break; /* AFS_FS_MAGIC */ case 0x0187: fstype = "autofs"; break; /* AUTOFS_SUPER_MAGIC */ case 0x62646576: fstype = "bdev"; break; /* ??? */ case 0x1BADFACE: fstype = "bfs"; break; /* BFS_MAGIC */ case 0x42494e4d: fstype = "binfmt_misc"; break; /* ??? */ case (('C'<<8)|'N'): fstype = "capifs"; break; /* CAPIFS_SUPER_MAGIC */ case 0x1cd1: fstype = "devpts"; break; /* ??? */ case 0x03111965: fstype = "eventpollfs"; break; /* EVENTPOLLFS_MAGIC */ case 0xBAD1DEA: fstype = "futexfs"; break; /* ??? */ case 0xaee71ee7: fstype = "gadgetfs"; break; /* GADGETFS_MAGIC */ case 0x00c0ffee: fstype = "hostfs"; break; /* HOSTFS_SUPER_MAGIC */ case 0xb00000ee: fstype = "hppfs"; break; /* HPPFS_SUPER_MAGIC */ case 0x12061983: fstype = "hwgfs"; break; /* HWGFS_MAGIC */ case 0x66726f67: fstype = "ibmasmfs"; break; /* IBMASMFS_MAGIC */ case 0x19800202: fstype = "mqueue"; break; /* MQUEUE_MAGIC */ case 0x6f70726f: fstype = "oprofilefs"; break; /* OPROFILEFS_MAGIC */ case 0xa0b4d889: fstype = "pfmfs"; break; /* PFMFS_MAGIC */ case 0x50495045: fstype = "pipfs"; break; /* PIPEFS_MAGIC */ case 0x858458f6: fstype = "ramfs"; break; /* RAMFS_MAGIC */ case 0x7275: fstype = "romfs"; break; /* ROMFS_MAGIC */ case 0x858458f6: fstype = "rootfs"; break; /* RAMFS_MAGIC */ case 0x67596969: fstype = "rpc_pipefs"; break; /* RPCAUTH_GSSMAGIC */ case 0x534F434B: fstype = "sockfs"; break; /* SOCKFS_MAGIC */ case 0x858458f6: fstype = "tmpfs"; break; /* RAMFS_MAGIC */ case 0x01021994: fstype = "tmpfs"; break; /* TMPFS_MAGIC */ #endif default: Dmsg2(10, "Unknown file system type \"0x%x\" for \"%s\".\n", st.f_type, fname); return false; } ff_pkt->last_fstype = st.f_type; bstrncpy(ff_pkt->last_fstypename, fstype, sizeof(ff_pkt->last_fstypename)); bstrncpy(fs, fstype, fslen); return true; } Dmsg1(50, "statfs() failed for \"%s\"\n", fname); return false; } /* * Parameter wrapper for fstype */ bool fstype(FF_PKT *ff_pkt, char *fs, int fslen) { return fstype(ff_pkt->fname, ff_pkt, fs, fslen); } #elif defined(HAVE_SUN_OS) #include #include #include #ifndef NAME_MAX #define NAME_MAX FSTYPSZ #endif /* * simple return fs type magic number */ static uint64_t fstypeid(char *fname, FF_PKT *ff_pkt) { struct statvfs st; if (statvfs(fname, &st) == 0) { return st.f_fsid; } return 0; } bool fstype(FF_PKT *ff_pkt, char *fs, int fslen) { /* Solaris has the filesystem type name in the lstat packet */ 
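/* On Solaris statp.st_fstype already holds a name such as "zfs" or "ufs"
 * (examples only), so no extra statvfs() call is needed here. */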
bstrncpy(fs, ff_pkt->statp.st_fstype, fslen); return true; } bool fstype(char *fname, FF_PKT *ff_pkt, char *fs, int fslen) { return fstype(ff_pkt, fs, fslen); } #elif defined (__digital__) && defined (__unix__) /* Tru64 */ /* Tru64 */ #include #include #include /* * TODO: RPK: Tru64 is already dead! */ /* * simple return fs type magic number */ static uint64_t fstypeid(char *fname, FF_PKT *ff_pkt) { struct statfs st; if (statfs((char *)fname, &st) == 0){ return st.f_type; } return 0; } bool fstype(FF_PKT *ff_pkt, char *fs, int fslen) { char *fname = ff_pkt->fname; struct statfs st; if (statfs((char *)fname, &st) == 0){ switch (st.f_type) { /* Known good values */ case 0xa: bstrncpy(fs, "advfs", fslen); return true; /* Tru64 AdvFS */ case 0xe: bstrncpy(fs, "nfs", fslen); return true; /* Tru64 NFS */ default: Dmsg2(10, "Unknown file system type \"0x%x\" for \"%s\".\n", st.f_type, fname); return false; } } Dmsg1(50, "statfs() failed for \"%s\"\n", fname); return false; } bool fstype(char *fname, FF_PKT *ff_pkt, char *fs, int fslen) { return fstype(ff_pkt, fs, fslen); } /* Tru64 */ #elif defined (HAVE_WIN32) /* Windows */ bool fstype(char *fname, FF_PKT *ff_pkt, char *fs, int fslen) { DWORD componentlength; DWORD fsflags; CHAR rootpath[4]; UINT oldmode; BOOL result; /* Copy Drive Letter, colon, and backslash to rootpath */ bstrncpy(rootpath, fname, sizeof(rootpath)); /* We don't want any popups if there isn't any media in the drive */ oldmode = SetErrorMode(SEM_FAILCRITICALERRORS); result = GetVolumeInformation(rootpath, NULL, 0, NULL, &componentlength, &fsflags, fs, fslen); SetErrorMode(oldmode); if (result) { /* Windows returns NTFS, FAT, etc. Make it lowercase to be consistent with other OSes */ lcase(fs); } else { Dmsg2(10, "GetVolumeInformation() failed for \"%s\", Error = %d.\n", rootpath, GetLastError()); } return result != 0; } /* * Parameter wrapper for fstype */ bool fstype(FF_PKT *ff_pkt, char *fs, int fslen) { return fstype(ff_pkt->fname, ff_pkt, fs, fslen); } /* Not implemented for windows, used in ACL code */ static uint64_t fstypeid(char *fname, FF_PKT *ff_pkt) { return 0; } /* Windows */ #else /* No recognised OS */ static uint64_t fstypeid(char *fname, FF_PKT *ff_pkt) { Dmsg0(10, "!!! fstypeid() not implemented for this OS. !!!\n"); return 0; } bool fstype(FF_PKT *ff_pkt, char *fs, int fslen) { Dmsg0(10, "!!! fstype() not implemented for this OS. !!!\n"); return false; } bool fstype(char *fname, FF_PKT *ff_pkt, char *fs, int fslen) { Dmsg0(10, "!!! fstype() not implemented for this OS. 
!!!\n"); return false; } #endif /* Read mtab entries */ bool read_mtab(mtab_handler_t *mtab_handler, void *user_ctx) { #ifdef HAVE_AIX_OS return false; #else /* Debian stretch GNU/KFreeBSD has both getmntinfo and getmntent, but * only the first seems to work, so we skip over getmntent in this case */ #ifndef HAVE_KFREEBSD_OS #ifdef HAVE_GETMNTENT FILE *mntfp; struct stat st; #ifdef HAVE_LINUX_OS struct mntent *mnt; P(mutex); if ((mntfp = setmntent("/proc/mounts", "r")) == NULL) { if ((mntfp = setmntent(_PATH_MOUNTED, "r")) == NULL) { V(mutex); return false; } } while ((mnt = getmntent(mntfp)) != NULL) { if (is_rootfs(mnt->mnt_type)) { continue; } if (stat(mnt->mnt_dir, &st) < 0) { continue; } mtab_handler(user_ctx, &st, mnt->mnt_type, mnt->mnt_dir, mnt->mnt_opts, mnt->mnt_fsname); } endmntent(mntfp); V(mutex); #endif #ifdef HAVE_SUN_OS struct mnttab mnt; P(mutex); if ((mntfp = bfopen(MNTTAB, "r")) == NULL) { V(mutex); return false; } while (getmntent(mntfp, &mnt) == 0) { if (is_rootfs(mnt.mnt_fstype)) { continue; } if (stat(mnt.mnt_mountp, &st) < 0) { continue; } mtab_handler(user_ctx, &st, mnt.mnt_fstype, mnt.mnt_mountp, mnt.mnt_mntopts, mnt.mnt_special); } fclose(mntfp); V(mutex); #endif #endif /* HAVE_GETMNTENT */ #endif /* HAVE_KFREEBSD_OS */ #ifdef HAVE_GETMNTINFO struct stat st; #if defined(ST_NOWAIT) int flags = ST_NOWAIT; #elif defined(MNT_NOWAIT) int flags = MNT_NOWAIT; #else int flags = 0; #endif #if defined(HAVE_NETBSD_OS) struct statvfs *mntinfo; #else struct statfs *mntinfo; #endif int nument; P(mutex); if ((nument = getmntinfo(&mntinfo, flags)) > 0) { while (nument-- > 0) { if (is_rootfs(mntinfo->f_fstypename)) { continue; } if (stat(mntinfo->f_mntonname, &st) < 0) { continue; } mtab_handler(user_ctx, &st, mntinfo->f_fstypename, // fstype mntinfo->f_mntonname, // mountpoint NULL, // mntopts mntinfo->f_mntfromname); // device mntinfo++; } } V(mutex); #endif /* HAVE_GETMNTINFO */ return true; #endif // HAVE_AIX_OS } #ifdef HAVE_AIX_OS #ifndef NAME_MAX #define NAME_MAX 2048 #endif #endif /* * compares current fstype from FF_PKT for required fstype_name */ bool check_current_fs(char *fname, FF_PKT *ff, const char *fstype_name) { if (fstype_name != NULL){ // read current fs name and compare it to required char fsname[NAME_MAX]; if (fstype(fname, ff, fsname, NAME_MAX)){ return bstrcmp(fsname, fstype_name); } } return false; }; /* * compares current fstype from FF_PKT for required fstype_magic */ bool check_current_fs(char *fname, FF_PKT *ff, uint64_t fstype_magic) { if (fstype_magic > 0){ // get fsid for file char fsname[NAME_MAX]; if (fstype(fname, ff, fsname, NAME_MAX)) { return ff->last_fstype == fstype_magic; } } return false; } #ifdef TEST_PROGRAM int main(int argc, char **argv) { char *p; char fs[1000]; int status = 0; if (argc < 2) { p = (argc < 1) ? "fstype" : argv[0]; printf("usage:\t%s path ...\n" "\t%s prints the file system type and pathname of the paths.\n", p, p); return EXIT_FAILURE; } while (*++argv) { if (!fstype(*argv, fs, sizeof(fs))) { status = EXIT_FAILURE; } else { printf("%s\t%s\n", fs, *argv); } } return status; } #endif bacula-15.0.3/src/findlib/win32filter.c0000644000175000017500000001006014771010173017371 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. 
You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* Written by Alain Spineux */ #include "win32filter.h" #define WIN32_STREAM_HEADER_SIZE 20 /* the size of the WIN32_STREAM_ID header without the name */ /* search in a record of a STREAM_WIN32_DATA for the true data * when found: return have_data() true, '*raw' is set at the beginning of the * data inside the record and *use_len is the length of data to read. * *raw_len is decremented and contains the amount of data that as not * been filtered yet. * For this STREAM_WIN32_DATA, you can call have_data() only one * per record. * If the stream where the data is can be spread all around the stream * you must call have_data() until *raw_len is zero and increment * *data before the next call. */ bool Win32Filter::have_data(char **raw, int64_t *raw_len, int64_t *use_len) { int64_t size; char *orig=*raw; initialized = true; Dmsg2(100, "have_data(%lld) error=%d\n", *raw_len, error); while (!error && *raw_len > 0) { /* In this rec, we could have multiple streams of data and headers * to handle before to reach the data, then we must iterate */ Dmsg4(100, "s off=%lld len=%lld skip_size=%lld data_size=%lld\n", *raw-orig, *raw_len, skip_size, data_size); if (skip_size > 0) { /* skip what the previous header told us to skip */ size = *raw_len < skip_size ? *raw_len : skip_size; skip_size -= size; *raw_len -= size; *raw += size; } Dmsg4(100, "h off=%lld len=%lld skip_size=%lld data_size=%lld\n", *raw-orig, *raw_len, skip_size, data_size); if (data_size == 0 && skip_size == 0 && *raw_len > 0) { /* read a WIN32_STREAM header, merge it with the part that was read * from the previous record, if any, if the header was split across * 2 records. */ size = WIN32_STREAM_HEADER_SIZE - header_pos; if (*raw_len < size) { size = *raw_len; } memcpy((char *)&header + header_pos, *raw, size); header_pos += size; *raw_len -= size; *raw += size; if (header_pos == WIN32_STREAM_HEADER_SIZE) { Dmsg5(100, "header pos=%d size=%lld name_size=%d len=%lld StreamId=0x%x\n", header_pos, size, header.dwStreamNameSize, header.Size, header.dwStreamId); if (header.dwStreamNameSize < 0 || header.Size < 0) { error = true; break; } header_pos = 0; skip_size = header.dwStreamNameSize; /* skip the name of the stream */ if (header.dwStreamId == WIN32_BACKUP_DATA) { data_size = header.Size; } else { skip_size += header.Size; /* skip the all stream */ } } Dmsg4(100, "H off=%lld len=%lld skip_size=%lld data_size=%lld\n", *raw-orig, *raw_len, skip_size, data_size); } Dmsg4(100, "d off=%lld len=%lld skip_size=%lld data_size=%lld\n", *raw - orig, *raw_len, skip_size, data_size); if (data_size > 0 && skip_size == 0 && *raw_len > 0) { /* some data to read */ size = *raw_len < data_size ? 
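/*
 * For reference, the record being filtered is BackupRead() output: a
 * sequence of
 *
 *     [WIN32_STREAM_ID header, 20 bytes][stream name][stream payload] ...
 *
 * Only the payload of WIN32_BACKUP_DATA streams is the real file data;
 * security descriptors, alternate data streams and names are skipped via
 * skip_size.  Because a header may straddle two records, the partial header
 * is accumulated in 'header'/'header_pos' across calls.
 */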
*raw_len : data_size; data_size -= size; *raw_len -= size; *use_len = size; Dmsg5(100, "D off=%lld len=%lld use_len=%lld skip_size=%lld data_size=%lld\n", *raw-orig, *raw_len, *use_len, skip_size, data_size); return true; } } if (error) { *raw_len = 0; } return false; } bacula-15.0.3/src/findlib/savecwd.c0000644000175000017500000000554014771010173016664 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Kern Sibbald, August MMVII * */ #include "bacula.h" #include "savecwd.h" /* * Attempt to save the current working directory by various means so that * we can optimize code by doing a cwd and then restore the cwd. */ #ifdef HAVE_FCHDIR static bool fchdir_failed = false; /* set if we get a fchdir failure */ #else static bool fchdir_failed = true; /* set if we get a fchdir failure */ #endif /* * Save current working directory. * Returns: true if OK * false if failed */ bool saveCWD::save(JCR *jcr) { release(); /* clean up */ if (!fchdir_failed) { m_fd = open(".", O_RDONLY); if (m_fd < 0) { berrno be; Jmsg1(jcr, M_ERROR, 0, _("Cannot open current directory: ERR=%s\n"), be.bstrerror()); m_saved = false; return false; } } if (fchdir_failed) { POOLMEM *buf = get_memory(5000); m_cwd = (POOLMEM *)getcwd(buf, sizeof_pool_memory(buf)); if (m_cwd == NULL) { berrno be; Jmsg1(jcr, M_ERROR, 0, _("Cannot get current directory: ERR=%s\n"), be.bstrerror()); free_pool_memory(buf); m_saved = false; return false; } } m_saved = true; return true; } /* * Restore previous working directory. * Returns: true if OK * false if failed */ bool saveCWD::restore(JCR *jcr) { if (!m_saved) { return true; } m_saved = false; if (m_fd >= 0) { if (fchdir(m_fd) != 0) { berrno be; Jmsg1(jcr, M_ERROR, 0, _("Cannot reset current directory: ERR=%s\n"), be.bstrerror()); close(m_fd); m_fd = -1; fchdir_failed = true; chdir("/"); /* punt */ return false; } return true; } if (chdir(m_cwd) < 0) { berrno be; Jmsg1(jcr, M_ERROR, 0, _("Cannot reset current directory: ERR=%s\n"), be.bstrerror()); chdir("/"); free_pool_memory(m_cwd); m_cwd = NULL; return false; } return true; } void saveCWD::release() { if (!m_saved) { return; } m_saved = false; if (m_fd >= 0) { close(m_fd); m_fd = -1; } if (m_cwd) { free_pool_memory(m_cwd); m_cwd = NULL; } } bacula-15.0.3/src/findlib/find_one.c0000644000175000017500000007760714771010173017026 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* This file was derived from GNU TAR source code. 
Except for a few key ideas, it has been entirely rewritten for Bacula. Kern Sibbald, MM Thanks to the TAR programmers. */ #include "bacula.h" #include "find.h" #if defined(HAVE_LINUX_OS) && defined(HAVE_NODUMP) #include #include #endif #ifdef HAVE_DARWIN_OS #include #include #include #endif int breaddir(DIR *dirp, POOLMEM *&d_name); extern int32_t name_max; /* filename max length */ extern int32_t path_max; /* path name max length */ /* * Structure for keeping track of hard linked files, we * keep an entry for each hardlinked file that we save, * which is the first one found. For all the other files that * are linked to this one, we save only the directory * entry so we can link it. */ struct f_link { struct f_link *next; dev_t dev; /* device */ ino_t ino; /* inode with device is unique */ int32_t FileIndex; /* Bacula FileIndex of this file */ int32_t digest_stream; /* Digest type if needed */ uint32_t digest_len; /* Digest len if needed */ char *digest; /* Checksum of the file if needed */ char name[1]; /* The name */ }; typedef struct f_link link_t; #define LINK_HASHTABLE_BITS 16 #define LINK_HASHTABLE_SIZE (1<>= 16; hash ^= i; i >>= 16; hash ^= i; i >>= 16; hash ^= i; return hash & LINK_HASHTABLE_MASK; } /* * Create a new directory Find File packet, but copy * some of the essential info from the current packet. * However, be careful to zero out the rest of the * packet. */ static FF_PKT *new_dir_ff_pkt(FF_PKT *ff_pkt) { FF_PKT *dir_ff_pkt = (FF_PKT *)bmalloc(sizeof(FF_PKT)); memcpy((void*)dir_ff_pkt, (void*)ff_pkt, sizeof(FF_PKT)); /* Do not duplicate pointers */ dir_ff_pkt->fname = bstrdup(ff_pkt->fname); dir_ff_pkt->snap_fname = bstrdup(ff_pkt->snap_fname); dir_ff_pkt->link = bstrdup(ff_pkt->link); if (ff_pkt->fname_save) { dir_ff_pkt->fname_save = get_pool_memory(PM_FNAME); pm_strcpy(dir_ff_pkt->fname_save, ff_pkt->fname_save); } if (ff_pkt->link_save) { dir_ff_pkt->link_save = get_pool_memory(PM_FNAME); pm_strcpy(dir_ff_pkt->link_save, ff_pkt->link_save); } dir_ff_pkt->included_files_list = NULL; dir_ff_pkt->excluded_files_list = NULL; dir_ff_pkt->excluded_paths_list = NULL; dir_ff_pkt->linkhash = NULL; dir_ff_pkt->ignoredir_fname = NULL; return dir_ff_pkt; } /* * Free the temp directory ff_pkt */ static void free_dir_ff_pkt(FF_PKT *dir_ff_pkt) { free(dir_ff_pkt->fname); free(dir_ff_pkt->snap_fname); free(dir_ff_pkt->link); if (dir_ff_pkt->fname_save) { free_pool_memory(dir_ff_pkt->fname_save); } if (dir_ff_pkt->link_save) { free_pool_memory(dir_ff_pkt->link_save); } free(dir_ff_pkt); } /* * Check to see if we allow the file system type of a file or directory. * If we do not have a list of file system types, we accept anything. */ static int accept_fstype(FF_PKT *ff, void *dummy) { int i; char fs[1000]; bool accept = true; if (ff->fstypes.size()) { accept = false; if (!fstype(ff, fs, sizeof(fs))) { Dmsg1(50, "Cannot determine file system type for \"%s\"\n", ff->fname); } else { for (i = 0; i < ff->fstypes.size(); ++i) { if (strcmp(fs, (char *)ff->fstypes.get(i)) == 0) { Dmsg2(100, "Accepting fstype %s for \"%s\"\n", fs, ff->fname); accept = true; break; } Dmsg3(200, "fstype %s for \"%s\" does not match %s\n", fs, ff->fname, ff->fstypes.get(i)); } } } return accept; } /* * Check to see if we allow the drive type of a file or directory. * If we do not have a list of drive types, we accept anything. 
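 * The drive type list comes from the FileSet Options (ff->drivetypes) and
 * the test below is a plain case-sensitive strcmp() of the string returned
 * by drivetype() against each entry.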
*/ static int accept_drivetype(FF_PKT *ff, void *dummy) { int i; char dt[100]; bool accept = true; if (ff->drivetypes.size()) { accept = false; /* fname is a better choice here than snap_fname */ if (!drivetype(ff->fname, dt, sizeof(dt))) { Dmsg1(50, "Cannot determine drive type for \"%s\"\n", ff->fname); } else { for (i = 0; i < ff->drivetypes.size(); ++i) { if (strcmp(dt, (char *)ff->drivetypes.get(i)) == 0) { Dmsg2(100, "Accepting drive type %s for \"%s\"\n", dt, ff->fname); accept = true; break; } Dmsg3(200, "drive type %s for \"%s\" does not match %s\n", dt, ff->fname, ff->drivetypes.get(i)); } } } return accept; } /* * This function determines whether we can use getattrlist() * It's odd, but we have to use the function to determine that... * Also, the man pages talk about things as if they were implemented. * * On Mac OS X, this succesfully differentiates between HFS+ and UFS * volumes, which makes me trust it is OK for others, too. */ static bool volume_has_attrlist(const char *fname) { #ifdef HAVE_DARWIN_OS struct statfs st; struct volinfo_struct { unsigned long length; /* Mandatory field */ vol_capabilities_attr_t info; /* Volume capabilities */ } vol; struct attrlist attrList; memset(&attrList, 0, sizeof(attrList)); attrList.bitmapcount = ATTR_BIT_MAP_COUNT; attrList.volattr = ATTR_VOL_INFO | ATTR_VOL_CAPABILITIES; if (statfs(fname, &st) == 0) { /* We need to check on the mount point */ if (getattrlist(st.f_mntonname, &attrList, &vol, sizeof(vol), FSOPT_NOFOLLOW) == 0 && (vol.info.capabilities[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_ATTRLIST) && (vol.info.valid[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_ATTRLIST)) { return true; } } #endif return false; } /* * check for BSD nodump flag */ static bool no_dump(JCR *jcr, FF_PKT *ff_pkt) { #ifdef HAVE_NODUMP bool ret = false; /* do backup */ int attr; int fd = -1; if (ff_pkt->flags & FO_HONOR_NODUMP) { if (S_ISDIR(ff_pkt->statp.st_mode) || S_ISREG(ff_pkt->statp.st_mode)) { /* lsattr and chattr in the e2fsprogs package only works for files and dirs */ fd = open(ff_pkt->fname, O_RDONLY|O_CLOEXEC); if (fd < 0) { Dmsg2(50, "Failed to open file: %s err: %d\n", ff_pkt->fname, errno); goto bail_out; } int ioctl_ret = ioctl(fd, FS_IOC_GETFLAGS, &attr); if (ioctl_ret < 0) { if (errno == ENOTTY) { Dmsg2(50, "Failed to check for NODUMP flag for file: %s errno: %d, " "probably because of wrong filesystem used.\n", ff_pkt->fname, errno); } else { Dmsg2(50, "Failed to send Getflags IOCTL for: %s err: %d\n", ff_pkt->fname, errno); } goto bail_out; } /* Check if file has NODUMP flag set */ ret = attr & FS_NODUMP_FL; } } bail_out: if (fd > 0) { close(fd); } Dmsg2(500, "fname: %s nodump flag: %d\n", ff_pkt->fname, ret); return ret; #elif defined(HAVE_CHFLAGS) && defined(UF_NODUMP) if ( (ff_pkt->flags & FO_HONOR_NODUMP) && (ff_pkt->statp.st_flags & UF_NODUMP) ) { Jmsg(jcr, M_INFO, 1, _(" NODUMP flag set - will not process %s\n"), ff_pkt->fname); return true; /* do not backup this file */ } #endif return false; /* do backup */ } /* check if a file have changed during backup and display an error */ bool has_file_changed(JCR *jcr, FF_PKT *ff_pkt) { struct stat statp; Dmsg1(500, "has_file_changed fname=%s\n",ff_pkt->fname); if (ff_pkt->snapshot_convert_fct) { /* their is no reason to check for a file change inside a snapshot */ /* but this has already highlighted some bugs in the past */ /* if you want to optimize, uncomment the "return below" */ // return false; } if (ff_pkt->type != FT_REG) { /* not a regular file */ return false; } if 
(lstat(ff_pkt->snap_fname, &statp) != 0) { berrno be; Jmsg(jcr, M_WARNING, 0, _("Cannot stat file %s: ERR=%s\n"),ff_pkt->fname,be.bstrerror()); return true; } if (statp.st_mtime != ff_pkt->statp.st_mtime) { Jmsg(jcr, M_ERROR, 0, _("%s mtime changed during backup.\n"), ff_pkt->fname); Dmsg3(50, "%s mtime (%lld) changed during backup (%lld).\n", ff_pkt->fname, (int64_t)ff_pkt->statp.st_mtime, (int64_t)statp.st_mtime); return true; } if (statp.st_ctime != ff_pkt->statp.st_ctime) { Jmsg(jcr, M_ERROR, 0, _("%s ctime changed during backup.\n"), ff_pkt->fname); Dmsg3(50, "%s ctime (%lld) changed during backup (%lld).\n", ff_pkt->fname, (int64_t)ff_pkt->statp.st_ctime, (int64_t)statp.st_ctime); return true; } if ((int64_t)statp.st_size != (int64_t)ff_pkt->statp.st_size) { Jmsg(jcr, M_ERROR, 0, _("%s size of %lld changed during backup to %lld.\n"),ff_pkt->fname, (int64_t)ff_pkt->statp.st_size, (int64_t)statp.st_size); Dmsg3(50, "%s size (%lld) changed during backup (%lld).\n", ff_pkt->fname, (int64_t)ff_pkt->statp.st_size, (int64_t)statp.st_size); return true; } return false; } /* * For incremental/differential or accurate backups, we * determine if the current file has changed. */ bool check_changes(JCR *jcr, FF_PKT *ff_pkt) { /* in special mode (like accurate backup), the programmer can * choose his comparison function. */ if (ff_pkt->check_fct) { return ff_pkt->check_fct(jcr, ff_pkt); } /* For normal backups (incr/diff), we use this default * behaviour */ if (ff_pkt->incremental && (ff_pkt->statp.st_mtime < ff_pkt->save_time && ((ff_pkt->flags & FO_MTIMEONLY) || ff_pkt->statp.st_ctime < ff_pkt->save_time))) { return false; } return true; } static bool have_ignoredir(FF_PKT *ff_pkt) { struct stat sb; char *ignoredir; /* Ensure that pointers are defined */ if (!ff_pkt->fileset || !ff_pkt->fileset->incexe) { return false; } ignoredir = ff_pkt->fileset->incexe->ignoredir; if (ignoredir) { if (!ff_pkt->ignoredir_fname) { ff_pkt->ignoredir_fname = get_pool_memory(PM_FNAME); } Mmsg(ff_pkt->ignoredir_fname, "%s/%s", ff_pkt->fname, ignoredir); if (stat(ff_pkt->ignoredir_fname, &sb) == 0) { Dmsg2(100, "Directory '%s' ignored (found %s)\n", ff_pkt->fname, ignoredir); return true; /* Just ignore this directory */ } } return false; } /* * When the current file is a hardlink, the backup code can compute * the checksum and store it into the link_t structure. */ void ff_pkt_set_link_digest(FF_PKT *ff_pkt, int32_t digest_stream, const char *digest, uint32_t len) { if (ff_pkt->linked && !ff_pkt->linked->digest) { /* is a hardlink */ ff_pkt->linked->digest = (char *) bmalloc(len); memcpy(ff_pkt->linked->digest, digest, len); ff_pkt->linked->digest_len = len; ff_pkt->linked->digest_stream = digest_stream; } } /* * Find a single file. * handle_file is the callback for handling the file. * p is the filename * parent_device is the device we are currently on * top_level is 1 when not recursing or 0 when * descending into a directory. 
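 *
 * For reference, the top level caller in find.c invokes this as
 *   find_one_file(jcr, ff, our_callback, fname.c_str(), ff->top_fname, (dev_t)-1, true)
 * and the directory branch below recurses with
 *   find_one_file(jcr, ff_pkt, handle_file, link, snap_link, our_device, false)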
*/ int find_one_file(JCR *jcr, FF_PKT *ff_pkt, int handle_file(JCR *jcr, FF_PKT *ff, bool top_level), char *fname, char *snap_fname, dev_t parent_device, bool top_level) { struct utimbuf restore_times; int rtn_stat; int len; ff_pkt->fname = ff_pkt->link = fname; ff_pkt->snap_fname = snap_fname; if (lstat(snap_fname, &ff_pkt->statp) != 0) { /* Cannot stat file */ ff_pkt->type = FT_NOSTAT; ff_pkt->ff_errno = errno; return handle_file(jcr, ff_pkt, top_level); } Dmsg2(300, "File ----: %s snap=%s\n", fname, snap_fname); /* Save current times of this directory in case we need to * reset them because the user doesn't want them changed. */ restore_times.actime = ff_pkt->statp.st_atime; restore_times.modtime = ff_pkt->statp.st_mtime; /* * We check for allowed fstypes and drivetypes at top_level and fstype change (below). */ if (top_level) { if (!accept_fstype(ff_pkt, NULL)) { ff_pkt->type = FT_INVALIDFS; if (ff_pkt->flags & FO_KEEPATIME && !ff_pkt->snapshot_convert_fct) { utime(snap_fname, &restore_times); /* snap_fname == fname */ } char fs[100]; if (!fstype(ff_pkt, fs, sizeof(fs))) { bstrncpy(fs, "unknown", sizeof(fs)); } Jmsg(jcr, M_INFO, 0, _("Top level directory \"%s\" has unlisted fstype \"%s\"\n"), fname, fs); return 1; /* Just ignore this error - or the whole backup is cancelled */ } if (!accept_drivetype(ff_pkt, NULL)) { ff_pkt->type = FT_INVALIDDT; if (ff_pkt->flags & FO_KEEPATIME && !ff_pkt->snapshot_convert_fct) { utime(snap_fname, &restore_times); /* snap_fname == fname */ } char dt[100]; if (!drivetype(ff_pkt->fname, dt, sizeof(dt))) { bstrncpy(dt, "unknown", sizeof(dt)); } Jmsg(jcr, M_INFO, 0, _("Top level directory \"%s\" has an unlisted drive type \"%s\"\n"), fname, dt); return 1; /* Just ignore this error - or the whole backup is cancelled */ } ff_pkt->volhas_attrlist = volume_has_attrlist(snap_fname); } /* * Ignore this entry if no_dump() returns true */ if (no_dump(jcr, ff_pkt)) { Dmsg1(100, "'%s' ignored (NODUMP flag set)\n", ff_pkt->fname); return 1; } /* * If this is an Incremental backup, see if file was modified * since our last "save_time", presumably the last Full save * or Incremental. */ if ( !S_ISDIR(ff_pkt->statp.st_mode) && !check_changes(jcr, ff_pkt)) { Dmsg1(500, "Non-directory incremental: %s\n", ff_pkt->fname); ff_pkt->type = FT_NOCHG; return handle_file(jcr, ff_pkt, top_level); } #ifdef HAVE_DARWIN_OS if (ff_pkt->flags & FO_HFSPLUS && ff_pkt->volhas_attrlist && S_ISREG(ff_pkt->statp.st_mode)) { /* TODO: initialise attrList once elsewhere? */ struct attrlist attrList; memset(&attrList, 0, sizeof(attrList)); attrList.bitmapcount = ATTR_BIT_MAP_COUNT; attrList.commonattr = ATTR_CMN_FNDRINFO; attrList.fileattr = ATTR_FILE_RSRCLENGTH; if (getattrlist(snap_fname, &attrList, &ff_pkt->hfsinfo, sizeof(ff_pkt->hfsinfo), FSOPT_NOFOLLOW) != 0) { ff_pkt->type = FT_NOSTAT; ff_pkt->ff_errno = errno; return handle_file(jcr, ff_pkt, top_level); } return -1; /* ignore */ } #endif ff_pkt->LinkFI = 0; /* * Handle hard linked files * * Maintain a list of hard linked files already backed up. This * allows us to ensure that the data of each file gets backed * up only once. 
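 *
 * The lookup key is the (st_dev, st_ino) pair, hashed into ff_pkt->linkhash
 * by LINKHASH().  The first file found for a given pair is saved normally
 * and remembered in a link_t entry; later files with the same key are sent
 * as FT_LNKSAVED records that only reference the stored FileIndex (and the
 * stored digest, when one was computed).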
*/ if (!(ff_pkt->flags & FO_NO_HARDLINK) && ff_pkt->statp.st_nlink > 1 && (S_ISREG(ff_pkt->statp.st_mode) || S_ISCHR(ff_pkt->statp.st_mode) || S_ISBLK(ff_pkt->statp.st_mode) || S_ISFIFO(ff_pkt->statp.st_mode) || S_ISSOCK(ff_pkt->statp.st_mode))) { struct f_link *lp; if (ff_pkt->linkhash == NULL) { ff_pkt->linkhash = (link_t **)bmalloc(LINK_HASHTABLE_SIZE * sizeof(link_t *)); memset(ff_pkt->linkhash, 0, LINK_HASHTABLE_SIZE * sizeof(link_t *)); } const int linkhash = LINKHASH(ff_pkt->statp); /* Search link list of hard linked files */ for (lp = ff_pkt->linkhash[linkhash]; lp; lp = lp->next) if (lp->ino == (ino_t)ff_pkt->statp.st_ino && lp->dev == (dev_t)ff_pkt->statp.st_dev) { /* If we have already backed up the hard linked file don't do it again */ if (strcmp(lp->name, fname) == 0) { Dmsg2(400, "== Name identical skip FI=%d file=%s\n", lp->FileIndex, fname); return 1; /* ignore */ } ff_pkt->link = lp->name; ff_pkt->type = FT_LNKSAVED; /* Handle link, file already saved */ ff_pkt->LinkFI = lp->FileIndex; ff_pkt->linked = 0; ff_pkt->digest = lp->digest; ff_pkt->digest_stream = lp->digest_stream; ff_pkt->digest_len = lp->digest_len; rtn_stat = handle_file(jcr, ff_pkt, top_level); Dmsg3(400, "FT_LNKSAVED FI=%d LinkFI=%d file=%s\n", ff_pkt->FileIndex, lp->FileIndex, lp->name); return rtn_stat; } /* File not previously dumped. Chain it into our list. */ len = strlen(fname) + 1; lp = (struct f_link *)bmalloc(sizeof(struct f_link) + len); lp->digest = NULL; /* set later */ lp->digest_stream = 0; /* set later */ lp->digest_len = 0; /* set later */ lp->ino = ff_pkt->statp.st_ino; lp->dev = ff_pkt->statp.st_dev; lp->FileIndex = 0; /* set later */ bstrncpy(lp->name, fname, len); lp->next = ff_pkt->linkhash[linkhash]; ff_pkt->linkhash[linkhash] = lp; ff_pkt->linked = lp; /* mark saved link */ Dmsg2(400, "added to hash FI=%d file=%s\n", ff_pkt->FileIndex, lp->name); } else { ff_pkt->linked = NULL; } /* This is not a link to a previously dumped file, so dump it. */ if (S_ISREG(ff_pkt->statp.st_mode)) { boffset_t sizeleft; sizeleft = ff_pkt->statp.st_size; /* Don't bother opening empty, world readable files. Also do not open files when archive is meant for /dev/null. */ if (ff_pkt->null_output_device || (sizeleft == 0 && MODE_RALL == (MODE_RALL & ff_pkt->statp.st_mode))) { ff_pkt->type = FT_REGE; if (ff_pkt->stat_update) { /* No need do the metadata update for empty files, perform usual backup */ ff_pkt->stat_update = 0; } } else { ff_pkt->type = FT_REG; } rtn_stat = handle_file(jcr, ff_pkt, top_level); if (ff_pkt->linked) { ff_pkt->linked->FileIndex = ff_pkt->FileIndex; } Dmsg3(400, "FT_REG FI=%d linked=%d file=%s\n", ff_pkt->FileIndex, ff_pkt->linked ? 
1 : 0, fname); if (ff_pkt->flags & FO_KEEPATIME && !ff_pkt->snapshot_convert_fct) { utime(snap_fname, &restore_times); /* snap_fname == fname */ } return rtn_stat; } else if (S_ISLNK(ff_pkt->statp.st_mode)) { /* soft link */ int size; char *buffer = (char *)alloca(path_max + name_max + 102); size = readlink(snap_fname, buffer, path_max + name_max + 101); if (size < 0) { /* Could not follow link */ ff_pkt->type = FT_NOFOLLOW; ff_pkt->ff_errno = errno; rtn_stat = handle_file(jcr, ff_pkt, top_level); if (ff_pkt->linked) { ff_pkt->linked->FileIndex = ff_pkt->FileIndex; } return rtn_stat; } buffer[size] = 0; ff_pkt->link = buffer; /* point to link */ ff_pkt->type = FT_LNK; /* got a real link */ rtn_stat = handle_file(jcr, ff_pkt, top_level); if (ff_pkt->linked) { ff_pkt->linked->FileIndex = ff_pkt->FileIndex; } return rtn_stat; } else if (S_ISDIR(ff_pkt->statp.st_mode)) { DIR *directory; POOL_MEM dname(PM_FNAME); char *link; int link_len; int len; int status; dev_t our_device = ff_pkt->statp.st_dev; bool recurse = true; bool volhas_attrlist = ff_pkt->volhas_attrlist; /* Remember this if we recurse */ /* * Ignore this directory and everything below if the file .nobackup * (or what is defined for IgnoreDir in this fileset) exists */ if (have_ignoredir(ff_pkt)) { Jmsg(jcr, M_SKIPPED, 0, _("Skipping ignored directory %s " "(found file with ExcludeDirContaining pattern)\n"), ff_pkt->fname); return 1; /* Just ignore this directory */ } /* Build a canonical directory name with a trailing slash in link var */ len = strlen(fname); link_len = len + 200; link = (char *)bmalloc(link_len + 2); bstrncpy(link, fname, link_len); /* Strip all trailing slashes */ while (len >= 1 && IsPathSeparator(link[len - 1])) len--; link[len++] = '/'; /* add back one */ link[len] = 0; ff_pkt->link = link; if (!check_changes(jcr, ff_pkt)) { /* Incremental/Full+Base option, directory entry not changed */ ff_pkt->type = FT_DIRNOCHG; } else { ff_pkt->type = FT_DIRBEGIN; } /* * We have set st_rdev to 1 if it is a reparse point, otherwise 0, * if st_rdev is 2, it is a mount point */ #if defined(HAVE_WIN32) /* * A reparse point (WIN32_REPARSE_POINT) * is something special like one of the following: * IO_REPARSE_TAG_DFS 0x8000000A * IO_REPARSE_TAG_DFSR 0x80000012 * IO_REPARSE_TAG_HSM 0xC0000004 * IO_REPARSE_TAG_HSM2 0x80000006 * IO_REPARSE_TAG_SIS 0x80000007 * IO_REPARSE_TAG_SYMLINK 0xA000000C * * A junction point is a: * IO_REPARSE_TAG_MOUNT_POINT 0xA0000003 * which can be either a link to a Volume (WIN32_MOUNT_POINT) * or a link to a directory (WIN32_JUNCTION_POINT) * * Ignore WIN32_REPARSE_POINT and WIN32_JUNCTION_POINT */ if (ff_pkt->statp.st_rdev == WIN32_REPARSE_POINT) { ff_pkt->type = FT_REPARSE; } else if (ff_pkt->statp.st_rdev == WIN32_JUNCTION_POINT || (ff_pkt->statp.st_rdev == WIN32_SYMLINK_POINT && S_ISDIR(ff_pkt->statp.st_mode))) { // Handle SYMLINK DIRECTORY like a Junction, BackupRead() does all the work ff_pkt->type = FT_JUNCTION; } #endif /* * Note, we return the directory to the calling program (handle_file) * when we first see the directory (FT_DIRBEGIN. * This allows the program to apply matches and make a * choice whether or not to accept it. If it is accepted, we * do not immediately save it, but do so only after everything * in the directory is seen (i.e. the FT_DIREND). 
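 *
 * In other words, the sequence for an accepted directory is:
 *   handle_file(FT_DIRBEGIN)              may reject or ignore it
 *   recurse into each entry               find_one_file(..., false)
 *   handle_file(dir_ff_pkt, FT_DIREND)    finally saves the directory itself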
*/ rtn_stat = handle_file(jcr, ff_pkt, top_level); if (rtn_stat < 1 || ff_pkt->type == FT_REPARSE || ff_pkt->type == FT_JUNCTION) { /* ignore or error status */ free(link); return rtn_stat; } /* Done with DIRBEGIN, next call will be DIREND */ if (ff_pkt->type == FT_DIRBEGIN) { ff_pkt->type = FT_DIREND; } /* * Create a temporary ff packet for this directory * entry, and defer handling the directory until * we have recursed into it. This saves the * directory after all files have been processed, and * during the restore, the directory permissions will * be reset after all the files have been restored. */ Dmsg1(300, "Create temp ff packet for dir: %s\n", ff_pkt->fname); FF_PKT *dir_ff_pkt = new_dir_ff_pkt(ff_pkt); #if defined(HAVE_WIN32) if (dir_ff_pkt->root_of_volume || (ff_pkt->fname[3]=='\0' && ff_pkt->fname[1]==':')) { /* tell "restore" that this is a root directory and that it should not * restore the Windows hidden and system attribute */ dir_ff_pkt->statp.st_rdev = WIN32_ROOT_POINT; // don't "propagate" the "root_of_volume" property to sub-directories ff_pkt->root_of_volume = false; } #endif /* * Do not descend into subdirectories (recurse) if the * user has turned it off for this directory. * * If we are crossing file systems, we are either not allowed * to cross, or we may be restricted by a list of permitted * file systems. */ bool is_win32_mount_point = false; #if defined(HAVE_WIN32) is_win32_mount_point = ff_pkt->statp.st_rdev == WIN32_MOUNT_POINT; #endif if (!top_level && ff_pkt->flags & FO_NO_RECURSION) { ff_pkt->type = FT_NORECURSE; recurse = false; } else if (!top_level && (parent_device != ff_pkt->statp.st_dev || is_win32_mount_point)) { /* Nested mountpoint in a Windows snapshot works like the original * mountpoint and redirect to the Live filesystem (not a snapshot) * * In Linux the directory is not a mountpoint anymore and we backup * the content of the directory that is normally empty. */ if (is_win32_mount_point && ff_pkt->flags & FO_MULTIFS) { return 1; /* Ignore this dir, it will backed up later */ } else if (is_win32_mount_point) { recurse = false; /* We backup the mount point itself, but nothing more */ } else if(!(ff_pkt->flags & FO_MULTIFS)) { ff_pkt->type = FT_NOFSCHG; recurse = false; } else if (!accept_fstype(ff_pkt, NULL)) { ff_pkt->type = FT_INVALIDFS; recurse = false; } else { ff_pkt->volhas_attrlist = volume_has_attrlist(snap_fname); } } /* If not recursing, just backup dir and return */ if (!recurse) { rtn_stat = handle_file(jcr, ff_pkt, top_level); if (ff_pkt->linked) { ff_pkt->linked->FileIndex = ff_pkt->FileIndex; } free(link); free_dir_ff_pkt(dir_ff_pkt); ff_pkt->link = ff_pkt->fname; /* reset "link" */ if (ff_pkt->flags & FO_KEEPATIME && !ff_pkt->snapshot_convert_fct) { utime(snap_fname, &restore_times); /* snap_fname==fname */ } return rtn_stat; } ff_pkt->link = ff_pkt->fname; /* reset "link" */ /* * Descend into or "recurse" into the directory to read * all the files in it. 
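 *
 * Note that the directory is opened through the snapshot path
 * (snap_fname / snap_link below) while the names passed to handle_file()
 * and to the exclusion checks use the "live" path built in link.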
*/ errno = 0; if ((directory = opendir(snap_fname)) == NULL) { ff_pkt->type = FT_NOOPEN; ff_pkt->ff_errno = errno; rtn_stat = handle_file(jcr, ff_pkt, top_level); if (ff_pkt->linked) { ff_pkt->linked->FileIndex = ff_pkt->FileIndex; } free(link); free_dir_ff_pkt(dir_ff_pkt); return rtn_stat; } /* Build a canonical directory name of the file inside the snapshot, * with a trailing slash in link var */ int slen = strlen(snap_fname); int snap_len = slen + 200; char *snap_link = (char *)bmalloc(snap_len + 2); bstrncpy(snap_link, snap_fname, snap_len); /* Strip all trailing slashes */ while (slen >= 1 && IsPathSeparator(snap_link[slen - 1])) slen--; snap_link[slen++] = '/'; /* add back one */ snap_link[slen] = 0; /* * Process all files in this directory entry (recursing). * This would possibly run faster if we chdir to the directory * before traversing it. */ rtn_stat = 1; while (!job_canceled(jcr)) { char *p, *q, *s; int l; int i; if (jcr->estimate_limit > 0 && jcr->JobFiles >= jcr->estimate_limit) { Dmsg0(10, "We have reached the number of items to display in estimate\n"); break; } status = breaddir(directory, dname.addr()); if (status != 0) { /* error or end of directory */ // Dmsg1(99, "breaddir returned stat=%d\n", status); break; } p = dname.c_str(); /* Skip `.', `..', and excluded file names. */ if (p[0] == '\0' || (p[0] == '.' && (p[1] == '\0' || (p[1] == '.' && p[2] == '\0')))) { continue; } l = strlen(dname.c_str()); if (l + len >= link_len) { link_len = len + l + 1; link = (char *)brealloc(link, link_len + 1); } if (l + slen >= snap_len) { snap_len = slen + l + 1; snap_link = (char *)brealloc(snap_link, snap_len + 1); } q = link + len; s = snap_link + slen; for (i=0; i < l; i++) { *q++ = *s++ =*p++; } *q = *s = 0; if (!file_is_excluded(ff_pkt, link)) { rtn_stat = find_one_file(jcr, ff_pkt, handle_file, link, snap_link, our_device, false); /* TODO: and error here is ignored */ if (ff_pkt->linked) { ff_pkt->linked->FileIndex = ff_pkt->FileIndex; } } } closedir(directory); free(link); free(snap_link); /* * Now that we have recursed through all the files in the * directory, we "save" the directory so that after all * the files are restored, this entry will serve to reset * the directory modes and dates. Temp directory values * were used without this record. */ handle_file(jcr, dir_ff_pkt, top_level); /* handle directory entry */ if (ff_pkt->linked) { ff_pkt->linked->FileIndex = dir_ff_pkt->FileIndex; } free_dir_ff_pkt(dir_ff_pkt); if (ff_pkt->flags & FO_KEEPATIME && !ff_pkt->snapshot_convert_fct) { utime(snap_fname, &restore_times); } ff_pkt->volhas_attrlist = volhas_attrlist; /* Restore value in case it changed. */ return rtn_stat; } /* end check for directory */ /* * If it is explicitly mentioned (i.e. top_level) and is * a block device, we do a raw backup of it or if it is * a fifo, we simply read it. */ #ifdef HAVE_FREEBSD_OS /* * On FreeBSD, all block devices are character devices, so * to be able to read a raw disk, we need the check for * a character device. * crw-r----- 1 root operator - 116, 0x00040002 Jun 9 19:32 /dev/ad0s3 * crw-r----- 1 root operator - 116, 0x00040002 Jun 9 19:32 /dev/rad0s3 */ if (top_level && (S_ISBLK(ff_pkt->statp.st_mode) || S_ISCHR(ff_pkt->statp.st_mode))) { #else if (top_level && S_ISBLK(ff_pkt->statp.st_mode)) { #endif ff_pkt->type = FT_RAW; /* raw partition */ } else if (top_level && S_ISFIFO(ff_pkt->statp.st_mode) && ff_pkt->flags & FO_READFIFO) { ff_pkt->type = FT_FIFO; } else { /* The only remaining types are special (character, ...) 
files */ ff_pkt->type = FT_SPEC; } rtn_stat = handle_file(jcr, ff_pkt, top_level); if (ff_pkt->linked) { ff_pkt->linked->FileIndex = ff_pkt->FileIndex; } return rtn_stat; } int term_find_one(FF_PKT *ff) { struct f_link *lp, *lc; int count = 0; int i; if (ff->linkhash == NULL) return 0; for (i =0 ; i < LINK_HASHTABLE_SIZE; i ++) { /* Free up list of hard linked files */ lp = ff->linkhash[i]; while (lp) { lc = lp; lp = lp->next; if (lc) { if (lc->digest) { free(lc->digest); } free(lc); count++; } } ff->linkhash[i] = NULL; } free(ff->linkhash); ff->linkhash = NULL; return count; } bacula-15.0.3/src/findlib/attribs.c0000644000175000017500000007110414771010173016677 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Encode and decode standard Unix attributes and * Extended attributes for Win32 and * other non-Unix systems, or Unix systems with ACLs, ... * * Kern Sibbald, October MMII * */ //#define _POSIX_C_SOURCE 200809L //#define _BSD_SOURCE 1 #include "bacula.h" #include "find.h" #include "ch.h" static uid_t my_uid = 1; static gid_t my_gid = 1; static bool uid_set = false; #ifdef HAVE_WIN32 /* Forward referenced Windows subroutines */ static bool set_win32_attributes(JCR *jcr, ATTR *attr, BFILE *ofd); void win_error(JCR *jcr, int type, const char *prefix, const char *ofile); void win_error(JCR *jcr, const char *prefix, const char *ofile); HANDLE bget_handle(BFILE *bfd); #endif /* HAVE_WIN32 */ /* * For old systems that don't have lchown() or lchmod() */ #ifndef HAVE_LCHOWN #define lchown chown #endif #ifndef HAVE_LCHMOD #define lchmod chmod #endif /*=============================================================*/ /* */ /* *** A l l S y s t e m s *** */ /* */ /*=============================================================*/ /* * To turn off use of fchown(), fchmod(), or futimes(), * uncomment one or more of the following. */ //#undef HAVE_FCHOWN //#undef HAVE_FCHMOD //#undef HAVE_FUTIMES /* * Print errors only if debug level defined or we are root. * root should not get errors. Errors for users causes * too much output. */ #define print_error(jcr) (chk_dbglvl(100) || (my_uid == 0 && (!jcr || jcr->job_uid == 0))) /* * Restore the owner and permissions (mode) of a Directory. * See attribs.c for the equivalent for files. 
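 * Failures are reported as job warnings only (and EPERM is ignored when
 * built with AFS support); the restore continues in either case.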
*/ void set_own_mod(ATTR *attr, char *path, uid_t owner, gid_t group, mode_t mode) { if (lchown(path, owner, group) != 0 && print_error(attr->jcr) #ifdef AFS && errno != EPERM #endif ) { berrno be; Jmsg4(attr->jcr, M_WARNING, 0, _("Cannot change owner and/or group of %s: ERR=%s %d %d\n"), path, be.bstrerror(), getuid(), attr->jcr->job_uid); } if (lchmod(path, mode) != 0 && print_error(attr->jcr)) { berrno be; Jmsg2(attr->jcr, M_WARNING, 0, _("Cannot change permissions of %s: ERR=%s\n"), path, be.bstrerror()); } } bool set_mod_own_time(JCR *jcr, BFILE *ofd, ATTR *attr) { bool ok = true; ASSERTD(attr->type != FT_LNK, "function set_mod_own_time() not designed to handle SYMLINK"); /* Do not try to set rights with f functions when using a plugin */ if (is_bopen(ofd) && !ofd->cmd_plugin) { /* TODO: Look with opt_plugin */ /* * The #ifdefing is a bit ugly, but it is the only * way we can ensure this works on older systems. */ #ifdef HAVE_FCHOWN if (fchown(ofd->fid, attr->statp.st_uid, attr->statp.st_gid) < 0 && print_error(jcr)) { #else if (lchown(attr->ofname, attr->statp.st_uid, attr->statp.st_gid) < 0 && print_error(jcr)) { #endif berrno be; Jmsg2(jcr, M_ERROR, 0, _("Unable to set file owner %s: ERR=%s\n"), attr->ofname, be.bstrerror()); ok = false; } #ifdef HAVE_FCHMOD if (fchmod(ofd->fid, attr->statp.st_mode) < 0 && print_error(jcr)) { #else if (lchmod(attr->ofname, attr->statp.st_mode) < 0 && print_error(jcr)) { #endif berrno be; Jmsg2(jcr, M_ERROR, 0, _("Unable to set file modes %s: ERR=%s\n"), attr->ofname, be.bstrerror()); ok = false; } if (set_own_time(ofd->fid, attr->ofname, attr->statp.st_atime, attr->statp.st_mtime) < 0) { berrno be; Jmsg2(jcr, M_ERROR, 0, _("Unable to set file times %s: ERR=%s\n"), attr->ofname, be.bstrerror()); ok = false; } } else { // Not open if (lchown(attr->ofname, attr->statp.st_uid, attr->statp.st_gid) < 0 && print_error(jcr)) { berrno be; Jmsg2(jcr, M_ERROR, 0, _("Unable to set file owner %s: ERR=%s\n"), attr->ofname, be.bstrerror()); ok = false; } if (lchmod(attr->ofname, attr->statp.st_mode) < 0 && print_error(jcr)) { berrno be; Jmsg2(jcr, M_ERROR, 0, _("Unable to set file modes %s: ERR=%s\n"), attr->ofname, be.bstrerror()); ok = false; } /* * Reset file times. 
*/ if (set_own_time(-1, attr->ofname, attr->statp.st_atime, attr->statp.st_mtime) < 0 && print_error(jcr)) { berrno be; Jmsg2(jcr, M_ERROR, 0, _("Unable to set file times %s: ERR=%s\n"), attr->ofname, be.bstrerror()); ok = false; } } return ok; } /* * Return the data stream that will be used */ int select_data_stream(FF_PKT *ff_pkt) { int stream; /* This is a plugin special restore object */ if (ff_pkt->type == FT_RESTORE_FIRST) { ff_pkt->flags = 0; stream = STREAM_FILE_DATA; goto get_out; } /* * Fix all incompatible options */ /* No sparse option for encrypted data */ if (ff_pkt->flags & FO_ENCRYPT) { ff_pkt->flags &= ~FO_SPARSE; } /* Note, no sparse option for win32_data */ if (!is_portable_backup(&ff_pkt->bfd)) { stream = STREAM_WIN32_DATA; ff_pkt->flags &= ~FO_SPARSE; } else if (ff_pkt->flags & FO_SPARSE) { stream = STREAM_SPARSE_DATA; } else { stream = STREAM_FILE_DATA; } /* Encryption is only supported for file data */ if (stream != STREAM_FILE_DATA && stream != STREAM_WIN32_DATA && stream != STREAM_MACOS_FORK_DATA) { ff_pkt->flags &= ~FO_ENCRYPT; } /* Compression is not supported for Mac fork data */ if (stream == STREAM_MACOS_FORK_DATA) { ff_pkt->flags &= ~FO_COMPRESS; } /* * Handle compression and encryption options */ #if defined(HAVE_LIBZ) || defined(HAVE_LZO) || defined(HAVE_ZSTD) if (ff_pkt->flags & FO_COMPRESS) { #ifdef HAVE_LIBZ if(ff_pkt->Compress_algo == COMPRESS_GZIP) { switch (stream) { case STREAM_WIN32_DATA: stream = STREAM_WIN32_GZIP_DATA; break; case STREAM_SPARSE_DATA: stream = STREAM_SPARSE_GZIP_DATA; break; case STREAM_FILE_DATA: stream = STREAM_GZIP_DATA; break; default: /* * All stream types that do not support compression should clear out * FO_COMPRESS above, and this code block should be unreachable. */ ASSERT(!(ff_pkt->flags & FO_COMPRESS)); stream = STREAM_NONE; goto get_out; } } #endif #if defined(HAVE_LZO) || defined(HAVE_ZSTD) if(ff_pkt->Compress_algo == COMPRESS_LZO1X || ff_pkt->Compress_algo == COMPRESS_ZSTD) { switch (stream) { case STREAM_WIN32_DATA: stream = STREAM_WIN32_COMPRESSED_DATA; break; case STREAM_SPARSE_DATA: stream = STREAM_SPARSE_COMPRESSED_DATA; break; case STREAM_FILE_DATA: stream = STREAM_COMPRESSED_DATA; break; default: /* * All stream types that do not support compression should clear out * FO_COMPRESS above, and this code block should be unreachable. */ ASSERT(!(ff_pkt->flags & FO_COMPRESS)); stream = STREAM_NONE; goto get_out; } } #endif } #endif #ifdef HAVE_CRYPTO if (ff_pkt->flags & FO_ENCRYPT) { switch (stream) { case STREAM_WIN32_DATA: stream = STREAM_ENCRYPTED_WIN32_DATA; break; case STREAM_WIN32_GZIP_DATA: stream = STREAM_ENCRYPTED_WIN32_GZIP_DATA; break; case STREAM_WIN32_COMPRESSED_DATA: stream = STREAM_ENCRYPTED_WIN32_COMPRESSED_DATA; break; case STREAM_FILE_DATA: stream = STREAM_ENCRYPTED_FILE_DATA; break; case STREAM_GZIP_DATA: stream = STREAM_ENCRYPTED_FILE_GZIP_DATA; break; case STREAM_COMPRESSED_DATA: stream = STREAM_ENCRYPTED_FILE_COMPRESSED_DATA; break; default: /* All stream types that do not support encryption should clear out * FO_ENCRYPT above, and this code block should be unreachable. */ ASSERT(!(ff_pkt->flags & FO_ENCRYPT)); stream = STREAM_NONE; goto get_out; } } #endif get_out: return stream; } /* * Encode a stat structure into a base64 character string * All systems must create such a structure. * In addition, we tack on the LinkFI, which is non-zero in * the case of a hard linked file that has no data. 
This * is a File Index pointing to the link that does have the * data (always the first one encountered in a save). * You may piggyback attributes on this packet by encoding * them in the encode_attribsEx() subroutine, but this is * not recommended. */ void encode_stat(char *buf, struct stat *statp, int stat_size, int32_t LinkFI, int data_stream) { char *p = buf; /* * We read the stat packet so make sure the caller's conception * is the same as ours. They can be different if LARGEFILE is not * the same when compiling this library and the calling program. */ ASSERT(stat_size == (int)sizeof(struct stat)); /* * Encode a stat packet. I should have done this more intelligently * with a length so that it could be easily expanded. */ p += to_base64((int64_t)statp->st_dev, p); *p++ = ' '; /* separate fields with a space */ p += to_base64((int64_t)statp->st_ino, p); *p++ = ' '; p += to_base64((int64_t)statp->st_mode, p); *p++ = ' '; p += to_base64((int64_t)statp->st_nlink, p); *p++ = ' '; p += to_base64((int64_t)statp->st_uid, p); *p++ = ' '; p += to_base64((int64_t)statp->st_gid, p); *p++ = ' '; p += to_base64((int64_t)statp->st_rdev, p); *p++ = ' '; p += to_base64((int64_t)statp->st_size, p); *p++ = ' '; #ifndef HAVE_MINGW p += to_base64((int64_t)statp->st_blksize, p); *p++ = ' '; p += to_base64((int64_t)statp->st_blocks, p); *p++ = ' '; #else p += to_base64((int64_t)0, p); /* output place holder */ *p++ = ' '; p += to_base64((int64_t)0, p); /* output place holder */ *p++ = ' '; #endif p += to_base64((int64_t)statp->st_atime, p); *p++ = ' '; p += to_base64((int64_t)statp->st_mtime, p); *p++ = ' '; p += to_base64((int64_t)statp->st_ctime, p); *p++ = ' '; p += to_base64((int64_t)LinkFI, p); *p++ = ' '; #ifdef HAVE_CHFLAGS /* FreeBSD function */ p += to_base64((int64_t)statp->st_flags, p); /* output st_flags */ #else p += to_base64((int64_t)0, p); /* output place holder */ #endif *p++ = ' '; p += to_base64((int64_t)data_stream, p); #ifdef HAVE_MINGW *p++ = ' '; p += to_base64((int64_t)statp->st_fattrs, p); #endif *p = 0; return; } /* Do casting according to unknown type to keep compiler happy */ #ifdef HAVE_TYPEOF #define plug(st, val) st = (typeof st)val #else #if !HAVE_GCC & HAVE_SUN_OS /* Sun compiler does not handle templates correctly */ #define plug(st, val) st = val #elif __sgi #define plug(st, val) st = val #else /* Use templates to do the casting */ template void plug(T &st, uint64_t val) { st = static_cast(val); } #endif #endif /* * Decode a stat packet from base64 characters * returns: data_stream */ int decode_stat(char *buf, struct stat *statp, int stat_size, int32_t *LinkFI) { char *p = buf; int64_t val; int data_stream; /* * We store into the stat packet so make sure the caller's conception * is the same as ours. They can be different if LARGEFILE is not * the same when compiling this library and the calling program. 
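 *
 * The fields are decoded in the order encode_stat() wrote them:
 * dev ino mode nlink uid gid rdev size blksize blocks atime mtime ctime,
 * followed by the optional LinkFI, st_flags, data_stream and (on Windows)
 * st_fattrs fields.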
*/ ASSERT(stat_size == (int)sizeof(struct stat)); p += from_base64(&val, p); plug(statp->st_dev, val); p++; p += from_base64(&val, p); plug(statp->st_ino, val); p++; p += from_base64(&val, p); plug(statp->st_mode, val); p++; p += from_base64(&val, p); plug(statp->st_nlink, val); p++; p += from_base64(&val, p); plug(statp->st_uid, val); p++; p += from_base64(&val, p); plug(statp->st_gid, val); p++; p += from_base64(&val, p); plug(statp->st_rdev, val); p++; p += from_base64(&val, p); plug(statp->st_size, val); p++; #ifndef HAVE_MINGW p += from_base64(&val, p); plug(statp->st_blksize, val); p++; p += from_base64(&val, p); plug(statp->st_blocks, val); p++; #else p += from_base64(&val, p); // plug(statp->st_blksize, val); p++; p += from_base64(&val, p); // plug(statp->st_blocks, val); p++; #endif p += from_base64(&val, p); plug(statp->st_atime, val); p++; p += from_base64(&val, p); plug(statp->st_mtime, val); p++; p += from_base64(&val, p); plug(statp->st_ctime, val); /* Optional FileIndex of hard linked file data */ if (*p == ' ' || (*p != 0 && *(p+1) == ' ')) { p++; p += from_base64(&val, p); *LinkFI = (uint32_t)val; } else { *LinkFI = 0; return 0; } /* FreeBSD user flags */ if (*p == ' ' || (*p != 0 && *(p+1) == ' ')) { p++; p += from_base64(&val, p); #ifdef HAVE_CHFLAGS plug(statp->st_flags, val); } else { statp->st_flags = 0; #endif } /* Look for data stream id */ if (*p == ' ' || (*p != 0 && *(p+1) == ' ')) { p++; p += from_base64(&val, p); } else { val = 0; } data_stream = val; #ifdef HAVE_MINGW if (*p == ' ' || (*p != 0 && *(p+1) == ' ')) { p++; p += from_base64(&val, p); plug(statp->st_fattrs, val); } else { statp->st_fattrs = 0; val = 0; } #endif return data_stream; } /* Decode a LinkFI field of encoded stat packet */ int32_t decode_LinkFI(char *buf, struct stat *statp, int stat_size) { char *p = buf; int64_t val; /* * We store into the stat packet so make sure the caller's conception * is the same as ours. They can be different if LARGEFILE is not * the same when compiling this library and the calling program. 
*/ ASSERT(stat_size == (int)sizeof(struct stat)); skip_nonspaces(&p); /* st_dev */ p++; /* skip space */ skip_nonspaces(&p); /* st_ino */ p++; p += from_base64(&val, p); plug(statp->st_mode, val); /* st_mode */ p++; skip_nonspaces(&p); /* st_nlink */ p++; skip_nonspaces(&p); /* st_uid */ p++; skip_nonspaces(&p); /* st_gid */ p++; skip_nonspaces(&p); /* st_rdev */ p++; skip_nonspaces(&p); /* st_size */ p++; skip_nonspaces(&p); /* st_blksize */ p++; skip_nonspaces(&p); /* st_blocks */ p++; skip_nonspaces(&p); /* st_atime */ p++; skip_nonspaces(&p); /* st_mtime */ p++; skip_nonspaces(&p); /* st_ctime */ /* Optional FileIndex of hard linked file data */ if (*p == ' ' || (*p != 0 && *(p+1) == ' ')) { p++; p += from_base64(&val, p); return (int32_t)val; } return 0; } /* * Set file modes, permissions and times * * fname is the original filename * ofile is the output filename (may be in a different directory) * * Returns: true on success * false on failure */ bool set_attributes(JCR *jcr, ATTR *attr, BFILE *ofd) { mode_t old_mask; bool ok = true; boffset_t fsize; if (!uid_set) { my_uid = getuid(); my_gid = getgid(); uid_set = true; } #ifdef HAVE_WIN32 if (attr->stream == STREAM_UNIX_ATTRIBUTES_EX && set_win32_attributes(jcr, attr, ofd)) { if (is_bopen(ofd)) { bclose(ofd); } pm_strcpy(attr->ofname, "*none*"); return true; } if (attr->data_stream == STREAM_WIN32_DATA || attr->data_stream == STREAM_WIN32_GZIP_DATA || attr->data_stream == STREAM_WIN32_COMPRESSED_DATA) { if (is_bopen(ofd)) { bclose(ofd); } pm_strcpy(attr->ofname, "*none*"); return true; } /* * If Windows stuff failed, e.g. attempt to restore Unix file * to Windows, simply fall through and we will do it the * universal way. */ #endif /* HAVE_WIN32 */ old_mask = umask(0); if (is_bopen(ofd)) { char ec1[50], ec2[50]; fsize = blseek(ofd, 0, SEEK_END); if (attr->type == FT_REG && fsize > 0 && attr->statp.st_size > 0 && fsize != (boffset_t)attr->statp.st_size) { Jmsg3(jcr, M_ERROR, 0, _("File size of restored file %s not correct. Original %s, restored %s.\n"), attr->ofname, edit_uint64(attr->statp.st_size, ec1), edit_uint64(fsize, ec2)); } } /* * We do not restore sockets, so skip trying to restore their * attributes. */ if (attr->type == FT_SPEC && S_ISSOCK(attr->statp.st_mode)) { goto bail_out; } /* ***FIXME**** optimize -- don't do if already correct */ /* * For link, change owner of link using lchown, but don't * try to do a chmod as that will update the file behind it. 
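 * (The permission bits are only touched where a real lchmod() exists,
 * i.e. on FreeBSD/Darwin; the timestamps of the link itself are restored
 * with lutimes() when available, see the #ifdefs below.)
 */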
*/ if (attr->type == FT_LNK) { /* Change owner of link, not of real file */ #ifdef HAVE_LCHOWN if (lchown(attr->ofname, attr->statp.st_uid, attr->statp.st_gid) < 0 && print_error(jcr)) { berrno be; Jmsg2(jcr, M_ERROR, 0, _("Unable to set file owner %s: ERR=%s\n"), attr->ofname, be.bstrerror()); ok = false; } #endif #if defined(HAVE_FREEBSD_OS) || defined(HAVE_DARWIN_OS) /* lchmod() is defined on Linux but doesn't work */ # ifdef HAVE_LCHMOD if (lchmod(attr->ofname, attr->statp.st_mode) < 0 && print_error(jcr)) { berrno be; Jmsg2(jcr, M_ERROR, 0, _("Unable to set file modes %s: ERR=%s\n"), attr->ofname, be.bstrerror()); ok = false; } # endif #endif #ifdef HAVE_LUTIMES struct timeval times[2]; times[0].tv_sec = attr->statp.st_atime; times[0].tv_usec = 0; times[1].tv_sec = attr->statp.st_mtime; times[1].tv_usec = 0; if (lutimes(attr->ofname, times) < 0 && print_error(jcr)) { berrno be; Jmsg2(jcr, M_ERROR, 0, _("Unable to set file times %s: ERR=%s\n"), attr->ofname, be.bstrerror()); ok = false; } #endif } else { /* * At this point, we have a file that is not a LINK */ ok = set_mod_own_time(jcr, ofd, attr); #ifdef HAVE_CHFLAGS /* * FreeBSD user flags * * Note, this should really be done before the utime() above, * but if the immutable bit is set, it will make the utimes() * fail. */ if (chflags(attr->ofname, attr->statp.st_flags) < 0 && print_error(jcr)) { berrno be; Jmsg2(jcr, M_ERROR, 0, _("Unable to set file flags %s: ERR=%s\n"), attr->ofname, be.bstrerror()); ok = false; } #endif } bail_out: if (is_bopen(ofd)) { bclose(ofd); } pm_strcpy(attr->ofname, "*none*"); umask(old_mask); return ok; } /*=============================================================*/ /* */ /* * * * U n i x * * * * */ /* */ /*=============================================================*/ #ifndef HAVE_WIN32 /* * It is possible to piggyback additional data e.g. ACLs on * the encode_stat() data by returning the extended attributes * here. They must be "self-contained" (i.e. you keep track * of your own length), and they must be in ASCII string * format. Using this feature is not recommended. * The code below shows how to return nothing. See the Win32 * code below for returning something in the attributes. */ int encode_attribsEx(JCR *jcr, char *attribsEx, FF_PKT *ff_pkt) { #ifdef HAVE_DARWIN_OS /* * We save the Mac resource fork length so that on a * restore, we can be sure we put back the whole resource. 
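 * Only the resource fork length (ff_pkt->hfsinfo.rsrclength) is appended
 * here, base64 encoded, and only when the FO_HFSPLUS option is set.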
*/ char *p; *attribsEx = 0; /* no extended attributes (yet) */ if (jcr->cmd_plugin || ff_pkt->type == FT_DELETED) { return STREAM_UNIX_ATTRIBUTES; } p = attribsEx; if (ff_pkt->flags & FO_HFSPLUS) { p += to_base64((uint64_t)(ff_pkt->hfsinfo.rsrclength), p); } *p = 0; #else *attribsEx = 0; /* no extended attributes */ #endif return STREAM_UNIX_ATTRIBUTES; } #endif /*=============================================================*/ /* */ /* * * * W i n 3 2 * * * * */ /* */ /*=============================================================*/ #ifdef HAVE_WIN32 int encode_attribsEx(JCR *jcr, char *attribsEx, FF_PKT *ff_pkt) { char *p = attribsEx; WIN32_FILE_ATTRIBUTE_DATA atts; ULARGE_INTEGER li; attribsEx[0] = 0; /* no extended attributes */ if (jcr->cmd_plugin || ff_pkt->type == FT_DELETED) { return STREAM_UNIX_ATTRIBUTES; } /* try unicode version */ POOLMEM* pwszBuf = get_pool_memory(PM_FNAME); make_win32_path_UTF8_2_wchar(&pwszBuf, ff_pkt->snap_fname); BOOL b=p_GetFileAttributesExW((LPCWSTR)pwszBuf, GetFileExInfoStandard, (LPVOID)&atts); free_pool_memory(pwszBuf); if (!b) { win_error(jcr, "GetFileAttributesExW:", ff_pkt->fname); return STREAM_UNIX_ATTRIBUTES; } p += to_base64((uint64_t)atts.dwFileAttributes, p); *p++ = ' '; /* separate fields with a space */ li.LowPart = atts.ftCreationTime.dwLowDateTime; li.HighPart = atts.ftCreationTime.dwHighDateTime; p += to_base64((uint64_t)li.QuadPart, p); *p++ = ' '; li.LowPart = atts.ftLastAccessTime.dwLowDateTime; li.HighPart = atts.ftLastAccessTime.dwHighDateTime; p += to_base64((uint64_t)li.QuadPart, p); *p++ = ' '; li.LowPart = atts.ftLastWriteTime.dwLowDateTime; li.HighPart = atts.ftLastWriteTime.dwHighDateTime; p += to_base64((uint64_t)li.QuadPart, p); *p++ = ' '; p += to_base64((uint64_t)atts.nFileSizeHigh, p); *p++ = ' '; p += to_base64((uint64_t)atts.nFileSizeLow, p); *p = 0; return STREAM_UNIX_ATTRIBUTES_EX; } /* Define attributes that are legal to set with SetFileAttributes() */ #define SET_ATTRS ( \ FILE_ATTRIBUTE_ARCHIVE| \ FILE_ATTRIBUTE_HIDDEN| \ FILE_ATTRIBUTE_NORMAL| \ FILE_ATTRIBUTE_NOT_CONTENT_INDEXED| \ FILE_ATTRIBUTE_OFFLINE| \ FILE_ATTRIBUTE_READONLY| \ FILE_ATTRIBUTE_SYSTEM| \ FILE_ATTRIBUTE_TEMPORARY) /* * Set Extended File Attributes for Win32 * * fname is the original filename * ofile is the output filename (may be in a different directory) * * Returns: true on success * false on failure */ static bool set_win32_attributes(JCR *jcr, ATTR *attr, BFILE *ofd) { char *p = attr->attrEx; int64_t val; WIN32_FILE_ATTRIBUTE_DATA atts; ULARGE_INTEGER li; /* if we have neither Win ansi nor wchar API, get out */ if (!(p_SetFileAttributesW || p_SetFileAttributesA)) { return false; } if (!p || !*p) { /* we should have attributes */ Dmsg2(100, "Attributes missing. 
of=%s ofd=%d\n", attr->ofname, ofd->fid); if (is_bopen(ofd)) { bclose(ofd); } return false; } else { Dmsg2(100, "Attribs %s = %s\n", attr->ofname, attr->attrEx); } p += from_base64(&val, p); plug(atts.dwFileAttributes, val); p++; /* skip space */ p += from_base64(&val, p); li.QuadPart = val; atts.ftCreationTime.dwLowDateTime = li.LowPart; atts.ftCreationTime.dwHighDateTime = li.HighPart; p++; /* skip space */ p += from_base64(&val, p); li.QuadPart = val; atts.ftLastAccessTime.dwLowDateTime = li.LowPart; atts.ftLastAccessTime.dwHighDateTime = li.HighPart; p++; /* skip space */ p += from_base64(&val, p); li.QuadPart = val; atts.ftLastWriteTime.dwLowDateTime = li.LowPart; atts.ftLastWriteTime.dwHighDateTime = li.HighPart; p++; p += from_base64(&val, p); plug(atts.nFileSizeHigh, val); p++; p += from_base64(&val, p); plug(atts.nFileSizeLow, val); /* At this point, we have reconstructed the WIN32_FILE_ATTRIBUTE_DATA pkt */ if (!is_bopen(ofd)) { Dmsg1(100, "File not open: %s\n", attr->ofname); bopen(ofd, attr->ofname, O_WRONLY|O_BINARY, 0); /* attempt to open the file */ } if (is_bopen(ofd)) { Dmsg1(100, "SetFileTime %s\n", attr->ofname); if (!SetFileTime(bget_handle(ofd), &atts.ftCreationTime, &atts.ftLastAccessTime, &atts.ftLastWriteTime)) { win_error(jcr, M_WARNING, "SetFileTime:", attr->ofname); } /* * Inform win32 api that the given file is a sparse file */ if (atts.dwFileAttributes & FILE_ATTRIBUTE_SPARSE_FILE) { DWORD bytesReturned; Dmsg1(100, "Set FILE_ATTRIBUTE_SPARSE_FILE on %s\n", attr->ofname); if (!DeviceIoControl(bget_handle(ofd), FSCTL_SET_SPARSE, NULL, 0, NULL, 0, &bytesReturned, NULL)) { /* Not sure we really want to have a Warning for such attribute */ win_error(jcr, M_WARNING, "set SPARSE_FILE:", attr->ofname); } } /* * Restore the file as compressed. 
*/ if (atts.dwFileAttributes & FILE_ATTRIBUTE_COMPRESSED) { int fmt = COMPRESSION_FORMAT_DEFAULT; DWORD bytesReturned; Dmsg1(100, "Set FILE_ATTRIBUTE_COMPRESSED on %s\n", attr->ofname); if (!DeviceIoControl(bget_handle(ofd), FSCTL_SET_COMPRESSION, &fmt, sizeof(fmt), NULL, 0, &bytesReturned, NULL)) { /* Not sure we really want to have a Warning for such attribute */ win_error(jcr, M_WARNING, "set COMPRESSED:", attr->ofname); } } bclose(ofd); } Dmsg1(100, "SetFileAtts %s\n", attr->ofname); if (!(atts.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)) { POOLMEM* pwszBuf = get_pool_memory(PM_FNAME); make_win32_path_UTF8_2_wchar(&pwszBuf, attr->ofname); BOOL b=p_SetFileAttributesW((LPCWSTR)pwszBuf, atts.dwFileAttributes & SET_ATTRS); free_pool_memory(pwszBuf); if (!b) win_error(jcr, M_WARNING, "SetFileAttributesW:", attr->ofname); } return true; } void win_error(JCR *jcr, int type, const char *prefix, const char *win32_ofile) { DWORD lerror = GetLastError(); LPTSTR msg; FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER| FORMAT_MESSAGE_FROM_SYSTEM, NULL, lerror, 0, (LPTSTR)&msg, 0, NULL); Dmsg3(100, "Error in %s on file %s: ERR=%s\n", prefix, win32_ofile, msg); strip_trailing_junk(msg); Jmsg3(jcr, type, 0, _("Error in %s file %s: ERR=%s\n"), prefix, win32_ofile, msg); LocalFree(msg); } void win_error(JCR *jcr, const char *prefix, const char *win32_ofile) { win_error(jcr, M_ERROR, prefix, win32_ofile); } void win_error(JCR *jcr, const char *prefix, DWORD lerror) { LPTSTR msg; FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER| FORMAT_MESSAGE_FROM_SYSTEM, NULL, lerror, 0, (LPTSTR)&msg, 0, NULL); strip_trailing_junk(msg); if (jcr) { Jmsg2(jcr, M_ERROR, 0, _("Error in %s: ERR=%s\n"), prefix, msg); } else { MessageBox(NULL, msg, prefix, MB_OK); } LocalFree(msg); } #endif /* HAVE_WIN32 */ #ifndef BEEF bool check_directory_acl(char **, alist *, const char *) { return true;} bool has_access(alist *, alist *, struct stat *) { return true;} #endif bacula-15.0.3/src/findlib/find.c0000644000175000017500000004670314771010173016156 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Main routine for finding files on a file system. * The heart of the work to find the files on the * system is done in find_one.c. Here we have the * higher level control as well as the matching * routines for the new syntax Options resource. * * Kern E. 
Sibbald, MM * */ #include "bacula.h" #include "find.h" static const int dbglvl = 450; int32_t name_max; /* filename max length */ int32_t path_max; /* path name max length */ #ifdef DEBUG #undef bmalloc #define bmalloc(x) sm_malloc(__FILE__, __LINE__, x) #endif static int our_callback(JCR *jcr, FF_PKT *ff, bool top_level); static const int fnmode = 0; /* * Initialize the find files "global" variables */ FF_PKT *init_find_files() { FF_PKT *ff; /* bmalloc returns zeroed buffer */ ff = (FF_PKT *)bmalloc(sizeof(FF_PKT)); /* Get system path and filename maximum lengths */ path_max = pathconf(".", _PC_PATH_MAX); if (path_max < 2048) { path_max = 2048; } name_max = pathconf(".", _PC_NAME_MAX); if (name_max < 2048) { name_max = 2048; } path_max++; /* add for EOS */ name_max++; /* add for EOS */ Dmsg1(dbglvl, "init_find_files ff=%p\n", ff); return ff; } /* * Set find_files options. For the moment, we only * provide for full/incremental saves, and setting * of save_time. For additional options, see above */ void set_find_options(FF_PKT *ff, int incremental, time_t save_time) { Dmsg0(dbglvl, "Enter set_find_options()\n"); ff->incremental = incremental; ff->save_time = save_time; Dmsg0(dbglvl, "Leave set_find_options()\n"); } void set_find_changed_function(FF_PKT *ff, bool check_fct(JCR *jcr, FF_PKT *ff)) { Dmsg0(dbglvl, "Enter set_find_changed_function()\n"); ff->check_fct = check_fct; } void set_find_snapshot_function(FF_PKT *ff, bool convert_path(JCR *jcr, FF_PKT *ff, dlist *filelist, dlistString *node)) { ff->snapshot_convert_fct = convert_path; } /* * Call this subroutine with a callback subroutine as the first * argument and a packet as the second argument, this packet * will be passed back to the callback subroutine as the last * argument. * */ int find_files(JCR *jcr, FF_PKT *ff, int file_save(JCR *jcr, FF_PKT *ff_pkt, bool top_level), int plugin_save(JCR *jcr, FF_PKT *ff_pkt, bool top_level)) { ff->file_save = file_save; ff->plugin_save = plugin_save; /* This is the new way */ findFILESET *fileset = ff->fileset; if (fileset) { int i, j; /* TODO: We probably need be move the initialization in the fileset loop, * at this place flags options are "concatenated" accross Include {} blocks * (not only Options{} blocks inside a Include{}) */ ff->flags = 0; for (i=0; iinclude_list.size(); i++) { findINCEXE *incexe = (findINCEXE *)fileset->include_list.get(i); fileset->incexe = incexe; /* Here, we reset some values between two different Include{} */ strcpy(ff->VerifyOpts, "V"); strcpy(ff->AccurateOpts, "Cmcs"); /* mtime+ctime+size by default */ strcpy(ff->BaseJobOpts, "Jspug5"); /* size+perm+user+group+chk */ ff->plugin = NULL; ff->opt_plugin = false; /* * By setting all options, we in effect OR the global options * which is what we want. 
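 *
 * For example, if one Options{} block enables compression and another one
 * enables sparse file handling, ff->flags ends up with both FO_COMPRESS
 * and FO_SPARSE set, while scalar settings (strip_path, fstype, drivetype,
 * ...) simply keep the value assigned by the last Options{} block.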
*/ for (j=0; jopts_list.size(); j++) { findFOPTS *fo = (findFOPTS *)incexe->opts_list.get(j); /* TODO options are "simply" reset by Options block that come next * For example : * Options { Dedup = storage } * Options { IgnoreCase = yes } * at the end the "Dedup = storage" is ignored * ATTN: some plugins use AddOptions() that create extra Option block * Also see accept_file() below that could suffer of the same problem */ ff->flags |= fo->flags; /* If the compress option was set in the previous block, overwrite the * algorithm only if defined */ if ((ff->flags & FO_COMPRESS) && fo->Compress_algo != 0) { ff->Compress_algo = fo->Compress_algo; ff->Compress_level = fo->Compress_level; } if (fo->flags & FO_DEDUPLICATION) { /* fix #2334 but see TODO above*/ ff->Dedup_level = fo->Dedup_level; } ff->strip_path = fo->strip_path; ff->fstypes = fo->fstype; ff->drivetypes = fo->drivetype; if (fo->plugin != NULL) { ff->plugin = fo->plugin; /* TODO: generate a plugin event ? */ ff->opt_plugin = true; } bstrncat(ff->VerifyOpts, fo->VerifyOpts, sizeof(ff->VerifyOpts)); /* TODO: Concat or replace? */ if (fo->AccurateOpts[0]) { bstrncpy(ff->AccurateOpts, fo->AccurateOpts, sizeof(ff->AccurateOpts)); } if (fo->BaseJobOpts[0]) { bstrncpy(ff->BaseJobOpts, fo->BaseJobOpts, sizeof(ff->BaseJobOpts)); } } Dmsg4(50, "Verify=<%s> Accurate=<%s> BaseJob=<%s> flags=<%lld>\n", ff->VerifyOpts, ff->AccurateOpts, ff->BaseJobOpts, ff->flags); dlistString *node; foreach_dlist(node, &incexe->name_list) { POOL_MEM fname(PM_FNAME); fname.strcpy(node->c_str()); #ifdef HAVE_WIN32 win32_normalize_fileset_path(fname.addr()); #endif Dmsg1(dbglvl, "F %s\n", fname.c_str()); ff->top_fname = fname.c_str(); /* Convert the filename if needed */ if (ff->snapshot_convert_fct) { ff->snapshot_convert_fct(jcr, ff, &incexe->name_list, node); } if (find_one_file(jcr, ff, our_callback, fname.c_str(), ff->top_fname, (dev_t)-1, true) == 0) { return 0; /* error return */ } if (job_canceled(jcr)) { return 0; } } foreach_dlist(node, &incexe->plugin_list) { char *fname = node->c_str(); if (!plugin_save) { Jmsg(jcr, M_FATAL, 0, _("Plugin: \"%s\" not found.\n"), fname); return 0; } Dmsg1(dbglvl, "PluginCommand: %s\n", fname); ff->top_fname = fname; ff->cmd_plugin = true; /* Make sure that opt plugin is not set * The current implementation doesn't allow option plugin * and command plugin to run at the same time */ ff->opt_plugin = false; ff->plugin = NULL; plugin_save(jcr, ff, true); ff->cmd_plugin = false; if (job_canceled(jcr)) { return 0; } } } } return 1; } /* * Test if the currently selected directory (in ff->fname) is * explicitly in the Include list or explicitly in the Exclude * list. */ bool is_in_fileset(FF_PKT *ff) { dlistString *node; char *fname; int i; findINCEXE *incexe; findFILESET *fileset = ff->fileset; if (fileset) { for (i=0; iinclude_list.size(); i++) { incexe = (findINCEXE *)fileset->include_list.get(i); foreach_dlist(node, &incexe->name_list) { fname = node->c_str(); Dmsg2(dbglvl, "Inc fname=%s ff->fname=%s\n", fname, ff->fname); if (strcmp(fname, ff->fname) == 0) { return true; } } } for (i=0; iexclude_list.size(); i++) { incexe = (findINCEXE *)fileset->exclude_list.get(i); foreach_dlist(node, &incexe->name_list) { fname = node->c_str(); Dmsg2(dbglvl, "Exc fname=%s ff->fname=%s\n", fname, ff->fname); if (strcmp(fname, ff->fname) == 0) { return true; } } } } return false; } /** * Check if the file being processed is allowed to backup or not. 
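 * For example (illustrative): with an allowed backup directory of "/home",
 * a file such as "/home/user/data" is accepted, while "/var/log/messages"
 * is rejected (and, if it is a directory, everything below it is skipped).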
* * Returns: true if OK to backup * false to ignore file/directory */ static int check_allowed_dirs(JCR *jcr, FF_PKT *ff_pkt) { bool ret = true; char *dir, *pp; /* Check if file is not excluded at all */ if (ff_pkt->excluded_backup_dirs) { foreach_alist(dir, ff_pkt->excluded_backup_dirs) { if ((pp = b_path_match(ff_pkt->fname, dir)) == ff_pkt->fname) { ret = false; break; } } } /* If not excluded, then check if it's inside of allowed directories */ if (ret && ff_pkt->allowed_backup_dirs) { foreach_alist(dir, ff_pkt->allowed_backup_dirs) { /* The b_path_match check can be done twice here: * For the 1st time we check if current file path contains exactly the allowed dir - if it does * then file/directory can be backed up * For the 2nd time we check if current file is a part of allowed dir - if it does * then we know we can descend into directories below to continue checking. * If it does not then we can skip this file as well as all directories below. */ pp = b_path_match(dir, ff_pkt->fname); if (pp == dir) { ret = true; break; } else if ((pp = b_path_match(ff_pkt->fname, dir)) == ff_pkt->fname) { ret = true; break; } else { ret = false; } } if (ret == false && S_ISDIR(ff_pkt->statp.st_mode)) { Dmsg1(dbglvl, "Skipping directory %s, it's out of allowed ones\n", ff_pkt->fname); Jmsg(jcr, M_SKIPPED, 0, _("Skipping directory %s, it's out of allowed ones\n"), ff_pkt->fname); jcr->num_dirs_skipped++; } } return ret; } bool accept_file(JCR *jcr, FF_PKT *ff) { int i, j, k; int fnm_flags; findFILESET *fileset = ff->fileset; findINCEXE *incexe = fileset->incexe; const char *basename; int (*match_func)(const char *pattern, const char *string, int flags); Dmsg1(dbglvl, "enter accept_file: fname=%s\n", ff->fname); if (ff->flags & FO_ENHANCEDWILD) { // match_func = enh_fnmatch; match_func = fnmatch; if ((basename = last_path_separator(ff->fname)) != NULL) basename++; else basename = ff->fname; } else { match_func = fnmatch; basename = ff->fname; } if (!check_allowed_dirs(jcr, ff)) { return false; } for (j = 0; j < incexe->opts_list.size(); j++) { findFOPTS *fo = (findFOPTS *)incexe->opts_list.get(j); ff->flags = fo->flags; ff->Compress_algo = fo->Compress_algo; ff->Compress_level = fo->Compress_level; if (fo->flags & FO_DEDUPLICATION) { /* fix #2334 but see TODO in find_file() above */ ff->Dedup_level = fo->Dedup_level; } ff->fstypes = fo->fstype; ff->drivetypes = fo->drivetype; fnm_flags = (ff->flags & FO_IGNORECASE) ? FNM_CASEFOLD : 0; fnm_flags |= (ff->flags & FO_ENHANCEDWILD) ? 
FNM_PATHNAME : 0; if (S_ISDIR(ff->statp.st_mode)) { for (k=0; kwilddir.size(); k++) { if (match_func((char *)fo->wilddir.get(k), ff->fname, fnmode|fnm_flags) == 0) { if (ff->flags & FO_EXCLUDE) { Dmsg2(dbglvl, "Exclude wilddir: %s file=%s\n", (char *)fo->wilddir.get(k), ff->fname); return false; /* reject dir */ } return true; /* accept dir */ } } } else { for (k=0; kwildfile.size(); k++) { if (match_func((char *)fo->wildfile.get(k), ff->fname, fnmode|fnm_flags) == 0) { if (ff->flags & FO_EXCLUDE) { Dmsg2(dbglvl, "Exclude wildfile: %s file=%s\n", (char *)fo->wildfile.get(k), ff->fname); return false; /* reject file */ } return true; /* accept file */ } } for (k=0; kwildbase.size(); k++) { if (match_func((char *)fo->wildbase.get(k), basename, fnmode|fnm_flags) == 0) { if (ff->flags & FO_EXCLUDE) { Dmsg2(dbglvl, "Exclude wildbase: %s file=%s\n", (char *)fo->wildbase.get(k), basename); return false; /* reject file */ } return true; /* accept file */ } } } for (k=0; kwild.size(); k++) { if (match_func((char *)fo->wild.get(k), ff->fname, fnmode|fnm_flags) == 0) { if (ff->flags & FO_EXCLUDE) { Dmsg2(dbglvl, "Exclude wild: %s file=%s\n", (char *)fo->wild.get(k), ff->fname); return false; /* reject file */ } return true; /* accept file */ } } if (S_ISDIR(ff->statp.st_mode)) { for (k=0; kregexdir.size(); k++) { const int nmatch = 30; regmatch_t pmatch[nmatch]; if (regexec((regex_t *)fo->regexdir.get(k), ff->fname, nmatch, pmatch, 0) == 0) { if (ff->flags & FO_EXCLUDE) { return false; /* reject file */ } return true; /* accept file */ } } } else { for (k=0; kregexfile.size(); k++) { const int nmatch = 30; regmatch_t pmatch[nmatch]; if (regexec((regex_t *)fo->regexfile.get(k), ff->fname, nmatch, pmatch, 0) == 0) { if (ff->flags & FO_EXCLUDE) { return false; /* reject file */ } return true; /* accept file */ } } } for (k=0; kregex.size(); k++) { const int nmatch = 30; regmatch_t pmatch[nmatch]; if (regexec((regex_t *)fo->regex.get(k), ff->fname, nmatch, pmatch, 0) == 0) { if (ff->flags & FO_EXCLUDE) { return false; /* reject file */ } return true; /* accept file */ } } /* * If we have an empty Options clause with exclude, then * exclude the file */ if (ff->flags & FO_EXCLUDE && fo->regex.size() == 0 && fo->wild.size() == 0 && fo->regexdir.size() == 0 && fo->wilddir.size() == 0 && fo->regexfile.size() == 0 && fo->wildfile.size() == 0 && fo->wildbase.size() == 0) { return false; /* reject file */ } } /* Now apply the Exclude { } directive */ for (i=0; iexclude_list.size(); i++) { findINCEXE *incexe = (findINCEXE *)fileset->exclude_list.get(i); /* FIXME: I don't think we can have wild exclusion inside a Exclude {} */ for (j=0; jopts_list.size(); j++) { findFOPTS *fo = (findFOPTS *)incexe->opts_list.get(j); fnm_flags = (fo->flags & FO_IGNORECASE) ? FNM_CASEFOLD : 0; for (k=0; kwild.size(); k++) { if (fnmatch((char *)fo->wild.get(k), ff->fname, fnmode|fnm_flags) == 0) { Dmsg1(dbglvl, "Reject wild1: %s\n", ff->fname); return false; /* reject file */ } } } /* FIXME: I don't think we can set Options{} inside an Exclude{}, so it is * not possible to have the IGNORECASE flag. The Exclude must match the case. * One solution would be to look the Include flag for the current file. A very * old version of the code was using FNM_CASEFOLD by default. */ fnm_flags = (incexe->current_opts != NULL && incexe->current_opts->flags & FO_IGNORECASE) ? 
FNM_CASEFOLD : 0; dlistString *node; foreach_dlist(node, &incexe->name_list) { char *fname = node->c_str(); if (fnmatch(fname, ff->fname, fnmode|fnm_flags) == 0) { Dmsg1(dbglvl, "Reject wild2: %s\n", ff->fname); return false; /* reject file */ } } } return true; } /* * The code comes here for each file examined. * We filter the files, then call the user's callback if * the file is included. */ static int our_callback(JCR *jcr, FF_PKT *ff, bool top_level) { if (top_level) { /* Check if we want descend at all - we may not want to backup files from the top directory * but maybe something below */ if (check_allowed_dirs(jcr, ff)) { Dmsg1(dbglvl, "Descending into top-level directory %s, it's part of allowed directories paths\n", ff->fname); return ff->file_save(jcr, ff, top_level); /* accept file */ } else { Dmsg1(dbglvl, "Will not descend into top-level directory %s, " "it's not within allowed directories paths\n", ff->fname); /* Skip top dir and anything below */ return -1; } } switch (ff->type) { case FT_NOACCESS: case FT_NOFOLLOW: case FT_NOSTAT: case FT_NOCHG: case FT_ISARCH: case FT_NORECURSE: case FT_NOFSCHG: case FT_INVALIDFS: case FT_INVALIDDT: case FT_NOOPEN: // return ff->file_save(jcr, ff, top_level); /* These items can be filtered */ case FT_LNKSAVED: case FT_REGE: case FT_REG: case FT_LNK: case FT_DIRBEGIN: case FT_DIREND: case FT_RAW: case FT_FIFO: case FT_SPEC: case FT_DIRNOCHG: case FT_REPARSE: case FT_JUNCTION: if (accept_file(jcr, ff)) { return ff->file_save(jcr, ff, top_level); } else { Dmsg1(dbglvl, "Skip file %s\n", ff->fname); return -1; /* ignore this file */ } default: Dmsg1(000, "Unknown FT code %d\n", ff->type); return 0; } } /* * Terminate find_files() and release * all allocated memory */ int term_find_files(FF_PKT *ff) { int hard_links; if (ff->fname_save) { free_pool_memory(ff->fname_save); } if (ff->link_save) { free_pool_memory(ff->link_save); } if (ff->ignoredir_fname) { free_pool_memory(ff->ignoredir_fname); } if (ff->snap_top_fname) { free_pool_memory(ff->snap_top_fname); } if (ff->mtab_list) { delete ff->mtab_list; } hard_links = term_find_one(ff); free(ff); return hard_links; } /* dump the name_list of every include inside a FileSet */ void dump_name_list(const char* file, int lineno, int lvl, const char *prefix, findFILESET *fileset) { if (fileset == NULL) { d_msg(file, lineno, lvl, "%s Fileset is NULL\n", prefix); return; } for (int i=0; iinclude_list.size(); i++) { findINCEXE *incexe = (findINCEXE *)fileset->include_list.get(i); dlistString *node; foreach_dlist(node, &incexe->name_list) { Dmsg1(DT_VOLUME|50, "name_list = %s\n", node->c_str()); if (chk_dbglvl(lvl)) d_msg(file, lineno, lvl, "%s INC[%d] name = %s\n", prefix, i, node->c_str()); } } } bacula-15.0.3/src/findlib/enable_priv.c0000644000175000017500000001075114771010173017516 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Enable backup privileges for Win32 systems. 
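 * (enable_backup_privileges() returns a bit map of the privileges that could
 * be enabled, e.g. 1<<1 for SE_BACKUP_NAME and 1<<2 for SE_RESTORE_NAME,
 * and 0 when none are available; the Unix build always returns 0.)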
* * Kern Sibbald, May MMIII * */ #include "bacula.h" #include "find.h" #include "jcr.h" /*=============================================================*/ /* */ /* * * * U n i x * * * * */ /* */ /*=============================================================*/ #if !defined(HAVE_WIN32) int enable_backup_privileges(JCR *jcr, int ignore_errors) { return 0; } #endif /*=============================================================*/ /* */ /* * * * W i n 3 2 * * * * */ /* */ /*=============================================================*/ #if defined(HAVE_WIN32) void win_error(JCR *jcr, const char *prefix, DWORD lerror); static int enable_priv(JCR *jcr, HANDLE hToken, const char *name, int ignore_errors) { TOKEN_PRIVILEGES tkp; DWORD lerror; if (!(p_LookupPrivilegeValue && p_AdjustTokenPrivileges)) { return 0; /* not avail on this OS */ } // Get the LUID for the security privilege. if (!p_LookupPrivilegeValue(NULL, name, &tkp.Privileges[0].Luid)) { win_error(jcr, "LookupPrivilegeValue", GetLastError()); return 0; } /* Set the security privilege for this process. */ tkp.PrivilegeCount = 1; tkp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED; p_AdjustTokenPrivileges(hToken, FALSE, &tkp, sizeof(TOKEN_PRIVILEGES), NULL, NULL); lerror = GetLastError(); if (lerror != ERROR_SUCCESS) { if (!ignore_errors) { char buf[200]; strcpy(buf, _("AdjustTokenPrivileges set ")); bstrncat(buf, name, sizeof(buf)); win_error(jcr, buf, lerror); } return 0; } return 1; } /* * Setup privileges we think we will need. We probably do not need * the SE_SECURITY_NAME, but since nothing seems to be working, * we get it hoping to fix the problems. */ int enable_backup_privileges(JCR *jcr, int ignore_errors) { HANDLE hToken, hProcess; int stat = 0; if (!p_OpenProcessToken) { return 0; /* No avail on this OS */ } hProcess = OpenProcess(PROCESS_ALL_ACCESS, FALSE, GetCurrentProcessId()); // Get a token for this process. if (!p_OpenProcessToken(hProcess, TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, &hToken)) { if (!ignore_errors) { win_error(jcr, "OpenProcessToken", GetLastError()); } /* Forge on anyway */ } /* Return a bit map of permissions set. */ if (enable_priv(jcr, hToken, SE_BACKUP_NAME, ignore_errors)) { stat |= 1<<1; } if (enable_priv(jcr, hToken, SE_RESTORE_NAME, ignore_errors)) { stat |= 1<<2; } if (enable_priv(jcr, hToken, SE_SECURITY_NAME, ignore_errors)) { stat |= 1<<0; } if (enable_priv(jcr, hToken, SE_TAKE_OWNERSHIP_NAME, ignore_errors)) { stat |= 1<<3; } if (enable_priv(jcr, hToken, SE_ASSIGNPRIMARYTOKEN_NAME, ignore_errors)) { stat |= 1<<4; } if (enable_priv(jcr, hToken, SE_SYSTEM_ENVIRONMENT_NAME, ignore_errors)) { stat |= 1<<5; } if (enable_priv(jcr, hToken, SE_CREATE_TOKEN_NAME, ignore_errors)) { stat |= 1<<6; } if (enable_priv(jcr, hToken, SE_MACHINE_ACCOUNT_NAME, ignore_errors)) { stat |= 1<<7; } if (enable_priv(jcr, hToken, SE_TCB_NAME, ignore_errors)) { stat |= 1<<8; } if (enable_priv(jcr, hToken, SE_CREATE_PERMANENT_NAME, ignore_errors)) { stat |= 1<<10; } if (stat) { stat |= 1<<9; } CloseHandle(hToken); CloseHandle(hProcess); return stat; } #endif /* HAVE_WIN32 */ bacula-15.0.3/src/findlib/Makefile.in0000644000175000017500000000764114771010173017135 0ustar bsbuildbsbuild# # Copyright (C) 2000-2023 Kern Sibbald # License: BSD 2-Clause; see file LICENSE-FOSS # # Find files library Makefile # @MCOMMON@ srcdir = . VPATH = . .PATH: . # one up basedir = .. # top dir topdir = ../.. 
# this dir relative to top dir thisdir = src/findlib DEBUG=@DEBUG@ EXTRA_SRCS=@EXTRA_FINDLIB_SRCS@ first_rule: all dummy: # # include files installed when using libtool # INCLUDE_FILES = bfile.h find.h protos.h win32filter.h # LIBBACFIND_SRCS = find.c match.c find_one.c attribs.c create_file.c \ bfile.c drivetype.c enable_priv.c fstype.c mkpath.c \ savecwd.c namedpipe.c win32filter.c $(EXTRA_SRCS) LIBBACFIND_OBJS = $(LIBBACFIND_SRCS:.c=.o) LIBBACFIND_LOBJS = $(LIBBACFIND_SRCS:.c=.lo) LIBBACFIND_LT_RELEASE = @LIBBACFIND_LT_RELEASE@ .SUFFIXES: .c .o .lo .PHONY: .DONTCARE: # inference rules .c.o: @echo "Compiling $<" $(NO_ECHO)$(DO_CHECK_DMSG) $(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) -E $< | $(CHECK_DMSG) $(NO_ECHO)$(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) $< .c.lo: @echo "Compiling $<" $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) $< #------------------------------------------------------------------------- all: Makefile libbacfind$(DEFAULT_ARCHIVE_TYPE) ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) @echo "==== Make of findlib is good ====" @echo " " libbacfind.a: $(LIBBACFIND_OBJS) @echo "Making $@ ..." $(AR) rc $@ $(LIBBACFIND_OBJS) $(RANLIB) $@ libbacfind.la: Makefile $(LIBBACFIND_LOBJS) @echo "Making $@ ..." $(LIBTOOL_LINK) $(CXX) $(DEFS) $(DEBUG) $(LDFLAGS) -o $@ $(LIBBACFIND_LOBJS) -export-dynamic -rpath $(libdir) -release $(LIBBACFIND_LT_RELEASE) Makefile: $(srcdir)/Makefile.in $(topdir)/config.status cd $(topdir) \ && CONFIG_FILES=$(thisdir)/$@ CONFIG_HEADERS= $(SHELL) ./config.status install-includes: $(MKDIR) $(DESTDIR)/$(includedir)/bacula/findlib for I in $(INCLUDE_FILES); do \ $(INSTALL_DATA) $$I $(DESTDIR)$(includedir)/bacula/findlib/`basename $$I`; \ done uninstall-includes: for I in $(INCLUDE_FILES); do \ $(RMF) $(DESTDIR)$(includedir)/bacula/findlib/`basename $$I`; \ done libtool-install: all $(MKDIR) $(DESTDIR)$(libdir) $(RMF) $(DESTDIR)$(libdir)/libbacfind-*.so $(DESTDIR)$(libdir)/libbacfind.la $(LIBTOOL_INSTALL_FINISH) $(INSTALL_LIB) libbacfind$(DEFAULT_ARCHIVE_TYPE) $(DESTDIR)$(libdir) libtool-uninstall: $(LIBTOOL_UNINSTALL) $(RMF) $(DESTDIR)$(libdir)/libbacfind.la install: @LIBTOOL_INSTALL_TARGET@ @INCLUDE_INSTALL_TARGET@ uninstall: @LIBTOOL_UNINSTALL_TARGET@ @INCLUDE_UNINSTALL_TARGET@ libtool-clean: @find . -name '*.lo' -print | xargs $(LIBTOOL_CLEAN) $(RMF) @$(RMF) *.la @$(RMF) -r .libs _libs clean: libtool-clean @$(RMF) find core a.out *.o *.bak *~ *.intpro *.extpro 1 2 3 realclean: clean @$(RMF) tags distclean: realclean if test $(srcdir) = .; then $(MAKE) realclean; fi (cd $(srcdir); $(RMF) Makefile) devclean: realclean if test $(srcdir) = .; then $(MAKE) realclean; fi (cd $(srcdir); $(RMF) Makefile) # Semi-automatic generation of dependencies: # Use gcc -M because X11 `makedepend' doesn't work on all systems # and it also includes system headers. # `semi'-automatic since dependencies are generated at distribution time. 
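# (For example, run "make depend" after adding a new source file to
# LIBBACFIND_SRCS so that the generated list below is refreshed.)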
depend: @$(MV) Makefile Makefile.bak @$(SED) "/^# DO NOT DELETE:/,$$ d" Makefile.bak > Makefile @$(ECHO) "# DO NOT DELETE: nice dependency list follows" >> Makefile @for src in $(LIBBACFIND_SRCS); do \ $(CXX) -S -M -MT `basename $$src .c`$(DEFAULT_OBJECT_TYPE) $(CPPFLAGS) $(XINC) -I$(srcdir) -I$(basedir) $$src >> Makefile; \ done @if test -f Makefile ; then \ $(RMF) Makefile.bak; \ else \ $(MV) Makefile.bak Makefile; \ echo " ===== Something went wrong in make depend ====="; \ fi # ----------------------------------------------------------------------- # DO NOT DELETE: nice dependency list follows bacula-15.0.3/src/findlib/bfile.h0000644000175000017500000001171014771010173016312 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Bacula low level File I/O routines. This routine simulates * open(), read(), write(), and close(), but using native routines. * I.e. on Windows, we use Windows APIs. * * Kern Sibbald May MMIII */ #ifndef __BFILE_H #define __BFILE_H #include "win32filter.h" typedef struct _PROCESS_WIN32_BACKUPAPIBLOCK_CONTEXT { int64_t liNextHeader; bool bIsInData; BWIN32_STREAM_ID header_stream; } PROCESS_WIN32_BACKUPAPIBLOCK_CONTEXT; /* ======================================================= * * W I N D O W S * * ======================================================= */ #ifdef HAVE_WIN32 enum { BF_CLOSED, BF_READ, /* BackupRead */ BF_WRITE /* BackupWrite */ }; /* In bfile.c */ /* Basic Win32 low level I/O file packet */ class BFILE { public: bool use_backup_api; /* set if using BackupRead/Write */ int mode; /* set if file is open */ HANDLE fh; /* Win32 file handle */ int fid; /* fd if doing Unix style */ LPVOID lpContext; /* BackupRead/Write context */ PVOID pvContext; /* Windows encryption (EFS) context */ POOLMEM *errmsg; /* error message buffer */ DWORD rw_bytes; /* Bytes read or written */ DWORD lerror; /* Last error code */ DWORD fattrs; /* Windows file attributes */ int berrno; /* errno */ int block; /* Count of read/writes */ uint64_t total_bytes; /* bytes written */ boffset_t offset; /* Delta offset */ JCR *jcr; /* jcr for editing job codes */ Win32Filter win32filter; /* context for decomposition of win32 backup streams */ int use_backup_decomp; /* set if using BackupRead Stream Decomposition */ bool reparse_point; /* set if reparse point */ bool cmd_plugin; /* set if we have a command plugin */ bool const is_encrypted() const; }; /* Windows encrypted file system */ inline const bool BFILE::is_encrypted() const { return (fattrs & FILE_ATTRIBUTE_ENCRYPTED) != 0; }; HANDLE bget_handle(BFILE *bfd); #else /* Linux/Unix systems */ /* ======================================================= * * U N I X * * ======================================================= */ /* Basic Unix low level I/O file packet */ struct BFILE { int fid; /* file id on Unix */ int berrno; /* errno */ int32_t lerror; /* not used - simplies Win32 builds */ int block; /* Count of read/writes */ uint64_t m_flags; /* open flags */ uint64_t 
total_bytes; /* bytes written */ boffset_t offset; /* Delta offset */ JCR *jcr; /* jcr for editing job codes */ Win32Filter win32filter; /* context for decomposition of win32 backup streams */ int use_backup_decomp; /* set if using BackupRead Stream Decomposition */ bool reparse_point; /* not used in Unix */ bool cmd_plugin; /* set if we have a command plugin */ }; #endif void binit(BFILE *bfd); bool is_bopen(BFILE *bfd); #ifdef HAVE_WIN32 void set_fattrs(BFILE *bfd, struct stat *statp); #else #define set_fattrs(bfd, fattrs) #endif bool set_win32_backup(BFILE *bfd); bool set_portable_backup(BFILE *bfd); bool set_cmd_plugin(BFILE *bfd, JCR *jcr); bool have_win32_api(); bool is_portable_backup(BFILE *bfd); bool is_plugin_data(BFILE *bfd); bool is_restore_stream_supported(int stream); bool is_win32_stream(int stream); int bopen(BFILE *bfd, const char *fname, uint64_t flags, mode_t mode); int bopen_rsrc(BFILE *bfd, const char *fname, uint64_t flags, mode_t mode); int bclose(BFILE *bfd); ssize_t bread(BFILE *bfd, void *buf, size_t count); ssize_t bwrite(BFILE *bfd, void *buf, size_t count); boffset_t blseek(BFILE *bfd, boffset_t offset, int whence); const char *stream_to_ascii(int stream); bool processWin32BackupAPIBlock (BFILE *bfd, void *pBuffer, ssize_t dwSize); #endif /* __BFILE_H */ bacula-15.0.3/src/findlib/create_file.c0000644000175000017500000004301714771010173017473 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Create a file, and reset the modes * * Kern Sibbald, November MM * */ #include "bacula.h" #include "find.h" #ifndef S_IRWXUGO #define S_IRWXUGO (S_IRWXU | S_IRWXG | S_IRWXO) #endif #ifndef IS_CTG #define IS_CTG(x) 0 #define O_CTG 0 #endif #ifndef O_EXCL #define O_EXCL 0 #endif #ifndef O_NOFOLLOW #define O_NOFOLLOW 0 #endif static int separate_path_and_file(JCR *jcr, char *fname, char *ofile); static int path_already_seen(JCR *jcr, char *path, int pnl); /* * Create the file, or the directory * * fname is the original filename * ofile is the output filename (may be in a different directory) * * Returns: CF_SKIP if file should be skipped * CF_ERROR on error * CF_EXTRACT file created and data to restore * CF_CREATED file created no data to restore * * Note, we create the file here, except for special files, * we do not set the attributes because we want to first * write the file, then when the writing is done, set the * attributes. * So, we return with the file descriptor open for normal * files. 
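 * A typical caller sequence is therefore (illustrative, simplified):
 *   switch (create_file(jcr, attr, &bfd, replace)) {
 *   case CF_EXTRACT:  write the restored data, then bclose(&bfd); break;
 *   case CF_CREATED:  nothing to write, just set the attributes;  break;
 *   }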
* */ int create_file(JCR *jcr, ATTR *attr, BFILE *bfd, int replace) { mode_t new_mode, parent_mode; int flags; uid_t uid; gid_t gid; int pnl; bool exists = false; struct stat mstatp; bfd->reparse_point = false; if (is_win32_stream(attr->data_stream)) { set_win32_backup(bfd); } else { set_portable_backup(bfd); } new_mode = attr->statp.st_mode; Dmsg3(200, "type=%d newmode=%x file=%s\n", attr->type, new_mode, attr->ofname); parent_mode = S_IWUSR | S_IXUSR | new_mode; gid = attr->statp.st_gid; uid = attr->statp.st_uid; #ifdef HAVE_WIN32 if (!bfd->use_backup_api) { // eliminate invalid windows filename characters from foreign filenames char *ch = (char *)attr->ofname; if (ch[0] != 0 && ch[1] != 0) { ch += 2; while (*ch) { switch (*ch) { case ':': case '<': case '>': case '*': case '?': case '|': *ch = '_'; break; } ch++; } } } #endif Dmsg2(400, "Replace=%c %d\n", (char)replace, replace); if (lstat(attr->ofname, &mstatp) == 0) { exists = true; } /* The file already exists and we are not updating it, we need * to choose if we skip the file or not based on the replace flag */ if (exists && attr->delta_seq == 0) { switch (replace) { case REPLACE_IFNEWER: /* Set attributes if we created this directory */ if (attr->type == FT_DIREND && path_list_lookup(jcr, attr->ofname)) { break; } if (attr->statp.st_mtime <= mstatp.st_mtime) { Qmsg(jcr, M_SKIPPED, 0, _("File skipped. Not newer: %s\n"), attr->ofname); return CF_SKIP; } break; case REPLACE_IFOLDER: if (attr->statp.st_mtime >= mstatp.st_mtime) { Qmsg(jcr, M_SKIPPED, 0, _("File skipped. Not older: %s\n"), attr->ofname); return CF_SKIP; } break; case REPLACE_NEVER: /* Set attributes if we created this directory */ if (attr->type == FT_DIREND && path_list_lookup(jcr, attr->ofname)) { break; } Qmsg(jcr, M_SKIPPED, 0, _("File skipped. Already exists: %s\n"), attr->ofname); return CF_SKIP; case REPLACE_ALWAYS: break; } } if (!exists && attr->delta_seq > 0) { Qmsg(jcr, M_SKIPPED, 0, _("File skipped. File must exists to apply a patch: %s\n"), attr->ofname); return CF_SKIP; } switch (attr->type) { case FT_RAW: /* raw device to be written */ case FT_FIFO: /* FIFO to be written to */ case FT_LNKSAVED: /* Hard linked, file already saved */ case FT_LNK: case FT_SPEC: /* fifo, ... to be backed up */ case FT_REGE: /* empty file */ case FT_REG: /* regular file */ /* * Note, we do not delete FT_RAW because these are device files * or FIFOs that should already exist. If we blow it away, * we may blow away a FIFO that is being used to read the * restore data, or we may blow away a partition definition. */ if (exists && attr->type != FT_RAW && attr->type != FT_FIFO && attr->stream != STREAM_UNIX_ATTRIBUTE_UPDATE && attr->delta_seq == 0) { /* Get rid of old copy */ Dmsg1(50, "unlink %s\n", attr->ofname); if (unlink(attr->ofname) == -1) { berrno be; Qmsg(jcr, M_ERROR, 0, _("File %s already exists and could not be replaced. ERR=%s.\n"), attr->ofname, be.bstrerror()); return CF_ERROR; } } /* * Here we do some preliminary work for all the above * types to create the path to the file if it does * not already exist. Below, we will split to * do the file type specific work */ pnl = separate_path_and_file(jcr, attr->fname, attr->ofname); if (pnl < 0) { return CF_ERROR; } /* * If path length is <= 0 we are making a file in the root * directory. Assume that the directory already exists. 
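 * For example, restoring "/etc/passwd" gives pnl=4, so makepath() is called
 * for "/etc" if that path has not been seen yet, while "/passwd" gives
 * pnl=0 and no path is created.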
*/ if (pnl > 0) { char savechr; savechr = attr->ofname[pnl]; attr->ofname[pnl] = 0; /* terminate path */ if (!path_already_seen(jcr, attr->ofname, pnl)) { Dmsg1(400, "Make path %s\n", attr->ofname); /* * If we need to make the directory, ensure that it is with * execute bit set (i.e. parent_mode), and preserve what already * exists. Normally, this should do nothing. */ if (!makepath(attr, attr->ofname, parent_mode, parent_mode, uid, gid, 1)) { Dmsg1(10, "Could not make path. %s\n", attr->ofname); attr->ofname[pnl] = savechr; /* restore full name */ return CF_ERROR; } } attr->ofname[pnl] = savechr; /* restore full name */ } /* Now we do the specific work for each file type */ switch(attr->type) { case FT_REGE: case FT_REG: flags = O_WRONLY | O_CREAT | O_BINARY | O_EXCL; /* Remove the O_EXCL flag if we will update an existing file */ if (attr->delta_seq > 0) { flags &= ~O_EXCL; } if (IS_CTG(attr->statp.st_mode)) { flags |= O_CTG; /* set contiguous bit if needed */ } Dmsg3(50, "Create=%s flags=%lld delta=%d\n", attr->ofname, (uint64_t)flags, (int)attr->delta_seq); if (is_bopen(bfd)) { Qmsg1(jcr, M_ERROR, 0, _("bpkt already open fid=%d\n"), bfd->fid); bclose(bfd); } set_fattrs(bfd, &attr->statp); if (attr->stream == STREAM_UNIX_ATTRIBUTE_UPDATE) { /* File is created and has valid contents, we want only to update it's metadata */ return CF_CREATED; } if ((bopen(bfd, attr->ofname, flags, S_IRUSR | S_IWUSR)) < 0) { berrno be; be.set_errno(bfd->berrno); Qmsg2(jcr, M_ERROR, 0, _("Could not create %s: ERR=%s\n"), attr->ofname, be.bstrerror()); Dmsg2(100,"Could not create %s: ERR=%s\n", attr->ofname, be.bstrerror()); return CF_ERROR; } return CF_EXTRACT; #ifndef HAVE_WIN32 // none of these exist in MS Windows case FT_RAW: /* Bacula raw device e.g. /dev/sda1 */ case FT_FIFO: /* Bacula fifo to save data */ case FT_SPEC: if (S_ISFIFO(attr->statp.st_mode)) { Dmsg1(400, "Restore fifo: %s\n", attr->ofname); if (mkfifo(attr->ofname, attr->statp.st_mode) != 0 && errno != EEXIST) { berrno be; Qmsg2(jcr, M_ERROR, 0, _("Cannot make fifo %s: ERR=%s\n"), attr->ofname, be.bstrerror()); return CF_ERROR; } } else if (S_ISSOCK(attr->statp.st_mode)) { Dmsg1(200, "Skipping restore of socket: %s\n", attr->ofname); #ifdef S_IFDOOR // Solaris high speed RPC mechanism } else if (S_ISDOOR(attr->statp.st_mode)) { Dmsg1(200, "Skipping restore of door file: %s\n", attr->ofname); #endif #ifdef S_IFPORT // Solaris event port for handling AIO } else if (S_ISPORT(attr->statp.st_mode)) { Dmsg1(200, "Skipping restore of event port file: %s\n", attr->ofname); #endif } else { Dmsg1(400, "Restore node: %s\n", attr->ofname); if (mknod(attr->ofname, attr->statp.st_mode, attr->statp.st_rdev) != 0 && errno != EEXIST) { berrno be; Qmsg2(jcr, M_ERROR, 0, _("Cannot make node %s: ERR=%s\n"), attr->ofname, be.bstrerror()); return CF_ERROR; } } /* * Here we are going to attempt to restore to a FIFO, which * means that the FIFO must already exist, AND there must * be some process already attempting to read from the * FIFO, so we open it write-only. 
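 * (Illustrative: something like "cat /path/to/fifo > /some/file &" must
 * already be running before the restore writes to the FIFO, otherwise the
 * bopen() below is timed out by the 60 second thread timer.)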
*/ if (attr->type == FT_RAW || attr->type == FT_FIFO) { btimer_t *tid; Dmsg1(400, "FT_RAW|FT_FIFO %s\n", attr->ofname); flags = O_WRONLY | O_BINARY; /* Timeout open() in 60 seconds */ if (attr->type == FT_FIFO) { Dmsg0(400, "Set FIFO timer\n"); tid = start_thread_timer(jcr, pthread_self(), 60); } else { tid = NULL; } if (is_bopen(bfd)) { Qmsg1(jcr, M_ERROR, 0, _("bpkt already open fid=%d\n"), bfd->fid); } Dmsg2(400, "open %s flags=0x%x\n", attr->ofname, flags); set_fattrs(bfd, &attr->statp); if ((bopen(bfd, attr->ofname, flags, 0)) < 0) { berrno be; be.set_errno(bfd->berrno); Qmsg2(jcr, M_ERROR, 0, _("Could not open %s: ERR=%s\n"), attr->ofname, be.bstrerror()); Dmsg2(400, "Could not open %s: ERR=%s\n", attr->ofname, be.bstrerror()); stop_thread_timer(tid); return CF_ERROR; } stop_thread_timer(tid); return CF_EXTRACT; } Dmsg1(400, "FT_SPEC %s\n", attr->ofname); return CF_CREATED; case FT_LNK: Dmsg2(130, "FT_LNK should restore: %s -> %s\n", attr->ofname, attr->olname); if (symlink(attr->olname, attr->ofname) != 0 && errno != EEXIST) { berrno be; Qmsg3(jcr, M_ERROR, 0, _("Could not symlink %s -> %s: ERR=%s\n"), attr->ofname, attr->olname, be.bstrerror()); return CF_ERROR; } return CF_CREATED; case FT_LNKSAVED: /* Hard linked, file already saved */ Dmsg2(130, "Hard link %s => %s\n", attr->ofname, attr->olname); if (link(attr->olname, attr->ofname) != 0) { berrno be; #ifdef HAVE_CHFLAGS struct stat s; /* * If using BSD user flags, maybe has a file flag * preventing this. So attempt to disable, retry link, * and reset flags. * Note that BSD securelevel may prevent disabling flag. */ if (stat(attr->olname, &s) == 0 && s.st_flags != 0) { if (chflags(attr->olname, 0) == 0) { if (link(attr->olname, attr->ofname) != 0) { /* restore original file flags even when linking failed */ if (chflags(attr->olname, s.st_flags) < 0) { Qmsg2(jcr, M_ERROR, 0, _("Could not restore file flags for file %s: ERR=%s\n"), attr->olname, be.bstrerror()); } #endif /* HAVE_CHFLAGS */ Qmsg3(jcr, M_ERROR, 0, _("Could not hard link %s -> %s: ERR=%s\n"), attr->ofname, attr->olname, be.bstrerror()); Dmsg3(200, "Could not hard link %s -> %s: ERR=%s\n", attr->ofname, attr->olname, be.bstrerror()); return CF_ERROR; #ifdef HAVE_CHFLAGS } /* finally restore original file flags */ if (chflags(attr->olname, s.st_flags) < 0) { Qmsg2(jcr, M_ERROR, 0, _("Could not restore file flags for file %s: ERR=%s\n"), attr->olname, be.bstrerror()); } } else { Qmsg2(jcr, M_ERROR, 0, _("Could not reset file flags for file %s: ERR=%s\n"), attr->olname, be.bstrerror()); } } else { Qmsg3(jcr, M_ERROR, 0, _("Could not hard link %s -> %s: ERR=%s\n"), attr->ofname, attr->olname, be.bstrerror()); return CF_ERROR; } #endif /* HAVE_CHFLAGS */ } return CF_CREATED; #endif } /* End inner switch */ case FT_REPARSE: case FT_JUNCTION: bfd->reparse_point = true; /* Fall through wanted */ case FT_DIRBEGIN: case FT_DIREND: Dmsg2(200, "Make dir mode=%o dir=%s\n", new_mode, attr->ofname); if (is_win32_stream(attr->data_stream) && attr->statp.st_rdev == WIN32_ROOT_POINT) { /* this is a root directory like C:\ or C:\anymountpoint, * don't restore HIDDEN and SYSTEM attributes */ new_mode &= ~S_ISVTX; // remove FILE_ATTRIBUTE_HIDDEN new_mode |= S_IRWXO; // remove FILE_ATTRIBUTE_SYSTEM } if (!makepath(attr, attr->ofname, new_mode, parent_mode, uid, gid, 0)) { return CF_ERROR; } /* * If we are using the Win32 Backup API, we open the * directory so that the security info will be read * and saved. 
*/ if (!is_portable_backup(bfd)) { if (is_bopen(bfd)) { Qmsg1(jcr, M_ERROR, 0, _("bpkt already open fid=%d\n"), bfd->fid); } set_fattrs(bfd, &attr->statp); if ((bopen(bfd, attr->ofname, O_WRONLY|O_BINARY, 0)) < 0) { berrno be; be.set_errno(bfd->berrno); #ifdef HAVE_WIN32 /* Check for trying to create a drive, if so, skip */ if (attr->ofname[1] == ':' && IsPathSeparator(attr->ofname[2]) && attr->ofname[3] == '\0') { return CF_SKIP; } #endif Qmsg2(jcr, M_ERROR, 0, _("Could not open %s: ERR=%s\n"), attr->ofname, be.bstrerror()); return CF_ERROR; } return CF_EXTRACT; } else { return CF_CREATED; } case FT_DELETED: Qmsg2(jcr, M_INFO, 0, _("Original file %s have been deleted: type=%d\n"), attr->fname, attr->type); break; /* The following should not occur */ case FT_NOACCESS: case FT_NOFOLLOW: case FT_NOSTAT: case FT_DIRNOCHG: case FT_NOCHG: case FT_ISARCH: case FT_NORECURSE: case FT_NOFSCHG: case FT_NOOPEN: Qmsg2(jcr, M_ERROR, 0, _("Original file %s not saved: type=%d\n"), attr->fname, attr->type); break; default: Qmsg2(jcr, M_ERROR, 0, _("Unknown file type %d; not restored: %s\n"), attr->type, attr->fname); Pmsg2(000, "Unknown file type %d; not restored: %s\n", attr->type, attr->fname); break; } return CF_ERROR; } /* * Returns: > 0 index into path where last path char is. * 0 no path * -1 filename is zero length */ static int separate_path_and_file(JCR *jcr, char *fname, char *ofile) { char *f, *p, *q; int fnl, pnl; /* Separate pathname and filename */ for (q=p=f=ofile; *p; p++) { #ifdef HAVE_WIN32 if (IsPathSeparator(*p)) { f = q; if (IsPathSeparator(p[1])) { p++; } } *q++ = *p; /* copy data */ #else if (IsPathSeparator(*p)) { f = q; /* possible filename */ } q++; #endif } if (IsPathSeparator(*f)) { f++; } *q = 0; /* terminate string */ fnl = q - f; if (fnl == 0) { /* The filename length must not be zero here because we * are dealing with a file (i.e. FT_REGE or FT_REG). */ Jmsg1(jcr, M_ERROR, 0, _("Zero length filename: %s\n"), fname); return -1; } pnl = f - ofile - 1; return pnl; } /* * Primitive caching of path to prevent recreating a pathname * each time as long as we remain in the same directory. */ static int path_already_seen(JCR *jcr, char *path, int pnl) { if (!jcr->cached_path) { jcr->cached_path = get_pool_memory(PM_FNAME); } if (jcr->cached_pnl == pnl && strcmp(path, jcr->cached_path) == 0) { return 1; } pm_strcpy(jcr->cached_path, path); jcr->cached_pnl = pnl; return 0; } bacula-15.0.3/src/findlib/drivetype.c0000644000175000017500000000573314771010173017247 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Implement routines to determine drive type (Windows specific). 
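 * For example (illustrative), drivetype("C:\\", dt, sizeof(dt)) fills dt with
 * one of "fixed", "removable", "remote", "cdrom" or "ramdisk" and returns
 * true, or returns false when the type cannot be determined.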
* * Written by Robert Nelson, June 2006 * */ #ifndef TEST_PROGRAM #include "bacula.h" #include "find.h" #else /* Set up for testing a stand alone program */ #include #include #include #define SUPPORTEDOSES \ "HAVE_WIN32\n" #define false 0 #define true 1 #define bstrncpy strncpy #define Dmsg0(n,s) fprintf(stderr, s) #define Dmsg1(n,s,a1) fprintf(stderr, s, a1) #define Dmsg2(n,s,a1,a2) fprintf(stderr, s, a1, a2) #endif /* * These functions should be implemented for each OS * * bool drivetype(const char *fname, char *dt, int dtlen); */ #if defined (HAVE_WIN32) /* Windows */ bool drivetype(const char *fname, char *dt, int dtlen) { CHAR rootpath[4]; UINT type; /* Copy Drive Letter, colon, and backslash to rootpath. bstrncpy will null-terminate the string */ bstrncpy(rootpath, fname, sizeof(rootpath)); type = GetDriveType(rootpath); switch (type) { case DRIVE_REMOVABLE: bstrncpy(dt, "removable", dtlen); return true; case DRIVE_FIXED: bstrncpy(dt, "fixed", dtlen); return true; case DRIVE_REMOTE: bstrncpy(dt, "remote", dtlen); return true; case DRIVE_CDROM: bstrncpy(dt, "cdrom", dtlen); return true; case DRIVE_RAMDISK: bstrncpy(dt, "ramdisk", dtlen); return true; case DRIVE_UNKNOWN: case DRIVE_NO_ROOT_DIR: default: return false; } } /* Windows */ #else /* No recognised OS */ bool drivetype(const char *fname, char *dt, int dtlen) { Dmsg0(10, "!!! drivetype() not implemented for this OS. !!!\n"); #ifdef TEST_PROGRAM Dmsg1(10, "Please define one of the following when compiling:\n\n%s\n", SUPPORTEDOSES); exit(EXIT_FAILURE); #endif return false; } #endif #ifdef TEST_PROGRAM int main(int argc, char **argv) { char *p; char dt[1000]; int status = 0; if (argc < 2) { p = (argc < 1) ? "drivetype" : argv[0]; printf("usage:\t%s path ...\n" "\t%s prints the drive type and pathname of the paths.\n", p, p); return EXIT_FAILURE; } while (*++argv) { if (!drivetype(*argv, dt, sizeof(dt))) { status = EXIT_FAILURE; } else { printf("%s\t%s\n", dt, *argv); } } return status; } #endif bacula-15.0.3/src/findlib/win32filter.h0000644000175000017500000000431214771010173017401 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* Pulled from other files by Alain Spineux */ #ifndef WIN32FILTER_H_ #define WIN32FILTER_H_ #include "bacula.h" /* this should physically correspond to WIN32_STREAM_ID * from winbase.h on Win32. We didn't include cStreamName * as we don't use it and don't need it for a correct struct size. 
*/ #define WIN32_BACKUP_DATA 1 typedef struct _BWIN32_STREAM_ID { int32_t dwStreamId; int32_t dwStreamAttributes; int64_t Size; int32_t dwStreamNameSize; } BWIN32_STREAM_ID, *LPBWIN32_STREAM_ID ; class Win32Filter { public: bool error; /* set when an error is detected */ bool initialized; int64_t skip_size; /* how many bytes we have to skip before next header */ int64_t data_size; /* how many data are expected in the stream */ int header_pos; /* the part of the header that was filled in by previous record */ BWIN32_STREAM_ID header; Win32Filter() { init(); }; void init() { error = false; initialized = false; skip_size = 0; data_size = 0; header_pos = 0; }; void copy(Win32Filter *f) { skip_size = f->skip_size; data_size = f->data_size; header_pos = f->header_pos; header = f->header; initialized = (skip_size != 0 || data_size != 0 || header_pos != 0); }; /* If the stream is HHHDDDDD, you can call have_data("HHHDDDDD", 8, ) * and it will return "DDDDD", 0, 5 */ bool have_data(char **raw, int64_t *raw_len, int64_t *data_len); }; #endif bacula-15.0.3/src/dird/0000755000175000017500000000000014771010173014373 5ustar bsbuildbsbuildbacula-15.0.3/src/dird/fd_cmds.c0000644000175000017500000013042614771010173016144 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * * Bacula Director -- fd_cmds.c -- send commands to File daemon * * Kern Sibbald, October MM * * This routine is run as a separate thread. There may be more * work to be done to make it totally reentrant!!!! * * Utility functions for sending info to File Daemon. * These functions are used by both backup and verify. 
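 * The dialogue is line oriented; for example (illustrative) the Director
 * sends the "JobId=... Job=... Authorization=..." command defined below and
 * waits for the "2000 OK Job ..." reply before sending the fileset and level.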
* */ #include "bacula.h" #include "dird.h" #include "findlib/find.h" const int dbglvl = 400; /* Commands sent to File daemon */ static char filesetcmd[] = "fileset%s%s\n"; /* set full fileset */ static char jobcmd[] = "JobId=%s Job=%s SDid=%u SDtime=%u Authorization=%s\n"; /* Note, mtime_only is not used here -- implemented as file option */ static char levelcmd[] = "level = %s%s%s mtime_only=%d %s%s\n"; static char runscript[] = "Run OnSuccess=%u OnFailure=%u AbortOnError=%u When=%u Command=%s\n"; static char runbeforenow[]= "RunBeforeNow\n"; static char bandwidthcmd[] = "setbandwidth=%lld Job=%s\n"; static char component_info[] = "component_info\n"; /* Responses received from File daemon */ static char OKinc[] = "2000 OK include\n"; static char OKjob[] = "2000 OK Job"; static char OKlevel[] = "2000 OK level\n"; static char OKRunScript[] = "2000 OK RunScript\n"; static char OKRunBeforeNow[] = "2000 OK RunBeforeNow\n"; static char OKRestoreObject[] = "2000 OK ObjectRestored\n"; static char OKComponentInfo[] = "2000 OK ComponentInfo\n"; static char OKBandwidth[] = "2000 OK Bandwidth\n"; /* Forward referenced functions */ static bool send_list_item(JCR *jcr, const char *code, char *item, BSOCK *fd); /* External functions */ extern DIRRES *director; extern int FDConnectTimeout; #define INC_LIST 0 #define EXC_LIST 1 static void delete_bsock_end_cb(JCR *jcr, void *ctx) { BSOCK *socket = (BSOCK *)ctx; free_bsock(socket); } /* 16.0.10 (12Jan24) x86_64-pc-linux-gnu,ubuntu,20.04 -> 160010 */ static uint64_t scan_version(char *str) { uint64_t version = 0; regex_t r1; regmatch_t pmatch[16]; regcomp(&r1, "^([0-9]+)\\.([0-9]+)\\.([0-9]+)", REG_EXTENDED); if (regexec(&r1, str, 4, pmatch, 0) == 0 && pmatch[1].rm_so == 0 && pmatch[1].rm_eo > 0 && pmatch[1].rm_eo < 50 && pmatch[2].rm_so > 0 && pmatch[2].rm_eo > 0 && (pmatch[2].rm_eo - pmatch[2].rm_so) < 50 && pmatch[3].rm_so > 0 && pmatch[3].rm_eo > 0 && (pmatch[3].rm_eo - pmatch[3].rm_so) < 50) { char buf[50]; bstrncpy(buf, str + pmatch[1].rm_so, pmatch[1].rm_eo - pmatch[1].rm_so + 1); version = str_to_uint64(buf) * 10000; bstrncpy(buf, str + pmatch[2].rm_so, pmatch[2].rm_eo - pmatch[2].rm_so + 1); version += str_to_uint64(buf) * 100; bstrncpy(buf, str + pmatch[3].rm_so, pmatch[3].rm_eo - pmatch[3].rm_so + 1); version += str_to_uint64(buf); } regfree(&r1); return version; } /* * Open connection with File daemon. * Try connecting every retry_interval (default 10 sec), and * give up after max_retry_time (default 30 mins). * If the return code is 0, the error is stored inside jcr->errmsg * Need to call free_bsock() if an error occurs outside of a job */ int connect_to_file_daemon(JCR *jcr, int retry_interval, int max_retry_time, int verbose) { BSOCK *fd = jcr->file_bsock; char ed1[30]; utime_t heart_beat; int status; if (!jcr->client) { Mmsg(jcr->errmsg, _("[DE0037] File daemon not defined for current Job\n")); Dmsg0(10, "No Client defined for the job.\n"); return 0; } if (jcr->client->heartbeat_interval) { heart_beat = jcr->client->heartbeat_interval; } else { heart_beat = director->heartbeat_interval; } if (!is_bsock_open(jcr->file_bsock)) { char name[MAX_NAME_LENGTH + 100]; POOL_MEM buf, tmp; bstrncpy(name, _("Client: "), sizeof(name)); bstrncat(name, jcr->client->name(), sizeof(name)); if (jcr->client->allow_fd_connections) { Dmsg0(DT_NETWORK, "Try to use the existing socket if any\n"); fd = jcr->client->getBSOCK(max_retry_time); /* Need to free the previous bsock, but without creating a race * condition. 
We will replace the BSOCK */ if (fd && jcr->file_bsock) { Dmsg0(DT_NETWORK, "Found a socket, keep it!\n"); job_end_push(jcr, delete_bsock_end_cb, (void *)jcr->file_bsock); } if (!fd) { Mmsg(jcr->errmsg, "[DE0030] No socket found of the client\n"); return 0; } jcr->file_bsock = fd; fd->set_jcr(jcr); /* TODO: Need to set TLS down */ if (fd->tls) { fd->free_tls(); } } else { if (!fd) { fd = jcr->file_bsock = new_bsock(); } fd->set_source_address(director->DIRsrc_addr); if (!fd->connect(jcr,retry_interval, max_retry_time, heart_beat, name, get_client_address(jcr, jcr->client, buf.addr()), NULL, jcr->client->FDport, verbose)) { fd->close(); pm_strcpy(jcr->errmsg, fd->errmsg); return 0; } Dmsg0(10, "Opened connection with File daemon\n"); } } fd->res = (RES *)jcr->client; /* save resource in BSOCK */ jcr->setJobStatus(JS_Running); if (!authenticate_file_daemon(jcr, &status, &jcr->errmsg)) { return 0; } /* * Now send JobId and authorization key */ if (jcr->sd_auth_key == NULL) { jcr->sd_auth_key = bstrdup("dummy"); } fd->fsend(jobcmd, edit_int64(jcr->JobId, ed1), jcr->Job, jcr->VolSessionId, jcr->VolSessionTime, jcr->sd_auth_key); if (!jcr->keep_sd_auth_key && strcmp(jcr->sd_auth_key, "dummy")) { memset(jcr->sd_auth_key, 0, strlen(jcr->sd_auth_key)); } Dmsg1(100, ">filed: %s", fd->msg); if (bget_dirmsg(jcr, fd, BSOCK_TYPE_FD) > 0) { Dmsg1(110, "msg); if (strncmp(fd->msg, OKjob, strlen(OKjob)) != 0) { Mmsg(jcr->errmsg, _("[DE0031] File daemon \"%s\" rejected Job command: %s\n"), jcr->client->hdr.name, fd->msg); return 0; } else { bool close_db = false; if (!jcr->db) { close_db = true; CAT *cat = jcr->client->catalog; jcr->db = db_init_database(jcr, cat->db_driver, cat->db_name, cat->db_user, cat->db_password, cat->db_address, cat->db_port, cat->db_socket, cat->db_ssl_mode, cat->db_ssl_key, cat->db_ssl_cert, cat->db_ssl_ca, cat->db_ssl_capath, cat->db_ssl_cipher, cat->mult_db_connections, cat->disable_batch_insert); if (!jcr->db || !db_open_database(jcr, jcr->db)) { if (jcr->db) { db_close_database(jcr, jcr->db); jcr->db = NULL; } } } if (jcr->db) { CLIENT_DBR cr; memset(&cr, 0, sizeof(cr)); bstrncpy(cr.Name, jcr->client->hdr.name, sizeof(cr.Name)); cr.AutoPrune = jcr->client->AutoPrune; cr.FileRetention = jcr->client->FileRetention; cr.JobRetention = jcr->client->JobRetention; /* information about plugins can be found after the Uname */ char *pos = strchr(fd->msg+strlen(OKjob)+1, ';'); if (pos) { *pos = 0; bstrncpy(cr.Plugins, pos+1, sizeof(cr.Plugins)); } bstrncpy(cr.Uname, fd->msg+strlen(OKjob)+1, sizeof(cr.Uname)); jcr->client_version = scan_version(cr.Uname); if (!db_update_client_record(jcr, jcr->db, &cr)) { Jmsg(jcr, M_WARNING, 0, _("[DE0028] Error updating Client record. ERR=%s\n"), db_strerror(jcr->db)); } if (close_db) { db_close_database(jcr, jcr->db); jcr->db = NULL; } } } } else { Mmsg(jcr->errmsg, _("[DE0031] FD gave bad response to JobId command: %s\n"), fd->bstrerror()); return 0; } return 1; } /* * This subroutine edits the last job start time into a * "since=date/time" buffer that is returned in the * variable since. This is used for display purposes in * the job report. The time in jcr->stime is later * passed to tell the File daemon what to do. 
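 * The resulting "since" string typically looks like
 * ", since=2024-01-12 10:00:00", or " (upgraded from Incremental)" when the
 * level had to be upgraded (values illustrative).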
*/ void get_level_since_time(JCR *jcr, char *since, int since_len) { int JobLevel; bool have_full; bool do_full = false; bool do_vfull = false; bool do_diff = false; bool print_reason2 = false; utime_t now; utime_t last_full_time = 0; utime_t last_diff_time; char prev_job[MAX_NAME_LENGTH], edl[50]; char last_full_stime[MAX_TIME_LENGTH]; const char *reason = ""; POOL_MEM reason2; since[0] = 0; /* If job cloned and a since time already given, use it */ if (jcr->cloned && jcr->stime && jcr->stime[0]) { bstrncpy(since, _(", since="), since_len); bstrncat(since, jcr->stime, since_len); return; } /* Make sure stime buffer is allocated */ if (!jcr->stime) { jcr->stime = get_pool_memory(PM_MESSAGE); } jcr->PrevJob[0] = jcr->stime[0] = 0; last_full_stime[0] = 0; /* * Lookup the last FULL backup job to get the time/date for a * differential or incremental save. */ switch (jcr->getJobLevel()) { case L_DIFFERENTIAL: case L_INCREMENTAL: POOLMEM *stime = get_pool_memory(PM_MESSAGE); /* Look up start time of last Full job */ now = (utime_t)time(NULL); jcr->jr.JobId = 0; /* flag to return since time */ /* * This is probably redundant, but some of the code below * uses jcr->stime, so don't remove unless you are sure. */ if (!db_find_job_start_time(jcr, jcr->db, &jcr->jr, &jcr->stime, jcr->PrevJob)) { do_full = true; reason = _("No prior or suitable Full backup found in catalog. "); } have_full = db_find_last_job_start_time(jcr, jcr->db, &jcr->jr, &stime, prev_job, L_FULL); if (have_full) { last_full_time = str_to_utime(stime); bstrncpy(last_full_stime, stime, sizeof(last_full_stime)); } else { do_full = true; /* No full, upgrade to one */ /* We try to determine if we have a previous Full backup with an other FileSetId */ DBId_t id = jcr->jr.FileSetId; jcr->jr.FileSetId = 0; if (db_find_last_job_start_time(jcr, jcr->db, &jcr->jr, &stime, prev_job, L_FULL)) { FILESET_DBR fs; memset(&fs, 0, sizeof(fs)); fs.FileSetId = id; /* Print more information about the last fileset */ if (db_get_fileset_record(jcr, jcr->db, &fs)) { Mmsg(reason2, _("The FileSet \"%s\" was modified on %s, this is after the last successful backup on %s."), fs.FileSet, fs.cCreateTime, stime); print_reason2=true; } reason = _("No prior or suitable Full backup found in catalog for the current FileSet. "); } else { reason = _("No prior or suitable Full backup found in catalog. "); } jcr->jr.FileSetId = id; } Dmsg4(50, "have_full=%d do_full=%d now=%lld full_time=%lld\n", have_full, do_full, now, last_full_time); /* Make sure the last diff is recent enough */ if (have_full && jcr->getJobLevel() == L_INCREMENTAL && jcr->job->MaxDiffInterval > 0) { /* Lookup last diff job */ if (db_find_last_job_start_time(jcr, jcr->db, &jcr->jr, &stime, prev_job, L_DIFFERENTIAL)) { last_diff_time = str_to_utime(stime); /* If no Diff since Full, use Full time */ if (last_diff_time < last_full_time) { last_diff_time = last_full_time; } Dmsg2(50, "last_diff_time=%lld last_full_time=%lld\n", last_diff_time, last_full_time); } else { /* No last differential, so use last full time */ last_diff_time = last_full_time; Dmsg1(50, "No last_diff_time setting to full_time=%lld\n", last_full_time); } do_diff = ((now - last_diff_time) >= jcr->job->MaxDiffInterval); if (do_diff) { reason = _("Max Diff Interval exceeded. 
"); } Dmsg2(50, "do_diff=%d diffInter=%lld\n", do_diff, jcr->job->MaxDiffInterval); } /* Note, do_full takes precedence over do_vfull and do_diff */ if (have_full && jcr->job->MaxFullInterval > 0) { do_full = ((now - last_full_time) >= jcr->job->MaxFullInterval); if (do_full) { reason = _("Max Full Interval exceeded. "); } } else if (have_full && jcr->job->MaxVirtualFullInterval > 0) { /* Not used in BEE, not a real job */ do_vfull = ((now - last_full_time) >= jcr->job->MaxVirtualFullInterval); } free_pool_memory(stime); if (do_full) { /* No recent Full job found, so upgrade this one to Full */ if (print_reason2) { Jmsg(jcr, M_INFO, 0, "%s\n", reason2.c_str()); } Jmsg(jcr, M_INFO, 0, _("%sDoing FULL backup.\n"), reason); bsnprintf(since, since_len, _(" (upgraded from %s)"), level_to_str(edl , sizeof(edl), jcr->getJobLevel())); jcr->setJobLevel(jcr->jr.JobLevel = L_FULL); } else if (do_vfull) { /* No recent Full job found, and MaxVirtualFull is set so upgrade this one to Virtual Full */ Jmsg(jcr, M_INFO, 0, "%s", db_strerror(jcr->db)); Jmsg(jcr, M_INFO, 0, _("No prior or suitable Full backup found in catalog. Doing Virtual FULL backup.\n")); bsnprintf(since, since_len, _(" (upgraded from %s)"), level_to_str(edl, sizeof(edl), jcr->getJobLevel())); jcr->setJobLevel(jcr->jr.JobLevel = L_VIRTUAL_FULL); } else if (do_diff) { /* No recent diff job found, so upgrade this one to Diff */ Jmsg(jcr, M_INFO, 0, _("%sDoing Differential backup.\n"), reason); pm_strcpy(jcr->stime, last_full_stime); bsnprintf(since, since_len, _(", since=%s (upgraded from %s)"), jcr->stime, level_to_str(edl, sizeof(edl), jcr->getJobLevel())); jcr->setJobLevel(jcr->jr.JobLevel = L_DIFFERENTIAL); } else { if (jcr->job->rerun_failed_levels) { POOLMEM *etime = get_pool_memory(PM_MESSAGE); /* Get the end time of our most recent successfull backup for this job */ /* This will be used to see if there have been any failures since then */ if (db_find_last_job_end_time(jcr, jcr->db, &jcr->jr, &etime, prev_job)) { /* See if there are any failed Differential/Full backups since the completion */ /* of our last successful backup for this job */ if (db_find_failed_job_since(jcr, jcr->db, &jcr->jr, etime, JobLevel)) { /* If our job is an Incremental and we have a failed job then upgrade. */ /* If our job is a Differential and the failed job is a Full then upgrade. */ /* Otherwise there is no reason to upgrade. */ if ((jcr->getJobLevel() == L_INCREMENTAL) || ((jcr->getJobLevel() == L_DIFFERENTIAL) && (JobLevel == L_FULL))) { Jmsg(jcr, M_INFO, 0, _("Prior failed job found in catalog. 
Upgrading to %s.\n"), level_to_str(edl, sizeof(edl), JobLevel)); bsnprintf(since, since_len, _(" (upgraded from %s)"), level_to_str(edl, sizeof(edl), jcr->getJobLevel())); jcr->setJobLevel(jcr->jr.JobLevel = JobLevel); jcr->jr.JobId = jcr->JobId; break; } } } free_pool_memory(etime); } bstrncpy(since, _(", since="), since_len); bstrncat(since, jcr->stime, since_len); } jcr->jr.JobId = jcr->JobId; break; } Dmsg3(100, "Level=%c last start time=%s job=%s\n", jcr->getJobLevel(), jcr->stime, jcr->PrevJob); } static void send_since_time(JCR *jcr) { BSOCK *fd = jcr->file_bsock; utime_t stime; char ed1[50]; stime = str_to_utime(jcr->stime); fd->fsend(levelcmd, "", NT_("since_utime "), edit_uint64(stime, ed1), 0, NT_("prev_job="), jcr->PrevJob); while (bget_dirmsg(jcr, fd, BSOCK_TYPE_FD) >= 0) { /* allow him to poll us to sync clocks */ Jmsg(jcr, M_INFO, 0, "%s\n", fd->msg); } } bool send_bwlimit(JCR *jcr, const char *Job) { BSOCK *fd = jcr->file_bsock; if (jcr->FDVersion >= 4) { fd->fsend(bandwidthcmd, jcr->max_bandwidth, Job); if (!response(jcr, fd, BSOCK_TYPE_FD, OKBandwidth, "Bandwidth", DISPLAY_ERROR)) { jcr->max_bandwidth = 0; /* can't set bandwidth limit */ return false; } } return true; } /* * Send level command to FD. * Used for backup jobs and estimate command. */ bool send_level_command(JCR *jcr) { BSOCK *fd = jcr->file_bsock; const char *accurate = jcr->accurate?"accurate_":""; const char *not_accurate = ""; const char *rerunning = jcr->rerunning?" rerunning ":" "; const char *estimate = jcr->estimate?" estimate ": ""; /* * Send Level command to File daemon */ switch (jcr->getJobLevel()) { case L_BASE: fd->fsend(levelcmd, not_accurate, "base", rerunning, 0, estimate, ""); break; /* L_NONE is the console, sending something off to the FD */ case L_NONE: case L_FULL: fd->fsend(levelcmd, not_accurate, "full", rerunning, 0, estimate, ""); break; case L_DIFFERENTIAL: fd->fsend(levelcmd, accurate, "differential", rerunning, 0, estimate, ""); send_since_time(jcr); break; case L_INCREMENTAL: fd->fsend(levelcmd, accurate, "incremental", rerunning, 0, estimate, ""); send_since_time(jcr); break; case L_SINCE: default: Jmsg2(jcr, M_FATAL, 0, _("Unimplemented backup level %d %c\n"), jcr->getJobLevel(), jcr->getJobLevel()); return 0; } Dmsg1(120, ">filed: %s", fd->msg); if (!response(jcr, fd, BSOCK_TYPE_FD, OKlevel, "Level", DISPLAY_ERROR)) { return false; } return true; } /* * Send either an Included or an Excluded list to FD */ static bool send_fileset(JCR *jcr) { FILESET *fileset = jcr->fileset; BSOCK *fd = jcr->file_bsock; STORE *store = jcr->store_mngr->get_wstore(); int num; bool include = true; for ( ;; ) { if (include) { num = fileset->num_includes; } else { num = fileset->num_excludes; } for (int i=0; iinclude_items[i]; fd->fsend("I\n"); } else { ie = fileset->exclude_items[i]; fd->fsend("E\n"); } if (ie->ignoredir) { fd->fsend("Z %s\n", ie->ignoredir); } for (j=0; jnum_opts; j++) { FOPTS *fo = ie->opts_list[j]; bool enhanced_wild = false; bool stripped_opts = false; bool compress_disabled = false; char newopts[MAX_FOPTS]; for (k=0; fo->opts[k]!='\0'; k++) { if (fo->opts[k]=='W') { enhanced_wild = true; break; } } /* * Strip out compression option Zn if disallowed * for this Storage. 
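 * (for example, with AllowCompression=No an Options string such as "Z6s"
 * is sent to the FD as just "s"; values illustrative)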
* Strip out dedup option dn if old FD */ bool strip_compress = store && !store->AllowCompress; if (strip_compress || jcr->FDVersion >= 11) { int j = 0; for (k=0; fo->opts[k]!='\0'; k++) { /* Z compress option is followed by the single-digit compress level or 'o' */ if (strip_compress && fo->opts[k]=='Z') { stripped_opts = true; compress_disabled = true; k++; /* skip level */ } else if (jcr->FDVersion < 11 && fo->opts[k]=='d') { stripped_opts = true; k++; /* skip level */ } else { newopts[j] = fo->opts[k]; j++; } } newopts[j] = '\0'; if (compress_disabled) { Jmsg(jcr, M_INFO, 0, _("FD compression disabled for this Job because AllowCompression=No in Storage resource.\n") ); } } if (stripped_opts) { /* Send the new trimmed option set without overwriting fo->opts */ fd->fsend("O %s\n", newopts); } else { /* Send the original options */ fd->fsend("O %s\n", fo->opts); } for (k=0; kregex.size(); k++) { fd->fsend("R %s\n", fo->regex.get(k)); } for (k=0; kregexdir.size(); k++) { fd->fsend("RD %s\n", fo->regexdir.get(k)); } for (k=0; kregexfile.size(); k++) { fd->fsend("RF %s\n", fo->regexfile.get(k)); } for (k=0; kwild.size(); k++) { fd->fsend("W %s\n", fo->wild.get(k)); } for (k=0; kwilddir.size(); k++) { fd->fsend("WD %s\n", fo->wilddir.get(k)); } for (k=0; kwildfile.size(); k++) { fd->fsend("WF %s\n", fo->wildfile.get(k)); } for (k=0; kwildbase.size(); k++) { fd->fsend("W%c %s\n", enhanced_wild ? 'B' : 'F', fo->wildbase.get(k)); } for (k=0; kbase.size(); k++) { fd->fsend("B %s\n", fo->base.get(k)); } for (k=0; kfstype.size(); k++) { fd->fsend("X %s\n", fo->fstype.get(k)); } for (k=0; kdrivetype.size(); k++) { fd->fsend("XD %s\n", fo->drivetype.get(k)); } if (fo->plugin) { fd->fsend("G %s\n", fo->plugin); } if (fo->reader) { fd->fsend("D %s\n", fo->reader); } if (fo->writer) { fd->fsend("T %s\n", fo->writer); } fd->fsend("N\n"); } for (j=0; jname_list.size(); j++) { item = (char *)ie->name_list.get(j); if (!send_list_item(jcr, "F ", item, fd)) { goto bail_out; } } fd->fsend("N\n"); for (j=0; jplugin_list.size(); j++) { item = (char *)ie->plugin_list.get(j); if (!send_list_item(jcr, "P ", item, fd)) { goto bail_out; } } fd->fsend("N\n"); } if (!include) { /* If we just did excludes */ break; /* all done */ } include = false; /* Now do excludes */ } fd->signal(BNET_EOD); /* end of data */ if (!response(jcr, fd, BSOCK_TYPE_FD, OKinc, "Include", DISPLAY_ERROR)) { goto bail_out; } return true; bail_out: jcr->setJobStatus(JS_ErrorTerminated); return false; } static bool send_list_item(JCR *jcr, const char *code, char *item, BSOCK *fd) { BPIPE *bpipe; FILE *ffd; char buf[2000]; int optlen, stat; char *p = item; switch (*p) { case '|': p++; /* skip over the | */ fd->msg = edit_job_codes(jcr, fd->msg, p, ""); bpipe = open_bpipe(fd->msg, 0, "r"); if (!bpipe) { berrno be; Jmsg(jcr, M_FATAL, 0, _("Cannot run program: %s. ERR=%s\n"), p, be.bstrerror()); return false; } bstrncpy(buf, code, sizeof(buf)); Dmsg1(500, "code=%s\n", buf); optlen = strlen(buf); while (fgets(buf+optlen, sizeof(buf)-optlen, bpipe->rfd)) { fd->msglen = Mmsg(fd->msg, "%s", buf); Dmsg2(500, "Inc/exc len=%d: %s", fd->msglen, fd->msg); if (!fd->send()) { close_bpipe(bpipe); Jmsg(jcr, M_FATAL, 0, _(">filed: write error on socket\n")); return false; } } if ((stat=close_bpipe(bpipe)) != 0) { berrno be; Jmsg(jcr, M_FATAL, 0, _("Error running program: %s. 
ERR=%s\n"), p, be.bstrerror(stat)); return false; } break; case '<': p++; /* skip over < */ if ((ffd = bfopen(p, "rb")) == NULL) { berrno be; Jmsg(jcr, M_FATAL, 0, _("Cannot open included file: %s. ERR=%s\n"), p, be.bstrerror()); return false; } bstrncpy(buf, code, sizeof(buf)); Dmsg1(500, "code=%s\n", buf); optlen = strlen(buf); while (fgets(buf+optlen, sizeof(buf)-optlen, ffd)) { fd->msglen = Mmsg(fd->msg, "%s", buf); if (!fd->send()) { fclose(ffd); Jmsg(jcr, M_FATAL, 0, _(">filed: write error on socket\n")); return false; } } fclose(ffd); break; case '\\': p++; /* skip over \ */ /* Note, fall through wanted */ default: pm_strcpy(fd->msg, code); fd->msglen = pm_strcat(fd->msg, p); Dmsg1(500, "Inc/Exc name=%s\n", fd->msg); if (!fd->send()) { Jmsg(jcr, M_FATAL, 0, _(">filed: write error on socket\n")); return false; } break; } return true; } /* * Send include list to File daemon */ bool send_include_list(JCR *jcr) { BSOCK *fd = jcr->file_bsock; if (jcr->fileset->new_include) { fd->fsend(filesetcmd, jcr->fileset->enable_vss ? " vss=1" : "", jcr->fileset->enable_snapshot ? " snap=1" : ""); return send_fileset(jcr); } return true; } /* * Send an include list with a plugin and listing= parameter */ bool send_ls_plugin_fileset(JCR *jcr, const char *plugin, const char *path) { BSOCK *fd = jcr->file_bsock; fd->fsend(filesetcmd, "" /* no vss */, "" /* no snapshot */); fd->fsend("I\n"); fd->fsend("O h\n"); /* is it required? */ fd->fsend("N\n"); fd->fsend("P %s%s listing=%s\n", plugin, strchr(plugin, ':') == NULL ? ":" : "", path); fd->fsend("N\n"); fd->signal(BNET_EOD); /* end of data */ if (!response(jcr, fd, BSOCK_TYPE_FD, OKinc, "Include", DISPLAY_ERROR)) { return false; } return true; } /* * Send a include list with only one directory and recurse=no */ bool send_ls_fileset(JCR *jcr, const char *path, bool dironly) { BSOCK *fd = jcr->file_bsock; fd->fsend(filesetcmd, "" /* no vss */, "" /* no snapshot */); fd->fsend("I\n"); if (dironly) { /* Display only directories */ fd->fsend("O eh\n"); /* Limit recursion to one directory */ fd->fsend("WF *\n"); } else { fd->fsend("O h\n"); /* Limit recursion to one directory */ } fd->fsend("N\n"); fd->fsend("F %s\n", path); fd->fsend("N\n"); fd->signal(BNET_EOD); /* end of data */ if (!response(jcr, fd, BSOCK_TYPE_FD, OKinc, "Include", DISPLAY_ERROR)) { return false; } return true; } /* * Send exclude list to File daemon * Under the new scheme, the Exclude list * is part of the FileSet sent with the * "include_list" above. */ bool send_exclude_list(JCR *jcr) { return true; } /* TODO: drop this with runscript.old_proto in bacula 1.42 */ static char runbefore[] = "RunBeforeJob %s\n"; static char runafter[] = "RunAfterJob %s\n"; static char OKRunBefore[] = "2000 OK RunBefore\n"; static char OKRunAfter[] = "2000 OK RunAfter\n"; int send_runscript_with_old_proto(JCR *jcr, int when, POOLMEM *msg) { int ret; Dmsg1(120, "bdird: sending old runcommand to fd '%s'\n",msg); if (when & SCRIPT_Before) { jcr->file_bsock->fsend(runbefore, msg); ret = response(jcr, jcr->file_bsock, BSOCK_TYPE_FD, OKRunBefore, "ClientRunBeforeJob", DISPLAY_ERROR); } else { jcr->file_bsock->fsend(runafter, msg); ret = response(jcr, jcr->file_bsock, BSOCK_TYPE_FD, OKRunAfter, "ClientRunAfterJob", DISPLAY_ERROR); } return ret; } /* END OF TODO */ /* * Send RunScripts to File daemon * 1) We send all runscript to FD, they can be executed Before, After, or twice * 2) Then, we send a "RunBeforeNow" command to the FD to tell him to do the * first run_script() call. 
(ie ClientRunBeforeJob) */ int send_runscripts_commands(JCR *jcr) { POOLMEM *msg = get_pool_memory(PM_FNAME); BSOCK *fd = jcr->file_bsock; RUNSCRIPT *cmd; bool launch_before_cmd = false; POOLMEM *ehost = get_pool_memory(PM_FNAME); int result; Dmsg0(120, "bdird: sending runscripts to fd\n"); if (!jcr->job->RunScripts) { goto norunscript; } foreach_alist(cmd, jcr->job->RunScripts) { if (cmd->can_run_at_level(jcr->getJobLevel()) && cmd->target) { ehost = edit_job_codes(jcr, ehost, cmd->target, ""); Dmsg2(200, "bdird: runscript %s -> %s\n", cmd->target, ehost); if (strcmp(ehost, jcr->client->name()) == 0) { pm_strcpy(msg, cmd->command); bash_spaces(msg); Dmsg1(120, "bdird: sending runscripts to fd '%s'\n", cmd->command); /* TODO: remove this with bacula 1.42 */ if (cmd->old_proto) { result = send_runscript_with_old_proto(jcr, cmd->when, msg); } else { /* On the FileDaemon, AtJobCompletion is a synonym of After */ cmd->when = (cmd->when == SCRIPT_AtJobCompletion) ? SCRIPT_After : cmd->when; fd->fsend(runscript, cmd->on_success, cmd->on_failure, cmd->fail_on_error, cmd->when, msg); result = response(jcr, fd, BSOCK_TYPE_FD, OKRunScript, "RunScript", DISPLAY_ERROR); launch_before_cmd = true; } if (!result) { goto bail_out; } } /* TODO : we have to play with other client */ /* else { send command to an other client } */ } } /* Tell the FD to execute the ClientRunBeforeJob */ if (launch_before_cmd) { fd->fsend(runbeforenow); if (!response(jcr, fd, BSOCK_TYPE_FD, OKRunBeforeNow, "RunBeforeNow", DISPLAY_ERROR)) { goto bail_out; } } norunscript: free_pool_memory(msg); free_pool_memory(ehost); return 1; bail_out: Jmsg(jcr, M_FATAL, 0, _("Client \"%s\" RunScript failed.\n"), ehost); free_pool_memory(msg); free_pool_memory(ehost); return 0; } struct OBJ_CTX { JCR *jcr; int count; }; static int restore_object_handler(void *ctx, int num_fields, char **row) { OBJ_CTX *octx = (OBJ_CTX *)ctx; JCR *jcr = octx->jcr; BSOCK *fd; fd = jcr->file_bsock; if (jcr->is_job_canceled()) { return 1; } /* Old File Daemon doesn't handle restore objects */ if (jcr->FDVersion < 3) { Jmsg(jcr, M_WARNING, 0, _("Client \"%s\" may not be used to restore " "this job. 
Please upgrade your client.\n"), jcr->client->name()); return 1; } if (jcr->FDVersion < 5) { /* Old version without PluginName */ fd->fsend("restoreobject JobId=%s %s,%s,%s,%s,%s,%s\n", row[0], row[1], row[2], row[3], row[4], row[5], row[6]); } else { /* bash spaces from PluginName */ bash_spaces(row[9]); fd->fsend("restoreobject JobId=%s %s,%s,%s,%s,%s,%s,%s\n", row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[9]); } Dmsg1(010, "Send obj hdr=%s", fd->msg); fd->msglen = pm_strcpy(fd->msg, row[7]); fd->send(); /* send Object name */ Dmsg1(010, "Send obj: %s\n", fd->msg); // fd->msglen = str_to_uint64(row[1]); /* object length */ // Dmsg1(000, "obj size: %lld\n", (uint64_t)fd->msglen); /* object */ db_unescape_object(jcr, jcr->db, row[8], /* Object */ str_to_uint64(row[1]), /* Object length */ &fd->msg, &fd->msglen); fd->send(); /* send object */ octx->count++; if (debug_level > 100) { for (int i=0; i < fd->msglen; i++) if (!fd->msg[i]) fd->msg[i] = ' '; Dmsg1(000, "Send obj: %s\n", fd->msg); } return 0; } /* Send the restore file list to the plugin */ void feature_send_restorefilelist(JCR *jcr, const char *plugin) { if (!jcr->bsr_list) { Dmsg0(10, "Unable to send restore file list\n"); return; } jcr->file_bsock->fsend("restorefilelist plugin=%s\n", plugin); scan_bsr(jcr); jcr->file_bsock->signal(BNET_EOD); } typedef struct { const char *name; /* Name of the feature */ alist *plugins; /* List of the plugins that can use the feature */ void (*handler)(JCR *, const char *plugin); /* handler for the feature */ } PluginFeatures; /* List of all the supported features. This table is global and * each job should do a copy. */ const static PluginFeatures plugin_features[] = { /* name plugins handler */ {PLUGIN_FEATURE_RESTORELISTFILES, NULL, feature_send_restorefilelist}, {NULL, NULL, NULL} }; #define NB_FEATURES (sizeof(plugin_features) / sizeof(PluginFeatures)) /* Take a plugin and a feature name, and fill the Feature list */ static void fill_feature_list(JCR *jcr, PluginFeatures *features, char *plugin, char *name) { for (int i=0; features[i].name != NULL ; i++) { if (strcasecmp(features[i].name, name) == 0) { if (features[i].plugins == NULL) { features[i].plugins = New(alist(10, owned_by_alist)); } Dmsg2(10, "plugin=%s match feature %s\n", plugin, name); features[i].plugins->append(bstrdup(plugin)); break; } } } /* Free the memory allocated by get_plugin_features() */ static void free_plugin_features(PluginFeatures *features) { if (!features) { return; } for (int i=0; features[i].name != NULL ; i++) { if (features[i].plugins != NULL) { delete features[i].plugins; } } free(features); } /* Can be used in Backup or Restore jobs to know what a plugin can do * Need to call free_plugin_features() to release the PluginFeatures argument */ static bool get_plugin_features(JCR *jcr, PluginFeatures **ret) { POOL_MEM buf; char ed1[128]; BSOCK *fd = jcr->file_bsock; PluginFeatures *features; char *p, *start; *ret = NULL; if (jcr->FDVersion < 14 || jcr->FDVersion == 213 || jcr->FDVersion == 214) { return false; /* File Daemon too old or not compatible */ } /* Copy the features table */ features = (PluginFeatures *)malloc(sizeof(PluginFeatures) * NB_FEATURES); memcpy(features, plugin_features, sizeof(plugin_features)); /* Get the list of the features that the plugins can request * ex: plugin=ndmp features=files,feature1,feature2 */ fd->fsend("PluginFeatures\n"); while (bget_dirmsg(jcr, fd, BSOCK_TYPE_FD) > 0) { buf.check_size(fd->msglen+1); if (sscanf(fd->msg, "2000 plugin=%127s features=%s", ed1, 
buf.c_str()) == 2) { /* We have buf=feature1,feature2,feature3 */ start = buf.c_str(); while ((p = next_name(&start)) != NULL) { fill_feature_list(jcr, features, ed1, p); } } else { Dmsg1(10, "Something wrong with the protocol %s\n", fd->msg); free_plugin_features(features); return false; } } *ret = features; return true; } /* See with the FD if we need to send the list of all the files * to be restored before the start of the job */ bool send_restore_file_list(JCR *jcr) { PluginFeatures *features=NULL; /* TODO: If we have more features, we can store the features list in the JCR */ if (!get_plugin_features(jcr, &features)) { return true; /* Not handled by FD */ } /* Now, we can deal with what the plugins want */ for (int i=0; features[i].name != NULL ; i++) { if (strcmp(features[i].name, PLUGIN_FEATURE_RESTORELISTFILES) == 0) { if (features[i].plugins != NULL) { char *plug; foreach_alist(plug, features[i].plugins) { features[i].handler(jcr, plug); } } break; } } free_plugin_features(features); return true; } /* * Send the plugin Restore Objects, which allow the * plugin to get information early in the restore * process. The RestoreObjects were created during * the backup by the plugin. */ bool send_restore_objects(JCR *jcr) { char ed1[50]; POOL_MEM query(PM_MESSAGE); BSOCK *fd; OBJ_CTX octx; if (!jcr->JobIds || !jcr->JobIds[0]) { return true; } octx.jcr = jcr; octx.count = 0; /* restore_object_handler is called for each file found */ /* send restore objects for all jobs involved */ Mmsg(query, get_restore_objects, jcr->JobIds, FT_RESTORE_FIRST); db_sql_query(jcr->db, query.c_str(), restore_object_handler, (void *)&octx); /* send config objects for the current restore job */ Mmsg(query, get_restore_objects, edit_uint64(jcr->JobId, ed1), FT_PLUGIN_CONFIG_FILLED); db_sql_query(jcr->db, query.c_str(), restore_object_handler, (void *)&octx); /* * Send to FD only if we have at least one restore object. * This permits backward compatibility with older FDs. */ if (octx.count > 0) { fd = jcr->file_bsock; fd->fsend("restoreobject end\n"); if (!response(jcr, fd, BSOCK_TYPE_FD, OKRestoreObject, "RestoreObject", DISPLAY_ERROR)) { Jmsg(jcr, M_FATAL, 0, _("RestoreObject failed.\n")); return false; } } return true; } /* * Send the plugin a list of component info files. These * were files that were created during the backup for * the VSS plugin. The list is a list of those component * files that have been chosen for restore. We * send them before the Restore Objects. */ bool send_component_info(JCR *jcr) { BSOCK *fd; char buf[2000]; bool ok = true; if (!jcr->component_fd) { return true; /* nothing to send */ } /* Don't send if old version FD */ if (jcr->FDVersion < 6) { goto bail_out; } rewind(jcr->component_fd); fd = jcr->file_bsock; fd->fsend(component_info); while (fgets(buf, sizeof(buf), jcr->component_fd)) { fd->fsend("%s", buf); Dmsg1(050, "Send component_info to FD: %s\n", buf); } fd->signal(BNET_EOD); if (!response(jcr, fd, BSOCK_TYPE_FD, OKComponentInfo, "ComponentInfo", DISPLAY_ERROR)) { Jmsg(jcr, M_FATAL, 0, _("ComponentInfo failed.\n")); ok = false; } bail_out: fclose(jcr->component_fd); jcr->component_fd = NULL; unlink(jcr->component_fname); free_and_null_pool_memory(jcr->component_fname); return ok; } /* * Read the attributes from the File daemon for * a Verify job and store them in the catalog. 
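 * Each attribute message received from the FD is expected to begin with
 * three scanf-able fields followed by the filename and the encoded
 * attributes, roughly (illustrative layout, see the parsing below):
 *   <FileIndex> <Stream> <Options-or-Digest> <filename>\0<attributes>
 * A STREAM_UNIX_ATTRIBUTES record is cached in jcr->ar; a following
 * digest record, if any, is attached to it before the entry is flushed
 * to the catalog.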
*/ int get_attributes_and_put_in_catalog(JCR *jcr) { BSOCK *fd; int n = 0; ATTR_DBR *ar = NULL; char digest[2*(MAXSTRING+1)+1]; /* escaped version of Digest */ fd = jcr->file_bsock; jcr->jr.FirstIndex = 1; jcr->FileIndex = 0; /* Start transaction allocates jcr->attr and jcr->ar if needed */ db_start_transaction(jcr, jcr->db); /* start transaction if not already open */ ar = jcr->ar; Dmsg0(120, "bdird: waiting to receive file attributes\n"); /* Pickup file attributes and digest */ while (!fd->errors && (n = bget_dirmsg(jcr, fd, BSOCK_TYPE_FD)) > 0) { int32_t file_index; int stream, len; char *p, *fn; char Digest[MAXSTRING+1]; /* either Verify opts or MD5/SHA1 digest */ /* Stop here if canceled */ if (jcr->is_job_canceled()) { jcr->cached_attribute = false; return 0; } if ((len = sscanf(fd->msg, "%ld %d %500s", &file_index, &stream, Digest)) != 3) { /* MAXSTRING */ Jmsg(jcr, M_FATAL, 0, _("msglen, fd->msg); jcr->setJobStatus(JS_ErrorTerminated); jcr->cached_attribute = false; return 0; } p = fd->msg; /* The following three fields were sscanf'ed above so skip them */ skip_nonspaces(&p); /* skip FileIndex */ skip_spaces(&p); skip_nonspaces(&p); /* skip Stream */ skip_spaces(&p); skip_nonspaces(&p); /* skip Opts_Digest */ p++; /* skip space */ Dmsg1(dbglvl, "Stream=%d\n", stream); if (stream == STREAM_UNIX_ATTRIBUTES || stream == STREAM_UNIX_ATTRIBUTES_EX) { if (jcr->cached_attribute) { Dmsg3(dbglvl, "Cached attr. Stream=%d fname=%s\n", ar->Stream, ar->fname, ar->attr); if (!db_create_file_attributes_record(jcr, jcr->db, ar)) { Jmsg1(jcr, M_FATAL, 0, _("Attribute create error. %s"), db_strerror(jcr->db)); } jcr->cached_attribute = false; } /* Any cached attr is flushed so we can reuse jcr->attr and jcr->ar */ fn = jcr->fname = check_pool_memory_size(jcr->fname, fd->msglen); while (*p != 0) { *fn++ = *p++; /* copy filename */ } *fn = *p++; /* term filename and point p to attribs */ pm_strcpy(jcr->attr, p); /* save attributes */ jcr->JobFiles++; jcr->FileIndex = file_index; ar->attr = jcr->attr; ar->fname = jcr->fname; ar->FileIndex = file_index; ar->Stream = stream; ar->link = NULL; ar->JobId = jcr->JobId; ar->ClientId = jcr->ClientId; ar->PathId = 0; ar->Filename = NULL; ar->Digest = NULL; ar->DigestType = CRYPTO_DIGEST_NONE; ar->DeltaSeq = 0; jcr->cached_attribute = true; Dmsg2(dbglvl, "dirdfname); Dmsg1(dbglvl, "dirdattr); jcr->FileId = ar->FileId; /* * First, get STREAM_UNIX_ATTRIBUTES and fill ATTR_DBR structure * Next, we CAN have a CRYPTO_DIGEST, so we fill ATTR_DBR with it (or not) * When we get a new STREAM_UNIX_ATTRIBUTES, we known that we can add file to the catalog * At the end, we have to add the last file */ } else if (crypto_digest_stream_type(stream) != CRYPTO_DIGEST_NONE) { if (jcr->FileIndex != file_index) { Jmsg3(jcr, M_ERROR, 0, _("%s index %d not same as attributes %d\n"), stream_to_ascii(stream), file_index, jcr->FileIndex); continue; } ar->Digest = digest; ar->DigestType = crypto_digest_stream_type(stream); db_escape_string(jcr, jcr->db, digest, Digest, strlen(Digest)); Dmsg4(dbglvl, "stream=%d DigestLen=%d Digest=%s type=%d\n", stream, strlen(digest), digest, ar->DigestType); } jcr->jr.JobFiles = jcr->JobFiles = file_index; jcr->jr.LastIndex = file_index; } if (fd->is_error()) { Jmsg1(jcr, M_FATAL, 0, _("bstrerror()); jcr->cached_attribute = false; return 0; } if (jcr->cached_attribute) { Dmsg3(dbglvl, "Cached attr with digest. 
Stream=%d fname=%s attr=%s\n", ar->Stream, ar->fname, ar->attr); if (!db_create_file_attributes_record(jcr, jcr->db, ar)) { Jmsg1(jcr, M_FATAL, 0, _("Attribute create error. %s"), db_strerror(jcr->db)); } jcr->cached_attribute = false; } jcr->setJobStatus(JS_Terminated); return 1; } bacula-15.0.3/src/dird/query.sql0000644000175000017500000000043614771010173016264 0ustar bsbuildbsbuild# # See the file /examples/sample-query.sql or /opt/bacula/scripts/sample-query.sql # for some sample queries. # # 1 :The default file is empty, see /opt/bacula/scripts/sample-query.sql for samples SELECT 'See /opt/bacula/scripts/sample-query.sql for samples' AS Info; bacula-15.0.3/src/dird/verify.c0000644000175000017500000010131214771010173016041 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * * Bacula Director -- verify.c -- responsible for running file verification * * Kern Sibbald, October MM * * Basic tasks done here: * Open DB * Open connection with File daemon and pass him commands * to do the verify. * When the File daemon sends the attributes, compare them to * what is in the DB. * */ #include "bacula.h" #include "dird.h" #include "findlib/find.h" /* Commands sent to File daemon */ static char verifycmd[] = "verify level=%s\n"; /* Responses received from File daemon */ static char OKverify[] = "2000 OK verify\n"; static char OKPluginOptions[] = "2000 OK plugin options\n"; /* Commands received from Storage daemon */ static char OKbootstrap[] = "3000 OK bootstrap\n"; /* Forward referenced functions */ static void prt_fname(JCR *jcr); static int missing_handler(void *ctx, int num_fields, char **row); /* * Called here before the job is run to do the job * specific setup. */ bool do_verify_init(JCR *jcr) { if (!allow_duplicate_job(jcr)) { return false; } switch (jcr->getJobLevel()) { case L_VERIFY_INIT: case L_VERIFY_CATALOG: case L_VERIFY_DISK_TO_CATALOG: jcr->store_mngr->reset_rwstorage(); break; case L_VERIFY_DATA: case L_VERIFY_VOLUME_TO_CATALOG: jcr->store_mngr->reset_wstorage(); break; default: Jmsg2(jcr, M_FATAL, 0, _("Unimplemented Verify level %d(%c)\n"), jcr->getJobLevel(), jcr->getJobLevel()); return false; } return true; } /* * Do a verification of the specified files against the Catlaog * * Returns: false on failure * true on success */ bool do_verify(JCR *jcr) { const char *level; BSOCK *fd, *sd; int stat; char ed1[100], edl[50]; JOB_DBR jr; JobId_t verify_jobid = 0; char *store_address; uint32_t store_port; const char *Name; STORE *rstore = jcr->store_mngr->get_rstore(); POOL_MEM buf, tmp; jcr->store_mngr->reset_wstorage(); memset(&jcr->previous_jr, 0, sizeof(jcr->previous_jr)); /* * Find JobId of last job that ran. Note, we do this when * the job actually starts running, not at schedule time, * so that we find the last job that terminated before * this job runs rather than before it is scheduled. This * permits scheduling a Backup and Verify at the same time, * but with the Verify at a lower priority. 
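 * For example (illustrative schedule): if a Backup at priority 10 and a
 * Verify at priority 11 are both scheduled for 23:05, the Verify waits
 * for the Backup to finish and, because this lookup happens at run time,
 * it then verifies that just-completed Backup rather than an older one.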
* * For VERIFY_CATALOG we want the JobId of the last INIT. * For VERIFY_VOLUME_TO_CATALOG, we want the JobId of the * last backup Job. */ if (jcr->getJobLevel() == L_VERIFY_CATALOG || jcr->getJobLevel() == L_VERIFY_VOLUME_TO_CATALOG || jcr->getJobLevel() == L_VERIFY_DISK_TO_CATALOG || jcr->getJobLevel() == L_VERIFY_DATA) { memcpy(&jr, &jcr->jr, sizeof(jr)); if (jcr->verify_job && (jcr->getJobLevel() == L_VERIFY_VOLUME_TO_CATALOG || jcr->getJobLevel() == L_VERIFY_DISK_TO_CATALOG || jcr->getJobLevel() == L_VERIFY_DATA)) { Name = jcr->verify_job->name(); } else { Name = NULL; } Dmsg1(100, "find last jobid for: %s\n", NPRT(Name)); /* see if user supplied a jobid= as run argument or from menu */ if (jcr->RestoreJobId) { verify_jobid = jcr->RestoreJobId; Dmsg1(100, "Supplied jobid=%d\n", verify_jobid); } else { if (!db_find_last_jobid(jcr, jcr->db, Name, &jr)) { if (jcr->getJobLevel() == L_VERIFY_CATALOG) { Jmsg(jcr, M_FATAL, 0, _( "Unable to find JobId of previous InitCatalog Job.\n" "Please run a Verify with Level=InitCatalog before\n" "running the current Job.\n")); } else { Jmsg(jcr, M_FATAL, 0, _( "Unable to find JobId of previous Job for this client.\n")); } return false; } verify_jobid = jr.JobId; } Dmsg1(100, "Last full jobid=%d\n", verify_jobid); } /* * Now get the job record for the previous backup that interests * us. We use the verify_jobid that we found above. */ if (jcr->getJobLevel() == L_VERIFY_CATALOG || jcr->getJobLevel() == L_VERIFY_VOLUME_TO_CATALOG || jcr->getJobLevel() == L_VERIFY_DISK_TO_CATALOG || jcr->getJobLevel() == L_VERIFY_DATA) { jcr->previous_jr.JobId = verify_jobid; if (!db_get_job_record(jcr, jcr->db, &jcr->previous_jr)) { Jmsg(jcr, M_FATAL, 0, _("Could not get job record for previous Job. ERR=%s"), db_strerror(jcr->db)); return false; } /* Keep the reference in the Job table to determine what is verified */ jcr->jr.PriorJobId = jcr->previous_jr.JobId; bstrncpy(jcr->jr.PriorJob, jcr->previous_jr.Job, sizeof(jcr->jr.PriorJob)); if (!(jcr->previous_jr.JobStatus == JS_Terminated || jcr->previous_jr.JobStatus == JS_Warnings)) { Jmsg(jcr, M_FATAL, 0, _("Last Job %d did not terminate normally. JobStatus=%c\n"), verify_jobid, jcr->previous_jr.JobStatus); return false; } Jmsg(jcr, M_INFO, 0, _("Verifying against JobId=%d Job=%s\n"), jcr->previous_jr.JobId, jcr->previous_jr.Job); /* Check for Malware */ if ( jcr->previous_jr.JobFiles > 0 && !jcr->previous_jr.PurgedFiles && jcr->job->CheckMalware) { Jmsg(jcr, M_INFO, 0, _("[DI0002] Checking file metadata for Malwares\n")); edit_int64(jcr->previous_jr.JobId, ed1); if (check_malware(jcr, ed1, buf.handle()) != 0) { Jmsg(jcr, M_ERROR, 0, "%s", buf.c_str()); } else { Jmsg(jcr, M_INFO, 0, "%s", buf.c_str()); } } } /* * If we are verifying a Volume, we need the Storage * daemon, so open a connection, otherwise, just * create a dummy authorization key (passed to * File daemon but not used). */ if (jcr->getJobLevel() == L_VERIFY_VOLUME_TO_CATALOG || jcr->getJobLevel() == L_VERIFY_DATA) { int stat; /* * Note: negative status is an error, zero status, means * no files were backed up, so skip calling SD and * client. 
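 * A positive status means a usable bootstrap file was written and the
 * job continues below with the normal SD and FD conversation.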
*/ stat = create_restore_bootstrap_file(jcr); if (stat < 0) { /* error */ return false; } else if (stat == 0) { /* No files, nothing to do */ verify_cleanup(jcr, JS_Terminated); /* clean up */ return true; /* get out */ } } else { jcr->sd_auth_key = bstrdup("dummy"); /* dummy Storage daemon key */ } /* Pass the original fileset to the client */ if (jcr->getJobLevel() == L_VERIFY_DATA) { FILESET_DBR fdbr; memset(&fdbr, 0, sizeof(fdbr)); fdbr.FileSetId = jcr->previous_jr.FileSetId; if (!db_get_fileset_record(jcr, jcr->db, &fdbr)) { Jmsg(jcr, M_FATAL, 0, _("Could not get fileset record from previous Job. ERR=%s"), db_strerror(jcr->db)); return false; } jcr->fileset = (FILESET *)GetResWithName(R_FILESET, fdbr.FileSet); if (!jcr->fileset) { if (jcr->verify_job) { jcr->fileset = jcr->verify_job->fileset; Jmsg(jcr, M_WARNING, 0, _("Could not find FileSet resource \"%s\" from previous Job\n"), fdbr.FileSet); Jmsg(jcr, M_INFO, 0, _("Using FileSet \"%\"\n"), jcr->fileset->name()); } else { Jmsg(jcr, M_FATAL, 0, _("Could not get FileSet resource for verify Job.")); return false; } } Dmsg1(50, "FileSet = %s\n", jcr->fileset->name()); } /* Pass the current fileset to the client */ if (jcr->getJobLevel() == L_VERIFY_DISK_TO_CATALOG && jcr->verify_job) { jcr->fileset = jcr->verify_job->fileset; } Dmsg2(100, "ClientId=%u JobLevel=%c\n", jcr->previous_jr.ClientId, jcr->getJobLevel()); if (!db_update_job_start_record(jcr, jcr->db, &jcr->jr)) { Jmsg(jcr, M_FATAL, 0, "%s", db_strerror(jcr->db)); return false; } /* Print Job Start message */ Jmsg(jcr, M_INFO, 0, _("Start Verify JobId=%s Level=%s Job=%s\n"), edit_uint64(jcr->JobId, ed1), level_to_str(edl, sizeof(edl), jcr->getJobLevel()), jcr->Job); if (jcr->getJobLevel() == L_VERIFY_VOLUME_TO_CATALOG || jcr->getJobLevel() == L_VERIFY_DATA) { /* * Start conversation with Storage daemon */ jcr->setJobStatus(JS_WaitSD); if (!connect_to_storage_daemon(jcr, 10, SDConnectTimeout, 1)) { Jmsg(jcr, M_FATAL, 0, "%s", jcr->errmsg); return false; } sd = jcr->store_bsock; build_connecting_info_log(_("Storage"), jcr->store_mngr->get_rstore()->name(), get_storage_address(jcr->client, jcr->store_mngr->get_rstore()), jcr->store_mngr->get_rstore()->SDport, jcr->store_bsock->tls ? true : false, buf.addr()); Jmsg(jcr, M_INFO, 0, "%s", buf.c_str()); /* * Now start a job with the Storage daemon */ if (!start_storage_daemon_job(jcr, jcr->store_mngr->get_rstore_list(), NULL, true /* wait */)) { return false; } jcr->sd_calls_client = jcr->client->sd_calls_client; /* * Send the bootstrap file -- what Volumes/files to restore */ if (!send_bootstrap_file(jcr, sd) || !response(jcr, sd, BSOCK_TYPE_SD, OKbootstrap, "Bootstrap", DISPLAY_ERROR)) { goto bail_out; } if (!jcr->sd_calls_client) { if (!run_storage_and_start_message_thread(jcr, sd)) { return false; } } } /* * OK, now connect to the File daemon * and ask him for the files. */ jcr->setJobStatus(JS_WaitFD); if (!connect_to_file_daemon(jcr, 10, FDConnectTimeout, 1)) { Jmsg(jcr, M_FATAL, 0, "%s", jcr->errmsg); goto bail_out; } jcr->setJobStatus(JS_Running); fd = jcr->file_bsock; /* Print connection info only for real jobs */ build_connecting_info_log(_("Client"), jcr->client->name(), get_client_address(jcr, jcr->client, tmp.addr()), jcr->client->FDport, fd->tls ? 
true : false, buf.addr()); Jmsg(jcr, M_INFO, 0, "%s", buf.c_str()); Dmsg0(30, ">filed: Send include list\n"); if (!send_include_list(jcr)) { goto bail_out; } Dmsg0(30, ">filed: Send exclude list\n"); if (!send_exclude_list(jcr)) { goto bail_out; } /* * Send Level command to File daemon, as well * as the Storage address if appropriate. */ switch (jcr->getJobLevel()) { case L_VERIFY_INIT: level = "init"; break; case L_VERIFY_CATALOG: level = "catalog"; break; case L_VERIFY_DATA: send_accurate_current_files(jcr); /* Wanted */ case L_VERIFY_VOLUME_TO_CATALOG: if (jcr->sd_calls_client) { if (jcr->FDVersion < 10) { Jmsg(jcr, M_FATAL, 0, _("The File daemon does not support SDCallsClient.\n")); goto bail_out; } if (!send_client_addr_to_sd(jcr)) { goto bail_out; } if (!run_storage_and_start_message_thread(jcr, jcr->store_bsock)) { return false; } store_address = rstore->address; /* dummy */ store_port = 0; /* flag that SD calls FD */ } else { /* * send Storage daemon address to the File daemon */ if (rstore->SDDport == 0) { rstore->SDDport = rstore->SDport; } store_address = get_storage_address(jcr->client, rstore); store_port = rstore->SDDport; } if (!send_store_addr_to_fd(jcr, rstore, store_address, store_port)) { goto bail_out; } if (!jcr->RestoreBootstrap) { Jmsg0(jcr, M_FATAL, 0, _("Deprecated feature ... use bootstrap.\n")); goto bail_out; } if (jcr->getJobLevel() == L_VERIFY_VOLUME_TO_CATALOG) { level = "volume"; } else { level = "data"; } break; case L_VERIFY_DISK_TO_CATALOG: level="disk_to_catalog"; break; default: Jmsg2(jcr, M_FATAL, 0, _("Unimplemented Verify level %d(%c)\n"), jcr->getJobLevel(), jcr->getJobLevel()); goto bail_out; } if (!send_runscripts_commands(jcr)) { goto bail_out; } if (jcr->plugin_options) { if (jcr->FDVersion < 15) { Jmsg1(jcr, M_FATAL, 0, _("Unable to send PluginOptions to FD. Please upgrade the FD from %d to 15.\n"), jcr->FDVersion); goto bail_out; } bash_spaces(jcr->plugin_options); fd->fsend("pluginoptions=%s\n", jcr->plugin_options); if (!response(jcr, fd, BSOCK_TYPE_FD, OKPluginOptions, "Verify", DISPLAY_ERROR)) { goto bail_out; } } /* * Send verify command/level to File daemon */ fd->fsend(verifycmd, level); if (!response(jcr, fd, BSOCK_TYPE_FD, OKverify, "Verify", DISPLAY_ERROR)) { goto bail_out; } /* * Now get data back from File daemon and * compare it to the catalog or store it in the * catalog depending on the run type. 
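 * Roughly (see the switch below): the Catalog, VolumeToCatalog and
 * DiskToCatalog levels compare the incoming attributes against the
 * catalog; InitCatalog stores them as the new reference; for the Data
 * level nothing is handled here beyond consuming the end-of-data signal.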
*/ /* Compare to catalog */ switch (jcr->getJobLevel()) { case L_VERIFY_CATALOG: Dmsg0(10, "Verify level=catalog\n"); jcr->sd_msg_thread_done = true; /* no SD msg thread, so it is done */ jcr->SDJobStatus = JS_Terminated; get_attributes_and_compare_to_catalog(jcr, jcr->previous_jr.JobId); break; case L_VERIFY_VOLUME_TO_CATALOG: Dmsg0(10, "Verify level=volume\n"); get_attributes_and_compare_to_catalog(jcr, jcr->previous_jr.JobId); break; case L_VERIFY_DISK_TO_CATALOG: Dmsg0(10, "Verify level=disk_to_catalog\n"); jcr->sd_msg_thread_done = true; /* no SD msg thread, so it is done */ jcr->SDJobStatus = JS_Terminated; get_attributes_and_compare_to_catalog(jcr, jcr->previous_jr.JobId); break; case L_VERIFY_INIT: /* Build catalog */ Dmsg0(10, "Verify level=init\n"); jcr->sd_msg_thread_done = true; /* no SD msg thread, so it is done */ jcr->SDJobStatus = JS_Terminated; get_attributes_and_put_in_catalog(jcr); db_end_transaction(jcr, jcr->db); /* terminate any open transaction */ flush_file_records(jcr); /* cached attribute + batch insert */ break; case L_VERIFY_DATA: /* Nothing special to do */ bget_dirmsg(jcr, fd, BSOCK_TYPE_FD); /* eat EOD */ break; default: Jmsg1(jcr, M_FATAL, 0, _("Unimplemented verify level %d\n"), jcr->getJobLevel()); goto bail_out; } stat = wait_for_job_termination(jcr); verify_cleanup(jcr, stat); return true; bail_out: return false; } /* * Release resources allocated during backup. * */ void verify_cleanup(JCR *jcr, int TermCode) { char sdt[50], edt[50], edl[50]; char ec1[30], ec2[30], elapsed[50]; char term_code[100], fd_term_msg[100], sd_term_msg[100]; const char *term_msg; int msg_type; const char *Name; // Dmsg1(100, "Enter verify_cleanup() TermCod=%d\n", TermCode); Dmsg3(900, "JobLevel=%c Expected=%u JobFiles=%u\n", jcr->getJobLevel(), jcr->ExpectedFiles, jcr->JobFiles); if ((jcr->getJobLevel() == L_VERIFY_VOLUME_TO_CATALOG || jcr->getJobLevel() == L_VERIFY_DATA) && jcr->ExpectedFiles != jcr->JobFiles) { Jmsg(jcr, M_ERROR, 0, _("The Verify Job has analyzed %d file(s) but it was expecting %d file(s)\n"), jcr->JobFiles, jcr->ExpectedFiles); TermCode = JS_ErrorTerminated; } /* Job needs to be marked as terminated before running the after runscript */ jcr->setJobStatus(TermCode); run_scripts(jcr, jcr->job->RunScripts, "AtJobCompletion"); /* Runscript could have changed JobStatus, * now check if it should be changed in the report or not */ TermCode = compareJobStatus(TermCode, jcr->JobStatus); update_job_end(jcr, TermCode); if (job_canceled(jcr)) { cancel_storage_daemon_job(jcr); } if (jcr->unlink_bsr && jcr->RestoreBootstrap) { unlink(jcr->RestoreBootstrap); jcr->unlink_bsr = false; } msg_type = M_INFO; /* by default INFO message */ switch (TermCode) { case JS_Terminated: if (jcr->JobErrors || jcr->SDErrors) { term_msg = _("Verify OK -- with warnings"); } else { term_msg = _("Verify OK"); } break; case JS_FatalError: case JS_ErrorTerminated: term_msg = _("*** Verify Error ***"); msg_type = M_ERROR; /* Generate error message */ break; case JS_Error: term_msg = _("Verify warnings"); break; case JS_Canceled: term_msg = _("Verify Canceled"); break; case JS_Differences: term_msg = _("Verify Differences"); break; default: term_msg = term_code; bsnprintf(term_code, sizeof(term_code), _("Inappropriate term code: %d %c\n"), TermCode, TermCode); break; } bstrftimes_na(sdt, sizeof(sdt), jcr->jr.StartTime); bstrftimes_na(edt, sizeof(edt), jcr->jr.EndTime); if (jcr->verify_job) { Name = jcr->verify_job->hdr.name; } else { Name = ""; } jobstatus_to_ascii(jcr->FDJobStatus, fd_term_msg, 
sizeof(fd_term_msg)); if (jcr->getJobLevel() == L_VERIFY_VOLUME_TO_CATALOG || jcr->getJobLevel() == L_VERIFY_DATA) { const char *accurate = "yes"; if (jcr->is_JobLevel(L_VERIFY_DATA)) { accurate = jcr->accurate ? "yes": "no"; } jobstatus_to_ascii(jcr->SDJobStatus, sd_term_msg, sizeof(sd_term_msg)); Jmsg(jcr, msg_type, 0, _("%s %s %s (%s):\n" " Build OS: %s %s %s\n" " JobId: %d\n" " Job: %s\n" " FileSet: %s\n" " Verify Level: %s\n" " Client: %s\n" " Verify JobId: %d\n" " Verify Job: %s\n" " Start time: %s\n" " End time: %s\n" " Elapsed time: %s\n" " Accurate: %s\n" " Files Expected: %s\n" " Files Examined: %s\n" " Non-fatal FD errors: %d\n" " SD Errors: %d\n" " FD termination status: %s\n" " SD termination status: %s\n" " Termination: %s\n\n"), BACULA, my_name, VERSION, LSMDATE, HOST_OS, DISTNAME, DISTVER, jcr->jr.JobId, jcr->jr.Job, jcr->fileset->hdr.name, level_to_str(edl, sizeof(edl), jcr->getJobLevel()), jcr->client->hdr.name, jcr->previous_jr.JobId, Name, sdt, edt, edit_utime(jcr->jr.RunTime, elapsed, sizeof(elapsed)), accurate, edit_uint64_with_commas(jcr->ExpectedFiles, ec1), edit_uint64_with_commas(jcr->JobFiles, ec2), jcr->JobErrors, jcr->SDErrors, fd_term_msg, sd_term_msg, term_msg); } else { Jmsg(jcr, msg_type, 0, _("%s %s %s (%s):\n" " Build OS: %s %s %s\n" " JobId: %d\n" " Job: %s\n" " FileSet: %s\n" " Verify Level: %s\n" " Client: %s\n" " Verify JobId: %d\n" " Verify Job: %s\n" " Start time: %s\n" " End time: %s\n" " Elapsed time: %s\n" " Files Examined: %s\n" " Non-fatal FD errors: %d\n" " FD termination status: %s\n" " Termination: %s\n\n"), BACULA, my_name, VERSION, LSMDATE, HOST_OS, DISTNAME, DISTVER, jcr->jr.JobId, jcr->jr.Job, jcr->fileset->hdr.name, level_to_str(edl, sizeof(edl), jcr->getJobLevel()), jcr->client->name(), jcr->previous_jr.JobId, Name, sdt, edt, edit_utime(jcr->jr.RunTime, elapsed, sizeof(elapsed)), edit_uint64_with_commas(jcr->JobFiles, ec1), jcr->JobErrors, fd_term_msg, term_msg); } Dmsg0(100, "Leave verify_cleanup()\n"); } /* * This routine is called only during a Verify */ void get_attributes_and_compare_to_catalog(JCR *jcr, JobId_t JobId) { BSOCK *fd; int n, len; FILE_DBR fdbr; struct stat statf; /* file stat */ struct stat statc; /* catalog stat */ char buf[MAXSTRING]; POOLMEM *fname = get_pool_memory(PM_MESSAGE); int do_Digest = CRYPTO_DIGEST_NONE; int32_t file_index = 0; memset(&fdbr, 0, sizeof(FILE_DBR)); fd = jcr->file_bsock; fdbr.JobId = JobId; jcr->FileIndex = 0; Dmsg0(20, "bdird: waiting to receive file attributes\n"); /* * Get Attributes and Signature from File daemon * We expect: * FileIndex * Stream * Options or Digest (MD5/SHA1) * Filename * Attributes * Link name ??? */ while ((n=bget_dirmsg(jcr, fd, BSOCK_TYPE_FD)) >= 0 && !job_canceled(jcr)) { int32_t stream, full_stream; char *attr, *p, *fn; char Opts_Digest[MAXSTRING]; /* Verify Opts or MD5/SHA1 digest */ if (job_canceled(jcr)) { free_pool_memory(fname); return; } fname = check_pool_memory_size(fname, fd->msglen); jcr->fname = check_pool_memory_size(jcr->fname, fd->msglen); Dmsg1(200, "Atts+Digest=%s\n", fd->msg); if ((len = sscanf(fd->msg, "%ld %d %100s", &file_index, &full_stream, fname)) != 3) { Jmsg3(jcr, M_FATAL, 0, _("birdmsglen, fd->msg); free_pool_memory(fname); return; } stream = full_stream & STREAMMASK_TYPE; Dmsg4(30, "Got hdr: FilInx=%d FullStream=%d Stream=%d fname=%s.\n", file_index, full_stream, stream, fname); /* * We read the Options or Signature into fname * to prevent overrun, now copy it to proper location. 
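 * (The third token is scanned into the pool buffer fname first because
 * its size tracks fd->msglen; it is then copied into the fixed-size
 * Opts_Digest array, and fname is reused for the real filename, which
 * is re-extracted from the raw message below.)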
*/ bstrncpy(Opts_Digest, fname, sizeof(Opts_Digest)); p = fd->msg; skip_nonspaces(&p); /* skip FileIndex */ skip_spaces(&p); skip_nonspaces(&p); /* skip Stream */ skip_spaces(&p); skip_nonspaces(&p); /* skip Opts_Digest */ p++; /* skip space */ fn = fname; while (*p != 0) { *fn++ = *p++; /* copy filename */ } *fn = *p++; /* term filename and point to attribs */ attr = p; /* * Got attributes stream, decode it */ if (stream == STREAM_UNIX_ATTRIBUTES || stream == STREAM_UNIX_ATTRIBUTES_EX) { int32_t LinkFIf, LinkFIc; Dmsg2(400, "file_index=%d attr=%s\n", file_index, attr); jcr->JobFiles++; jcr->FileIndex = file_index; /* remember attribute file_index */ jcr->previous_jr.FileIndex = file_index; decode_stat(attr, &statf, sizeof(statf), &LinkFIf); /* decode file stat packet */ do_Digest = CRYPTO_DIGEST_NONE; jcr->fn_printed = false; pm_strcpy(jcr->fname, fname); /* move filename into JCR */ Dmsg2(040, "dirdfname); Dmsg1(020, "dirdFileIndex <= 0) { continue; } if (!db_get_file_attributes_record(jcr, jcr->db, jcr->fname, &jcr->previous_jr, &fdbr)) { Jmsg(jcr, M_INFO, 0, _("New file: %s\n"), jcr->fname); Dmsg1(020, _("File not in catalog: %s\n"), jcr->fname); jcr->setJobStatus(JS_Differences); continue; } else { /* * mark file record as visited by stuffing the * current JobId, which is unique, into the MarkId field. */ db_mark_file_record(jcr, jcr->db, fdbr.FileId, jcr->JobId); } Dmsg3(400, "Found %s in catalog. inx=%d Opts=%s\n", jcr->fname, file_index, Opts_Digest); decode_stat(fdbr.LStat, &statc, sizeof(statc), &LinkFIc); /* decode catalog stat */ /* * Loop over options supplied by user and verify the * fields he requests. * * Keep this list synchronized with the configuration parser in inc_conf.c and in BWeb */ for (p=Opts_Digest; *p; p++) { char ed1[30], ed2[30]; switch (*p) { case 'i': /* compare INODEs */ if (statc.st_ino != statf.st_ino) { prt_fname(jcr); Jmsg(jcr, M_INFO, 0, _(" st_ino differ. Cat: %s File: %s\n"), edit_uint64((uint64_t)statc.st_ino, ed1), edit_uint64((uint64_t)statf.st_ino, ed2)); jcr->setJobStatus(JS_Differences); } break; case 'p': /* permissions bits */ if (statc.st_mode != statf.st_mode) { prt_fname(jcr); Jmsg(jcr, M_INFO, 0, _(" st_mode differ. Cat: %x File: %x\n"), (uint32_t)statc.st_mode, (uint32_t)statf.st_mode); jcr->setJobStatus(JS_Differences); } break; case 'n': /* number of links */ if (statc.st_nlink != statf.st_nlink) { prt_fname(jcr); Jmsg(jcr, M_INFO, 0, _(" st_nlink differ. Cat: %d File: %d\n"), (uint32_t)statc.st_nlink, (uint32_t)statf.st_nlink); jcr->setJobStatus(JS_Differences); } break; case 'u': /* user id */ if (statc.st_uid != statf.st_uid) { prt_fname(jcr); Jmsg(jcr, M_INFO, 0, _(" st_uid differ. Cat: %u File: %u\n"), (uint32_t)statc.st_uid, (uint32_t)statf.st_uid); jcr->setJobStatus(JS_Differences); } break; case 'g': /* group id */ if (statc.st_gid != statf.st_gid) { prt_fname(jcr); Jmsg(jcr, M_INFO, 0, _(" st_gid differ. Cat: %u File: %u\n"), (uint32_t)statc.st_gid, (uint32_t)statf.st_gid); jcr->setJobStatus(JS_Differences); } break; case 's': /* size */ if (statc.st_size != statf.st_size) { prt_fname(jcr); Jmsg(jcr, M_INFO, 0, _(" st_size differ. 
Cat: %s File: %s\n"), edit_uint64((uint64_t)statc.st_size, ed1), edit_uint64((uint64_t)statf.st_size, ed2)); jcr->setJobStatus(JS_Differences); } break; case 'a': /* access time */ if (statc.st_atime != statf.st_atime) { prt_fname(jcr); Jmsg(jcr, M_INFO, 0, _(" st_atime differs\n")); jcr->setJobStatus(JS_Differences); } break; case 'm': if (statc.st_mtime != statf.st_mtime) { prt_fname(jcr); Jmsg(jcr, M_INFO, 0, _(" st_mtime differs\n")); jcr->setJobStatus(JS_Differences); } break; case 'c': /* ctime */ if (statc.st_ctime != statf.st_ctime) { prt_fname(jcr); Jmsg(jcr, M_INFO, 0, _(" st_ctime differs\n")); jcr->setJobStatus(JS_Differences); } break; case 'd': /* file size decrease */ if (statc.st_size > statf.st_size) { prt_fname(jcr); Jmsg(jcr, M_INFO, 0, _(" st_size decrease. Cat: %s File: %s\n"), edit_uint64((uint64_t)statc.st_size, ed1), edit_uint64((uint64_t)statf.st_size, ed2)); jcr->setJobStatus(JS_Differences); } break; case '5': /* compare MD5 */ Dmsg1(500, "set Do_MD5 for %s\n", jcr->fname); do_Digest = CRYPTO_DIGEST_MD5; break; case '1': /* compare SHA1 */ do_Digest = CRYPTO_DIGEST_SHA1; break; case '2': /* compare SHA256 */ do_Digest = CRYPTO_DIGEST_SHA256; break; case '3': /* compare SHA512 */ do_Digest = CRYPTO_DIGEST_SHA512; break; case '6': /* compare XXHASH64 */ do_Digest = CRYPTO_DIGEST_XXHASH64; break; case '7': /* compare XXH3_64 */ do_Digest = CRYPTO_DIGEST_XXH3_64; break; case '8': /* compare XXH3_128 */ do_Digest = CRYPTO_DIGEST_XXH3_128; break; case ':': case 'V': default: break; } } /* * Got Digest Signature from Storage daemon * It came across in the Opts_Digest field. */ } else if (crypto_digest_stream_type(stream) != CRYPTO_DIGEST_NONE) { Dmsg2(400, "stream=Digest inx=%d Digest=%s\n", file_index, Opts_Digest); /* * When ever we get a digest it MUST have been * preceded by an attributes record, which sets attr_file_index */ if (jcr->FileIndex != file_index) { Jmsg2(jcr, M_FATAL, 0, _("MD5/SHA index %d not same as attributes %d\n"), file_index, jcr->FileIndex); free_pool_memory(fname); return; } if (do_Digest != CRYPTO_DIGEST_NONE) { db_escape_string(jcr, jcr->db, buf, Opts_Digest, strlen(Opts_Digest)); if (strcmp(buf, fdbr.Digest) != 0) { prt_fname(jcr); Jmsg(jcr, M_INFO, 0, _(" %s differs. File=%s Cat=%s\n"), stream_to_ascii(stream), buf, fdbr.Digest); jcr->setJobStatus(JS_Differences); } do_Digest = CRYPTO_DIGEST_NONE; } } jcr->JobFiles = file_index; } if (fd->is_error()) { berrno be; Jmsg2(jcr, M_FATAL, 0, _("bdirdfn_printed = false; bsnprintf(buf, sizeof(buf), "SELECT Path.Path,File.Filename FROM File,Path " "WHERE File.JobId=%d AND File.FileIndex > 0 " "AND File.MarkId!=%d AND File.PathId=Path.PathId ", JobId, jcr->JobId); /* missing_handler is called for each file found */ db_sql_query(jcr->db, buf, missing_handler, (void *)jcr); if (jcr->fn_printed) { jcr->setJobStatus(JS_Differences); } free_pool_memory(fname); } /* * We are called here for each record that matches the above * SQL query -- that is for each file contained in the Catalog * that was not marked earlier. This means that the file in * question is a missing file (in the Catalog but not on Disk). */ static int missing_handler(void *ctx, int num_fields, char **row) { JCR *jcr = (JCR *)ctx; if (job_canceled(jcr)) { return 1; } if (!jcr->fn_printed) { Qmsg(jcr, M_WARNING, 0, _("The following files are in the Catalog but not on %s:\n"), jcr->getJobLevel() == L_VERIFY_VOLUME_TO_CATALOG ? 
"the Volume(s)" : "disk"); jcr->fn_printed = true; } Qmsg(jcr, M_INFO, 0, " %s%s\n", row[0]?row[0]:"", row[1]?row[1]:""); return 0; } /* * Print filename for verify */ static void prt_fname(JCR *jcr) { if (!jcr->fn_printed) { Jmsg(jcr, M_INFO, 0, _("File: %s\n"), jcr->fname); jcr->fn_printed = true; } } bacula-15.0.3/src/dird/protos.h0000644000175000017500000004163214771010173016100 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Director external function prototypes */ /* admin.c */ extern bool do_admin_init(JCR *jcr); extern bool do_admin(JCR *jcr); extern void admin_cleanup(JCR *jcr, int TermCode); /* authenticate.c */ extern bool authenticate_storage_daemon(JCR *jcr, STORE *store, int *status, POOLMEM **errmsg); extern int authenticate_file_daemon(JCR *jcr, int *status, POOLMEM **errmsg); extern int authenticate_user_agent(UAContext *ua); /* autoprune.c */ extern void do_autoprune(JCR *jcr); extern void prune_volumes(JCR *jcr, bool InChanger, MEDIA_DBR *mr, STORE *store); /* autorecycle.c */ extern bool recycle_oldest_purged_volume(JCR *jcr, bool InChanger, MEDIA_DBR *mr, STORE *store); extern int recycle_volume(JCR *jcr, MEDIA_DBR *mr); extern bool find_recycled_volume(JCR *jcr, bool InChanger, MEDIA_DBR *mr, STORE *store); /* backup.c */ extern int wait_for_job_termination(JCR *jcr, int timeout=0); extern bool do_backup_init(JCR *jcr); extern bool do_backup(JCR *jcr); extern void backup_cleanup(JCR *jcr, int TermCode); extern void update_bootstrap_file(JCR *jcr); extern bool send_accurate_current_files(JCR *jcr); extern char *get_storage_address(CLIENT *cli, STORE *store); extern bool run_storage_and_start_message_thread(JCR *jcr, BSOCK *sd); extern bool send_client_addr_to_sd(JCR *jcr); extern bool send_store_addr_to_fd(JCR *jcr, STORE *store, char *store_address, uint32_t store_port); /* vbackup.c */ extern bool do_vbackup_init(JCR *jcr); extern bool do_vbackup(JCR *jcr); extern void vbackup_cleanup(JCR *jcr, int TermCode); /* bsr.c */ RBSR *new_bsr(); rblist *create_bsr_list(uint32_t JobId, int findex, int findex2); void free_bsr(rblist *bsr_list); bool complete_bsr(UAContext *ua, rblist *bsr_list); uint32_t write_bsr_file(UAContext *ua, RESTORE_CTX &rx); void display_bsr_info(UAContext *ua, RESTORE_CTX &rx); void add_findex(rblist *bsr_list, uint32_t JobId, int32_t findex); void add_findex_all(rblist *bsr_list, uint32_t JobId, const char *fileregex); RBSR_FINDEX *new_findex(); void make_unique_restore_filename(UAContext *ua, POOLMEM **fname); void print_bsr(UAContext *ua, RESTORE_CTX &rx); void scan_bsr(JCR *jcr); int split_bsr_loop(JCR *jcr, bootstrap_info &info); /* catreq.c */ extern void fd_catreq(JCR *jcr, BSOCK *bs); extern void catalog_request(JCR *jcr, BSOCK *bs); extern void catalog_update(JCR *jcr, BSOCK *bs); extern bool despool_attributes_from_file(JCR *jcr, const char *file); extern void remove_dummy_jobmedia_records(JCR *jcr); /* dird_conf.c */ extern const char *level_to_static_str(int level); extern 
char *level_to_str(char *buf, int len, int level); extern "C" char *job_code_callback_director(JCR *jcr, const char*, char *, int); extern char *get_client_address(JCR *jcr, CLIENT *client, POOLMEM *&buf); /* expand.c */ int variable_expansion(JCR *jcr, char *inp, POOLMEM **exp); /* fd_cmds.c */ extern int connect_to_file_daemon(JCR *jcr, int retry_interval, int max_retry_time, int verbose); extern bool send_ls_fileset(JCR *jcr, const char *path, bool dironly); extern bool send_ls_plugin_fileset(JCR *jcr, const char *plugin, const char *path); extern bool send_include_list(JCR *jcr); extern bool send_exclude_list(JCR *jcr); extern bool send_level_command(JCR *jcr); extern bool send_bwlimit(JCR *jcr, const char *Job); extern bool send_restore_file_list(JCR *jcr); extern int get_attributes_and_put_in_catalog(JCR *jcr); extern void get_attributes_and_compare_to_catalog(JCR *jcr, JobId_t JobId); extern int put_file_into_catalog(JCR *jcr, long file_index, char *fname, char *link, char *attr, int stream); extern void get_level_since_time(JCR *jcr, char *since, int since_len); extern int send_runscripts_commands(JCR *jcr); extern bool send_restore_objects(JCR *jcr); extern bool send_component_info(JCR *jcr); /* getmsg.c */ enum e_prtmsg { DISPLAY_ERROR, NO_DISPLAY }; extern bool response(JCR *jcr, BSOCK *fd, BSOCK_CLIENT_TYPE role, const char *resp, const char *cmd, e_prtmsg prtmsg); /* job.c */ extern void jmsg_large_jobid_list(JCR *jcr, const char *msg, const char *jobids); extern bool allow_duplicate_job(JCR *jcr); extern void set_jcr_defaults(JCR *jcr, JOB *job); extern void create_unique_job_name(JCR *jcr, const char *base_name); extern void update_job_end_record(JCR *jcr); extern bool get_or_create_client_record(JCR *jcr); extern void fileset_get_content(FILESET_DBR *fdbr, FILESET *fileset); extern bool get_or_create_fileset_record(JCR *jcr); extern DBId_t get_or_create_pool_record(JCR *jcr, char *pool_name); extern void apply_pool_overrides(JCR *jcr); extern bool apply_wstorage_overrides(JCR *jcr, POOL *original_pool); extern JobId_t run_job(JCR *jcr); extern JobId_t resume_job(JCR *jcr, JOB_DBR *jr); extern bool cancel_job(UAContext *ua, JCR *jcr, int wait, bool cancel=true); extern int cancel_inactive_job(UAContext *ua); extern void get_job_storage(USTORE *store, JOB *job, RUN *run); extern void init_jcr_job_record(JCR *jcr); extern void update_job_end(JCR *jcr, int TermCode); extern void copy_rwstorage(JCR *jcr, alist *storage, const char *where); extern void set_rwstorage(JCR *jcr, USTORE *store); extern bool setup_job(JCR *jcr); extern void create_clones(JCR *jcr); extern int create_restore_bootstrap_file(JCR *jcr); extern int create_restore_bootstrap_file(JCR *jcr, JobId_t jobid, int findex1, int findex2); extern void dird_free_jcr(JCR *jcr); extern void dird_free_jcr_pointers(JCR *jcr); extern void cancel_storage_daemon_job(JCR *jcr); extern bool run_console_command(JCR *jcr, const char *cmd); extern void sd_msg_thread_send_signal(JCR *jcr, int sig); void terminate_sd_msg_chan_thread(JCR *jcr); bool flush_file_records(JCR *jcr); void dir_close_batch_connection(JCR *jcr); const char *get_encrypt_str(int val); /* jobq.c */ extern bool inc_read_store(JCR *jcr); extern void dec_read_store(JCR *jcr); /* mac.c */ extern bool do_mac(JCR *jcr); extern bool do_mac_init(JCR *jcr); extern void mac_cleanup(JCR *jcr, int TermCode, int writeTermCode); extern bool set_mac_wstorage(UAContext *ua, JCR *jcr, POOL *pool, POOL *next_pool, const char *source); /* msgchan.c */ extern BSOCK 
*open_sd_bsock(UAContext *ua); extern void close_sd_bsock(UAContext *ua); extern bool connect_to_storage_daemon(JCR *jcr, int retry_interval, int max_retry_time, int verbose); extern bool start_storage_daemon_job(JCR *jcr, alist *rstore, alist *wstore, bool wait, bool send_bsr=false); extern bool start_storage_daemon_message_thread(JCR *jcr); extern int bget_dirmsg(JCR *jcr, BSOCK *bs, BSOCK_CLIENT_TYPE role); extern void wait_for_storage_daemon_termination(JCR *jcr); extern bool send_bootstrap_file(JCR *jcr, BSOCK *sd); /* next_vol.c */ bool check_max_pool_bytes(POOL_DBR *pr); bool use_max_pool_bytes(JCR *jcr); bool has_quota_reached(JCR *jcr, POOL_DBR *pr); bool has_quota_reached(JCR *jcr, MEDIA_DBR *mr); bool has_quota_reached(JCR *jcr); void set_storageid_in_mr(STORE *store, MEDIA_DBR *mr); int find_next_volume_for_append(JCR *jcr, MEDIA_DBR *mr, int index, bool create, bool purge, int use_protect, int vol_encrypted, POOL_MEM &errmsg); bool has_volume_expired(JCR *jcr, MEDIA_DBR *mr); void check_if_volume_valid_or_recyclable(JCR *jcr, MEDIA_DBR *mr, const char **reason); bool get_scratch_volume(JCR *jcr, bool InChanger, MEDIA_DBR *mr, STORE *store); /* newvol.c */ bool newVolume(JCR *jcr, MEDIA_DBR *mr, STORE *store, POOL_MEM &errmsg); /* restore.c */ extern bool do_restore(JCR *jcr); extern bool do_restore_init(JCR *jcr); extern void restore_cleanup(JCR *jcr, int TermCode); /* ua_acl.c */ bool acl_access_ok(UAContext *ua, int acl, const char *item); bool acl_access_ok(UAContext *ua, int acl, const char *item, int len); bool have_restricted_acl(UAContext *ua, int acl); bool acl_access_client_ok(UAContext *ua, const char *name, int32_t jobtype); bool acl_access_console_ok(UAContext *ua, const char *name); /* ua_cmds.c */ int tag_cmd(UAContext *ua, const char *cmd); bool get_uid_gid_from_acl(UAContext *ua, alist **uid, alist **gid, alist **dir); bool do_a_command(UAContext *ua); bool do_a_dot_command(UAContext *ua); int qmessagescmd(UAContext *ua, const char *cmd); bool open_new_client_db(UAContext *ua); bool open_client_db(UAContext *ua); bool open_db(UAContext *ua); void close_db(UAContext *ua); int cloud_volumes_cmd(UAContext *ua, const char *cmd, const char *mode); void enable_disable_job(UAContext *ua, JOB *job, bool setting, bool force); enum e_pool_op { POOL_OP_UPDATE, POOL_OP_CREATE }; int create_pool(JCR *jcr, BDB *db, POOL *pool, e_pool_op op); void set_pool_dbr_defaults_in_media_dbr(MEDIA_DBR *mr, POOL_DBR *pr); bool set_pooldbr_references(JCR *jcr, BDB *db, POOL_DBR *pr, POOL *pool); void set_pooldbr_from_poolres(POOL_DBR *pr, POOL *pool, e_pool_op op); int update_pool_references(JCR *jcr, BDB *db, POOL *pool); /* ua_input.c */ bool get_cmd(UAContext *ua, const char *prompt, bool subprompt=false); bool get_selection_list(UAContext *ua, sellist &sl, const char *prompt, bool subprompt=false); bool get_pint(UAContext *ua, const char *prompt); bool get_yesno(UAContext *ua, const char *prompt); bool is_yesno(char *val, int *ret); int get_enabled(UAContext *ua, const char *val); void parse_ua_args(UAContext *ua); bool is_comment_legal(UAContext *ua, const char *name); /* ua_label.c */ bool is_volume_name_legal(UAContext *ua, const char *name); int get_num_drives_from_SD(UAContext *ua); void update_slots(UAContext *ua); /* ua_update.c */ void update_vol_pool(UAContext *ua, char *val, MEDIA_DBR *mr, POOL_DBR *opr); /* ua_output.c */ void prtit(void *ctx, const char *msg); bool complete_jcr_for_job(JCR *jcr, JOB *job, POOL *pool); RUN *find_next_run(RUN *run, JOB *job, utime_t 
&runtime, int ndays); bool acl_access_jobid_ok(UAContext *ua, const char *jobids); /* ua_restore.c */ void find_storage_resource(UAContext *ua, RESTORE_CTX &rx, char *Storage, char *MediaType); bool insert_table_into_findex_list(UAContext *ua, RESTORE_CTX *rx, char *table); void new_rx(RESTORE_CTX *rx); void free_rx(RESTORE_CTX *rx); /* ua_server.c */ void bsendmsg(void *ua_ctx, const char *fmt, ...); void berrormsg(void *ua_ctx, const char *fmt, ...); void bmsg(UAContext *ua, const char *fmt, va_list arg_ptr); void bwarningmsg(void *ua_ctx, const char *fmt, ...); void binfomsg(void *ua_ctx, const char *fmt, ...); UAContext *new_ua_context(JCR *jcr); JCR *new_control_jcr(const char *base_name, int job_type); void free_ua_context(UAContext *ua); /* ua_select.c */ STORE *select_storage_resource(UAContext *ua, bool unique=false); JOB *select_job_resource(UAContext *ua); void select_enable_disable_job_resource(UAContext *ua, bool enable); JOB *select_restore_job_resource(UAContext *ua); CLIENT *select_enable_disable_client_resource(UAContext *ua, bool enable); CLIENT *select_client_resource(UAContext *ua, int32_t jobtype); FILESET *select_fileset_resource(UAContext *ua); SCHED *select_enable_disable_schedule_resource(UAContext *ua, bool enable); int select_pool_and_media_dbr(UAContext *ua, POOL_DBR *pr, MEDIA_DBR *mr); int select_media_dbr(UAContext *ua, MEDIA_DBR *mr); bool select_pool_dbr(UAContext *ua, POOL_DBR *pr, const char *argk="pool"); bool select_client_dbr(UAContext *ua, CLIENT_DBR *cr, int32_t jobtype); void start_prompt(UAContext *ua, const char *msg); void add_prompt(UAContext *ua, const char *prompt, char *unique=NULL); int do_prompt(UAContext *ua, const char *automsg, const char *msg, char *prompt, int max_prompt); int do_alist_prompt(UAContext *ua, const char *automsg, const char *msg, alist *selected); CAT *get_catalog_resource(UAContext *ua); bool get_storage_resource(UAContext *ua, USTORE *ustore, bool use_default, bool unique=false); int get_storage_drive(UAContext *ua, STORE *store); int get_storage_slot(UAContext *ua, STORE *store); int get_media_type(UAContext *ua, char *MediaType, int max_media); bool get_pool_dbr(UAContext *ua, POOL_DBR *pr, const char *argk="pool"); bool get_client_dbr(UAContext *ua, CLIENT_DBR *cr, int32_t jobtype); POOL *get_pool_resource(UAContext *ua); JOB *get_restore_job(UAContext *ua); POOL *select_pool_resource(UAContext *ua); int select_running_jobs(UAContext *ua, alist *jcrs, const char *reason); CLIENT *get_client_resource(UAContext *ua, int32_t jobtype); int get_job_dbr(UAContext *ua, JOB_DBR *jr); int find_arg_keyword(UAContext *ua, const char **list); int find_arg(UAContext *ua, const char *keyword); int find_arg_with_value(UAContext *ua, const char *keyword); int do_keyword_prompt(UAContext *ua, const char *msg, const char **list); int confirm_retention(UAContext *ua, utime_t *ret, const char *msg); int confirm_retention_yesno(UAContext *ua, utime_t ret, const char *msg); bool get_level_from_name(JCR *jcr, const char *level_name); int get_level_code_from_name(const char *level_name); int scan_storage_cmd(UAContext *ua, const char *cmd, bool allfrompool, int *drive, MEDIA_DBR *mr, POOL_DBR *pr, const char **action, char *storage, int *nb, uint32_t **results); int scan_truncate_cmd(UAContext *ua, const char *cmd, char *truncate_opt); /* ua_status.c */ void list_dir_status_header(UAContext *ua); char *dir_get_information(JCR *jcr, POOLMEM **mem); /* ua_tree.c */ bool user_select_files_from_tree(TREE_CTX *tree); bool 
user_select_files_from_tree_plugin_obj(TREE_CTX *tree); int insert_tree_handler(void *ctx, int num_fields, char **row); bool check_directory_acl(char **last_dir, alist *dir_acl, const char *path); /* ua_prune.c */ int prune_files(UAContext *ua, CLIENT *client, POOL *pool); int prune_jobs(UAContext *ua, CLIENT *client, POOL *pool, int JobType); int prune_stats(UAContext *ua, utime_t retention); bool prune_volume(UAContext *ua, MEDIA_DBR *mr); int job_delete_handler(void *ctx, int num_fields, char **row); int del_count_handler(void *ctx, int num_fields, char **row); int file_delete_handler(void *ctx, int num_fields, char **row); int get_prune_list_for_volume(UAContext *ua, MEDIA_DBR *mr, del_ctx *del); int exclude_running_jobs_from_list(del_ctx *prune_list); /* ua_purge.c */ bool is_volume_purged(UAContext *ua, MEDIA_DBR *mr, bool force=false); bool mark_media_purged(UAContext *ua, MEDIA_DBR *mr); void purge_files_from_volume(UAContext *ua, MEDIA_DBR *mr); bool purge_jobs_from_volume(UAContext *ua, MEDIA_DBR *mr, bool force=false); void purge_files_from_jobs(UAContext *ua, char *jobs); void purge_jobs_from_catalog(UAContext *ua, char *jobs); void purge_job_list_from_catalog(UAContext *ua, del_ctx &del); void purge_files_from_job_list(UAContext *ua, del_ctx &del); /* ua_run.c */ bool check_storage_presence(UAContext *ua, JobId_t JobId); extern int run_cmd(UAContext *ua, const char *cmd); extern int restart_cmd(UAContext *ua, const char *cmd); extern bool check_pool(int32_t JobType, int32_t JobLevel, POOL *pool, POOL *nextpool, const char **name); /* verify.c */ extern bool do_verify(JCR *jcr); extern bool do_verify_init(JCR *jcr); extern void verify_cleanup(JCR *jcr, int TermCode); /* snapshot.c */ int select_snapshot_dbr(UAContext *ua, SNAPSHOT_DBR *sr); void snapshot_list(UAContext *ua, int i, DB_LIST_HANDLER *sendit, e_list_type llist); int snapshot_cmd(UAContext *ua, const char *cmd); int snapshot_catreq(JCR *jcr, BSOCK *bs); int delete_snapshot(UAContext *ua); bool update_snapshot(UAContext *ua); int prune_snapshot(UAContext *ua); bool send_snapshot_retention(JCR *jcr, utime_t val); /* dird.c */ bool reload_config(int sig, alist *msglist); /* ua_collect.c */ bool update_permanent_stats(void *data); void initialize_statcollector(); void start_collector_threads(); void terminate_collector_threads(); void update_config_stats(); bool catreq_get_pool_info(JCR *jcr, BSOCK *bs); #if BEEF # include "bee_dird_uid.h" #else # define send_job_permissions(x) true # define mark_access_denied(a) #endif /* dedup_util.c */ bool is_dedup_ref(DEV_RECORD *rec, bool lazy); /* dir_authplugins.c */ void * dir_authplugin_getauthenticationData(JCR *jcr, const char *console, const char *param); bRC dir_authplugin_do_interaction(JCR *jcr, BSOCK *bsock, const char *pluginname, void *data, bool pluginall = false); bRC dir_authplugin_authenticate(JCR *jcr, BSOCK *bsock, const char *pluginname); /* malware.c */ int check_malware(JCR *jcr, const char *jobids, POOLMEM **errmsg); bacula-15.0.3/src/dird/ua_input.c0000644000175000017500000001371614771010173016373 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. 
You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * * Bacula Director -- User Agent Input and scanning code * * Kern Sibbald, October MMI * */ #include "bacula.h" #include "dird.h" /* Imported variables */ /* Exported functions */ /* * If subprompt is set, we send a BNET_SUB_PROMPT signal otherwise * send a BNET_TEXT_INPUT signal. */ bool get_cmd(UAContext *ua, const char *prompt, bool subprompt) { BSOCK *sock = ua->UA_sock; int stat; ua->cmd[0] = 0; if (!sock || ua->batch) { /* No UA or batch mode */ return false; } if (!subprompt && ua->api) { sock->signal(BNET_TEXT_INPUT); } sock->fsend("%s", prompt); if (!ua->api || subprompt) { sock->signal(BNET_SUB_PROMPT); } for ( ;; ) { stat = sock->recv(); if (stat == BNET_SIGNAL) { continue; /* ignore signals */ } if (sock->is_stop()) { return false; /* error or terminate */ } pm_strcpy(ua->cmd, sock->msg); strip_trailing_junk(ua->cmd); if (strcmp(ua->cmd, ".messages") == 0) { qmessagescmd(ua, ua->cmd); } /* Lone dot => break */ if (ua->cmd[0] == '.' && ua->cmd[1] == 0) { return false; } break; } return true; } /* * Get a selection list * We get a command from the user, scan it, then * return when OK * Returns true if OK * false if error */ bool get_selection_list(UAContext *ua, sellist &sl, const char *prompt, bool subprompt) { for ( ;; ) { if (!get_cmd(ua, prompt, subprompt)) { return false; } if (!sl.set_string(ua->cmd, true)) { ua->send_msg("%s", sl.get_errmsg()); continue; } return true; } } /* * Get a positive integer * Returns: false if failure * true if success => value in ua->pint32_val */ bool get_pint(UAContext *ua, const char *prompt) { double dval; ua->pint32_val = 0; ua->int64_val = 0; for (;;) { ua->cmd[0] = 0; if (!get_cmd(ua, prompt)) { return false; } /* Kludge for slots blank line => 0 */ if (ua->cmd[0] == 0 && strncmp(prompt, _("Enter slot"), strlen(_("Enter slot"))) == 0) { return true; } if (!is_a_number(ua->cmd)) { ua->warning_msg(_("Expected a positive integer, got: %s\n"), ua->cmd); continue; } errno = 0; dval = strtod(ua->cmd, NULL); if (errno != 0 || dval < 0) { ua->warning_msg(_("Expected a positive integer, got: %s\n"), ua->cmd); continue; } ua->pint32_val = (uint32_t)dval; ua->int64_val = (int64_t)dval; return true; } } /* * Test a yes or no response * Returns: false if failure * true if success => ret == 1 for yes * ret == 0 for no */ bool is_yesno(char *val, int *ret) { *ret = 0; if ((strcasecmp(val, _("yes")) == 0) || (strcasecmp(val, NT_("yes")) == 0)) { *ret = 1; } else if ((strcasecmp(val, _("no")) == 0) || (strcasecmp(val, NT_("no")) == 0)) { *ret = 0; } else { return false; } return true; } /* * Gets a yes or no response * Returns: false if failure * true if success => ua->pint32_val == 1 for yes * ua->pint32_val == 0 for no */ bool get_yesno(UAContext *ua, const char *prompt) { int len; int ret; ua->pint32_val = 0; for (;;) { if (ua->api) ua->UA_sock->signal(BNET_YESNO); if (!get_cmd(ua, prompt)) { return false; } len = strlen(ua->cmd); if (len < 1 || len > 3) { continue; } if (is_yesno(ua->cmd, &ret)) { ua->pint32_val = ret; return true; } ua->warning_msg(_("Invalid response. 
You must answer yes or no.\n")); } } /* * Gets an Enabled value => 0, 1, 2, yes, no, archived * Returns: 0, 1, 2 if OK * -1 on error */ int get_enabled(UAContext *ua, const char *val) { int Enabled = -1; if (strcasecmp(val, "yes") == 0 || strcasecmp(val, "true") == 0) { Enabled = 1; } else if (strcasecmp(val, "no") == 0 || strcasecmp(val, "false") == 0) { Enabled = 0; } else if (strcasecmp(val, "archived") == 0) { Enabled = 2; } else { Enabled = atoi(val); } if (Enabled < 0 || Enabled > 2) { ua->error_msg(_("Invalid Enabled value, it must be yes, no, archived, 0, 1, or 2\n")); return -1; } return Enabled; } void parse_ua_args(UAContext *ua) { parse_args(ua->cmd, &ua->args, &ua->argc, ua->argk, ua->argv, MAX_CMD_ARGS); } /* * Check if the comment has legal characters * If ua is non-NULL send the message */ bool is_comment_legal(UAContext *ua, const char *name) { int len; const char *p; const char *forbid = "'<>&\\\""; /* Restrict the characters permitted in the comment */ for (p=name; *p; p++) { if (!strchr(forbid, (int)(*p))) { continue; } if (ua) { ua->error_msg(_("Illegal character \"%c\" in a comment.\n"), *p); } return 0; } len = strlen(name); if (len >= 4096) { if (ua) { ua->error_msg(_("Comment too long.\n")); } return 0; } if (len == 0) { if (ua) { ua->error_msg(_("Comment must be at least one character long.\n")); } return 0; } return 1; } bacula-15.0.3/src/dird/bsr.h0000644000175000017500000000456314771010173015342 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * * Bootstrap Record header file * * BSR (bootstrap record) handling routines split from * ua_restore.c July MMIII * * Kern Sibbald, July MMII * */ #ifndef BSR_H #define BSR_H /* FileIndex entry in restore bootstrap record */ struct RBSR_FINDEX { rblink link; int32_t findex; int32_t findex2; }; /* * Restore bootstrap record -- not the real one, but useful here * The restore bsr is a chain of BSR records (linked by next). * Each BSR represents a single JobId, and within it, it * contains a linked list of file indexes for that JobId. * The complete_bsr() routine, will then add all the volumes * on which the Job is stored to the BSR. 
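 *
 * Added illustrative note (not part of the original comment): for a restore
 * that touches JobIds 101 and 102, the chain built here would look roughly
 * like
 *    RBSR(JobId=101) -> fi_list = { 1-250, 260 }
 *    RBSR(JobId=102) -> fi_list = { 3, 75-80 }
 * where each fi_list entry is an RBSR_FINDEX range and complete_bsr()
 * later fills in the VolParams (Volume names, file/block addresses) needed
 * to read those files back. The JobIds and ranges above are invented for
 * illustration only.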
*/ struct RBSR { rblink link; JobId_t JobId; /* JobId this bsr */ uint32_t VolSessionId; uint32_t VolSessionTime; int VolCount; /* Volume parameter count */ VOL_PARAMS *VolParams; /* Volume, start/end file/blocks */ rblist *fi_list; /* File indexes this JobId */ char *fileregex; /* Only restore files matching regex */ /* If we extend an existing fi, keep the memory for the next insert */ RBSR_FINDEX *m_fi; }; class UAContext; /* Structure used to quickly scan the BSR file and find * the right storage daemon to connect to */ struct bootstrap_info { FILE *bs; UAContext *ua; char storage[MAX_NAME_LENGTH+1]; alist *split_list; /* when a BSR cannot be restored in once, this is a list */ /* of offset where to split the BSR to send to the SD */ boffset_t *next_split_off; /* the next split position */ }; bool open_bootstrap_file(JCR *jcr, bootstrap_info &info); void close_bootstrap_file(bootstrap_info &info); #endif bacula-15.0.3/src/dird/snapshot.c0000644000175000017500000005664414771010173016415 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ #include "bacula.h" #include "dird.h" static char CreateSnap[] = "CatReq Job=%127s new_snapshot name=%127s volume=%s device=%s tdate=%d type=%127s retention=%50s"; static char ListSnap[] = "CatReq Job=%127s list_snapshot name=%127s volume=%s device=%s tdate=%d type=%127s before=%50s after=%50s"; static char DelSnap[] = "CatReq Job=%127s del_snapshot name=%127s device=%s"; static char snapretentioncmd[] = "snapshot retention=%s\n"; static void send_list(void *ctx, const char *msg) { BSOCK *bs = (BSOCK *)ctx; bs->fsend("%s", msg); } /* Scan command line for common snapshot arguments */ static void snapshot_scan_cmdline(UAContext *ua, int start, SNAPSHOT_DBR *snapdbr) { for (int j=start; jargc; j++) { if (strcasecmp(ua->argk[j], NT_("device")) == 0 && ua->argv[j]) { snapdbr->Device = bstrdup(ua->argv[j]); snapdbr->need_to_free = true; } else if (strcasecmp(ua->argk[j], NT_("jobid")) == 0 && ua->argv[j]) { snapdbr->JobId = str_to_int64(ua->argv[j]); } else if (strcasecmp(ua->argk[j], NT_("type")) == 0 && ua->argv[j]) { bstrncpy(snapdbr->Type, ua->argv[j], sizeof(snapdbr->Type)); } else if (strcasecmp(ua->argk[j], NT_("client")) == 0 && ua->argv[j]) { bstrncpy(snapdbr->Client, ua->argv[j], sizeof(snapdbr->Client)); } else if (strcasecmp(ua->argk[j], NT_("snapshotid")) == 0 && ua->argv[j]) { snapdbr->SnapshotId = str_to_int64(ua->argv[j]); } else if (strcasecmp(ua->argk[j], NT_("snapshot")) == 0 && ua->argv[j]) { bstrncpy(snapdbr->Name, ua->argv[j], sizeof(snapdbr->Name)); } else if (strcasecmp(ua->argk[j], NT_("volume")) == 0 && ua->argv[j]) { snapdbr->Volume = bstrdup(ua->argv[j]); snapdbr->need_to_free = true; } else if (strcasecmp(ua->argk[j], NT_("createdate")) == 0 && ua->argv[j]) { bstrncpy(snapdbr->CreateDate, ua->argv[j], sizeof(snapdbr->CreateDate)); snapdbr->CreateTDate = str_to_utime(ua->argv[j]); } else if (strcasecmp(ua->argk[j], NT_("createtdate")) == 0 && ua->argv[j]) { 
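         /* Added note: a raw epoch value passed as createtdate is converted
          * below with str_to_uint64(), and the human-readable CreateDate is
          * then regenerated from it with bstrutime(). */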
snapdbr->CreateTDate = str_to_uint64(ua->argv[j]); bstrutime(snapdbr->CreateDate, sizeof(snapdbr->CreateDate), snapdbr->CreateTDate); } else if (strcasecmp(ua->argk[j], NT_("name")) == 0 && ua->argv[j]) { bstrncpy(snapdbr->Name, ua->argv[j], sizeof(snapdbr->Name)); } else if (strcasecmp(ua->argk[j], NT_("size")) == 0 && ua->argv[j]) { snapdbr->Size = str_to_uint64(ua->argv[j]); } else if (strcasecmp(ua->argk[j], NT_("status")) == 0 && ua->argv[j]) { snapdbr->status = str_to_uint64(ua->argv[j]); } else if (strcasecmp(ua->argk[j], NT_("error")) == 0 && ua->argv[j]) { snapdbr->errmsg = bstrdup(ua->argv[j]); unbash_spaces(snapdbr->errmsg); snapdbr->need_to_free = true; } else { continue; } } } /* Get a snapshot record, and check that the current UA can access to the Client/FileSet */ static int get_snapshot_record(UAContext *ua, SNAPSHOT_DBR *snapdbr) { if (!open_client_db(ua)) { Dmsg0(10, "Unable to open database\n"); return 0; } if (!db_get_snapshot_record(ua->jcr, ua->db, snapdbr)) { Dmsg0(10, "Unable to get snapshot record\n"); return 0; } /* Need to check if the client is authorized */ if (!acl_access_client_ok(ua, snapdbr->Client, JT_BACKUP_RESTORE)) { Dmsg0(10, "Client access denied\n"); return 0; } if (snapdbr->FileSetId && !acl_access_ok(ua, FileSet_ACL, snapdbr->FileSet)) { Dmsg0(10, "Fileset access denied\n"); return 0; } return 1; } static int check_response(UAContext *ua, BSOCK *sd, const char *resp, const char *cmd) { if (sd->errors) { return 0; } if (bget_msg(sd) > 0) { unbash_spaces(sd->msg); if (strcmp(sd->msg, resp) == 0) { return 1; } } if (sd->is_error()) { ua->error_msg(_("Comm error with SD. bad response to %s. ERR=%s\n"), cmd, sd->bstrerror()); } else { ua->error_msg(_("Bad response from SD to %s command. Wanted %s, got %s len=%ld\n"), cmd, resp, sd->msg, sd->msglen); } return 0; } bool send_snapshot_retention(JCR *jcr, utime_t val) { BSOCK *fd = jcr->file_bsock; char ed1[50]; if (val > 0 && jcr->FDVersion >= 13) { fd->fsend(snapretentioncmd, edit_uint64(val, ed1)); if (!response(jcr, fd, BSOCK_TYPE_FD, "2000 Snapshot retention\n", "set Snapshot Retention", DISPLAY_ERROR)) { jcr->snapshot_retention = 0; /* can't set snapshot retention */ return false; } } return true; } /* Called from delete_cmd() in ua_cmd.c */ int delete_snapshot(UAContext *ua) { POOL_MEM buf; POOLMEM *out; SNAPSHOT_DBR snapdbr; CLIENT *client, *old_client; BSOCK *fd; if (!open_new_client_db(ua)) { return 1; } /* If the client or the fileset are not authorized, * the function will fail. 
*/ if (!select_snapshot_dbr(ua, &snapdbr)) { ua->error_msg(_("Snapshot not found\n")); snapdbr.debug(0); return 0; } client = (CLIENT *)GetResWithName(R_CLIENT, snapdbr.Client); if (!client) { ua->error_msg(_("Client resource not found\n")); return 0; } /* Connect to File daemon */ old_client = ua->jcr->client; ua->jcr->client = client; /* Try to connect for 15 seconds */ ua->send_msg(_("Connecting to Client %s at %s:%d\n"), client->name(), get_client_address(ua->jcr, client, buf.addr()), client->FDport); if (!connect_to_file_daemon(ua->jcr, 1, 15, 0)) { ua->error_msg("%s", ua->jcr->errmsg); free_bsock(ua->jcr->file_bsock); ua->jcr->client = old_client; return 0; } fd = ua->jcr->file_bsock; out = get_pool_memory(PM_FNAME); fd->fsend("snapshot del %s\n", snapdbr.as_arg(&out)); free_pool_memory(out); /* If the snapshot is not found, still delete ours */ if (check_response(ua, fd, "2000 Snapshot deleted ERR=\n", "Snapshot")) { ua->send_msg(_("Snapshot \"%s\" deleted from client %s\n"), snapdbr.Name, snapdbr.Client); } ua->jcr->file_bsock->signal(BNET_TERMINATE); free_bsock(ua->jcr->file_bsock); ua->jcr->client = old_client; db_delete_snapshot_record(ua->jcr, ua->db, &snapdbr); ua->send_msg(_("Snapshot \"%s\" deleted from catalog\n"), snapdbr.Name); return 1; } /* Called from menu, if snap_list is valid, the snapshot * list will be stored in this list. (not_owned_by_alist) */ int list_snapshot(UAContext *ua, alist *snap_list) { POOL_MEM tmp; SNAPSHOT_DBR snap; POOLMEM *buf; CLIENT *client, *old_client; BSOCK *fd; int ret = 0; client = select_client_resource(ua, JT_BACKUP_RESTORE); if (!client) { return 0; } /* Connect to File daemon */ old_client = ua->jcr->client; ua->jcr->client = client; /* Try to connect for 15 seconds */ ua->send_msg(_("Connecting to Client %s at %s:%d\n"), client->name(), get_client_address(ua->jcr, client, tmp.addr()), client->FDport); if (!connect_to_file_daemon(ua->jcr, 1, 15, 0)) { ua->error_msg("%s", ua->jcr->errmsg); goto bail_out; } fd = ua->jcr->file_bsock; /* The command line can have filters */ snapshot_scan_cmdline(ua, 0, &snap); buf = get_pool_memory(PM_FNAME); fd->fsend("snapshot list %s\n", snap.as_arg(&buf)); while (fd->recv() >= 0) { if (snap_list) { SNAPSHOT_DBR *snapr = new SNAPSHOT_DBR(); parse_args(fd->msg, &ua->args, &ua->argc, ua->argk, ua->argv, MAX_CMD_ARGS); snapshot_scan_cmdline(ua, 0, snapr); bstrncpy(snapr->Client, client->name(), sizeof(snapr->Client)); snap_list->append(snapr); snapr->debug(0); } else { ua->send_msg("%s", fd->msg); } } /* Reset the UA arg list */ parse_args(ua->cmd, &ua->args, &ua->argc, ua->argk, ua->argv, MAX_CMD_ARGS); ua->jcr->file_bsock->signal(BNET_TERMINATE); free_pool_memory(buf); ret = 1; bail_out: free_bsock(ua->jcr->file_bsock); ua->jcr->client = old_client; return ret; } static void storeit(void *ctx, const char *msg) { char ed1[51]; alist *lst = (alist *)ctx; if ( sscanf(msg, "snapshotid=%50s", ed1) == 1 || sscanf(msg, "SnapshotId=%50s", ed1) == 1) { lst->append((void *)(intptr_t) str_to_int64(ed1)); } } int prune_snapshot(UAContext *ua) { /* First, we get the snapshot list that can be pruned */ CLIENT *client = NULL; BSOCK *fd = NULL; POOLMEM *buf = NULL; POOL_MEM tmp; SNAPSHOT_DBR snapdbr; alist *lst; intptr_t id; CLIENT *old_client = ua->jcr->client; snapshot_scan_cmdline(ua, 0, &snapdbr); snapdbr.expired = true; if (!open_client_db(ua)) { Dmsg0(10, "Unable to open database\n"); return 0; } buf = get_pool_memory(PM_FNAME); lst = New(alist(10, not_owned_by_alist)); db_list_snapshot_records(ua->jcr, ua->db, 
&snapdbr, storeit, lst, ARG_LIST); foreach_alist(id, lst) { snapdbr.reset(); snapdbr.SnapshotId = id; if (get_snapshot_record(ua, &snapdbr)) { ua->send_msg(_("Snapshot \"%s\" on Client %s\n"), snapdbr.Name, snapdbr.Client); if (!confirm_retention_yesno(ua, snapdbr.Retention, "Snapshot")) { continue; } if (client && strcmp(client->hdr.name, snapdbr.Client) != 0) { ua->jcr->file_bsock->signal(BNET_TERMINATE); free_bsock(ua->jcr->file_bsock); ua->jcr->client = NULL; client = NULL; } if (!client) { client = (CLIENT *)GetResWithName(R_CLIENT, snapdbr.Client); if (!client) { continue; } /* Connect to File daemon */ ua->jcr->client = client; /* Try to connect for 15 seconds */ ua->send_msg(_("Connecting to Client %s at %s:%d\n"), client->name(), get_client_address(ua->jcr, client, tmp.addr()), client->FDport); if (!connect_to_file_daemon(ua->jcr, 1, 15, 0)) { ua->error_msg("%s", ua->jcr->errmsg); free_bsock(ua->jcr->file_bsock); ua->jcr->client = NULL; client = NULL; continue; } fd = ua->jcr->file_bsock; } fd->fsend("snapshot del %s\n", snapdbr.as_arg(&buf)); fd->recv(); if (strncmp(fd->msg, "2000", 4) == 0) { ua->send_msg("Snapshot %s deleted\n", snapdbr.Volume); db_delete_snapshot_record(ua->jcr, ua->db, &snapdbr); } else { unbash_spaces(fd->msg); ua->send_msg("%s", fd->msg); } } } if (ua->jcr->file_bsock) { ua->jcr->file_bsock->signal(BNET_TERMINATE); free_bsock(ua->jcr->file_bsock); } free_pool_memory(buf); delete lst; ua->jcr->client = old_client; return 1; } /* Called from the FD, in catreq.c */ int snapshot_catreq(JCR *jcr, BSOCK *bs) { SNAPSHOT_DBR snapdbr; char Job[MAX_NAME_LENGTH], ed1[50]; POOLMEM *vol = get_memory(bs->msglen); POOLMEM *dev = get_memory(bs->msglen); POOLMEM *err = get_pool_memory(PM_MESSAGE); int n, ret = 1, expired; *vol = *dev = 0; Dmsg1(DT_SNAPSHOT|10, "Get snapshot catalog request %s\n", bs->msg); /* We need to create a snapshot record in the catalog */ n = sscanf(bs->msg, CreateSnap, Job, snapdbr.Name, vol, dev, &snapdbr.CreateTDate, snapdbr.Type, ed1); if (n == 7) { snapdbr.Volume = vol; snapdbr.Device = dev; snapdbr.JobId = jcr->JobId; unbash_spaces(snapdbr.Name); unbash_spaces(snapdbr.Volume); unbash_spaces(snapdbr.Device); snapdbr.Retention = str_to_uint64(ed1); bstrftimes(snapdbr.CreateDate, sizeof(snapdbr.CreateDate), snapdbr.CreateTDate); unbash_spaces(snapdbr.Type); bstrncpy(snapdbr.Client, jcr->client->hdr.name, sizeof(snapdbr.Client)); bstrncpy(snapdbr.FileSet, (jcr->fileset)?jcr->fileset->hdr.name:"", sizeof(snapdbr.FileSet)); Dmsg1(DT_SNAPSHOT|10, "Creating snapshot %s\n", snapdbr.Name); snapdbr.debug(20); /* We lock the db before to keep the error message */ db_lock(jcr->db); ret = db_create_snapshot_record(jcr, jcr->db, &snapdbr); pm_strcpy(err, jcr->db->errmsg); db_unlock(jcr->db); if (ret) { bs->fsend("1000 Snapshot created\n"); } else { bs->fsend("1999 Snapshot not created ERR=%s\n", err); } goto bail_out; } n = sscanf(bs->msg, ListSnap, Job, snapdbr.Name, vol, dev, &snapdbr.CreateTDate, snapdbr.Type, snapdbr.created_before, snapdbr.created_after, &expired); if (n == 8) { snapdbr.Volume = vol; snapdbr.Device = dev; unbash_spaces(snapdbr.Name); unbash_spaces(snapdbr.Volume); unbash_spaces(snapdbr.Device); bstrftimes(snapdbr.CreateDate, sizeof(snapdbr.CreateDate), snapdbr.CreateTDate); unbash_spaces(snapdbr.Type); unbash_spaces(snapdbr.created_before); unbash_spaces(snapdbr.created_after); bstrncpy(snapdbr.Client, jcr->client->hdr.name, sizeof(snapdbr.Client)); snapdbr.expired = (expired != 0); Dmsg0(DT_SNAPSHOT|10, "List snapshots\n"); 
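      /* Added note: each catalog record matching the filters in snapdbr is
       * streamed back to the requesting daemon line by line through the
       * send_list() callback defined at the top of this file, and the end of
       * the listing is marked with the BNET_EOD signal just below. */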
      snapdbr.debug(20);
      db_list_snapshot_records(jcr, jcr->db, &snapdbr, send_list, bs, ARG_LIST);
      bs->signal(BNET_EOD);
      goto bail_out;
   }

   n = sscanf(bs->msg, DelSnap, Job, snapdbr.Name, dev);
   if (n == 3) {
      snapdbr.Device = dev;
      unbash_spaces(snapdbr.Name);
      unbash_spaces(snapdbr.Device);
      bstrncpy(snapdbr.Client, jcr->client->hdr.name, sizeof(snapdbr.Client));
      Dmsg2(DT_SNAPSHOT|10, "Delete snapshot %s from %s\n", snapdbr.Name, snapdbr.Client);
      snapdbr.debug(20);
      /* We lock the db before to keep the error message */
      db_lock(jcr->db);
      ret = db_delete_snapshot_record(jcr, jcr->db, &snapdbr);
      pm_strcpy(err, jcr->db->errmsg);
      db_unlock(jcr->db);
      if (ret) {
         bs->fsend("1000 Snapshot deleted\n");
      } else {
         bs->fsend("1999 Snapshot not deleted ERR=%s\n", err);
      }
      goto bail_out;
   }
   ret = 0;

bail_out:
   free_pool_memory(vol);
   free_pool_memory(dev);
   free_pool_memory(err);
   return ret;
}

/* List snapshots, allow to use some parameters from the command line */
void snapshot_list(UAContext *ua, int i, DB_LIST_HANDLER *sendit, e_list_type llist)
{
   SNAPSHOT_DBR snapdbr;
   snapshot_scan_cmdline(ua, i, &snapdbr);
   if (open_new_client_db(ua)) {
      db_list_snapshot_records(ua->jcr, ua->db, &snapdbr, sendit, ua, llist);
   }
}

static int list_client_snapshot(UAContext *ua, bool sync)
{
   SNAPSHOT_DBR *s, stemp;
   alist *lst;
// char ed1[50];

   if (sync) {
      if (!open_new_client_db(ua)) {
         return 1;
      }
   }

   lst = New(alist(10, not_owned_by_alist));
   if (list_snapshot(ua, lst)) {
      foreach_alist(s, lst) {
         ua->send_msg(_(
            "Snapshot %s:\n"
            " Volume: %s\n"
            " Device: %s\n"
            " CreateDate: %s\n"
// " Size: %sB\n",
            " Type: %s\n"
            " Status: %s\n"
            " Error: %s\n"),
            s->Name, NPRT(s->Volume), NPRT(s->Device), s->CreateDate,
// edit_uint64_with_suffix(s->Size, ed1),
            s->Type, s->status?_("OK"):_("Error"), s->errmsg);

         if (sync && s->Device && *s->Name) {
            stemp.reset();
            stemp.Device = s->Device;
            bstrncpy(stemp.Name, s->Name, sizeof(stemp.Name));
            if (!db_get_snapshot_record(ua->jcr, ua->db, &stemp)) {
               if (db_create_snapshot_record(ua->jcr, ua->db, s)) {
                  ua->send_msg(_("Snapshot added in Catalog\n"));
               }
            }
         }
      }
      if (lst->size() == 0) {
         ua->send_msg(_("No snapshot found\n"));
      }
   }
   /* Cleanup the list */
   foreach_alist (s, lst) {
      delete s;
   }
   delete lst;
   return 1;
}

/* Ask client to create/prune/delete a snapshot via the command line */
int snapshot_cmd(UAContext *ua, const char *cmd)
{
   if (!open_client_db(ua)) {
      Dmsg0(10, "Unable to open database\n");
      return 0;
   }
   SNAPSHOT_DBR snapdbr;
   for (int i=0; i<ua->argc; i++) {
      if (strcasecmp(ua->argk[i], NT_("purge")) == 0) {
      } else if (strcasecmp(ua->argk[i], NT_("prune")) == 0) {
         return prune_snapshot(ua);
      } else if (strcasecmp(ua->argk[i], NT_("listclient")) == 0) {
         return list_client_snapshot(ua, false);
      } else if (strcasecmp(ua->argk[i], NT_("list")) == 0) {
         snapshot_list(ua, 0, prtit, HORZ_LIST);
         return 1;
      } else if (strcasecmp(ua->argk[i], NT_("create")) == 0) {
         /* We need a job definition, or a client */
      } else if (strcasecmp(ua->argk[i], NT_("delete")) == 0) {
         return delete_snapshot(ua);
      } else if (strcasecmp(ua->argk[i], NT_("status")) == 0) {
      } else if (strcasecmp(ua->argk[i], NT_("sync")) == 0) {
         return list_client_snapshot(ua, true);
      } else if (strcasecmp(ua->argk[i], NT_("update")) == 0) {
         return update_snapshot(ua);
      } else {
         continue;
      }
   }
   for ( ;; ) {
      start_prompt(ua, _("Snapshot choice: \n"));
      add_prompt(ua, _("List snapshots in Catalog"));
      add_prompt(ua, _("List snapshots on Client"));
      add_prompt(ua, _("Prune snapshots"));
      add_prompt(ua, _("Delete snapshot"));
      add_prompt(ua, _("Update snapshot parameters"));
      add_prompt(ua,
_("Update catalog with Client snapshots")); add_prompt(ua, _("Done")); switch(do_prompt(ua, "", _("Select action to perform on Snapshot Engine"), NULL, 0)) { case 0: /* list catalog */ snapshot_list(ua, 0, prtit, HORZ_LIST); break; case 1: /* list client */ list_client_snapshot(ua, false); break; case 2: /* prune */ prune_snapshot(ua); break; case 3: /* delete */ delete_snapshot(ua); break; case 4: /* update snapshot */ update_snapshot(ua); break; case 5: /* sync snapshot */ list_client_snapshot(ua, true); break; case 6: /* done */ default: ua->info_msg(_("Selection terminated.\n")); return 1; } } return 1; } /* Select a Snapshot record from the database, might be in ua_select.c */ int select_snapshot_dbr(UAContext *ua, SNAPSHOT_DBR *sr) { int ret = 0; char *p; POOLMEM *err = get_pool_memory(PM_FNAME); *err=0; sr->reset(); snapshot_scan_cmdline(ua, 0, sr); if (sr->SnapshotId == 0 && (sr->Name[0] == 0 || sr->Client[0] == 0)) { CLIENT_DBR cr; memset(&cr, 0, sizeof(cr)); /* Get the pool from client= */ if (!get_client_dbr(ua, &cr, JT_BACKUP_RESTORE)) { goto bail_out; } sr->ClientId = cr.ClientId; db_list_snapshot_records(ua->jcr, ua->db, sr, prtit, ua, HORZ_LIST); if (!get_cmd(ua, _("Enter a SnapshotId: "))) { goto bail_out; } p = ua->cmd; if (*p == '*') { p++; } if (is_a_number(p)) { sr->SnapshotId = str_to_int64(p); } else { goto bail_out; } } if (!get_snapshot_record(ua, sr)) { ua->error_msg(_("Unable to get Snapshot record.\n")); goto bail_out; } ret = 1; bail_out: if (!ret && *err) { ua->error_msg("%s", err); } free_pool_memory(err); return ret; } /* This part should be in ua_update.c */ static void update_snapretention(UAContext *ua, char *val, SNAPSHOT_DBR *sr) { char ed1[150]; POOL_MEM tmp(PM_MESSAGE); bool ret; if (!duration_to_utime(val, &sr->Retention)) { ua->error_msg(_("Invalid retention period specified: %s\n"), val); return; } db_lock(ua->db); if (!(ret = db_update_snapshot_record(ua->jcr, ua->db, sr))) { pm_strcpy(tmp, db_strerror(ua->db)); } db_unlock(ua->db); if (!ret) { ua->error_msg("%s", tmp.c_str()); } else { ua->info_msg(_("New retention period is: %s\n"), edit_utime(sr->Retention, ed1, sizeof(ed1))); } } /* This part should be in ua_update.c */ static void update_snapcomment(UAContext *ua, char *val, SNAPSHOT_DBR *sr) { POOL_MEM tmp(PM_MESSAGE); bool ret; bstrncpy(sr->Comment, val, sizeof(sr->Comment)); db_lock(ua->db); if (!(ret = db_update_snapshot_record(ua->jcr, ua->db, sr))) { pm_strcpy(tmp, db_strerror(ua->db)); } db_unlock(ua->db); if (!ret) { ua->error_msg("%s", tmp.c_str()); } else { ua->info_msg(_("New Comment is: %s\n"), sr->Comment); } } /* This part should be in ua_update.c */ bool update_snapshot(UAContext *ua) { SNAPSHOT_DBR sr; POOL_MEM ret; char ed1[130]; bool done = false; int i; const char *kw[] = { NT_("Retention"), /* 0 */ NT_("Comment"), /* 1 */ NULL }; for (i=0; kw[i]; i++) { int j; if ((j=find_arg_with_value(ua, kw[i])) > 0) { /* If all from pool don't select a media record */ if (!select_snapshot_dbr(ua, &sr)) { return 0; } switch (i) { case 0: update_snapretention(ua, ua->argv[j], &sr); break; case 1: update_snapcomment(ua, ua->argv[j], &sr); break; default: break; } done = true; } } for ( ; !done; ) { start_prompt(ua, _("Parameters to modify:\n")); add_prompt(ua, _("Snapshot Retention Period")); /* 0 */ add_prompt(ua, _("Snapshot Comment")); /* 1 */ add_prompt(ua, _("Done")); /* 2 */ i = do_prompt(ua, "", _("Select parameter to modify"), NULL, 0); if (i == 2) { return 0; } if (!select_snapshot_dbr(ua, &sr)) { /* Get Snapshot record */ return 
0; } ua->info_msg(_("Updating Snapshot \"%s\" on \"%s\"\n"), sr.Name, sr.Client); switch (i) { case 0: /* Snapshot retention */ ua->info_msg(_("Current retention period is: %s\n"), edit_utime(sr.Retention, ed1, sizeof(ed1))); if (!get_cmd(ua, _("Enter Snapshot Retention period: "))) { return 0; } update_snapretention(ua, ua->cmd, &sr); break; case 1: ua->info_msg(_("Current comment is: %s\n"), NPRTB(sr.Comment)); if (!get_cmd(ua, _("Enter Snapshot comment: "))) { return 0; } update_snapcomment(ua, ua->cmd, &sr); break; default: /* Done or error */ ua->info_msg(_("Selection terminated.\n")); return 1; } } return 1; } bacula-15.0.3/src/dird/store_mngr.h0000644000175000017500000002423414771010173016730 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Storage manager classes. * All of this code is intented to make managing * (accessing, setting, incrementing counters, applying storage policy...) storage easier * from the code perspective. */ #ifndef STORE_MNGR_H #define STORE_MNGR_H 1 /* Forward delcaration */ class STORE; /* * Helper class to make managing each storage type (write/read) easier. * It contains storage resource ('store' member) which is currently used as well as list of all * possible storage resource choices. */ class storage { private: bool write; /* Write or read storage */ STORE *store; /* Selected storage to be used */ alist *origin_list; /* Configured Storage (raw list without any policy applied) */ alist *list; /* Storage possibilities */ POOLMEM *source; /* Where the storage came from */ POOLMEM *list_str; /* List of storage names in the list */ pthread_mutex_t mutex; /* Mutex for accessing items */ bool unused_stores_decremented; /* Set if only currently used storage has NumConcurrentJobs incremented */ /* Only when we are a read storage - increment concurrent read counters for all storages on the list */ bool inc_rstores(JCR *jcr); /* Only when we are a write storage - increment concurrent write counters for all storages on the list */ bool inc_wstores(JCR *jcr); const char *print_list(alist *list); public: storage(); ~storage(); /* Determine if we are write or read storage */ void set_rw(bool write); /* Get storage which will be used next */ STORE *get_store() { return store; } /* Get list of all possible storages. * This metod can possibly return list with less storages than the original group. * It's because some of it's elements can ba unavailable at that time (e.g. reached maxConcJobs limit). */ alist *get_list(); /* Get original list of all storages as assigned by the set() method. */ alist *get_origin_list(); /* Get source of the storage (pool, job, commandline, unknown, ...) */ const char *get_source() const; /* Get media type of current storage */ const char *get_media_type() const; /* Set storage override. Remove all previous storage. * Can be called for single storage - list consists only one, specified storage then. 
* Can be called for setting a list - internal list consists of same elemets as the list passed * as an arg. First item from the list becames storage currently used. */ void set(STORE *storage, const char *source); void set(alist *storage, const char *source); /* Reset class, remove all items from list, unset storage currently used, clean source */ void reset(); /* Set custom storage for next usage (it needs to be an item from the current store list) */ bool set_current_storage(STORE *storage); /* Increment concurrent read/write counters for all storages on the list */ bool inc_stores(JCR *jcr); /* Decrement concurrent read/write counters for all storages on the list */ void dec_stores(); void dec_unused_stores(); void dec_curr_store(); /* Print all elements of the list (sample result of print_list() -> "File1, File2, File3" */ const char *print_origin_list(); const char *print_possible_list(); }; /* * Storage Manager class responsible for managing all of the storage used by the JCR. * It's holds read as well as write storage resources assigned to the JCR. * It is a base class for Storage Policy (hence virtual 'apply_policy' method). * Most of member functions are just wrappers around the storage class to make accessing * and managin read/write storage in a bit more friendly way. * */ class StorageManager : public SMARTALLOC { protected: storage rstore; /* Read storage */ storage wstore; /* Write storage */ const char *policy; /* Storage Group Policy used */ virtual void apply_policy(bool write_store) = 0; public: virtual void apply_write_policy(JCR*) = 0; virtual void apply_read_policy(JCR*) = 0; virtual ~StorageManager() { reset_rwstorage(); free(policy); }; StorageManager(const char *policy); static const char *get_default_policy() { return "ListedOrder"; } /************ READ STORAGE HELPERS ************/ STORE *get_rstore(); alist *get_rstore_list(); alist *get_origin_rstore_list(); const char *get_rsource() const; const char *get_rmedia_type() const; void set_rstore(STORE *storage, const char *source); void set_rstore(alist *storage, const char *source); void reset_rstorage(); bool inc_read_stores(JCR *jcr); void dec_read_stores(); const char *print_possible_rlist(); const char *print_origin_rlist(); /************ WRITE STORAGE HELPERS ************/ STORE *get_wstore() { return wstore.get_store(); } alist *get_wstore_list(); alist *get_origin_wstore_list(); const char *get_wsource() const; const char *get_wmedia_type() const; bool set_current_wstorage(STORE *storage); void set_wstorage(STORE *storage, const char *source); void set_wstorage(alist *storage, const char *source); void reset_wstorage(); const char *print_possible_wlist(); const char *print_origin_wlist(); bool inc_write_stores(JCR *jcr); void dec_write_stores(); void dec_curr_wstore(); void dec_unused_wstores(); /************ GENERIC STORAGE HELPERS ************/ void reset_rwstorage(); const char *get_policy_name() { return policy; } }; /* * Least used policy chooses storage from the list which has the least concurrent jobs number. */ class LeastUsedStore : public StorageManager { private: void apply_policy(bool write_store); public: void apply_write_policy(JCR*); void apply_read_policy(JCR*); LeastUsedStore() : StorageManager("LeastUsed") { } ~LeastUsedStore() { } }; /* * Default policy for the storage group. It uses first available storage from the list. 
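 *
 * Added illustrative sketch (not part of the original comment), based only on
 * the interfaces declared above: a custom policy follows the same pattern as
 * ListedOrderStore below, overriding the apply_* hooks and passing its policy
 * name to the StorageManager constructor, e.g.
 *
 *    class ReverseOrderStore : public StorageManager {
 *       void apply_policy(bool write_store);          // reorder the storage list
 *     public:
 *       void apply_write_policy(JCR*) { apply_policy(true); }
 *       void apply_read_policy(JCR*)  { apply_policy(false); }
 *       ReverseOrderStore() : StorageManager("ReverseOrder") { }
 *       ~ReverseOrderStore() { }
 *    };
 *
 * ReverseOrderStore and its behaviour are hypothetical and are only meant to
 * show how the policy hierarchy is extended.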
*/ class ListedOrderStore : public StorageManager { private: void apply_policy(bool write_store) { /* Do nothing for now */ } public: void apply_write_policy(JCR*) { return apply_policy(true); } void apply_read_policy(JCR*) { return apply_policy(false); } ListedOrderStore(): StorageManager("ListedOrder") { } ~ListedOrderStore() { } }; class LastBackedUpToStore : public StorageManager { private: void apply_policy(bool write_store); public: void apply_write_policy(JCR* jcr); void apply_read_policy(JCR* jcr); LastBackedUpToStore() : StorageManager("LastBackedUpTo") { } ~LastBackedUpToStore() { } }; /* Context with per-policy specific data (as of now there's only single uint64_t value available for each policy)*/ class sm_ctx : public SMARTALLOC { public: STORE *store; dlink link; uint64_t number; /* Per-policy specific number (e.g. free space, cpu usage...) */ sm_ctx(STORE *s) { store = s; number = 0; } }; /* * Abstract class for policies which require DIR<->SD querying. * Each querying policy has to implement query() and reorder() methods - theese act * as a callbacks for QueryStore's generic methods. */ class QueryStore : public StorageManager { protected: /* Policy-specific query method */ virtual bool query(BSOCK *sd, dlist *d_list, sm_ctx *context) = 0; /* Reorder storage list after querying all storages in the list for needed information */ virtual void reorder_list(alist *list, dlist *d_list) = 0; public: virtual void apply_policy(bool write_store); QueryStore (const char *policy="VirtualPolicy_QueryStore"): StorageManager(policy) { } ~QueryStore() { } }; /* * Policy which reorders storage list based on FreeSpace available on each Storage */ class FreeSpaceStore : public QueryStore { private: /* Private helper struct */ struct store_free_size { STORE *store; uint64_t free_size; dlink link; }; /* Comparator for easy list ordering */ static int cmp(void *item1, void *item2) { sm_ctx *ctx1 = (sm_ctx *) item1; sm_ctx *ctx2 = (sm_ctx *) item2; uint64_t s1 = ctx1->number; uint64_t s2 = ctx2->number; if (s1 < s2) { return 1; } else if (s1 > s2) { return -1; } else { return 0; } } protected: bool query(BSOCK *sd, dlist *d_list, sm_ctx *context); virtual void reorder_list(alist *list, dlist *d_list); public: void apply_write_policy(JCR*) { return apply_policy(true); } void apply_read_policy(JCR*) { return apply_policy(false); } FreeSpaceStore(): QueryStore("FreeSpace") { } FreeSpaceStore(const char *policy): QueryStore("policy") { } virtual ~FreeSpaceStore() { } }; class FreeSpaceLeastUsedStore : public FreeSpaceStore { private: uint64_t threshold; protected: virtual void reorder_list(alist *list, dlist *d_list); public: FreeSpaceLeastUsedStore(uint64_t thres=0): FreeSpaceStore("FreeSpaceLeastUsed") { threshold = thres; } virtual ~FreeSpaceLeastUsedStore() { } }; #endif // STORE_MNGR_Hbacula-15.0.3/src/dird/ua_output.c0000644000175000017500000015060114771010173016567 0ustar bsbuildbsbuild/* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2025 Kern Sibbald The original author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. 
*/
/*
 *
 * Bacula Director -- User Agent Output Commands
 * I.e. messages, listing database, showing resources, ...
 *
 * Kern Sibbald, September MM
 */

#include "bacula.h"
#include "dird.h"

/* Imported subroutines */

/* Imported variables */

/* Imported functions */

/* Forward referenced functions */
static int do_list_cmd(UAContext *ua, const char *cmd, e_list_type llist);
static bool list_nextvol(UAContext *ua, int ndays);

/*
 * Turn auto display of console messages on/off
 */
int autodisplay_cmd(UAContext *ua, const char *cmd)
{
   static const char *kw[] = { NT_("on"), NT_("off"), NULL};

   switch (find_arg_keyword(ua, kw)) {
   case 0:
      ua->auto_display_messages = true;
      break;
   case 1:
      ua->auto_display_messages = false;
      break;
   default:
      ua->error_msg(_("ON or OFF keyword missing.\n"));
      break;
   }
   return 1;
}

/*
 * Turn GUI mode on/off
 */
int gui_cmd(UAContext *ua, const char *cmd)
{
   static const char *kw[] = { NT_("on"), NT_("off"), NULL};

   switch (find_arg_keyword(ua, kw)) {
   case 0:
      ua->jcr->gui = ua->gui = true;
      break;
   case 1:
      ua->jcr->gui = ua->gui = false;
      break;
   default:
      ua->error_msg(_("ON or OFF keyword missing.\n"));
      break;
   }
   return 1;
}

/*
 * Enter with Resources locked
 */
static void show_disabled_jobs(UAContext *ua)
{
   JOB *job;
   bool first = true;

   foreach_res(job, R_JOB) {
      if (!acl_access_ok(ua, Job_ACL, job->name())) {
         continue;
      }
      if (!job->is_enabled()) {
         if (first) {
            first = false;
            ua->send_msg(_("Disabled Jobs:\n"));
         }
         ua->send_msg(" %s\n", job->name());
      }
   }
   if (first) {
      ua->send_msg(_("No disabled Jobs.\n"));
   }
}

struct showstruct {const char *res_name; int type;};

static struct showstruct reses[] = {
   {NT_("directors"), R_DIRECTOR},
   {NT_("clients"), R_CLIENT},
   {NT_("counters"), R_COUNTER},
   {NT_("devices"), R_DEVICE},
   {NT_("jobs"), R_JOB},
   {NT_("storages"), R_STORAGE},
   {NT_("catalogs"), R_CATALOG},
   {NT_("schedules"), R_SCHEDULE},
   {NT_("filesets"), R_FILESET},
   {NT_("pools"), R_POOL},
   {NT_("messages"), R_MSGS},
   {NT_("statistics"), R_COLLECTOR},
   {NT_("consoles"), R_CONSOLE},
// {NT_("jobdefs"), R_JOBDEFS},
// {NT_{"autochangers"), R_AUTOCHANGER},
   {NT_("all"), -1},
   {NT_("help"), -2},
   {NULL, 0}
};

/*
 * Displays Resources
 *
 * show all
 * show <resource>            e.g. show directors
 * show <resource>=<name>     e.g. show director=HeadMan
 * show disabled              shows disabled jobs
 *
 */
int show_cmd(UAContext *ua, const char *cmd)
{
   int i, j, type, len;
   int recurse;
   char *res_name;
   RES_HEAD *reshead = NULL;
   RES *res = NULL;

   Dmsg1(20, "show: %s\n", ua->UA_sock->msg);

   LockRes();
   for (i=1; i<ua->argc; i++) {
      if (strcasecmp(ua->argk[i], NT_("disabled")) == 0) {
         show_disabled_jobs(ua);
         goto bail_out;
      }

      res = NULL;
      reshead = NULL;
      type = 0;

      res_name = ua->argk[i];
      if (!ua->argv[i]) {       /* was a name given?
*/ /* No name, dump all resources of specified type */ recurse = 1; len = strlen(res_name); for (j=0; reses[j].res_name; j++) { if (strncasecmp(res_name, reses[j].res_name, len) == 0) { type = reses[j].type; if (type > 0) { reshead = res_head[type-r_first]; } else { reshead = NULL; } break; } } } else { /* Dump a single resource with specified name */ recurse = 0; len = strlen(res_name); for (j=0; reses[j].res_name; j++) { if (strncasecmp(res_name, reses[j].res_name, len) == 0) { type = reses[j].type; res = (RES *)GetResWithName(type, ua->argv[i]); if (!res) { type = -3; } break; } } } switch (type) { /* All resources */ case -1: for (j=r_first; j<=r_last; j++) { /* Skip R_DEVICE since it is really not used or updated */ if (j != R_DEVICE) { dump_each_resource(j, bsendmsg, ua); } } break; /* Help */ case -2: ua->send_msg(_("Keywords for the show command are:\n")); for (j=0; reses[j].res_name; j++) { ua->error_msg("%s\n", reses[j].res_name); } goto bail_out; /* Resource not found */ case -3: ua->error_msg(_("%s resource %s not found.\n"), res_name, ua->argv[i]); goto bail_out; /* Resource not found */ case 0: ua->error_msg(_("Resource %s not found\n"), res_name); goto bail_out; /* Dump a specific type */ default: if (res) { /* keyword and argument, ie: show job=name */ dump_resource(recurse?type:-type, res, bsendmsg, ua); } else if (reshead) { /* keyword only, ie: show job */ dump_each_resource(-type, bsendmsg, ua); } break; } } bail_out: UnlockRes(); return 1; } /* * Check if the access is permitted for a list of jobids * * Not in ua_acl.c because it's using db access, and tools such * as bdirjson are not linked with cats. */ bool acl_access_jobid_ok(UAContext *ua, const char *jobids) { char *tmp=NULL, *p; bool ret=false; JOB_DBR jr; uint32_t jid; if (!jobids) { return false; } if (!is_a_number_list(jobids)) { return false; } /* If no console resource => default console and all is permitted */ if (!ua || !ua->cons) { Dmsg0(1400, "Root cons access OK.\n"); return true; /* No cons resource -> root console OK for everything */ } alist *list = ua->cons->ACL_lists[Job_ACL]; if (!list) { /* empty list */ return false; /* List empty, reject everything */ } /* Special case *all* gives full access */ if (list->size() == 1 && strcasecmp("*all*", (char *)list->get(0)) == 0) { return true; } /* If we can't open the database, just say no */ if (!open_new_client_db(ua)) { return false; } p = tmp = bstrdup(jobids); while (get_next_jobid_from_list(&p, &jid) > 0) { bmemset(&jr, 0, sizeof(jr)); jr.JobId = jid; if (db_get_job_record(ua->jcr, ua->db, &jr)) { ret = false; for (int i=0; isize(); i++) { if (strcasecmp(jr.Name, (char *)list->get(i)) == 0) { Dmsg3(1400, "ACL found %s in %d %s\n", jr.Name, Job_ACL, (char *)list->get(i)); ret = true; break; } } if (ret && !acl_access_client_ok(ua, jr.Client, JT_BACKUP)) { ret = false; } } if (!ret) { goto bail_out; } } bail_out: if (tmp) { free(tmp); } return ret; } /* * List contents of database * * list jobs - lists all jobs run * list jobid=nnn - list job data for jobid * list ujobid=uname - list job data for unique jobid * list job=name - list all jobs with "name" * list jobname=name - same as above * list jobmedia jobid= * list jobmedia job=name * list joblog pattern=xxx jobid= * list joblog pattern=xxx jobid= * list joblog job=name * list files [type=] jobid= - list files saved for job nn * list files [type=] job=name * list pools - list pool records * list jobtotals - list totals for all jobs * list media - list media for given pool (deprecated) * list 
volumes - list Volumes * list clients - list clients * list nextvol job=xx - list the next vol to be used by job * list nextvolume job=xx - same as above. * list copies jobid=x,y,z * list objects [type=objecttype job_id=id clientname=n,status=S] - list plugin objects * list pluginrestoreconf jobid=x,y,z [id=k] * list filemedia jobid=x fileindex=z * list fileevents jobid=x fileindex=z * list metadata type=[email|attachment] tenant=xxx owner=xxx jobid= client= * from= * to= cc= tags= * subject= bodypreview= all= minsize= maxsize= * importance= isread=<0|1> isdraft=<0|1> * categories= conversationid= hasattachment=<0|1> * starttime=