jdupes-1.27.3/.dockerignore

.*
example_scripts
testdir

jdupes-1.27.3/.github/FUNDING.yml

# These are supported funding model platforms

github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
patreon: JodyBruchon
open_collective: # Replace with a single Open Collective username
ko_fi: jodybruchon
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: JodyBruchon
issuehunt: # Replace with a single IssueHunt username
otechie: # Replace with a single Otechie username
lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
custom: ['https://www.subscribestar.com/JodyBruchon', 'https://paypal.me/JodyBruchon']

jdupes-1.27.3/.github/ISSUE_TEMPLATE/template-for-all-issue-submissions.md

---
name: Template for all Issue submissions
about: Use this template for all submissions, including bug reports and feature requests
title: ''
labels: ''
assignees: ''

---

**What jdupes version are you using?**
`paste version within these backquotes`

**Where did you get your copy of the jdupes binary?**
[ ] Official binary from jdupes releases
[ ] OS distribution repository (Linux, Homebrew, etc. - put distro's name/version in text block below)
[ ] I personally compiled it from source code (paste your build commands in the text block below)
[ ] Other (please explain below)

```
No additional information provided (replace this line with info requested above as needed)
```

**Have you searched in Issues?**
[ ] I have searched for my problem in Issues and I am confident that this is not a duplicate of a previously reported issue or feature request.

**I have done my due diligence**
[ ] I am not suggesting changing the hash algorithm, storing files in a database, comparing file data blocks differently, suggesting that file sizes should be used before comparison, or other common suggestions that indicate I haven't read the documentation, code, or examined the issue tracker entries to discover that all of these things have already been implemented or won't make a difference if they were implemented. I have done my due diligence before asking for help or making a suggestion.
**Issue or feature request details**
blah blah blah, I didn't fill this out, please close my request

jdupes-1.27.3/.github/workflows/main.yml

name: ci

on:
  push:
    branches:
      - 'master'
      - 'github-action'

jobs:
  docker:
    runs-on: ubuntu-latest
    steps:
      - name: set lower case owner name
        run: |
          echo "OWNER_LC=${OWNER,,}" >>${GITHUB_ENV}
        env:
          OWNER: '${{ github.repository_owner }}'
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Login to GitHub Container Registry
        uses: docker/login-action@v1
        with:
          registry: ghcr.io
          username: ${{ env.OWNER_LC }}
          password: ${{ secrets.PERSONAL_ACCESS_TOKEN }}
      - name: Build and push slim
        uses: docker/build-push-action@v2
        with:
          context: .
          file: docker/slim.Dockerfile
          platforms: linux/amd64
          push: true
          tags: |
            ghcr.io/${{ env.OWNER_LC }}/jdupes:latest
            ghcr.io/${{ env.OWNER_LC }}/jdupes:${{ github.ref_name }}
      - name: Build and push alpine
        uses: docker/build-push-action@v2
        with:
          context: .
          file: docker/alpine.Dockerfile
          platforms: linux/amd64
          push: true
          tags: |
            ghcr.io/${{ env.OWNER_LC }}/jdupes:alpine
            ghcr.io/${{ env.OWNER_LC }}/jdupes:${{ github.ref_name }}-alpine

jdupes-1.27.3/.gitignore

#
# Build ignores
#
#.*
*.o
*.o.*
*.a
*.so
*.so.*
*.1.gz

#
# Never ignore these
#
!.gitignore

#
# Normal output and testing dirs
#
/build_date.h
/jdupes
/jdupes*.exe
/jdupes-standalone
/jdupes-*-*/
/jdupes-*-*.zip
/jdupes_hashdb.txt
/*.pkg.tar.*
test_temp
output.log

#
# Backups / patches
#
*~
*.orig
*.rej
/*.patch

#
# debugging and editor stuff
#
core
.gdb_history
.gdbinit
.*.swp
*.gcda
*.gcno
*.gcov
cachegrind.out.*

# Mac OS
.DS_Store

jdupes-1.27.3/CHANGES.txt

jdupes 1.27.3 (2023-08-26)
- Fix crash on Linux when opening a file for hashing fails

jdupes 1.27.2 (2023-08-26)
- Hash database no longer crashes on 32-bit and x32 systems
- Hash database now uses 64-bit time and size counts

jdupes 1.27.1 (2023-08-25)
- Fix a hash database corruption bug

jdupes 1.27.0 (2023-08-25)
- Added long-awaited hash database feature '-y' (SEE README BEFORE USING)
- Removed legacy long options that were deprecated in v1.20.0
- -E option was moved to -e and now throws an error (CHECK YOUR SCRIPTS)
- Multiple hash algorithm capability added, but no user option exists yet
- Linux: use posix_fadvise() to improve read performance
- Fixed possible infinite loop freezing on certain errors when linking files
- Removed annoying warning for -Q; the user deserves what the user asks for

jdupes 1.26.1 (2023-07-04)
- Fix '-P early' to actually work correctly

jdupes 1.26 (2023-07-02)
- Change -C to use KiB instead of bytes for easier typing (CHECK YOUR SCRIPTS)

jdupes 1.25.3 (2023-06-27)
- Fix CTRL+C broken during scanning phase
- Added verbose error information for dedupe errors 22 and 95
- Partial-only flag for dedupe is no longer inappropriately enabled on macOS
- Build now enables dedupe support and uses nearby libjodycode by default
- Fix NO_HARDLINKS, LOW_MEMORY, and BARE_BONES builds

jdupes 1.25.2 (2023-06-21)
- Fix -d and no valid directories exiting with an internal error message
- All errors or serious warnings exit with FAILURE instead of SUCCESS
jdupes 1.25.1 (2023-06-19)
- Fix -D on non-debug builds which was falling through to set -E
- Debugging now dumps a list of internal option flags
- Fixed progress indicator: "hashing: xyz%" only shows if "stuck" on one file

jdupes 1.25.0 (2023-06-16)
- Major bug in 1.24.0 affecting all types of linking has been fixed
- Update interfaces to libjodycode 3 API

jdupes 1.24.0 (2023-06-11)
- Progress indication now uses platform timers instead of check loops
- Lots of small code size and efficiency improvements
- No one should be using -T so it has been made far more annoying
- -B can be used with -T which may be faster for BTRFS dedupe work
- CTRL-C aborts now show a user abort warning message to stderr
- Improved version information display
- Documentation updates

jdupes 1.23.0 (2023-05-08)
- Use the new libjodycode 2.0 API version handling system
- Dedupe is now attempted on read-only files (meant for BTRFS snapshots)
- The -C option can now accept power-of-two chunk sizes up to 256 MiB
- "Extensions" are now "feature flags" and some have changed
- OMIT_GETOPT_LONG is now NO_GETOPT_LONG (does anyone actually use this?)
- Linux-x86_64 can now build several multilib arches (x86_64, x32, i386)
- Various other under-the-hood code changes for libjodycode 2.0

jdupes 1.22.0 (2023-04-09)
- libjodycode is now required to build and run jdupes (see INSTALL)
- Move all jody_* code to libjodycode and remove from jdupes
- Add -E/--error-on-dupe option to terminate immediately if any dupe is found
- Support showing x32 ABI in version information
- Several build system improvements for macOS, BSD, and linking in libxxhash

jdupes 1.21.3 (2023-02-09)
- Fix exit behavior when no valid directories are given
- Only act on "normal" files and directories

jdupes 1.21.2 (2023-02-06)
- Work around macOS data corruption (clonefile may wreck compressed files)
- Major performance fix in the double traversal prevention tree code
- Added undocumented '-9' benchmark option for testing traversal code
- Extra error checks to try to finally solve a very rare crash bug
- Tiny code size reduction by discarding unused xxHash code
- Build system can now build native M1 + x86_64 fat binaries on macOS
- jody_hash re-added as a build-time option (mainly for old/embedded systems)
- Many options/features/safety checks can now be selectively compiled out
- New 'make BARE_BONES=1' option builds the most minimal jdupes possible

jdupes 1.21.1 (2022-12-01)
- Reinstate '-I/--isolate' by popular demand; use at your own risk!
jdupes 1.21.0 (2022-09-03)
- Remove '-I/--isolate' which has never worked correctly
- Fix compiling when NO_HARDLINKS and NO_SYMLINKS are both defined
- Increased stack size limits to enable deeper recursion without crashing
- Fixes to make compilation under Cygwin (instead of MSYS2 MinGW) work
- Remove the temporary '-X help' warning about changes in functionality
- Some minor under-the-hood changes for future enhancements

jdupes 1.20.2 (2021-11-02)
- Interactive deletion now offers "link set" options too

jdupes 1.20.1 (2021-10-21)
- Interactive deletion now assumes 'a' when you just hit [ENTER]

jdupes 1.20.0 (2021-05-12)
- Normalize long option hyphenation (CHECK YOUR SHELL SCRIPTS)
- Remove long-deprecated -n/--noempty option (CHECK YOUR SHELL SCRIPTS)
- Fix printout for -P/--partial when progress indicator is active
- Miscellaneous documentation cleanups/updates

jdupes 1.19.2 (2021-03-24)
- SIGUSR1 to toggle -Z now shows an indication of the -Z status change
- Fix for error in JSON handling of extended ASCII range (0x80-0xff)
- Fix sort when zeroes in names aren't followed by a non-zero digit
- Mac builds now output i386+x86_64 fat binaries (if possible)

jdupes 1.19.1 (2020-11-28)
- Fix missing space in ----> hard link indicator
- Fix -P/--print and -p/--permissions options
- Remove temporary warning when using -X

jdupes 1.19.0 (2020-10-11)
- Make -X size[+-=] an inclusion filter instead (CHECK YOUR SHELL SCRIPTS)
- Fix -X noext/onlyext extension matching bugs
- Remove deprecated -x/--xsize option and some undocumented redundant options
- Preserve metadata when doing -B/--dedupe on Apple APFS
- Name sorting is now case-insensitive
- Disable -K/--skiphash which was accidentally left active but doesn't work
- When sorting by modify time and there's a tie, break it using name sorting
- Add Windows XP build support (last supported MSYS2: msys2-i686-20160205)
- Fix building on old GCC versions as seen in Xcode 3.1 (Mac OS X 10.5.8)
- jdupes-standalone has been removed due to falling too far out of sync
- Embedded build date option has been removed

jdupes 1.18.2 (2020-07-20)
- Add -U/--notravcheck to skip double-traversal safety (for Google Drive FS)
- Unified all simple packaging options under a single 'make package' call
- Reduce code size on macOS by dropping some Linux-specific code

jdupes 1.18.1 (2020-07-08)
- Fix -X newer/older on Windows by writing a local strptime() substitute

jdupes 1.18.0 (2020-07-08)
- Add -X newer/older extfilters to reject files by modification date

jdupes 1.17.1 (2020-07-02)
- Add basic APFS clonefile() support to -B dedupe

jdupes 1.17.0 (2020-06-24)
- Rewrite dedupe code from scratch, probably fixing all dedupe bugs
- extfilter: add substring match filter for file paths
- Add -u/--printunique option to print all non-duplicates (unmatched files)
- Dedupe-blacklisted kernel version check now happens before work is done
- Build warnings fixed; report any you get (except ENABLE_DEDUPE=1 #warning)
- New build targets static and static_stripped (for static binary builds)

jdupes 1.16.0 (2020-06-06)
- Add -X noext/onlyext filters to exclude/require specific file extension(s)
- Added in-depth help text for -X/--extfilter (use -X help to view)
- Clarify hard link limit behavior on Windows in program help text
- This version still has BTRFS dedupe issues and file add-by-name disabled

jdupes 1.15.0 (2020-05-15)
- Disable single file addition on the command line for safety

jdupes 1.14.1 (2020-05-15)
- Fix some compilation issues
- Add example shell scripts for processing piped jdupes output
- Add `stupid_dupes` educational shell script to source code
- Fix some swapped/mangled help text in program and documentation
- LOW_MEMORY compiles exclude more stuff to further reduce usage

jdupes 1.14.0 (2019-12-29)
- Long option --exclude is deprecated and renamed --extfilter
- BTRFS compile options have been generalized to dedupe (see README)
- Fix a bug in 1.13.3 where many options caused an inappropriate exit

jdupes 1.13.3 (2019-12-22)
- Fix the behavior of the -I/--isolate option
- Move BTRFS-specific dedupe interfaces to general Linux 4.5+ interfaces
- Change BTRFS compilation flag name (see README)
- Fix FS dedupe only working on the first 16 MiB of files
- Add FS dedupe static header for when this header is missing
- Add EXE version info for generated Windows executables
- Correct several copyright dates and comments

jdupes 1.13.2 (2019-08-01)
- Fix Unicode and escaped characters in JSON output

jdupes 1.13.1 (2019-06-10)
- Fix an incorrect NULL pointer check

jdupes 1.13 (2019-06-04)
- Add new option -j/--json for JSON (machine-readable) output
- /usr/local is now the default PREFIX in Makefile
- Minor performance optimizations
- A few minor bug fixes

jdupes 1.12 (2019-02-18)
- Small reductions in memory usage
- Add "standalone" jdupes C file which has no external requirements
- Add ability to toggle -Z with a USR1 signal (not available on Windows)
- Add -t/--no-tocttou option to disable file change safety checks

jdupes 1.11.1 (2018-11-09)
- Disable build date embedding by default to make reproducible builds easier

jdupes 1.11 (2018-11-03)
- Add new option -T for partial hash matches only (dangerous!)
- Fix '-P partial' printing

jdupes 1.10.4 (2018-09-09)
- Fix a bug that caused -x/--xsize to fail randomly

jdupes 1.10.3 (2018-09-02)
- Add -M/--printwithsummary option
- Add -0/--printnull option
- Add very long path support on Windows 10
- Do not output progress indicators if output is not a TTY
- Remove an old undocumented long option '--summary'

jdupes 1.10.2 (2018-05-24)
- Add -P/--print option

jdupes 1.10.1 (2018-04-24)
- Fix -I option

jdupes 1.10 (2018-04-22)
- cacheinfo code not included on Windows where it is not used
- Fix -H to work properly on individual files (not just directories)
- Fix memory corruption which causes a crash when using -A option
- Block btrfs dedupe on Linux kernels < 3.0 due to possible data loss bugs
- Removed all references to 'fdupes-jody' and unused TODO file
- Add -C/--chunksize option for tuning I/O chunk size (see README)
- Make more features configurable and exclude them in LOW_MEMORY mode
- Remove HAVE_BTRFS_IOCTL_H deprecated compile-time option
- Remove experimental tree rebalance code

jdupes 1.9 (2017-12-04)
- stderr on Windows is no longer polluted or empty when redirected
- Added -1/--one-file-system to restrict recursion to the same filesystem
- Added a universal exclusion stack which is currently only used for -X
- Added -X/--exclude to use exclusion stack; supersedes -x/--xsize
- More robust BTRFS enablement behavior in Makefile
- Fixed Unicode display for hard linking on Windows
- Efficiency improvements to internal memory allocator (string_malloc)
- Documentation improvements and updates
- Provide "fdupes_oneline.sh" which emulates old "fdupes -1" feature
- Single file names passed as arguments are now accepted and processed

jdupes 1.8 (2017-01-31)
- All files are now licensed under The MIT License exclusively
- Fixed a serious memory alloc bug; upgrading is *strongly* recommended
- Several huge improvements to progress indicators
- Fix some error message display problems and add more error checking
- Fixes for several potential crashes and buffer overflows
- Indicate no duplicates were found if printing matches and none exist
- On Linux, jdupes now auto-tunes I/O size based on CPU L1 D-cache size
- The -v option now also shows info about bitness in the version string

jdupes 1.7 (2016-12-28)
- Incompatible change: zero-length files no longer duplicates by default
- New -z/--zeromatch option to consider zero-length files as duplicates
- I/O chunk size changed for better performance
- The PROGRAM_NAME variable is now used properly during make
- Program was re-organized into several split C files

jdupes 1.6.2 (2016-12-03)
- Fix: version number shown in jdupes -v wasn't updated in 1.6.1
- Prevent BTRFS dedupe of more files than the kernel can handle
- Track directories to avoid scanning the same directory twice

jdupes 1.6.1 (2016-12-02)
- Show backslash instead of forward slash as path separator on Windows
- Make BTRFS dedupe error messages more informative and less confusing
- Minor code tweaks, typo and help text fixes
- Split some functions into separate files (jdupes.c was getting large)

jdupes 1.6 (2016-11-27)
- Add the -l/--linksoft option to create symbolic links from duplicates
- Disable following symlinks to directories when -s/--symlinks is used
- Reduce overall memory usage by approximately 5%
- Add configurable path buffer sizes and path buffer overflow checks
- Fixes for some build warnings seen on ARM and MIPS

jdupes 1.5.1 (2016-11-01)
- Significant reduction in memory usage (with a bonus tiny speed boost)
- Improvements in string_malloc memory allocator code
- Bug fixes for output formatting inconsistencies
- Major BTRFS dedupe compilation and functionality fixes
- LOW_MEMORY compile option added for more size/speed tradeoff control

jdupes 1.5 (2016-09-26)
- Fix partial hash optimization and re-enable for better performance
- Invert -Z option: only "soft abort" if asked explicitly to do so
- Tweak internal data chunk size to reduce data cache misses
- Change PREFIX for building from /usr/local back to /usr

jdupes 1.4 (2016-08-22)
- Add support for Unicode file paths on Windows platforms
- Discard floating point code of dubious value
- Remove -1/--sameline feature which is not practically useful
- Process partially complete duplicate scan if CTRL+C is pressed
- Add -Z/--hardabort option to disable the new CTRL+C behavior
- Add [n]one option to -d/--delete to discard all files in a match set
- Minor bug fixes and tweaks to improve behavior
- Partial hash optimization still broken and disabled

jdupes 1.3 (2016-08-04)
- Add -i/--reverse to invert the match sort order
- Add -I/--isolate to force cross-parameter matching
- Add "loud" debugging messages (-@ option, build with 'make LOUD=1')
- Improved debugging statistics
- Partial hash optimization still broken and disabled

jdupes 1.2.1 (2016-04-04)
- Disable partial hash optimization; it's broken and misses some duplicates

jdupes 1.2 (2016-03-19)
- Change I/O block size for improved performance
- Improved progress indicator behavior with large files; now the progress
  indicator will update more frequently when full file reads are needed
- Windows read speed boost with _O_SEQUENTIAL file flag
- Experimental tree rebalance code tuning

jdupes 1.1.1 (2016-03-0)
- Fix a bug where recursion was always enabled even if not specified

jdupes 1.1 (2016-03-07)
- Work around the 1023-link limit for Windows hard linking so that linking
  can continue even when the limit is reached
- Update documentation to include hard link arrow explanations
- Add "time of check to time of use" checks immediately prior to taking
  actions on files so that files which changed since being checked will not
  be touched, avoiding potential data loss on "live" data sets
- Add debug stats for files skipped due to Windows hard link limit
- Change default sort to filename instead of modification time
- Replaced Windows "get inode number" code with simpler, faster version
- Fixed a bug where an extra newline was at the end of printed matches
- Reduced progress delay interval; it was a bit slow on many large files

jdupes 1.0.2 (2016-02-26)
- Update jody_hash code to latest version
- Change string_malloc to enable future string_free() improvements
- Add string_malloc counters for debug stat mode
- Add '+size' option to -x/--xsize option to exclude files larger than the
  specified size instead of smaller than that size

jdupes 1.0.1 (2016-01-14)
- Fix bug in deletion set counter that would show e.g. "Set 1 of 0"
- Minor size reductions by merging repeated fixed strings
- Add feature flag 'fastmath' to show when compiled with -ffast-math
- Corrections to code driven by -Wconversion and -Wwrite-strings

jdupes 1.0 (2015-12-23)
- Renamed program to "jdupes" to fully differentiate the fork from fdupes
- Version text now lists build-time "feature flags" (useful for scripting)

fdupes-jody-2.2 (2015-12-22)
- Change all "fdupes" references to "fdupes-jody" and change contact info
- Add block-level dedupe (-B) written by Sebastian Schmidt
- Remove and replace some string function calls

fdupes-jody-2.1 (2015-12-09)
- Minor performance improvements to hashing and memory allocation code
- Added an experimental tree rebalancing function (-DUSE_TREE_REBALANCE)
- Increased string_malloc pages from 64K to 256K for better performance
- Made variable scope and type changes for a tiny performance boost

fdupes-jody-2.0.2 (2015-12-01)
- Removed redundant getfilestats() calls for a tiny speed boost
- Added -D/--debug to show statistic counters; build with 'make DEBUG=1'

fdupes-jody-2.0.1 (2015-10-04)
- A serious hard link match performance bug was fixed
- Some minor efficiency improvements

fdupes-jody-2.0 (2015-08-26)
- Increased "chunk size" for better performance and less "thrashing"
- When using -H, hard linked files match each other without any file reads
- Use Jody Bruchon's string_malloc memory allocator instead of malloc/calloc
- Progress indicator now shows the number of duplicate pairs found
- Progress is updated more rapidly when full file comparisons happen
- Floating point code was made optional and is removed by default
- Comparison script added to check built program against installed version
- Added secret -Q/--quick option which is faster but can be dangerous
- Added -O/--paramorder option to sort by command line parameter order
- The file list loading progress indicator was reworked
- Support added for working on more than 2 million files at once
- Hard linking is much safer and can roll back changes on failures
- Hard links on Windows (on supporting filesystems) are now supported
- Hashing code was optimized for an 8.4% improvement in benchmarks
- Hard linking checks for more error states and final output is much clearer

fdupes-jody-1.51-jody5 (2015-05-31)
- Lower memory usage while slightly boosting performance
- Change --order=name to an intelligent numerically correct sort
- Fixed progress text not showing until the first progress update
- Performance boost for small files (under 4 KiB)
- Test files added for numerically correct sort ordering
- Added `--xsize=SIZE' option: exclude files of size < SIZE
- Updated Makefile: `PREFIX = /usr/local'
- Updated README: Usage to reflect current parameters

fdupes-jody-1.51-jody4 (2015-03-27)
- Better progress indicator delay amount
- Updated jody_hash algorithm with much lower collision rate

fdupes-jody-1.51-jody3 (2015-03-09)
- Remove unnecessary MD5 support code
- Improve progress indicator for large files
- Remove freopen() which prevents porting to Mac OS and Windows
- Add support for hard linking duplicates (-L option)
- Fix -A option
- Many minor bug fixes

fdupes-jody-1.51-jody2 (2015-01-16)
- Switched to C99
- Replaced MD5 with Jody Bruchon's hash function
- Added a delay to progress indications for better performance
- Removed lots of unused code
- Ported fdupes to Microsoft Windows (with MinGW)

fdupes-jody-1.51-jody1 (2015-01-08)
- Switch MD5 hash function to jody_hash for about 17% higher performance
- Add a progress counter delay for approximate 2x speed boost over SSH
- Switch code base to use C99 standard
- Lots of code cleanups and minor improvements

fdupes-1.51 (master @ commit 5d9143b)
- Version of fdupes (with some changes included) forked by Jody Bruchon
- Jody Bruchon's changes start after fdupes master @ commit 5d9143b

jdupes-1.27.3/INSTALL.txt

Building and Installing jdupes
-----------------------------------------------------------------------------

You must have libjodycode to build and run jdupes. Install the libjodycode
library and development headers from your distribution package manager. These
will probably be called "libjodycode" and "libjodycode-dev".

If your distribution doesn't have libjodycode, get it here:
https://github.com/jbruchon/libjodycode

If you clone and build libjodycode into ../libjodycode/ then the build will
use the header and library in that location instead of the libjodycode
already installed on the system. You can then test the dynamically linked
jdupes by telling it where to look:

LD_LIBRARY_PATH=../libjodycode ./jdupes

To install the program with the default options and flags, just issue the
following commands:

make
sudo make install

This installs all jdupes files under /usr/local by default. You may change
this to a different location by editing the Makefile or specifying a PREFIX
on the command line: 'make PREFIX=/usr install'. DESTDIR is also supported to
place everything into a different location for e.g. building distribution
packages: 'make DESTDIR=/pkg/jdupes install'.

There is also a package generation script 'generate_packages.sh' which tries
to make an archive file containing all of the necessary files for
distribution; for Linux you can also tell it which x86 architecture you want
explicitly (i686, x32, x86_64): 'make package ARCH=x32'

To force static or dynamic linking of libjodycode, use the make targets
static_jc and dynamic_jc respectively. On Windows you must tell Make to use
the DLL version explicitly: 'make FORCE_JC_DLL=1' (ld on Windows doesn't seem
to respect -Wl,-Bdynamic properly).

On Windows, you need to use MSYS2 with MinGW-w64 installed. Use this guide
to install the build environment:
https://stackoverflow.com/a/30071634/1906641

Running "make" as usual under the MSYS2 mingw32/mingw64 terminal will build
a Windows binary for the bitness of the terminal you're using. The Makefile
will detect a Windows environment and automatically make the needed build
changes.
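Putting the pieces above together, a full from-scratch build using a nearby
libjodycode might look like this (an illustrative sketch only; the clone URL
and the static_jc target are both described above):

git clone https://github.com/jbruchon/libjodycode ../libjodycode
make -C ../libjodycode
make static_jc
./jdupes -v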
Various build options are available and can be turned on at compile time by
setting CFLAGS_EXTRA or by passing it to 'make':

make CFLAGS_EXTRA=-DYOUR_OPTION
make CFLAGS_EXTRA='-DYOUR_OPTION_ONE -DYOUR_OPTION_TWO'

This is a list of options that can be "turned on" this way:

ON_WINDOWS      Modify code to compile with MinGW on Windows
NO_WINDOWS      Disable Windows MinGW special cases (mainly for Cygwin)
NO_ATIME        Disable all access time features
NO_CHUNKSIZE    Disable auto I/O chunk sizing code and -C option
NO_DELETE       Disable deletion -d, -N
NO_ERRORONDUPE  Disable error exit on first dupe found -E
NO_EXTFILTER    Disable extended filter -X
NO_GETOPT_LONG  Disable getopt_long() (long options will not work)
NO_HARDLINKS    Disable hard link code -L, -H
NO_HASHDB       Disable hash cache database feature -y
NO_HELPTEXT     Disable all help text and almost all version text
NO_JODY_SORT    Disable numerically-correct sort (use "naive" name sort)
NO_JSON         Disable JSON output -j
NO_MTIME        Disable all modify time features
NO_PERMS        Disable permission matching -p
NO_SYMLINKS     Disable symbolic link code -l, -s
NO_TRAVCHECK    Disable double-traversal safety code (-U always on)
NO_USER_ORDER   Disable isolation and parameter sort order -I, -O

Certain options can be turned on by setting a variable passed to make instead
of using CFLAGS_EXTRA, e.g. 'make DEBUG=1':

NO_UNICODE        [Windows only] disable all Unicode support
DEBUG             Turn on algorithm statistic reporting with '-D'
LOUD              '-@' for low-level debugging; enables DEBUG
ENABLE_DEDUPE     Enable '-B' deduplication (Linux/macOS: on by default)
DISABLE_DEDUPE    Forcibly disable (undefine) ENABLE_DEDUPE
STATIC_DEDUPE_H   Build dedupe support with included minimal header file
LOW_MEMORY        Build for extremely low-RAM environments (CAUTION!)
BARE_BONES        Build LOW_MEMORY with very aggressive code removal
USE_JODY_HASH     Use jody_hash instead of xxHash64 (smaller, slower)
EXTERNAL_HASH_LIB Force hash code to be linked in externally (no build)
FORCE_JC_DLL      Windows only: force linking to nearby libjodycode.dll
IGNORE_NEARBY_JC  Do NOT use libjodycode at ../libjodycode if it exists
GC_SECTIONS       Use gcc/ld section garbage collection to reduce size

EXTERNAL_HASH_LIB will build jdupes with the interface code for the chosen
hash type (xxhash vs jody_hash) but will suppress building the actual code.
This is intended for use by OS distributions to use a shared library. You
will need to link against the shared library on your own (e.g.
LDFLAGS=-lxxhash make).

The LOW_MEMORY option tweaks various knobs in the program to lower total
memory usage. It also disables some features to reduce the size of certain
data structures. The improvements in memory usage are not very large, but if
you're running in a very RAM-limited environment or have a CPU with very
small caches it may be a good choice. This is primarily meant for use in
embedded systems and should not be used unless you know what you are doing.

The BARE_BONES option sets LOW_MEMORY and also enables code removals that
are extremely aggressive, to the point that what some might consider
fundamental capabilities and safety features are completely stripped out,
including the NO_DELETE and NO_TRAVCHECK options. It is intended to reduce
the program to the most basic functionality expected of a simple duplicate
finder and may be suitable for embedded systems with extremely limited
storage and memory.
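For example, a bare-bones build followed by a normal build for comparison
could look like this (an illustrative sketch using only the make variables
and targets documented in this file):

make clean
make BARE_BONES=1 stripped
make clean
make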
This is an example of the size drop for a normal 64-bit Windows build:

   text    data     bss    total  filename
 128384    6988  167776   303148  jdupes.exe
  74248    6348   29888   110484  jdupes-barebones.exe
-------------------------------
- 54136     640  137888   192664  size reduction (64% smaller)

A test directory is included so that you may familiarize yourself with the
way jdupes operates. You may test the program before installing it by
issuing a command such as "./jdupes testdir" or "./jdupes -r testdir", just
to name a couple of examples. See the README for information on valid
options.

A comparison shell script is also included. It will run your natively
installed 'jdupes' and the newly built 'jdupes' with the directories and
extra options you specify, compare the run times, and output a 'diff' of the
two programs' outputs. Unless the core algorithm or sort behavior is
changed, both programs should produce identical outputs and the 'diff'
output shouldn't appear at all. To use it, type:

./compare_jdupes.sh [options]

There are some package generators included as make targets:

chrootpackage   Uses chroots under /chroot to build Linux packages
package         Makes auto-detected macOS/Linux/Windows packages

If you have a multilib compiler for x86_64 you can specify an architecture
to make packages for: 'ARCH=xxx make package' where xxx is i386, x86_64, or
x32.

jdupes-1.27.3/LICENSE.txt

MIT License

Copyright (c) 2015-2023 Jody Lee Bruchon

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.

jdupes-1.27.3/Makefile

# jdupes Makefile

# Default flags to pass to the C compiler (can be overridden)
CFLAGS ?= -O2 -g

# PREFIX determines where files will be installed. Common examples
# include "/usr" or "/usr/local".
PREFIX = /usr/local

# PROGRAM_NAME determines the installation name and manual page name
PROGRAM_NAME = jdupes

# BIN_DIR indicates directory where program is to be installed.
# Suggested value is "$(PREFIX)/bin"
BIN_DIR = $(PREFIX)/bin

# MAN_DIR indicates directory where the jdupes man page is to be
# installed. Suggested value is "$(PREFIX)/man/man1"
MAN_BASE_DIR = $(PREFIX)/share/man
MAN_DIR = $(MAN_BASE_DIR)/man1
MAN_EXT = 1

# Required external tools
CC ?= gcc
INSTALL = install
RM = rm -f
RMDIR = rmdir -p
MKDIR = mkdir -p
INSTALL_PROGRAM = $(INSTALL) -m 0755
INSTALL_DATA = $(INSTALL) -m 0644

# Main object files
OBJS += hashdb.o
OBJS += args.o checks.o dumpflags.o extfilter.o filehash.o filestat.o jdupes.o helptext.o
OBJS += interrupt.o libjodycode_check.o loaddir.o match.o progress.o sort.o travcheck.o
OBJS += act_deletefiles.o act_linkfiles.o act_printmatches.o act_summarize.o act_printjson.o

# Configuration section
COMPILER_OPTIONS = -Wall -Wwrite-strings -Wcast-align -Wstrict-aliasing -Wstrict-prototypes -Wpointer-arith -Wundef
COMPILER_OPTIONS += -Wshadow -Wfloat-equal -Waggregate-return -Wcast-qual -Wswitch-default -Wswitch-enum -Wunreachable-code -Wformat=2
COMPILER_OPTIONS += -std=gnu11 -D_FILE_OFFSET_BITS=64 -fstrict-aliasing -pipe
COMPILER_OPTIONS += -DNO_ATIME

# Remove unused code if requested
ifdef GC_SECTIONS
COMPILER_OPTIONS += -fdata-sections -ffunction-sections
LINK_OPTIONS += -Wl,--gc-sections
endif

# Bare-bones mode (for the adventurous lunatic) - includes all LOW_MEMORY options
ifdef BARE_BONES
LOW_MEMORY = 1
COMPILER_OPTIONS += -DNO_DELETE -DNO_TRAVCHECK -DBARE_BONES -DNO_ERRORONDUPE
COMPILER_OPTIONS += -DNO_HASHDB -DNO_HELPTEXT -DCHUNK_SIZE=4096 -DPATHBUF_SIZE=1024
endif

# Low memory mode
ifdef LOW_MEMORY
USE_JODY_HASH = 1
DISABLE_DEDUPE = 1
override undefine ENABLE_DEDUPE
COMPILER_OPTIONS += -DLOW_MEMORY
COMPILER_OPTIONS += -DNO_HARDLINKS -DNO_SYMLINKS -DNO_USER_ORDER -DNO_PERMS
COMPILER_OPTIONS += -DNO_ATIME -DNO_JSON -DNO_EXTFILTER -DNO_CHUNKSIZE
COMPILER_OPTIONS += -DNO_JODY_SORT
ifndef BARE_BONES
COMPILER_OPTIONS += -DCHUNK_SIZE=16384
endif
endif

UNAME_S=$(shell uname -s)

# Are we running on a Windows OS?
ifeq ($(OS), Windows_NT)
ifndef NO_WINDOWS
ON_WINDOWS=1
endif
endif

# Debugging code inclusion
ifdef LOUD
DEBUG=1
COMPILER_OPTIONS += -DLOUD_DEBUG
endif
ifdef DEBUG
COMPILER_OPTIONS += -DDEBUG
else
COMPILER_OPTIONS += -DNDEBUG
endif
ifdef HARDEN
COMPILER_OPTIONS += -Wformat -Wformat-security -D_FORTIFY_SOURCE=2 -fstack-protector-strong -fPIE -fpie -Wl,-z,relro -Wl,-z,now
endif

# MinGW needs this for printf() conversions to work
ifdef ON_WINDOWS
ifndef NO_UNICODE
UNICODE=1
COMPILER_OPTIONS += -municode
endif
SUFFIX=.exe
LIBEXT=.dll
COMPILER_OPTIONS += -D__USE_MINGW_ANSI_STDIO=1 -DON_WINDOWS=1
ifeq ($(UNAME_S), MINGW32_NT-5.1)
OBJS += winres_xp.o
else
OBJS += winres.o
endif
override undefine ENABLE_DEDUPE
DISABLE_DEDUPE = 1
else
LIBEXT=.so
endif

# Don't use unsupported compiler options on gcc 3/4 (Mac OS X 10.5.8 Xcode)
# ENABLE_DEDUPE by default - macOS Sierra 10.12 and up required
ifeq ($(UNAME_S), Darwin)
GCCVERSION = $(shell expr `LC_ALL=C gcc -v 2>&1 | grep '[cn][cg] version' | sed 's/[^0-9]*//;s/[ .].*//'` \>= 5)
ifndef DISABLE_DEDUPE
ENABLE_DEDUPE = 1
endif
else
GCCVERSION = 1
BDYNAMIC = -Wl,-Bdynamic
BSTATIC = -Wl,-Bstatic
endif
ifeq ($(GCCVERSION), 1)
COMPILER_OPTIONS += -Wextra -Wstrict-overflow=5 -Winit-self
endif

# Use jody_hash instead of xxHash if requested
ifdef USE_JODY_HASH
COMPILER_OPTIONS += -DUSE_JODY_HASH -DNO_XXHASH2
OBJS_CLEAN += xxhash.o
else
ifndef EXTERNAL_HASH_LIB
OBJS += xxhash.o
endif
endif  # USE_JODY_HASH

# Stack size limit can be too small for deep directory trees, so set to 16 MiB
# The ld syntax for Windows is the same for both Cygwin and MinGW
ifndef LOW_MEMORY
ifeq ($(OS), Windows_NT)
COMPILER_OPTIONS += -Wl,--stack=16777216
else ifeq ($(UNAME_S), Darwin)
COMPILER_OPTIONS += -Wl,-stack_size -Wl,0x1000000
else
COMPILER_OPTIONS += -Wl,-z,stack-size=16777216
endif
endif

# Don't do clonefile on Mac OS X < 10.13 (High Sierra)
ifeq ($(UNAME_S), Darwin)
DARWINVER := $(shell expr `uname -r | cut -d. -f1` \< 17)
ifeq "$(DARWINVER)" "1"
COMPILER_OPTIONS += -DNO_CLONEFILE=1
endif
endif

### Dedupe feature stuff (BTRFS, XFS, APFS)

# ENABLE_DEDUPE should be ON by default for Linux
ifeq ($(UNAME_S), Linux)
ifndef DISABLE_DEDUPE
ENABLE_DEDUPE = 1
endif
endif

# Allow forced override of ENABLE_DEDUPE
ifdef DISABLE_DEDUPE
override undefine ENABLE_DEDUPE
override undefine STATIC_DEDUPE_H
endif

# Catch someone trying to enable dedupe in flags and turn on ENABLE_DEDUPE
ifneq (,$(findstring DENABLE_DEDUPE,$(CFLAGS) $(CFLAGS_EXTRA)))
ENABLE_DEDUPE = 1
$(warning Do not enable dedupe in CFLAGS; use make ENABLE_DEDUPE=1 instead)
ifdef DISABLE_DEDUPE
$(error DISABLE_DEDUPE set but -DENABLE_DEDUPE is in CFLAGS. Choose only one)
endif
endif

# Actually enable dedupe
ifdef ENABLE_DEDUPE
COMPILER_OPTIONS += -DENABLE_DEDUPE
OBJS += act_dedupefiles.o
else
OBJS_CLEAN += act_dedupefiles.o
endif
ifdef STATIC_DEDUPE_H
COMPILER_OPTIONS += -DSTATIC_DEDUPE_H
endif

### Find and use nearby libjodycode by default
ifndef IGNORE_NEARBY_JC
ifneq ("$(wildcard ../libjodycode/libjodycode.h)","")
$(info Found and using nearby libjodycode at ../libjodycode)
COMPILER_OPTIONS += -I../libjodycode -L../libjodycode
ifeq ("$(wildcard ../libjodycode/version.o)","")
$(error You must build libjodycode before building jdupes)
endif
endif
STATIC_LDFLAGS += ../libjodycode/libjodycode.a
ifdef ON_WINDOWS
DYN_LDFLAGS += -l:../libjodycode/libjodycode$(LIBEXT)
else
DYN_LDFLAGS += -ljodycode
endif
endif

CFLAGS += $(COMPILER_OPTIONS) $(CFLAGS_EXTRA)
LDFLAGS += $(LINK_OPTIONS) $(LDFLAGS_EXTRA)

all: libjodycode_hint $(PROGRAM_NAME) dynamic_jc

dynamic_jc: $(PROGRAM_NAME)
	$(CC) $(CFLAGS) $(OBJS) $(BDYNAMIC) $(LDFLAGS) $(DYN_LDFLAGS) -o $(PROGRAM_NAME)$(SUFFIX)

static_jc: $(PROGRAM_NAME)
	$(CC) $(CFLAGS) $(OBJS) $(LDFLAGS) $(STATIC_LDFLAGS) $(BDYNAMIC) -o $(PROGRAM_NAME)$(SUFFIX)

static: $(PROGRAM_NAME)
	$(CC) $(CFLAGS) $(OBJS) -static $(LDFLAGS) $(STATIC_LDFLAGS) -o $(PROGRAM_NAME)$(SUFFIX)

static_stripped: $(PROGRAM_NAME) static
	-strip $(PROGRAM_NAME)$(SUFFIX)

$(PROGRAM_NAME): $(OBJS)
	$(CC) $(CFLAGS) $(OBJS) $(BDYNAMIC) $(LDFLAGS) $(DYN_LDFLAGS) -o $(PROGRAM_NAME)$(SUFFIX)

winres.o: winres.rc winres.manifest.xml
	./tune_winres.sh
	windres winres.rc winres.o

winres_xp.o: winres_xp.rc
	./tune_winres.sh
	windres winres_xp.rc winres_xp.o

installdirs:
	test -e $(DESTDIR)$(BIN_DIR) || $(MKDIR) $(DESTDIR)$(BIN_DIR)
	test -e $(DESTDIR)$(MAN_DIR) || $(MKDIR) $(DESTDIR)$(MAN_DIR)

install: $(PROGRAM_NAME) installdirs
	$(INSTALL_PROGRAM) $(PROGRAM_NAME)$(SUFFIX) $(DESTDIR)$(BIN_DIR)/$(PROGRAM_NAME)$(SUFFIX)
	$(INSTALL_DATA) $(PROGRAM_NAME).1 $(DESTDIR)$(MAN_DIR)/$(PROGRAM_NAME).$(MAN_EXT)

uninstalldirs:
	-test -e $(DESTDIR)$(BIN_DIR) && $(RMDIR) $(DESTDIR)$(BIN_DIR)
	-test -e $(DESTDIR)$(MAN_DIR) && $(RMDIR) $(DESTDIR)$(MAN_DIR)

uninstall: uninstalldirs
	$(RM) $(DESTDIR)$(BIN_DIR)/$(PROGRAM_NAME)$(SUFFIX)
	$(RM) $(DESTDIR)$(MAN_DIR)/$(PROGRAM_NAME).$(MAN_EXT)

test:
	./test.sh

stripped: $(PROGRAM_NAME)
	strip $(PROGRAM_NAME)$(SUFFIX)

clean:
	$(RM) $(OBJS) $(OBJS_CLEAN) build_date.h $(PROGRAM_NAME)$(SUFFIX) *~ .*.un~ *.gcno *.gcda *.gcov

distclean: clean
	$(RM) -rf *.pkg.tar* jdupes-*-*/ jdupes-*-*.zip

chrootpackage:
	+./chroot_build.sh

package:
	+./generate_packages.sh $(ARCH)

libjodycode_hint:
	$(info hint: if ../libjodycode is built but jdupes won't run, try doing 'make static_jc')

jdupes-1.27.3/README.md

Introduction
-------------------------------------------------------------------------------
jdupes is a program for identifying and taking actions upon duplicate files
such as deleting, hard linking, symlinking, and block-level deduplication
(also known as "dedupe" or "reflink"). It is faster than most other duplicate
scanners. It prioritizes data safety over performance while also giving
expert users access to advanced (and sometimes dangerous) features.

Please consider financially supporting continued development of jdupes using
the links on my home page (Ko-fi, PayPal, SubscribeStar, etc.):

https://www.jodybruchon.com/

Why use jdupes instead of the original fdupes or other duplicate finders?
-------------------------------------------------------------------------------
The biggest reason is raw speed. In testing on various data sets, jdupes is
over 7 times faster than fdupes-1.51 on average.

jdupes provides a native Windows port. Most duplicate scanners built on
Linux and other UNIX-like systems do not compile for Windows out-of-the-box
and even if they do, they don't support Unicode and other Windows-specific
quirks and features.

jdupes is generally stable. All releases of jdupes are compared against
known working reference versions of fdupes or jdupes to be certain that
output does not change. You get the benefits of an aggressive development
process without putting your data at increased risk.

Code in jdupes is written with data loss avoidance as the highest priority.
If a choice must be made between being aggressive or careful, the careful
way is always chosen.

jdupes includes features that are not always found elsewhere. Examples of
such features include block-level data deduplication and control over which
file is kept when a match set is automatically deleted. jdupes is not afraid
of dropping features of low value; a prime example is the `-1` switch which
outputs all matches in a set on one line, a feature which was found to be
useless in real-world tests and therefore thrown out.

While jdupes maintains some degree of compatibility with fdupes from which
it was originally derived, there is no guarantee that it will continue to
maintain such compatibility in the future. However, compatibility will be
retained between minor versions, i.e. jdupes-1.6 and jdupes-1.6.1 should not
have any significant differences in results with identical command lines.

If the program eats your dog or sets fire to your lawn, the authors cannot
be held responsible. If you notice a bug, please report it.

What jdupes is not: a similar (but not identical) file finding tool
-------------------------------------------------------------------------------
Please note that jdupes ONLY works on 100% exact matches. It does not have
any sort of "similarity" matching, nor does it know anything about any
specific file formats such as images or sounds. Something as simple as a
change in embedded metadata such as the ID3 tags in an MP3 file or the EXIF
information in a JPEG image will not change the sound or image presented to
the user when opened, but technically it makes the file no longer identical
to the original.

Plenty of excellent tools already exist to "fuzzy match" specific file types
using knowledge of their file formats to help. There are no plans to add
this type of matching to jdupes. There are some match options available in
jdupes that enable dangerous file matching based on partial or likely but
not 100% certain matching. These are considered expert options for special
situations and are clearly and loudly documented as being dangerous. The
`-Q` and `-T` options are notable examples, and the extreme danger of the
`-T` option is safeguarded by a requirement to specify it twice so it can't
be used accidentally.

How can I do stuff with jdupes that isn't supported by fdupes?
-------------------------------------------------------------------------------
The standard output format of jdupes is extremely simple. Match sets are
presented with one file path per line, and match sets are separated by a
blank line. This is easy to process with fairly simple shell scripts. You
can find example shell scripts in the "example_scripts" directory in the
jdupes source code.
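As a minimal illustration of how easy this format is to consume, the
following POSIX shell sketch (purely illustrative; it is not one of the
bundled scripts) splits the output into numbered match sets:

```
#!/bin/sh
# Illustrative sketch only; not shipped with jdupes.
# Reads jdupes output on stdin and prints each match set with a counter.
set_num=1
in_set=0
while IFS= read -r path; do
  if [ -z "$path" ]; then
    # A blank line ends the current match set
    [ "$in_set" -eq 1 ] && set_num=$((set_num + 1))
    in_set=0
    continue
  fi
  [ "$in_set" -eq 0 ] && printf 'Match set %d:\n' "$set_num"
  in_set=1
  printf '  %s\n' "$path"
done
```

Saved as a hypothetical `count_sets.sh`, it would be used the same way as
the bundled scripts: `jdupes -r somedir | sh count_sets.sh`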
The main example script, "example.sh", is easy to modify to take basic
actions on each file in a match set. These scripts are used by piping the
standard jdupes output to them:

jdupes dir1 dir2 dir3 | example.sh scriptparameters

Usage
-------------------------------------------------------------------------------
```
Usage: jdupes [options] DIRECTORY...
```

### Or with Docker

```
docker run -it --init -v /path/to/dir:/data ghcr.io/jbruchon/jdupes:latest [options] /data
```

Duplicate file sets will be printed by default unless a different action
option is specified (delete, summarize, link, dedupe, etc.)

```
 -@ --loud             output annoying low-level debug info while running
 -0 --print-null       output nulls instead of CR/LF (like 'find -print0')
 -1 --one-file-system  do not match files on different filesystems/devices
 -A --no-hidden        exclude hidden files from consideration
 -B --dedupe           do a copy-on-write (reflink/clone) deduplication
 -C --chunk-size=#     override I/O chunk size in KiB (min 4, max 262144)
 -d --delete           prompt user for files to preserve and delete all
                       others; important: under particular circumstances,
                       data may be lost when using this option together
                       with -s or --symlinks, or when specifying a
                       particular directory more than once; refer to the
                       documentation for additional information
 -D --debug            output debug statistics after completion
 -e --error-on-dupe    exit on any duplicate found with status code 255
 -f --omit-first       omit the first file in each set of matches
 -h --help             display this help message
 -H --hard-links       treat any linked files as duplicate files. Normally
                       linked files are treated as non-duplicates for safety
 -i --reverse          reverse (invert) the match sort order
 -I --isolate          files in the same specified directory won't match
 -j --json             produce JSON (machine-readable) output
 -l --link-soft        make relative symlinks for duplicates w/o prompting
 -L --link-hard        hard link all duplicate files without prompting
                       Windows allows a maximum of 1023 hard links per file
 -m --summarize        summarize dupe information
 -M --print-summarize  will print matches and --summarize at the end
 -N --no-prompt        together with --delete, preserve the first file in
                       each set of duplicates and delete the rest without
                       prompting the user
 -o --order=BY         select sort order for output, linking and deleting:
                       by mtime (BY=time) or filename (BY=name, the default)
 -O --param-order      sort output files in order of command line parameter
                       sequence
                       Parameter order is more important than selected -o
                       sort which applies should several files share the
                       same parameter order
 -p --permissions      don't consider files with different owner/group or
                       permission bits as duplicates
 -P --print=type       print extra info (partial, early, fullhash)
 -q --quiet            hide progress indicator
 -Q --quick            skip byte-by-byte duplicate verification. WARNING:
                       this may delete non-duplicates! Read the manual first!
 -r --recurse          for every directory, process its subdirectories too
 -R --recurse:         for each directory given after this option follow
                       subdirectories encountered within (note the ':' at
                       the end of the option, manpage for more details)
 -s --symlinks         follow symlinks
 -S --size             show size of duplicate files
 -t --no-change-check  disable security check for file changes (aka TOCTTOU)
 -T --partial-only     match based on partial hashes only. WARNING:
                       EXTREMELY DANGEROUS paired with destructive actions!
                       -T must be specified twice to work. Read the manual!
 -u --print-unique     print only a list of unique (non-matched) files
 -U --no-trav-check    disable double-traversal safety check (BE VERY CAREFUL)
                       This fixes a Google Drive File Stream recursion issue
 -v --version          display jdupes version and license information
 -X --ext-filter=x:y   filter files based on specified criteria
                       Use '-X help' for detailed extfilter help
 -y --hash-db=file     use a hash database text file to speed up repeat runs
                       Passing '-y .' will expand to '-y jdupes_hashdb.txt'
 -z --zero-match       consider zero-length files to be duplicates
 -Z --soft-abort       If the user aborts (i.e. CTRL-C) act on matches so far
                       You can send SIGUSR1 to the program to toggle this

Detailed help for jdupes -X/--extfilter options
General format: jdupes -X filter[:value][size_suffix]

noext:ext1[,ext2,...]   Exclude files with certain extension(s)

onlyext:ext1[,ext2,...] Only include files with certain extension(s)

size[+-=]:size[suffix]  Only include files matching size criteria
                        Size specs: + larger, - smaller, = equal to
                        Specs can be mixed, i.e. size+=:100k will
                        only include files 100KiB or more in size.

nostr:text_string       Exclude all paths containing the string
onlystr:text_string     Only allow paths containing the string
                        HINT: you can use these for directories:
                        -X nostr:/dir_x/ or -X onlystr:/dir_x/
newer:datetime          Only include files newer than specified date
older:datetime          Only include files older than specified date
                        Date/time format: "YYYY-MM-DD HH:MM:SS"
                        Time is optional (remember to escape spaces!)

Some filters take no value or multiple values. Filters that can take a
numeric option generally support the size multipliers K/M/G/T/P/E with or
without an added iB or B. Multipliers are binary-style unless the -B suffix
is used, which will use decimal multipliers. For example, 16k or 16kib =
16384; 16kb = 16000. Multipliers are case-insensitive.

Filters have cumulative effects: jdupes -X size+:99 -X size-:101 will cause
only files of exactly 100 bytes in size to be included.

Extension matching is case-insensitive.
Path substring matching is case-sensitive.
```

The `-U`/`--no-trav-check` option disables the double-traversal protection.
In the VAST MAJORITY of circumstances, this SHOULD NOT BE DONE, as it
protects against several dangerous user errors, including specifying the
same files or directories twice causing them to match themselves and
potentially be lost or irreversibly damaged, or a symbolic link to a
directory making an endless loop of recursion that will cause the program to
hang indefinitely. This option was added because Google Drive File Stream
presents directories in the virtual hard drive used by GDFS with identical
device:inode pairs despite the directories actually being different. This
triggers double-traversal prevention against every directory, effectively
blocking all recursion. Disabling this check will reduce safety, but will
allow duplicate scanning inside Google Drive File Stream drives. This also
results in a very minor speed boost during recursion, but the boost is
unlikely to be noticeable.

The `-t`/`--no-change-check` option disables file change checks during/after
scanning. This opens a TOCTTOU (time of check to time of use) security
vulnerability. The program normally runs checks immediately before scanning
or taking action upon a file to see if the file has changed in some way
since it was last checked.
With this option enabled, the program will not run any of these checks,
making the algorithm slightly faster, but also increasing the risk that the
program scans a file, the file is changed after the scan, and the program
still acts like the file was in its previous state. This is particularly
dangerous when considering actions such as linking and deleting. In the most
extreme case, a file could be deleted during scanning but match other files
prior to that deletion; if the file is the first in the list of duplicates
and auto-delete is used, all of the remaining matched files will be deleted
as well. This option was added due to user reports of some filesystems
(particularly network filesystems) changing the reported file information
inappropriately, rendering the entire program unusable on such filesystems.

The `-n`/`--no-empty` option was removed for safety. Matching zero-length
files as duplicates now requires explicit use of the `-z`/`--zero-match`
option instead.

Duplicate files are listed together in groups with each file displayed on a
separate line. The groups are then separated from each other by blank lines.

The `-s`/`--symlinks` option will treat symlinked files as regular files,
but direct symlinks will be treated as if they are hard linked files and the
`-H`/`--hard-links` option will apply to them in the same manner.

When using `-d` or `--delete`, care should be taken to insure against
accidental data loss. While no information will be immediately lost, using
this option together with `-s` or `--symlink` can lead to confusing
information being presented to the user when prompted for files to preserve.
Specifically, a user could accidentally preserve a symlink while deleting
the file it points to. A similar problem arises when specifying a particular
directory more than once. All files within that directory will be listed as
their own duplicates, leading to data loss should a user preserve a file
without its "duplicate" (the file itself!)

Using `-1` or `--one-file-system` prevents matches that cross filesystems,
but a more relaxed form of this option may be added that allows
cross-matching for all filesystems that each parameter is present on.

`-Z` or `--soft-abort` used to be `--hard-abort` in jdupes prior to v1.5 and
had the opposite behavior. Defaulting to taking action on abort is probably
not what most users would expect. The decision to invert rather than
reassign to a different option was made because this feature was still
fairly new at the time of the change.

On non-Windows platforms that support SIGUSR1, you can toggle the state of
the `-Z` option by sending a SIGUSR1 to the program. This is handy if you
want to abort jdupes, didn't specify `-Z`, and changed your mind and don't
want to lose all the work that was done so far. Just do
'`killall -USR1 jdupes`' and you will be able to abort with `-Z`. This works
in reverse: if you want to prevent a `-Z` from happening, a SIGUSR1 will
toggle it back off. That's a lot less useful because you can just stop and
kill the program to get the same effect, but it's there if you want it for
some reason. Sending the signal twice while the program is stopped will
behave as if it was only sent once, as per normal POSIX signal behavior.

The `-O` or `--param-order` option allows the user greater control over what
appears in the first position of a match set, specifically for keeping the
`-N` option from deleting all but one file in a set in a seemingly random
way.
All directories specified on the command line will be used as the sorting
order of result sets first, followed by the sorting algorithm set by the
`-o` or `--order` option. This means that the order of all match pairs for a
single directory specification will retain the old sorting behavior even if
this option is specified. When used together with options `-s` or
`--symlink`, a user could accidentally preserve a symlink while deleting the
file it points to.

The `-Q` or `--quick` option only reads each file once, hashes it, and
performs comparisons based solely on the hashes. There is a small but
significant risk of a hash collision which is the purpose of the failsafe
byte-for-byte comparison that this option explicitly bypasses. Do not use it
on ANY data set for which any amount of data loss is unacceptable. You have
been warned!

The `-T` or `--partial-only` option produces results based on a hash of the
first block of file data in each file, ignoring everything else in the file.
Partial hash checks have always been an important exclusion step in the
jdupes algorithm, usually hashing the first 4096 bytes of data and allowing
files that are different at the start to be rejected early. In certain
scenarios it may be a useful heuristic for a user to see that a set of files
has the same size and the same starting data, even if the remaining data
does not match; one example of this would be comparing files with data
blocks that are damaged or missing such as an incomplete file transfer or
checking a data recovery against known-good copies to see what damaged data
can be deleted in favor of restoring the known-good copy. This option is
meant to be used with informational actions and can result in EXTREME DATA
LOSS if used with options that delete files, create hard links, or perform
other destructive actions on data based on the matching output. Because of
the potential for massive data destruction, this option MUST BE SPECIFIED
TWICE to take effect and will error out if it is only specified once.

The `-I`/`--isolate` option attempts to block matches that are contained in
the same specified directory parameter on the command line. Due to the
underlying nature of the jdupes algorithm, a lot of matches will be blocked
by this option that probably should not be. This code could use improvement.

The `-C`/`--chunk-size` option overrides the size of the I/O "chunk" used
for all file operations. Larger numbers will increase the amount of data
read at once from each file and may improve performance when scanning lots
of files that are larger than the default chunk size by reducing "thrashing"
of the hard disk heads. Smaller numbers may increase algorithm speed
depending on the characteristics of your CPU but will usually increase I/O
and system call overhead as well. The number also directly affects memory
usage: I/O chunk size is used for at least three allocations in the program,
so using a chunk size of 16777216 (16 MiB) will require 48 MiB of RAM. The
default is usually between 32768 and 65536 which results in the fastest raw
speed of the algorithm and generally good all-around performance. Feel free
to experiment with the number on your data set and report your experiences
(preferably with benchmarks and info on your data set.)

Using `-P`/`--print` will cause the program to print extra information that
may be useful but will pollute the output in a way that makes scripted
handling difficult.
The current purpose of `-P` is to reveal more information about the file matching process by printing match pairs that pass certain steps of the process prior to full file comparison. This can be useful if you have two files that are passing early checks but failing after full checks.

The `-y`/`--hash-db` feature creates and maintains a text file with a list of file paths, hashes, and other metadata that enables jdupes to "remember" file data across runs. Specifying a period '.' as the database file name will use a name of "jdupes_hashdb.txt" instead; this alias makes it easy to use the hash database feature without typing a descriptive name each time. THIS FEATURE IS CURRENTLY UNDER DEVELOPMENT AND HAS MANY QUIRKS. USE IT AT YOUR OWN RISK. In particular, one of the biggest problems with this feature is that it stores every path exactly as specified on the command line; if any paths are passed into jdupes on a subsequent run with a different prefix, they will not be recognized and will be treated as totally different files. For example, running `jdupes -y . foo/` is not the same as `jdupes -y . ./foo` nor the same as (from a sibling directory) `jdupes -y . ../foo`. You must run jdupes from the same working directory and with the same path specifications to take advantage of the hash database feature. When used correctly, a fully populated hash database can cut subsequent runs over hundreds of thousands of files from a very long run time down to the directory scanning time plus a couple of seconds. If the directory data is already in the OS disk cache, this can make subsequent runs with over 100K files finish in under one second.

Hard and soft (symbolic) linking status symbols and behavior
-------------------------------------------------------------------------------
A set of arrows is used in file linking to show what action was taken on each link candidate. These arrows are as follows:

`---->` File was hard linked to the first file in the duplicate chain

`-@@->` File was symlinked to the first file in the chain

`-##->` File was cloned from the first file in the chain

`-==->` Already a hard link to the first file in the chain

`-//->` File linking failed due to an error during the linking process

If your data set has linked files and you do not use `-H` to always consider them as duplicates, you may still see linked files appear together in match sets. This is caused by a separate file that matches the linked files independently, and it is the correct behavior. See the notes below on the "triangle problem" in jdupes for technical details.

Microsoft Windows platform-specific notes
-------------------------------------------------------------------------------
Windows has a hard limit of 1024 hard links per file. There is no way to change this. The documentation for CreateHardLink() states: "The maximum number of hard links that can be created with this function is 1023 per file. If more than 1023 links are created for a file, an error results." (The number is actually 1024, but they're ignoring the first file.)

The current jdupes algorithm's "triangle problem"
-------------------------------------------------------------------------------
Pairs of files are excluded individually based on how the two files compare. For example, if `--hard-links` is not specified, then two files which are hard linked will not match one another for duplicate scanning purposes. The problem with only examining files in pairs is that certain circumstances will lead to the exclusion being overridden.
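To make the walkthrough below concrete, here is one way such a file set could be constructed (a sketch; the directory `a/` and its file names are hypothetical):

```
mkdir a
echo "identical contents" > a/file1
cp a/file1 a/file2        # separate file containing the same data
ln a/file1 a/file3        # hard link sharing a/file1's inode
jdupes a/                 # scan without -H
```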
Let's say we have three files with identical contents:

```
a/file1
a/file2
a/file3
```

and `a/file1` is linked to `a/file3`. Here's how `jdupes a/` sees them:

---
Are 'a/file1' and 'a/file2' matches? Yes [point a/file1->duplicates to a/file2]

Are 'a/file1' and 'a/file3' matches? No (hard linked already, `-H` off)

Are 'a/file2' and 'a/file3' matches? Yes [point a/file2->duplicates to a/file3]
---

Now you have the following duplicate list:

```
a/file1->duplicates ==> a/file2->duplicates ==> a/file3
```

The solution is to split match sets into multiple sets, but doing this will also remove the guarantee that files will only ever appear in one match set and could result in data loss if handled improperly. In the future, options for "greedy" and "sparse" may be introduced to switch between allowing triangle matches to be in the same set vs. splitting sets after matching finishes without the "only ever appears once" guarantee.

Does jdupes meet the "Good Practice when Deleting Duplicates" by rmlint?
-------------------------------------------------------------------------------
Yes. If you've not read this list of cautions, it is available at
http://rmlint.readthedocs.io/en/latest/cautions.html

Here's a breakdown of how jdupes addresses each of the items listed.

### "Backup your data"/"Measure twice, cut once"

These guidelines are for the user of duplicate scanning software, not the software itself. Back up your files regularly. Use jdupes to print a list of what is found as duplicated and check that list very carefully before automatically deleting the files.

### "Beware of unusual filename characters"

The only character that poses a concern in jdupes is a newline `\n`, and that is only a problem because the duplicate set printer uses newlines to separate file names. Actions taken by jdupes are not parsed like a command line, so spaces and other weird characters in names aren't a problem. Escaping the names properly if acting on the printed output is a problem for the user's shell script or other external program.

### "Consider safe removal options"

This is also an exercise for the user.

### "Traversal Robustness"

jdupes tracks each directory traversed by its dev:inode pair to avoid adding the contents of the same directory twice. This prevents the user from being able to register all of their files twice by duplicating an entry on the command line. Symlinked directories are only followed if they weren't already followed earlier. Files are renamed to a temporary name before any linking is done, and if the link operation fails, they are renamed back to the original name.

### "Collision Robustness"

jdupes uses xxHash for file data hashing. This hash is extremely fast with a low collision rate, but it still encounters collisions as any hash function will ("secure" or otherwise) due to the pigeonhole principle. This is why jdupes performs a full-file verification before declaring a match. It's slower than matching by hash only, but the pigeonhole principle puts all data sets larger than the hash at risk of collision, meaning a false duplicate detection and data loss. The slower completion time is not as important as data integrity. Checking for a match based on hashes alone is irresponsible, and using secure hashes like MD5 or the SHA families is orders of magnitude slower than xxHash while still suffering from the risk brought about by the pigeonholing.
An example of this problem is as follows: if a year has 365 days and there are 366 people, it is guaranteed that at least two of them share a birthday. Likewise, even though SHA512 is a 512-bit (64-byte) wide hash, collisions are guaranteed once any of the data streams being hashed for comparison is 65 bytes (520 bits) or larger: 2^520 possible inputs must map onto only 2^512 possible hash values, so at least 256 distinct data streams are guaranteed to share a single hash value.

### "Unusual Characters Robustness"

jdupes does not protect the user from putting ASCII control characters in their file names; they will mangle the output if printed, but they can still be operated upon by the actions (delete, link, etc.) in jdupes.

### "Seek Thrash Robustness"

jdupes uses an I/O chunk size that is optimized for reading as much as possible from disk at once, taking advantage of the high sequential read speeds of traditional rotating media drives while balancing against the significantly higher rate of CPU cache misses triggered by an excessively large I/O buffer size. Enlarging the I/O buffer further may allow lots of large files to be read with less head seeking, but the CPU cache misses slow the algorithm down and memory usage increases to hold these large buffers. jdupes is benchmarked periodically to make sure that the chosen I/O chunk size is the best compromise for a wide variety of data sets.

### "Memory Usage Robustness"

This is a very subjective concern considering that even a cell phone in someone's pocket has at least 1GB of RAM; however, it still applies in the embedded device world where 32MB of RAM might be all that you can have. Even when processing a data set with over a million files, jdupes memory usage (tested on Linux x86-64 with -O3 optimization) doesn't exceed 2GB. A low-memory mode can be chosen at compile time to reduce overall memory usage with a small performance penalty.

How does a duplicate scanner algorithm work?
-------------------------------------------------------------------------------
The most naive way to look for files that are the same is to compare all files to all other files using a tool like the `cmp` command on Linux/macOS/BSD or the `fc` command on Windows/DOS. This works, but it is extremely slow and wastes a lot of time. As files are added, the number of comparisons grows quadratically (the formula is n(n-1)/2 for the discrete math nerds):

| Files | Compares |
|-------|----------|
| 2     | 1        |
| 3     | 3        |
| 4     | 6        |
| 5     | 10       |
| 10    | 45       |
| 100   | 4950     |
| 1000  | 499500   |
| 5000  | 12497500 |
| 10000 | 49995000 |
| 14142 | 99991011 |

Let's say that every file is 1,000 bytes in size and you have 10,000 files for a total size of 10,000,000 bytes (about 9.53 MiB). Using this naive comparison approach means the actual amount of data to compare is around 47,679 MiB. You should be able to see how extreme this can get--especially with larger files.

A slightly smarter approach is to use *file hashes* as a substitute for the full file contents. A *hash* is a number based on the data fed into a *hash function*, and the number is always the same when the same data is fed in.
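A quick way to see this from a shell (a sketch; the file names are hypothetical, and it assumes the `xxhsum` tool from the xxHash project is installed, though any hashing tool such as `md5sum` demonstrates the same point):

```
echo "some data" > one.txt
cp one.txt two.txt                 # identical data, identical hash
echo "other data" > three.txt      # different data, different hash
xxhsum one.txt two.txt three.txt   # one.txt and two.txt print the same value
cmp one.txt two.txt && echo "byte-for-byte identical"   # the failsafe step
```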
If the hash for two files is different, then the contents of those files are guaranteed to be different; if the hash is the same, then the data **might** be the same, though this is not guaranteed due to the *birthday problem:* the size of the number is much smaller than the size of the data it represents, so *there will always be many different inputs that produce the same hash value.* Files with matching hash values must still be compared to be sure that they are 100% identical.

49,995,000 comparisons can be done much more quickly when you're only comparing a single big number every time instead of thousands or millions of bytes. This makes a big difference in performance since the only files being compared are files that look likely to be identical. **Fast exclusion of non-duplicates is the main purpose of duplicate scanners.**

jdupes uses a lot of fast exclusion techniques beyond this. A partial list of these, in the order they're performed, is as follows:

1. Files that the user asks the program to exclude are skipped entirely
2. Files with different sizes can't be identical, so they're not compared
3. The first 4 KiB is hashed and compared, which avoids reading full files
4. Entire files are hashed and compared, which avoids comparing data directly
5. Finally, actual file data is compared to verify that they are duplicates

The vast majority of non-duplicate file pairs never make it past the partial (4 KiB) hashing step. This reduces the amount of data read from disk and the time spent comparing things to the smallest amount possible.

v1.20.0 specific: most long options have changed and -n has been removed
-------------------------------------------------------------------------------
Long options now have consistent hyphenation to separate the words used in the option names. Run `jdupes -h` to see the correct usage. Legacy options will remain in place until the next major or minor release (v2.0 or v1.21.0) for compatibility purposes. Users should change any scripts using the old options to use the new ones...or better yet, stop using long options in your scripts in the first place, because it's unnecessarily verbose and wasteful to do so.

v1.15+ specific: Why is the addition of single files not working?
-------------------------------------------------------------------------------
If a file was added through recursion and also added explicitly, that file would end up matching itself. This issue can be seen in v1.14.1 or older versions that support single file addition by using a command like this in the jdupes source code directory:

```
/usr/src/jdupes$ jdupes -rH testdir/isolate/1/ testdir/isolate/1/1.txt
testdir/isolate/1/1.txt
testdir/isolate/1/1.txt
testdir/isolate/1/2.txt
```

Even worse, using the special dot directory will make it happen without the -H option, which is how I discovered this bug:

```
/usr/src/jdupes/testdir/isolate/1$ jdupes . 1.txt
./1.txt
./2.txt
1.txt
```

This works for any path with a single dot directory anywhere in the path, so it has a good deal of potential for data loss in some use cases. As such, the best option was to shove out a new minor release with this feature turned off until some additional checking can be done, e.g. by making sure the canonical paths aren't identical between any two files. A future release will fix this safely.

Contact information
-------------------------------------------------------------------------------
For general program information, help, and tech info: https://www.jdupes.com/
Have a bug report or questions?
contact Jody Bruchon

Legal information and software license
-------------------------------------------------------------------------------
jdupes is Copyright (C) 2015-2023 by Jody Bruchon
Derived from the original 'fdupes' 1.51 (C) 1999-2014 by Adrian Lopez

The MIT License

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

jdupes-1.27.3/README.stupid_dupes000066400000000000000000000045211447252140200164700ustar00rootroot00000000000000
Introduction
-------------------------------------------------------------------------------
stupid_dupes is a shell script that copies the most basic capabilities of jdupes. It is inefficient. It barely has enough features to be worthy of using the word "features" at all. Despite all of that, it's pretty safe and produces the same simple match set printouts as jdupes.

This program illustrates how a duplicate scanner works on a basic level. It has a minimal set of requirements:

* GNU bash
* find with support for -type and -maxdepth
* stat
* cat
* jodyhash (or any other program that outputs ONLY a hash)
* dd (for partial hashing)

It's slow. Real slow. You're welcome.

Please consider financially supporting continued development of stupid_dupes (like you'd spend the money so smartly otherwise): https://www.subscribestar.com/JodyBruchon

Contact information
-------------------------------------------------------------------------------
For stupid_dupes inquiries, contact Jody Bruchon and be sure to say something really stupid when you do.

Legal information and software license
-------------------------------------------------------------------------------
Copyright (C) 2020-2023 by Jody Bruchon and contributors and for some reason Jody is willing to admit to writing it.

The MIT License

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. jdupes-1.27.3/act_dedupefiles.c000066400000000000000000000140431447252140200163650ustar00rootroot00000000000000/* Deduplication of files with OS-specific copy-on-write mechanisms * This file is part of jdupes; see jdupes.c for license information */ #include "jdupes.h" #ifdef ENABLE_DEDUPE #include #include #include #include #include #include #include #include #include "act_dedupefiles.h" #include "libjodycode.h" #ifdef __linux__ /* Use built-in static dedupe header if requested */ #ifdef STATIC_DEDUPE_H #include "linux-dedupe-static.h" #else #include #endif /* STATIC_DEDUPE_H */ /* If the Linux headers are too old, automatically use the static one */ #ifndef FILE_DEDUPE_RANGE_SAME #warning Automatically enabled STATIC_DEDUPE_H due to insufficient header support #include "linux-dedupe-static.h" #endif /* FILE_DEDUPE_RANGE_SAME */ #include #define JDUPES_DEDUPE_SUPPORTED 1 #define KERNEL_DEDUP_MAX_SIZE 16777216 /* Error messages */ static const char s_err_dedupe_notabug[] = "This is not a bug in jdupes; check your file stats/permissions."; static const char s_err_dedupe_repeated[] = "This verbose error description will not be repeated."; #endif /* __linux__ */ #ifdef __APPLE__ #ifdef NO_HARDLINKS #error Hard link support is required for dedupe on macOS but NO_HARDLINKS was set #endif #include "act_linkfiles.h" #define JDUPES_DEDUPE_SUPPORTED 1 #endif #ifndef JDUPES_DEDUPE_SUPPORTED #error Dedupe is only supported on Linux and macOS #endif void dedupefiles(file_t * restrict files) { #ifdef __linux__ struct file_dedupe_range *fdr; struct file_dedupe_range_info *fdri; file_t *curfile, *curfile2, *dupefile; int src_fd; int err_twentytwo = 0, err_ninetyfive = 0; uint64_t total_files = 0; LOUD(fprintf(stderr, "\ndedupefiles: %p\n", files);) fdr = (struct file_dedupe_range *)calloc(1, sizeof(struct file_dedupe_range) + sizeof(struct file_dedupe_range_info) + 1); fdr->dest_count = 1; fdri = &fdr->info[0]; for (curfile = files; curfile; curfile = curfile->next) { /* Skip all files that have no duplicates */ if (!ISFLAG(curfile->flags, FF_HAS_DUPES)) continue; CLEARFLAG(curfile->flags, FF_HAS_DUPES); /* For each duplicate list head, handle the duplicates in the list */ curfile2 = curfile; src_fd = open(curfile->d_name, O_RDONLY); /* If an open fails, keep going down the dupe list until it is exhausted */ while (src_fd == -1 && curfile2->duplicates && curfile2->duplicates->duplicates) { fprintf(stderr, "dedupe: open failed (skipping): %s\n", curfile2->d_name); exit_status = EXIT_FAILURE; curfile2 = curfile2->duplicates; src_fd = open(curfile2->d_name, O_RDONLY); } if (src_fd == -1) continue; printf(" [SRC] %s\n", curfile2->d_name); /* Run dedupe for each set */ for (dupefile = curfile->duplicates; dupefile; dupefile = dupefile->duplicates) { off_t remain; int err; /* Don't pass hard links to dedupe (GitHub issue #25) */ if (dupefile->device == curfile->device && dupefile->inode == curfile->inode) { printf(" -==-> %s\n", dupefile->d_name); continue; } /* Open destination file, skipping any that fail */ fdri->dest_fd = open(dupefile->d_name, O_RDONLY); if (fdri->dest_fd == -1) { fprintf(stderr, "dedupe: open failed (skipping): %s\n", dupefile->d_name); exit_status = EXIT_FAILURE; continue; } /* Dedupe src <--> dest, 16 MiB or less at a time */ 
remain = dupefile->size; fdri->status = FILE_DEDUPE_RANGE_SAME; /* Consume data blocks until no data remains */ while (remain) { errno = 0; fdr->src_offset = (uint64_t)(dupefile->size - remain); fdri->dest_offset = fdr->src_offset; fdr->src_length = (uint64_t)(remain <= KERNEL_DEDUP_MAX_SIZE ? remain : KERNEL_DEDUP_MAX_SIZE); ioctl(src_fd, FIDEDUPERANGE, fdr); if (fdri->status < 0) break; remain -= (off_t)fdr->src_length; } /* Handle any errors */ err = fdri->status; if (err != FILE_DEDUPE_RANGE_SAME || errno != 0) { printf(" -XX-> %s\n", dupefile->d_name); fprintf(stderr, "error: "); if (err == FILE_DEDUPE_RANGE_DIFFERS) { fprintf(stderr, "not identical (files modified between scan and dedupe?)\n"); exit_status = EXIT_FAILURE; } else if (err != 0) { fprintf(stderr, "%s (%d)\n", strerror(-err), err); exit_status = EXIT_FAILURE; } else if (errno != 0) { fprintf(stderr, "%s (%d)\n", strerror(errno), errno); exit_status = EXIT_FAILURE; } if ((err == -22 || errno == 22) && err_twentytwo == 0) { fprintf(stderr, " One or more files being deduped are read-only or hard linked.\n"); fprintf(stderr, " Read-only files can only be deduped by the root user.\n"); fprintf(stderr, " %s\n", s_err_dedupe_notabug); fprintf(stderr, " %s\n", s_err_dedupe_repeated); err_twentytwo = 1; } if ((err == -95 || errno == 95) && err_ninetyfive == 0) { fprintf(stderr, " One or more files is on a filesystem that does not support\n"); fprintf(stderr, " block-level deduplication or are on different filesystems.\n"); fprintf(stderr, " %s\n", s_err_dedupe_notabug); fprintf(stderr, " %s\n", s_err_dedupe_repeated); err_ninetyfive = 1; } } else { /* Dedupe OK; report to the user and add to file count */ printf(" ====> %s\n", dupefile->d_name); total_files++; } close((int)fdri->dest_fd); } printf("\n"); close(src_fd); total_files++; } if (!ISFLAG(flags, F_HIDEPROGRESS)) fprintf(stderr, "Deduplication done (%" PRIuMAX " files processed)\n", total_files); free(fdr); #endif /* __linux__ */ /* On macOS, clonefile() is basically a "hard link" function, so linkfiles will do the work. 
*/ #ifdef __APPLE__ linkfiles(files, 2, 0); #endif /* __APPLE__ */ return; } #endif /* ENABLE_DEDUPE */ jdupes-1.27.3/act_dedupefiles.h000066400000000000000000000005441447252140200163730ustar00rootroot00000000000000/* jdupes action for OS-specific block-level or CoW deduplication * This file is part of jdupes; see jdupes.c for license information */ #ifndef ACT_DEDUPEFILES_H #define ACT_DEDUPEFILES_H #ifdef __cplusplus extern "C" { #endif #include "jdupes.h" void dedupefiles(file_t * restrict files); #ifdef __cplusplus } #endif #endif /* ACT_DEDUPEFILES_H */ jdupes-1.27.3/act_deletefiles.c000066400000000000000000000153501447252140200163630ustar00rootroot00000000000000/* Delete duplicate files automatically or interactively * This file is part of jdupes; see jdupes.c for license information */ #ifndef NO_DELETE #include #include #include #include #include #include #include "jdupes.h" #include "likely_unlikely.h" #include "act_deletefiles.h" #include "act_linkfiles.h" #ifndef NO_HASHDB #include "hashdb.h" #endif /* For interactive deletion input */ #define INPUT_SIZE 1024 /* Count the following statistics: - Maximum number of files in a duplicate set (length of longest dupe chain) - Total number of duplicate file sets (groups) */ static unsigned int get_max_dupes(const file_t *files, unsigned int * const restrict max) { unsigned int groups = 0; if (unlikely(files == NULL || max == NULL)) jc_nullptr("get_max_dupes()"); LOUD(fprintf(stderr, "get_max_dupes(%p, %p)\n", (const void *)files, (void *)max);) *max = 0; while (files) { unsigned int n_dupes; if (ISFLAG(files->flags, FF_HAS_DUPES)) { groups++; n_dupes = 1; for (file_t *curdupe = files->duplicates; curdupe; curdupe = curdupe->duplicates) n_dupes++; if (n_dupes > *max) *max = n_dupes; } files = files->next; } return groups; } void deletefiles(file_t *files, int prompt, FILE *tty) { unsigned int counter, groups; unsigned int curgroup = 0; file_t *tmpfile; file_t **dupelist; unsigned int *preserve; char *preservestr; char *token; char *tstr; unsigned int number, sum, max, x; size_t i; LOUD(fprintf(stderr, "deletefiles: %p, %d, %p\n", files, prompt, tty)); groups = get_max_dupes(files, &max); max++; dupelist = (file_t **) malloc(sizeof(file_t*) * max); preserve = (unsigned int *) malloc(sizeof(int) * max); preservestr = (char *) malloc(INPUT_SIZE); if (!dupelist || !preserve || !preservestr) jc_oom("deletefiles() structures"); for (; files; files = files->next) { if (ISFLAG(files->flags, FF_HAS_DUPES)) { curgroup++; counter = 1; dupelist[counter] = files; if (prompt) { printf("[%u] ", counter); jc_fwprint(stdout, files->d_name, 1); } tmpfile = files->duplicates; while (tmpfile) { dupelist[++counter] = tmpfile; if (prompt) { printf("[%u] ", counter); jc_fwprint(stdout, tmpfile->d_name, 1); } tmpfile = tmpfile->duplicates; } if (prompt) printf("\n"); /* Preserve only the first file */ if (!prompt) { preserve[1] = 1; for (x = 2; x <= counter; x++) preserve[x] = 0; } else do { /* Prompt for files to preserve */ printf("Set %u of %u: keep which files? (1 - %u, [a]ll, [n]one", curgroup, groups, counter); #ifndef NO_HARDLINKS printf(", [l]ink all"); #endif #ifndef NO_SYMLINKS printf(", [s]ymlink all"); #endif printf(")"); if (ISFLAG(a_flags, FA_SHOWSIZE)) printf(" (%" PRIuMAX " byte%c each)", (uintmax_t)files->size, (files->size != 1) ? 
's' : ' '); printf(": "); fflush(stdout); /* Treat fgets() failure as if nothing was entered */ if (!fgets(preservestr, INPUT_SIZE, tty)) preservestr[0] = '\n'; /* If nothing is entered, treat it as if 'a' was entered */ if (preservestr[0] == '\n') strcpy(preservestr, "a\n"); i = strlen(preservestr) - 1; /* tail of buffer must be a newline */ while (preservestr[i] != '\n') { tstr = (char *)realloc(preservestr, strlen(preservestr) + 1 + INPUT_SIZE); if (!tstr) jc_oom("deletefiles() prompt"); preservestr = tstr; if (!fgets(preservestr + i + 1, INPUT_SIZE, tty)) { preservestr[0] = '\n'; /* treat fgets() failure as if nothing was entered */ break; } i = strlen(preservestr) - 1; } for (x = 1; x <= counter; x++) preserve[x] = 0; token = strtok(preservestr, " ,\n"); if (token != NULL) { #if defined NO_HARDLINKS && defined NO_SYMLINKS /* no linktype needed */ #else int linktype = -1; #endif /* defined NO_HARDLINKS && defined NO_SYMLINKS */ /* "Delete none" = stop parsing string */ if (*token == 'n' || *token == 'N') goto stop_scanning; /* If requested, link this set instead */ #ifndef NO_HARDLINKS if (*token == 'l' || *token == 'L') linktype = 1; /* hard link */ #endif #ifndef NO_SYMLINKS if (*token == 's' || *token == 'S') linktype = 0; /* symlink */ #endif #if defined NO_HARDLINKS && defined NO_SYMLINKS /* no linking calls */ #else if (linktype != -1) { linkfiles(files, linktype, 1); goto skip_deletion; } #endif /* defined NO_HARDLINKS && defined NO_SYMLINKS */ } while (token != NULL) { if (*token == 'a' || *token == 'A') for (x = 0; x <= counter; x++) preserve[x] = 1; number = 0; sscanf(token, "%u", &number); if (number > 0 && number <= counter) preserve[number] = 1; token = strtok(NULL, " ,\n"); } for (sum = 0, x = 1; x <= counter; x++) sum += preserve[x]; } while (sum < 1); /* save at least one file */ stop_scanning: printf("\n"); for (x = 1; x <= counter; x++) { if (preserve[x]) { printf(" [+] "); jc_fwprint(stdout, dupelist[x]->d_name, 1); } else { #ifdef UNICODE if (!M2W(dupelist[x]->d_name, wstr)) { printf(" [!] "); jc_fwprint(stdout, dupelist[x]->d_name, 0); printf("-- MultiByteToWideChar failed\n"); exit_status = EXIT_FAILURE; continue; } #endif if (file_has_changed(dupelist[x])) { printf(" [!] "); jc_fwprint(stdout, dupelist[x]->d_name, 0); printf("-- file changed since being scanned\n"); exit_status = EXIT_FAILURE; #ifdef UNICODE } else if (DeleteFileW(wstr) != 0) { #else } else if (remove(dupelist[x]->d_name) == 0) { #endif printf(" [-] "); jc_fwprint(stdout, dupelist[x]->d_name, 1); #ifndef NO_HASHDB if (ISFLAG(flags, F_HASHDB)) { dupelist[x]->mtime = 0; add_hashdb_entry(NULL, 0, dupelist[x]); } #endif } else { printf(" [!] 
"); jc_fwprint(stdout, dupelist[x]->d_name, 0); printf("-- unable to delete file\n"); exit_status = EXIT_FAILURE; } } } #if defined NO_HARDLINKS && defined NO_SYMLINKS /* label not needed */ #else skip_deletion: #endif /* defined NO_HARDLINKS && defined NO_SYMLINKS */ printf("\n"); } } free(dupelist); free(preserve); free(preservestr); return; } #endif /* NO_DELETE */ jdupes-1.27.3/act_deletefiles.h000066400000000000000000000006171447252140200163700ustar00rootroot00000000000000/* jdupes action for deleting duplicate files * This file is part of jdupes; see jdupes.c for license information */ #ifndef NO_DELETE #ifndef ACT_DELETEFILES_H #define ACT_DELETEFILES_H #ifdef __cplusplus extern "C" { #endif #include "jdupes.h" extern void deletefiles(file_t *files, int prompt, FILE *tty); #ifdef __cplusplus } #endif #endif /* ACT_DELETEFILES_H */ #endif /* NO_DELETE */ jdupes-1.27.3/act_linkfiles.c000066400000000000000000000404371447252140200160620ustar00rootroot00000000000000/* Hard link or symlink files * This file is part of jdupes; see jdupes.c for license information */ #include "jdupes.h" /* Compile out the code if no linking support is built in */ #if !(defined NO_HARDLINKS && !defined NO_SYMLINKS && !defined ENABLE_DEDUPE) #include #include #include #include #include #include "act_linkfiles.h" #ifndef NO_HASHDB #include "hashdb.h" #endif #ifdef UNICODE static wpath_t wname, wname2; #endif /* Apple clonefile() is basically a hard link */ #ifdef ENABLE_DEDUPE #ifdef __APPLE__ #ifdef NO_HARDLINKS #error Hard link support is required for dedupe on macOS #endif #include #include #ifndef NO_CLONEFILE #include #define ENABLE_CLONEFILE_LINK 1 #endif /* NO_CLONEFILE */ #endif /* __APPLE__ */ #endif /* ENABLE_DEDUPE */ #ifdef ENABLE_CLONEFILE_LINK static void clonefile_error(const char * const restrict func, const char * const restrict name) { fprintf(stderr, "warning: %s failed for destination file, reverting:\n-##-> ", func); jc_fwprint(stderr,name, 1); exit_status = EXIT_FAILURE; return; } #endif /* ENABLE_CLONEFILE_LINK */ /* Only build this function if some functionality does not exist */ #if defined NO_SYMLINKS || defined NO_HARDLINKS || !defined ENABLE_CLONEFILE_LINK static void linkfiles_nosupport(const char * const restrict call, const char * const restrict type) { fprintf(stderr, "internal error: linkfiles(%s) called without %s support\nPlease report this to the author as a program bug\n", call, type); exit(EXIT_FAILURE); } #endif /* anything unsupported */ #ifdef ON_WINDOWS static void mb2wc_failed(const char * const restrict name) { fprintf(stderr, "error: MultiByteToWideChar failed: "); jc_fwprint(stderr, name, 1); exit_status = EXIT_FAILURE; return; } #endif /* ON_WINDOWS */ static void revert_failed(const char * const restrict orig, const char * const restrict current) { fprintf(stderr, "\nwarning: couldn't revert the file to its original name\n"); fprintf(stderr, "original: "); jc_fwprint(stderr, orig, 1); fprintf(stderr, "current: "); jc_fwprint(stderr, current, 1); exit_status = EXIT_FAILURE; return; } /* linktype: 0=symlink, 1=hardlink, 2=clonefile() */ void linkfiles(file_t *files, const int linktype, const int only_current) { static file_t *tmpfile; static file_t *srcfile; static file_t *curfile; static file_t ** restrict dupelist; static unsigned int counter = 0; static unsigned int max = 0; static unsigned int x = 0; static size_t name_len = 0; static int i, success; #ifndef NO_SYMLINKS static unsigned int symsrc; static char rel_path[PATHBUF_SIZE]; #endif #ifdef 
ENABLE_CLONEFILE_LINK static unsigned int srcfile_preserved_flags = 0; static unsigned int dupfile_preserved_flags = 0; static unsigned int dupfile_original_flags = 0; static struct timeval dupfile_original_tval[2]; #endif LOUD(fprintf(stderr, "linkfiles(%d): %p\n", linktype, files);) curfile = files; /* Calculate a maximum */ while (curfile) { if (ISFLAG(curfile->flags, FF_HAS_DUPES)) { counter = 1; tmpfile = curfile->duplicates; while (tmpfile) { counter++; tmpfile = tmpfile->duplicates; } if (counter > max) max = counter; } curfile = curfile->next; } max++; dupelist = (file_t**) malloc(sizeof(file_t*) * max); if (!dupelist) jc_oom("linkfiles() dupelist"); while (files) { if (ISFLAG(files->flags, FF_HAS_DUPES)) { counter = 1; dupelist[counter] = files; tmpfile = files->duplicates; while (tmpfile) { counter++; dupelist[counter] = tmpfile; tmpfile = tmpfile->duplicates; } /* Link every file to the first file */ if (linktype != 0) { #ifndef NO_HARDLINKS x = 2; srcfile = dupelist[1]; #else linkfiles_nosupport("hard", "hard link"); #endif } else { #ifndef NO_SYMLINKS x = 1; /* Symlinks should target a normal file if one exists */ srcfile = NULL; for (symsrc = 1; symsrc <= counter; symsrc++) { if (!ISFLAG(dupelist[symsrc]->flags, FF_IS_SYMLINK)) { srcfile = dupelist[symsrc]; break; } } /* If no normal file exists, abort */ if (srcfile == NULL) goto linkfile_loop; #else linkfiles_nosupport("soft", "symlink"); #endif } if (!ISFLAG(flags, F_HIDEPROGRESS)) { printf("[SRC] "); jc_fwprint(stdout, srcfile->d_name, 1); } if (linktype == 2) { #ifdef ENABLE_CLONEFILE_LINK if (STAT(srcfile->d_name, &s) != 0) { fprintf(stderr, "warning: stat() on source file failed, skipping:\n[SRC] "); jc_fwprint(stderr, srcfile->d_name, 1); exit_status = EXIT_FAILURE; goto linkfile_loop; } /* macOS unexpectedly copies the compressed flag when copying metadata * (which can result in files being unreadable), so we want to retain * the compression flag of srcfile */ srcfile_preserved_flags = s.st_flags & UF_COMPRESSED; #else linkfiles_nosupport("clone", "clonefile"); #endif } for (; x <= counter; x++) { if (linktype == 1 || linktype == 2) { /* Can't hard link files on different devices */ if (srcfile->device != dupelist[x]->device) { fprintf(stderr, "warning: hard link target on different device, not linking:\n-//-> "); jc_fwprint(stderr, dupelist[x]->d_name, 1); exit_status = EXIT_FAILURE; continue; } else { /* The devices for the files are the same, but we still need to skip * anything that is already hard linked (-L and -H both set) */ if (srcfile->inode == dupelist[x]->inode) { /* Don't show == arrows when not matching against other hard links */ if (ISFLAG(flags, F_CONSIDERHARDLINKS)) if (!ISFLAG(flags, F_HIDEPROGRESS)) { printf("-==-> "); jc_fwprint(stdout, dupelist[x]->d_name, 1); } continue; } } } else { /* Symlink prerequisite check code can go here */ /* Do not attempt to symlink a file to itself or to another symlink */ #ifndef NO_SYMLINKS if (ISFLAG(dupelist[x]->flags, FF_IS_SYMLINK) && ISFLAG(dupelist[symsrc]->flags, FF_IS_SYMLINK)) continue; if (x == symsrc) continue; #endif } #ifdef UNICODE if (!M2W(dupelist[x]->d_name, wname)) { mb2wc_failed(dupelist[x]->d_name); continue; } #endif /* UNICODE */ /* Do not attempt to hard link files for which we don't have write access */ #ifdef ON_WINDOWS if (dupelist[x]->mode & FILE_ATTRIBUTE_READONLY) #else if (access(dupelist[x]->d_name, W_OK) != 0) #endif { fprintf(stderr, "warning: link target is a read-only file, not linking:\n-//-> "); jc_fwprint(stderr, 
dupelist[x]->d_name, 1); exit_status = EXIT_FAILURE; continue; } /* Check file pairs for modification before linking */ /* Safe linking: don't actually delete until the link succeeds */ i = file_has_changed(srcfile); if (i) { fprintf(stderr, "warning: source file modified since scanned; changing source file:\n[SRC] "); jc_fwprint(stderr, dupelist[x]->d_name, 1); LOUD(fprintf(stderr, "file_has_changed: %d\n", i);) srcfile = dupelist[x]; exit_status = EXIT_FAILURE; continue; continue; } if (file_has_changed(dupelist[x])) { fprintf(stderr, "warning: target file modified since scanned, not linking:\n-//-> "); jc_fwprint(stderr, dupelist[x]->d_name, 1); exit_status = EXIT_FAILURE; continue; continue; } #ifdef ON_WINDOWS /* For Windows, the hard link count maximum is 1023 (+1); work around * by skipping linking or changing the link source file as needed */ if (STAT(srcfile->d_name, &s) != 0) { fprintf(stderr, "warning: win_stat() on source file failed, changing source file:\n[SRC] "); jc_fwprint(stderr, dupelist[x]->d_name, 1); srcfile = dupelist[x]; exit_status = EXIT_FAILURE; continue; } if (s.st_nlink >= 1024) { fprintf(stderr, "warning: maximum source link count reached, changing source file:\n[SRC] "); srcfile = dupelist[x]; exit_status = EXIT_FAILURE; continue; } if (STAT(dupelist[x]->d_name, &s) != 0) continue; if (s.st_nlink >= 1024) { fprintf(stderr, "warning: maximum destination link count reached, skipping:\n-//-> "); jc_fwprint(stderr, dupelist[x]->d_name, 1); exit_status = EXIT_FAILURE; continue; } #endif #ifdef ENABLE_CLONEFILE_LINK if (linktype == 2) { if (STAT(dupelist[x]->d_name, &s) != 0) { fprintf(stderr, "warning: stat() on destination file failed, skipping:\n-##-> "); jc_fwprint(stderr, dupelist[x]->d_name, 1); exit_status = EXIT_FAILURE; continue; } /* macOS unexpectedly copies the compressed flag when copying metadata * (which can result in files being unreadable), so we want to ignore * the compression flag on dstfile in favor of the one from srcfile */ dupfile_preserved_flags = s.st_flags & ~(unsigned int)UF_COMPRESSED; dupfile_original_flags = s.st_flags; dupfile_original_tval[0].tv_sec = s.st_atime; dupfile_original_tval[1].tv_sec = s.st_mtime; dupfile_original_tval[0].tv_usec = 0; dupfile_original_tval[1].tv_usec = 0; } #endif /* Make sure the name will fit in the buffer before trying */ name_len = strlen(dupelist[x]->d_name) + 14; if (name_len > PATHBUF_SIZE) continue; /* Assemble a temporary file name */ strcpy(tempname, dupelist[x]->d_name); strcat(tempname, ".__jdupes__.tmp"); /* Rename the destination file to the temporary name */ #ifdef UNICODE if (!M2W(tempname, wname2)) { mb2wc_failed(srcfile->d_name); continue; } i = MoveFileW(wname, wname2) ? 
0 : 1; #else i = rename(dupelist[x]->d_name, tempname); #endif if (i != 0) { fprintf(stderr, "warning: cannot move link target to a temporary name, not linking:\n-//-> "); jc_fwprint(stderr, dupelist[x]->d_name, 1); exit_status = EXIT_FAILURE; /* Just in case the rename succeeded yet still returned an error, roll back the rename */ #ifdef UNICODE MoveFileW(wname2, wname); #else rename(tempname, dupelist[x]->d_name); #endif continue; } /* Create the desired hard link with the original file's name */ errno = 0; success = 0; #ifdef ON_WINDOWS #ifdef UNICODE if (!M2W(srcfile->d_name, wname2)) { mb2wc_failed(srcfile->d_name); continue; } if (CreateHardLinkW((LPCWSTR)wname, (LPCWSTR)wname2, NULL) == TRUE) success = 1; #else if (CreateHardLink(dupelist[x]->d_name, srcfile->d_name, NULL) == TRUE) success = 1; #endif #else /* ON_WINDOWS */ if (linktype == 1) { if (link(srcfile->d_name, dupelist[x]->d_name) == 0) success = 1; #ifdef ENABLE_CLONEFILE_LINK } else if (linktype == 2) { if (clonefile(srcfile->d_name, dupelist[x]->d_name, 0) == 0) { if (copyfile(tempname, dupelist[x]->d_name, NULL, COPYFILE_METADATA) == 0) { /* If the preserved flags match what we just copied from the original dupfile, we're done. * Otherwise, we need to update the flags to avoid data loss due to differing compression flags */ if (dupfile_original_flags == (srcfile_preserved_flags | dupfile_preserved_flags)) { success = 1; } else if (chflags(dupelist[x]->d_name, srcfile_preserved_flags | dupfile_preserved_flags) == 0) { /* chflags overrides the timestamps that were restored by copyfile, so we need to reapply those as well */ if (utimes(dupelist[x]->d_name, dupfile_original_tval) == 0) { success = 1; } else clonefile_error("utimes", dupelist[x]->d_name); } else clonefile_error("chflags", dupelist[x]->d_name); } else clonefile_error("copyfile", dupelist[x]->d_name); } else clonefile_error("clonefile", dupelist[x]->d_name); #endif /* ENABLE_CLONEFILE_LINK */ } #ifndef NO_SYMLINKS else { i = jc_make_relative_link_name(srcfile->d_name, dupelist[x]->d_name, rel_path); LOUD(fprintf(stderr, "symlink MRLN: %s to %s = %s\n", srcfile->d_name, dupelist[x]->d_name, rel_path)); if (i < 0) { fprintf(stderr, "warning: make_relative_link_name() failed (%d)\n", i); } else if (i == 1) { fprintf(stderr, "warning: files to be linked have the same canonical path; not linking\n"); } else if (symlink(rel_path, dupelist[x]->d_name) == 0) success = 1; } #endif /* NO_SYMLINKS */ #endif /* ON_WINDOWS */ if (success) { if (!ISFLAG(flags, F_HIDEPROGRESS)) { switch (linktype) { case 0: /* symlink */ printf("-@@-> "); break; default: case 1: /* hardlink */ printf("----> "); break; #ifdef ENABLE_CLONEFILE_LINK case 2: /* clonefile */ printf("-##-> "); break; #endif } jc_fwprint(stdout, dupelist[x]->d_name, 1); } #ifndef NO_HASHDB /* Delete the hashdb entry for new hard/symbolic links */ if (linktype != 2 && ISFLAG(flags, F_HASHDB)) { dupelist[x]->mtime = 0; add_hashdb_entry(NULL, 0, dupelist[x]); } #endif } else { /* The link failed. Warn the user and put the link target back */ exit_status = EXIT_FAILURE; if (!ISFLAG(flags, F_HIDEPROGRESS)) { printf("-//-> "); jc_fwprint(stdout, dupelist[x]->d_name, 1); } fprintf(stderr, "warning: unable to link '"); jc_fwprint(stderr, dupelist[x]->d_name, 0); fprintf(stderr, "' -> '"); jc_fwprint(stderr, srcfile->d_name, 0); fprintf(stderr, "': %s\n", strerror(errno)); #ifdef UNICODE if (!M2W(tempname, wname2)) { mb2wc_failed(tempname); continue; } i = MoveFileW(wname2, wname) ? 
0 : 1; #else i = rename(tempname, dupelist[x]->d_name); #endif /* UNICODE */ if (i != 0) revert_failed(dupelist[x]->d_name, tempname); continue; } /* Remove temporary file to clean up; if we can't, reverse the linking */ #ifdef UNICODE if (!M2W(tempname, wname2)) { mb2wc_failed(tempname); continue; } i = DeleteFileW(wname2) ? 0 : 1; #else i = remove(tempname); #endif /* UNICODE */ if (i != 0) { /* If the temp file can't be deleted, there may be a permissions problem * so reverse the process and warn the user */ fprintf(stderr, "\nwarning: can't delete temp file, reverting: "); jc_fwprint(stderr, tempname, 1); exit_status = EXIT_FAILURE; #ifdef UNICODE i = DeleteFileW(wname) ? 0 : 1; #else i = remove(dupelist[x]->d_name); #endif /* This last error really should not happen, but we can't assume it won't */ if (i != 0) fprintf(stderr, "\nwarning: couldn't remove link to restore original file\n"); else { #ifdef UNICODE i = MoveFileW(wname2, wname) ? 0 : 1; #else i = rename(tempname, dupelist[x]->d_name); #endif if (i != 0) revert_failed(dupelist[x]->d_name, tempname); } } } if (!ISFLAG(flags, F_HIDEPROGRESS)) printf("\n"); } #if !defined NO_SYMLINKS || defined ENABLE_CLONEFILE_LINK linkfile_loop: #endif if (only_current == 1) break; files = files->next; } if (counter == 0) printf("%s", s_no_dupes); free(dupelist); return; } #endif /* NO_HARDLINKS + NO_SYMLINKS + !ENABLE_DEDUPE */ jdupes-1.27.3/act_linkfiles.h000066400000000000000000000006701447252140200160620ustar00rootroot00000000000000/* jdupes action for hard and soft file linking * This file is part of jdupes; see jdupes.c for license information */ #if !(defined NO_HARDLINKS && defined NO_SYMLINKS) #ifndef ACT_LINKFILES_H #define ACT_LINKFILES_H #ifdef __cplusplus extern "C" { #endif #include "jdupes.h" void linkfiles(file_t *files, const int linktype, const int only_current); #ifdef __cplusplus } #endif #endif /* ACT_LINKFILES_H */ #endif /* NO_*LINKS */ jdupes-1.27.3/act_printjson.c000066400000000000000000000111251447252140200161200ustar00rootroot00000000000000/* Print comprehensive information to stdout in JSON format * This file is part of jdupes; see jdupes.c for license information */ #ifndef NO_JSON #include #include #include #include #include #include #include #include "likely_unlikely.h" #include "jdupes.h" #include "version.h" #include "act_printjson.h" #define IS_CONT(a) ((a & 0xc0) == 0x80) #define GET_CONT(a) (a & 0x3f) #define TO_HEX(a) (char)(((a) & 0x0f) <= 0x09 ? ((a) & 0x0f) + 0x30 : ((a) & 0x0f) + 0x57) #if defined(__GNU__) && !defined(PATH_MAX) #define PATH_MAX 1024 #endif /** Decodes a single UTF-8 codepoint, consuming bytes. */ static inline uint32_t decode_utf8(const char * restrict * const string) { uint32_t ret = 0; /** Eat problems up silently. */ assert(!IS_CONT(**string)); while (unlikely(IS_CONT(**string))) (*string)++; /** ASCII. */ if (likely(!(**string & 0x80))) return (uint32_t)*(*string)++; /** Multibyte 2, 3, 4. */ if ((**string & 0xe0) == 0xc0) { ret = *(*string)++ & 0x1f; ret = (ret << 6) | GET_CONT(*(*string)++); return ret; } if ((**string & 0xf0) == 0xe0) { ret = *(*string)++ & 0x0f; ret = (ret << 6) | GET_CONT(*(*string)++); ret = (ret << 6) | GET_CONT(*(*string)++); return ret; } if ((**string & 0xf8) == 0xf0) { ret = *(*string)++ & 0x07; ret = (ret << 6) | GET_CONT(*(*string)++); ret = (ret << 6) | GET_CONT(*(*string)++); ret = (ret << 6) | GET_CONT(*(*string)++); return ret; } /** We shouldn't be here... Because 5 and 6 bytes are impossible... 
*/ assert(0); return 0xffffffff; } /** Escapes a single UTF-16 code unit for JSON. */ static inline void escape_uni16(uint16_t u16, char ** const json) { *(*json)++ = '\\'; *(*json)++ = 'u'; *(*json)++ = TO_HEX(u16 >> 12); *(*json)++ = TO_HEX(u16 >> 8); *(*json)++ = TO_HEX(u16 >> 4); *(*json)++ = TO_HEX(u16); } /** Escapes a UTF-8 string to ASCII JSON format. */ static void json_escape(const char * restrict string, char * restrict const target) { uint32_t curr = 0; char *escaped = target; while (*string != '\0' && (escaped - target) < (PATH_MAX * 2 - 1)) { switch (*string) { case '\"': case '\\': *escaped++ = '\\'; *escaped++ = *string++; break; default: curr = decode_utf8(&string); if (curr == 0xffffffff) break; if (likely(curr < 0xffff)) { if (likely(curr < 0x20 || curr > 0x7f)) escape_uni16((uint16_t)curr, &escaped); else *escaped++ = (char)curr; } else { curr -= 0x10000; escape_uni16((uint16_t)(0xD800 + ((curr >> 10) & 0x03ff)), &escaped); escape_uni16((uint16_t)(0xDC00 + (curr & 0x03ff)), &escaped); } break; } } *escaped = '\0'; return; } void printjson(file_t * restrict files, const int argc, char **argv) { file_t * restrict tmpfile; int arg = 0, comma = 0, len = 0; char *temp = malloc(PATH_MAX * 2); char *temp2 = malloc(PATH_MAX * 2); char *temp_insert = temp; LOUD(fprintf(stderr, "printjson: %p\n", files)); /* Output information about the jdupes command environment */ printf("{\n \"jdupesVersion\": \"%s\",\n \"jdupesVersionDate\": \"%s\",\n", VER, VERDATE); printf(" \"commandLine\": \""); while (arg < argc) { len = sprintf(temp_insert, " %s", argv[arg]); assert(len >= 0); temp_insert += len; arg++; } json_escape(temp + 1, temp2); /* Skip the starting space */ printf("%s\",\n", temp2); printf(" \"extensionFlags\": \""); #ifndef NO_HELPTEXT if (feature_flags[0] == NULL) printf("none\",\n"); else for (int c = 0; feature_flags[c] != NULL; c++) printf("%s%s", feature_flags[c], feature_flags[c+1] == NULL ? 
"\",\n" : " "); #else printf("unavailable\",\n"); #endif printf(" \"matchSets\": [\n"); while (files != NULL) { if (ISFLAG(files->flags, FF_HAS_DUPES)) { if (comma) printf(",\n"); printf(" {\n \"fileSize\": %" PRIdMAX ",\n \"fileList\": [\n { \"filePath\": \"", (intmax_t)files->size); sprintf(temp, "%s", files->d_name); json_escape(temp, temp2); jc_fwprint(stdout, temp2, 0); printf("\""); tmpfile = files->duplicates; while (tmpfile != NULL) { printf(" },\n { \"filePath\": \""); sprintf(temp, "%s", tmpfile->d_name); json_escape(temp, temp2); jc_fwprint(stdout, temp2, 0); printf("\""); tmpfile = tmpfile->duplicates; } printf(" }\n ]\n }"); comma = 1; } files = files->next; } printf("\n ]\n}\n"); free(temp); free(temp2); return; } #endif /* NO_JSON */ jdupes-1.27.3/act_printjson.h000066400000000000000000000006611447252140200161300ustar00rootroot00000000000000/* jdupes action for printing comprehensive data as JSON to stdout * This file is part of jdupes; see jdupes.c for license information */ #ifndef NO_JSON #ifndef ACT_PRINTJSON_H #define ACT_PRINTJSON_H #ifdef __cplusplus extern "C" { #endif #include "jdupes.h" void printjson(file_t * restrict files, const int argc, char ** const restrict argv); #ifdef __cplusplus } #endif #endif /* ACT_PRINTJSON_H */ #endif /* NO_JSON */ jdupes-1.27.3/act_printmatches.c000066400000000000000000000037301447252140200165760ustar00rootroot00000000000000/* Print matched file sets * This file is part of jdupes; see jdupes.c for license information */ #include #include #include #include "jdupes.h" #include #include "act_printmatches.h" void printmatches(file_t * restrict files) { file_t * restrict tmpfile; int printed = 0; int cr = 1; LOUD(fprintf(stderr, "printmatches: %p\n", files)); if (ISFLAG(a_flags, FA_PRINTNULL)) cr = 2; while (files != NULL) { if (ISFLAG(files->flags, FF_HAS_DUPES)) { printed = 1; if (!ISFLAG(a_flags, FA_OMITFIRST)) { if (ISFLAG(a_flags, FA_SHOWSIZE)) printf("%" PRIdMAX " byte%c each:\n", (intmax_t)files->size, (files->size != 1) ? 's' : ' '); jc_fwprint(stdout, files->d_name, cr); } tmpfile = files->duplicates; while (tmpfile != NULL) { jc_fwprint(stdout, tmpfile->d_name, cr); tmpfile = tmpfile->duplicates; } if (files->next != NULL) jc_fwprint(stdout, "", cr); } files = files->next; } if (printed == 0) printf("%s", s_no_dupes); return; } /* Print files that have no duplicates (unique files) */ void printunique(file_t *files) { file_t *chain, *scan; int printed = 0; int cr = 1; LOUD(fprintf(stderr, "print_uniques: %p\n", files)); if (ISFLAG(a_flags, FA_PRINTNULL)) cr = 2; scan = files; while (scan != NULL) { if (ISFLAG(scan->flags, FF_HAS_DUPES)) { chain = scan; while (chain != NULL) { SETFLAG(chain->flags, FF_NOT_UNIQUE); chain = chain->duplicates; } } scan = scan->next; } while (files != NULL) { if (!ISFLAG(files->flags, FF_NOT_UNIQUE)) { printed = 1; if (ISFLAG(a_flags, FA_SHOWSIZE)) printf("%" PRIdMAX " byte%c each:\n", (intmax_t)files->size, (files->size != 1) ? 
's' : ' '); jc_fwprint(stdout, files->d_name, cr); } files = files->next; } if (printed == 0) jc_fwprint(stderr, "No unique files found.", 1); return; } jdupes-1.27.3/act_printmatches.h000066400000000000000000000006011447252140200165750ustar00rootroot00000000000000/* jdupes action for printing matched file sets to stdout * This file is part of jdupes; see jdupes.c for license information */ #ifndef ACT_PRINTMATCHES_H #define ACT_PRINTMATCHES_H #ifdef __cplusplus extern "C" { #endif #include "jdupes.h" void printmatches(file_t * restrict files); void printunique(file_t *files); #ifdef __cplusplus } #endif #endif /* ACT_PRINTMATCHES_H */ jdupes-1.27.3/act_summarize.c000066400000000000000000000022001447252140200161000ustar00rootroot00000000000000/* Print summary of match statistics to stdout * This file is part of jdupes; see jdupes.c for license information */ #include #include #include #include "jdupes.h" #include "act_summarize.h" void summarizematches(const file_t * restrict files) { unsigned int numsets = 0; off_t numbytes = 0; int numfiles = 0; LOUD(fprintf(stderr, "summarizematches: %p\n", files)); while (files != NULL) { file_t *tmpfile; if (ISFLAG(files->flags, FF_HAS_DUPES)) { numsets++; tmpfile = files->duplicates; while (tmpfile != NULL) { numfiles++; numbytes += files->size; tmpfile = tmpfile->duplicates; } } files = files->next; } if (numsets == 0) printf("%s", s_no_dupes); else { printf("%d duplicate files (in %d sets), occupying ", numfiles, numsets); if (numbytes < 1000) printf("%" PRIdMAX " byte%c\n", (intmax_t)numbytes, (numbytes != 1) ? 's' : ' '); else if (numbytes <= 1000000) printf("%" PRIdMAX " KB\n", (intmax_t)(numbytes / 1000)); else printf("%" PRIdMAX " MB\n", (intmax_t)(numbytes / 1000000)); } return; } jdupes-1.27.3/act_summarize.h000066400000000000000000000005571447252140200161220ustar00rootroot00000000000000/* jdupes action for printing a summary of match stats to stdout * This file is part of jdupes; see jdupes.c for license information */ #ifndef ACT_SUMMARIZE_H #define ACT_SUMMARIZE_H #ifdef __cplusplus extern "C" { #endif #include "jdupes.h" extern void summarizematches(const file_t * restrict files); #ifdef __cplusplus } #endif #endif /* ACT_SUMMARIZE_H */ jdupes-1.27.3/args.c000066400000000000000000000022721447252140200142020ustar00rootroot00000000000000/* Argument functions * This file is part of jdupes; see jdupes.c for license information */ #include #include #include #include "jdupes.h" char **cloneargs(const int argc, char **argv) { static int x; static char **args; args = (char **)malloc(sizeof(char *) * (unsigned int)argc); if (args == NULL) jc_oom("cloneargs() start"); for (x = 0; x < argc; x++) { args[x] = (char *)malloc(strlen(argv[x]) + 1); if (args[x] == NULL) jc_oom("cloneargs() loop"); strcpy(args[x], argv[x]); } return args; } int findarg(const char * const arg, const int start, const int argc, char **argv) { int x; for (x = start; x < argc; x++) if (jc_streq(argv[x], arg) == 0) return x; return x; } /* Find the first non-option argument after specified option. 
*/ int nonoptafter(const char *option, const int argc, char **oldargv, char **newargv) { int x; int targetind; int testind; int startat = 1; targetind = findarg(option, 1, argc, oldargv); for (x = optind; x < argc; x++) { testind = findarg(newargv[x], startat, argc, oldargv); if (testind > targetind) return x; else startat = testind; } return x; } jdupes-1.27.3/args.h000066400000000000000000000010261447252140200142030ustar00rootroot00000000000000/* jdupes argument functions * This file is part of jdupes; see jdupes.c for license information */ #ifndef JDUPES_ARGS_H #define JDUPES_ARGS_H #ifdef __cplusplus extern "C" { #endif char **cloneargs(const int argc, char **argv); int findarg(const char * const arg, const int start, const int argc, char **argv); int nonoptafter(const char *option, const int argc, char **oldargv, char **newargv); void linkfiles(file_t *files, const int linktype, const int only_current); #ifdef __cplusplus } #endif #endif /* JDUPES_ARGS_H */ jdupes-1.27.3/checks.c000066400000000000000000000117051447252140200145070ustar00rootroot00000000000000/* jdupes file check functions * This file is part of jdupes; see jdupes.c for license information */ #include #include #include #include #include #include "likely_unlikely.h" #ifndef NO_EXTFILTER #include "extfilter.h" #endif #include "filestat.h" #include "jdupes.h" /***** End definitions, begin code *****/ /***** Add new functions here *****/ /* Check a pair of files for match exclusion conditions * Returns: * 0 if all condition checks pass * -1 or 1 on compare result less/more * -2 on an absolute exclusion condition met * 2 on an absolute match condition met * -3 on exclusion due to isolation * -4 on exclusion due to same filesystem * -5 on exclusion due to permissions */ int check_conditions(const file_t * const restrict file1, const file_t * const restrict file2) { if (unlikely(file1 == NULL || file2 == NULL || file1->d_name == NULL || file2->d_name == NULL)) jc_nullptr("check_conditions()"); LOUD(fprintf(stderr, "check_conditions('%s', '%s')\n", file1->d_name, file2->d_name);) /* Exclude files that are not the same size */ if (file1->size > file2->size) { LOUD(fprintf(stderr, "check_conditions: no match: size of file1 > file2 (%" PRIdMAX " > %" PRIdMAX ")\n", (intmax_t)file1->size, (intmax_t)file2->size)); return -1; } if (file1->size < file2->size) { LOUD(fprintf(stderr, "check_conditions: no match: size of file1 < file2 (%" PRIdMAX " < %"PRIdMAX ")\n", (intmax_t)file1->size, (intmax_t)file2->size)); return 1; } #ifndef NO_USER_ORDER /* Exclude based on -I/--isolate */ if (ISFLAG(flags, F_ISOLATE) && (file1->user_order == file2->user_order)) { LOUD(fprintf(stderr, "check_conditions: files ignored: parameter isolation\n")); return -3; } #endif /* NO_USER_ORDER */ /* Exclude based on -1/--one-file-system */ if (ISFLAG(flags, F_ONEFS) && (file1->device != file2->device)) { LOUD(fprintf(stderr, "check_conditions: files ignored: not on same filesystem\n")); return -4; } /* Exclude files by permissions if requested */ if (ISFLAG(flags, F_PERMISSIONS) && (file1->mode != file2->mode #ifndef NO_PERMS || file1->uid != file2->uid || file1->gid != file2->gid #endif )) { return -5; LOUD(fprintf(stderr, "check_conditions: no match: permissions/ownership differ (-p on)\n")); } /* Hard link and symlink + '-s' check */ #ifndef NO_HARDLINKS if ((file1->inode == file2->inode) && (file1->device == file2->device)) { if (ISFLAG(flags, F_CONSIDERHARDLINKS)) { LOUD(fprintf(stderr, "check_conditions: files match: hard/soft linked (-H on)\n")); 
return 2; } else { LOUD(fprintf(stderr, "check_conditions: files ignored: hard/soft linked (-H off)\n")); return -2; } } #endif /* Fall through: all checks passed */ LOUD(fprintf(stderr, "check_conditions: all condition checks passed\n")); return 0; } /* Check for exclusion conditions for a single file (1 = fail) */ int check_singlefile(file_t * const restrict newfile) { char * restrict tp = tempname; if (unlikely(newfile == NULL)) jc_nullptr("check_singlefile()"); LOUD(fprintf(stderr, "check_singlefile: checking '%s'\n", newfile->d_name)); /* Exclude hidden files if requested */ if (likely(ISFLAG(flags, F_EXCLUDEHIDDEN))) { if (unlikely(newfile->d_name == NULL)) jc_nullptr("check_singlefile newfile->d_name"); strcpy(tp, newfile->d_name); tp = basename(tp); if (tp[0] == '.' && jc_streq(tp, ".") && jc_streq(tp, "..")) { LOUD(fprintf(stderr, "check_singlefile: excluding hidden file (-A on)\n")); return 1; } } /* Get file information and check for validity */ const int i = getfilestats(newfile); if (i || newfile->size == -1) { LOUD(fprintf(stderr, "check_singlefile: excluding due to bad stat()\n")); return 1; } if (!S_ISREG(newfile->mode) && !S_ISDIR(newfile->mode)) { LOUD(fprintf(stderr, "check_singlefile: excluding non-regular file\n")); return 1; } if (!S_ISDIR(newfile->mode)) { /* Exclude zero-length files if requested */ if (newfile->size == 0 && !ISFLAG(flags, F_INCLUDEEMPTY)) { LOUD(fprintf(stderr, "check_singlefile: excluding zero-length empty file (-z not set)\n")); return 1; } #ifndef NO_EXTFILTER if (extfilter_exclude(newfile)) { LOUD(fprintf(stderr, "check_singlefile: excluding based on an extfilter option\n")); return 1; } #endif /* NO_EXTFILTER */ } #ifdef ON_WINDOWS /* Windows has a 1023 (+1) hard link limit. If we're hard linking, * ignore all files that have hit this limit */ #ifndef NO_HARDLINKS if (ISFLAG(a_flags, FA_HARDLINKFILES) && newfile->nlink >= 1024) { #ifdef DEBUG hll_exclude++; #endif LOUD(fprintf(stderr, "check_singlefile: excluding due to Windows 1024 hard link limit\n")); return 1; } #endif /* NO_HARDLINKS */ #endif /* ON_WINDOWS */ LOUD(fprintf(stderr, "check_singlefile: all checks passed\n")); return 0; } jdupes-1.27.3/checks.h000066400000000000000000000006231447252140200145110ustar00rootroot00000000000000/* jdupes file check functions * This file is part of jdupes; see jdupes.c for license information */ #ifndef JDUPES_CHECKS_H #define JDUPES_CHECKS_H #ifdef __cplusplus extern "C" { #endif int check_conditions(const file_t * const restrict file1, const file_t * const restrict file2); int check_singlefile(file_t * const restrict newfile); #ifdef __cplusplus } #endif #endif /* JDUPES_CHECKS_H */ jdupes-1.27.3/chroot_build.sh000077500000000000000000000044771447252140200161270ustar00rootroot00000000000000#!/bin/sh # Jody's generic chroot build script # Version 1.0 ARCHES="i386 x86-64 uclibc-i386 uclibc-x86-64" test -z "$NAME" && NAME="$(basename "$(pwd)")" test -e "version.h" && VER="$(grep '#define VER ' version.h | tr -d \\\" | cut -d' ' -f3)" test -z "$VER" && VER=0 export NAME export VER export CHROOT_BASE=/chroots export WD="$(pwd)" export PKG="pkg" echo "chroot builder: building '$NAME' version '$VER'" trap clean_exit INT QUIT ABRT HUP clean_exit () { umount $CHROOT/proc $CHROOT/sys $CHROOT/tmp $CHROOT/dev $CHROOT/usr/src $CHROOT/home } do_build () { test -z "$WD" && echo "WD not set, aborting" && exit 1 test -z "$PKG" && echo "PKG not set, aborting" && exit 1 if [ -e ./generate_packages.sh ] then ./generate_packages.sh else make clean 
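# No generator script is present, so fall back to an in-tree build: compile
# everything, stage an install under $WD/$PKG via DESTDIR, then pack the
# staged usr/ tree into the versioned, per-arch .pkg.tar.xz named below.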
PN="${NAME}_$VER-$ARCH.pkg.tar.xz" if ! make -j$JOBS all then echo "Build failed"; exit 1 else echo "WD/PKG: $WD/$PKG" test -d $WD/$PKG && rm -rf $WD/$PKG mkdir $WD/$PKG make DESTDIR=$WD/$PKG install && \ tar -C pkg -c usr | xz -e > "$PN" # Set ownership to current directory ownership chown "$(stat -c '%u:%g' .)" "$PN" echo "Built $PN" make clean fi fi } if [ "$(id -u)" != "0" ] then echo "You must be root to auto-build chroot packages." exit 1 fi if [ "$DO_CHROOT_BUILD" = "1" ] then test -z "$1" && echo "No arch specified" && exit 1 test ! -d "$1" && echo "Not a directory: $1" && exit 1 cd $1 export WD="$1" do_build echo "finished: $1" exit else echo baz export DO_CHROOT_BUILD=1 for ARCH in $ARCHES do export ARCH export CHROOT="$CHROOT_BASE/$ARCH" test ! -d $CHROOT && echo "$CHROOT not present, not building $ARCH package." && continue echo "Performing package build for $CHROOT" test ! -x $CHROOT/bin/sh && echo "$CHROOT does not seem to be a chroot; aborting." && exit 1 mount --bind /dev $CHROOT/dev || clean_exit mount --bind /usr/src $CHROOT/usr/src || clean_exit mount --bind /home $CHROOT/home || clean_exit mount -t proc proc $CHROOT/proc || clean_exit mount -t sysfs sysfs $CHROOT/sys || clean_exit mount -t tmpfs tmpfs $CHROOT/tmp || clean_exit if echo "$ARCH" | grep -q "i386" then linux32 chroot $CHROOT $WD/$0 $WD else chroot $CHROOT $WD/$0 $WD fi umount $CHROOT/proc $CHROOT/sys $CHROOT/tmp $CHROOT/dev $CHROOT/usr/src $CHROOT/home test -d $WD/$PKG && rm -rf $WD/$PKG done fi jdupes-1.27.3/compare_jdupes.sh000077500000000000000000000017031447252140200164370ustar00rootroot00000000000000#!/bin/bash # Runs the installed *dupes* binary and the built binary and compares # the output for sameness. Also displays timing statistics. ERR=0 # Detect installed jdupes if [ -z "$ORIG_JDUPES" ] then jdupes -v 2>/dev/null >/dev/null && ORIG_JDUPES=jdupes test ! -z "$WINDIR" && "$WINDIR/jdupes.exe" -v 2>/dev/null >/dev/null && ORIG_JDUPES="$WINDIR/jdupes.exe" fi if [ ! $ORIG_JDUPES -v 2>/dev/null >/dev/null ] then echo "Can't run installed jdupes" echo "To manually specify an original jdupes, use: ORIG_JDUPES=path/to/jdupes $0" exit 1 fi test ! -e ./jdupes && echo "Build jdupes first, silly" && exit 1 echo -n "Installed $ORIG_JDUPES:" sync time $ORIG_JDUPES -q "$@" > installed_output.txt || ERR=1 echo -en "\nBuilt jdupes:" sync time ./jdupes -q "$@" > built_output.txt || ERR=1 diff -Nau installed_output.txt built_output.txt rm -f installed_output.txt built_output.txt test "$ERR" != "0" && echo "Errors were returned during execution" jdupes-1.27.3/docker/000077500000000000000000000000001447252140200143465ustar00rootroot00000000000000jdupes-1.27.3/docker/alpine.Dockerfile000066400000000000000000000003771447252140200176160ustar00rootroot00000000000000FROM alpine:latest as builder RUN apk update && apk add --no-cache gcc make musl-dev COPY . . RUN make && make install FROM alpine:latest as runner COPY --from=builder /usr/local/bin/jdupes /usr/local/bin/jdupes ENTRYPOINT [ "/usr/local/bin/jdupes" ] jdupes-1.27.3/docker/slim.Dockerfile000066400000000000000000000003161447252140200173030ustar00rootroot00000000000000FROM gcc:bullseye as builder COPY . . 
RUN make && make install FROM debian:bullseye-slim as runner COPY --from=builder /usr/local/bin/jdupes /usr/local/bin/jdupes ENTRYPOINT [ "/usr/local/bin/jdupes" ] jdupes-1.27.3/dumpflags.c000066400000000000000000000061241447252140200152300ustar00rootroot00000000000000/* Debug flag dumping * This file is part of jdupes; see jdupes.c for license information */ #include #include "jdupes.h" #ifdef DEBUG void dump_all_flags(void) { fprintf(stderr, "\nSet flag dump:"); /* Behavior modification flags */ if (ISFLAG(flags, F_RECURSE)) fprintf(stderr, " F_RECURSE"); if (ISFLAG(flags, F_HIDEPROGRESS)) fprintf(stderr, " F_HIDEPROGRESS"); if (ISFLAG(flags, F_SOFTABORT)) fprintf(stderr, " F_SOFTABORT"); if (ISFLAG(flags, F_FOLLOWLINKS)) fprintf(stderr, " F_FOLLOWLINKS"); if (ISFLAG(flags, F_INCLUDEEMPTY)) fprintf(stderr, " F_INCLUDEEMPTY"); if (ISFLAG(flags, F_CONSIDERHARDLINKS)) fprintf(stderr, " F_CONSIDERHARDLINKS"); if (ISFLAG(flags, F_RECURSEAFTER)) fprintf(stderr, " F_RECURSEAFTER"); if (ISFLAG(flags, F_NOPROMPT)) fprintf(stderr, " F_NOPROMPT"); if (ISFLAG(flags, F_EXCLUDEHIDDEN)) fprintf(stderr, " F_EXCLUDEHIDDEN"); if (ISFLAG(flags, F_PERMISSIONS)) fprintf(stderr, " F_PERMISSIONS"); if (ISFLAG(flags, F_EXCLUDESIZE)) fprintf(stderr, " F_EXCLUDESIZE"); if (ISFLAG(flags, F_QUICKCOMPARE)) fprintf(stderr, " F_QUICKCOMPARE"); if (ISFLAG(flags, F_USEPARAMORDER)) fprintf(stderr, " F_USEPARAMORDER"); if (ISFLAG(flags, F_REVERSESORT)) fprintf(stderr, " F_REVERSESORT"); if (ISFLAG(flags, F_ISOLATE)) fprintf(stderr, " F_ISOLATE"); if (ISFLAG(flags, F_ONEFS)) fprintf(stderr, " F_ONEFS"); if (ISFLAG(flags, F_PARTIALONLY)) fprintf(stderr, " F_PARTIALONLY"); if (ISFLAG(flags, F_NOCHANGECHECK)) fprintf(stderr, " F_NOCHANGECHECK"); if (ISFLAG(flags, F_NOTRAVCHECK)) fprintf(stderr, " F_NOTRAVCHECK"); if (ISFLAG(flags, F_SKIPHASH)) fprintf(stderr, " F_SKIPHASH"); if (ISFLAG(flags, F_BENCHMARKSTOP)) fprintf(stderr, " F_BENCHMARKSTOP"); if (ISFLAG(flags, F_HASHDB)) fprintf(stderr, " F_HASHDB"); if (ISFLAG(flags, F_LOUD)) fprintf(stderr, " F_LOUD"); if (ISFLAG(flags, F_DEBUG)) fprintf(stderr, " F_DEBUG"); /* Action-related flags */ if (ISFLAG(a_flags, FA_PRINTMATCHES)) fprintf(stderr, " FA_PRINTMATCHES"); if (ISFLAG(a_flags, FA_PRINTUNIQUE)) fprintf(stderr, " FA_PRINTUNIQUE"); if (ISFLAG(a_flags, FA_OMITFIRST)) fprintf(stderr, " FA_OMITFIRST"); if (ISFLAG(a_flags, FA_SUMMARIZEMATCHES)) fprintf(stderr, " FA_SUMMARIZEMATCHES"); if (ISFLAG(a_flags, FA_DELETEFILES)) fprintf(stderr, " FA_DELETEFILES"); if (ISFLAG(a_flags, FA_SHOWSIZE)) fprintf(stderr, " FA_SHOWSIZE"); if (ISFLAG(a_flags, FA_HARDLINKFILES)) fprintf(stderr, " FA_HARDLINKFILES"); if (ISFLAG(a_flags, FA_DEDUPEFILES)) fprintf(stderr, " FA_DEDUPEFILES"); if (ISFLAG(a_flags, FA_MAKESYMLINKS)) fprintf(stderr, " FA_MAKESYMLINKS"); if (ISFLAG(a_flags, FA_PRINTNULL)) fprintf(stderr, " FA_PRINTNULL"); if (ISFLAG(a_flags, FA_PRINTJSON)) fprintf(stderr, " FA_PRINTJSON"); if (ISFLAG(a_flags, FA_ERRORONDUPE)) fprintf(stderr, " FA_ERRORONDUPE"); /* Extra print flags */ if (ISFLAG(p_flags, PF_PARTIAL)) fprintf(stderr, " PF_PARTIAL"); if (ISFLAG(p_flags, PF_EARLYMATCH)) fprintf(stderr, " PF_EARLYMATCH"); if (ISFLAG(p_flags, PF_FULLHASH)) fprintf(stderr, " PF_FULLHASH"); fprintf(stderr, " [end of list]\n\n"); fflush(stderr); return; } #endif jdupes-1.27.3/dumpflags.h000066400000000000000000000004621447252140200152340ustar00rootroot00000000000000/* Debug flag dumping * This file is part of jdupes; see jdupes.c for license information */ #ifndef JDUPES_DUMPFLAGS_H #define 
JDUPES_DUMPFLAGS_H #ifdef __cplusplus extern "C" { #endif #ifdef DEBUG extern void dump_all_flags(void); #endif #ifdef __cplusplus } #endif #endif /* JDUPES_DUMPFLAGS_H */ jdupes-1.27.3/example_scripts/000077500000000000000000000000001447252140200163015ustar00rootroot00000000000000jdupes-1.27.3/example_scripts/delete_but_exclude_nonposix.sh000066400000000000000000000025671447252140200244310ustar00rootroot00000000000000#!/bin/bash # NOTE: This non-POSIX version is faster but requires bash/ksh/zsh etc. # This is a shell script that deletes match sets like jdupes -dN does, but # excludes any file paths from deletion that match any of the grep regex # patterns passed to the script. Use it like this: # # jdupes whatever | ./delete_but_exclude.sh regex1 [regex2] [...] # Announce what this script does so the user knows what's going on echo "jdupes script - delete duplicates that don't match specified patterns" # If no parameters are passed, give usage info and abort test -z "$1" && echo "usage: $0 regex1 [regex2] [...]" && exit 1 # Exit status will be 0 on success, 1 on any failure EXITSTATUS=0 # Skip the first file in each match set FIRSTFILE=1 while read -r LINE do # Remove Windows CR characters if present in name LINE=${LINE/$'\r'/} # Reset on a blank line; next line will be a first file test -z "$LINE" && FIRSTFILE=1 && continue # If this is the first file, take no action test $FIRSTFILE -eq 1 && FIRSTFILE=0 && echo $'\n'"[+] $LINE" && continue # Move the file specified on the line to the directory specified for RX in "$1" "$2" "$3" "$4" "$5" "$6" "$7" "$8" "$9" do test -z "$RX" && continue if [[ $LINE =~ $RX ]] then echo "[+] $LINE" else if rm -f "$LINE" then echo "[-] $LINE" else echo "[!] $LINE" EXITSTATUS=1 fi fi done done exit $EXITSTATUS jdupes-1.27.3/example_scripts/delete_but_exclude_posix.sh000066400000000000000000000025061447252140200237070ustar00rootroot00000000000000#!/bin/sh # This is a shell script that deletes match sets like jdupes -dN does, but # excludes any file paths from deletion that match any of the grep regex # patterns passed to the script. Use it like this: # # jdupes whatever | ./delete_but_exclude.sh regex1 [regex2] [...] # Announce what this script does so the user knows what's going on echo "jdupes script - delete duplicates that don't match specified patterns" # If no parameters are passed, give usage info and abort test -z "$1" && echo "usage: $0 regex1 [regex2] [...]" && exit 1 # Exit status will be 0 on success, 1 on any failure EXITSTATUS=0 # Skip the first file in each match set FIRSTFILE=1 while read -r LINE do # Remove Windows CR characters if present in name LINE="$(echo "$LINE" | tr -d '\r')" # Reset on a blank line; next line will be a first file test -z "$LINE" && FIRSTFILE=1 && continue # If this is the first file, take no action test $FIRSTFILE -eq 1 && FIRSTFILE=0 && echo $'\n'"[+] $LINE" && continue # Move the file specified on the line to the directory specified for RX in "$1" "$2" "$3" "$4" "$5" "$6" "$7" "$8" "$9" do test -z "$RX" && continue if echo "$LINE" | grep -q "$RX" then echo "[+] $LINE" else if rm -f "$LINE" then echo "[-] $LINE" else echo "[!] $LINE" EXITSTATUS=1 fi fi done done exit $EXITSTATUS jdupes-1.27.3/example_scripts/example.sh000077500000000000000000000036571447252140200203060ustar00rootroot00000000000000#!/bin/sh # This is a shell script that demonstrates how to process the standard # jdupes output (known as "printmatches") to perform custom actions. 
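# In printmatches output, each match set is a group of duplicate file paths
# printed one per line, and match sets are separated by a single blank line;
# the FIRSTFILE/blank-line handling below relies on exactly that structure.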
# Use it like this: # # jdupes whatever_parameters_you_like | ./example.sh script_parameters # # If you are on Windows, jdupes uses backslash path separators which # must be converted to forward slashes before piping to this script, # and carriage returns (\r) must also be deleted from jdupes output # (tested on MSYS2 MinGW, probably true for similar environments): # # jdupes params | tr '\\' / | tr -d '\r' | ./example.sh script_params # # The general structure of jdupes pipe scripts are: # * Initialize conditions # * Iterates through a match set and act on items # * Reset conditions and restart when a blank line is reached # This script moves all duplicate files to a different directory # without duplicating the directory structure. It can be easily # modified to make the required directories and create a "mirror" # consisting of duplicates that 'jdupes -rdN' would delete. # Announce what this script does so the user knows what's going on echo "jdupes example script - moving duplicate files to a directory" # If first parameter isn't a valid directory, give usage info and abort test ! -d "$1" && echo "usage: $0 destination_dir_to_move_files_to" && exit 1 # Exit status will be 0 on success, 1 on any failure EXITSTATUS=0 # Skip the first file in each match set FIRSTFILE=1 while read LINE do echo "$LINE" # Reset on a blank line; next line will be a first file test -z "$LINE" && FIRSTFILE=1 && continue # If this is the first file, take no action test $FIRSTFILE -eq 1 && FIRSTFILE=0 && continue # Move the file specified on the line to the directory specified if mv -f "$LINE" "$1" then # Print the action that was taken echo "'$LINE' => '$1/$(basename "$LINE")'" else echo "Failed to move: '$LINE' => '$1/$(basename "$LINE")'" >&2 EXITSTATUS=1 fi done exit $EXITSTATUS jdupes-1.27.3/example_scripts/fdupes_oneline.sh000077500000000000000000000010151447252140200216340ustar00rootroot00000000000000#!/bin/sh # Emulates fdupes -1 output # Usage: jdupes command line | ./fdupes_oneline.sh # This is a newline. IFS=' ' if [ "$1" = "-q" ] || [ "$1" = "--shell-quote" ]; then # This only works with GNU (env printf) or bash (builtin printf). # If you are using dash, change the command to use env printf... 
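# Illustration (hypothetical names): a match set of 'a b.txt' and 'c.txt'
# becomes the single output line "a\ b.txt c.txt"; spaces inside names are
# escaped so that unescaped spaces remain the separators between names.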
escape() { printf '%q ' "$LINE"; } else escape() { printf '%s' "$LINE" | sed 's/\\/\\\\/g; s/ /\\ /g'; printf ' '; } fi while read -r LINE do if [ -z "$LINE" ] then printf '\n' else escape fi done jdupes-1.27.3/extfilter.c000066400000000000000000000252341447252140200152570ustar00rootroot00000000000000/* jdupes extended filters * See jdupes.c for license information */ #ifndef NO_EXTFILTER #include #include #include #include #include #include "helptext.h" #include "jdupes.h" /* Extended filter parameter flags */ #define XF_EXCL_EXT 0x00000001U #define XF_SIZE_EQ 0x00000002U #define XF_SIZE_GT 0x00000004U #define XF_SIZE_LT 0x00000008U #define XF_ONLY_EXT 0x00000010U #define XF_EXCL_STR 0x00000020U #define XF_ONLY_STR 0x00000040U #define XF_DATE_NEWER 0x00000080U #define XF_DATE_OLDER 0x00000100U /* The X-than-or-equal are combination flags */ #define XF_SIZE_GTEQ 0x00000006U #define XF_SIZE_LTEQ 0x0000000aU /* Flags that use a numeric size with optional suffix */ #define XF_REQ_NUMBER 0x0000000eU /* Flags that require a data parameter (after a colon) */ #define XF_REQ_VALUE 0x0000001fU /* Flags that take a date that needs to be converted to time_t seconds */ #define XF_REQ_DATE 0x00000180U /* -X extended filter parameter stack */ struct extfilter { struct extfilter *next; unsigned int flags; int64_t size; /* also used for other large integers */ char param[]; }; struct extfilter_tags { const char * const tag; const uint32_t flags; }; /* Extended filter tree head and static tag list */ static struct extfilter *extfilter_head = NULL; static const struct extfilter_tags extfilter_tags[] = { { "noext", XF_EXCL_EXT }, { "onlyext", XF_ONLY_EXT }, { "size+", XF_SIZE_GT }, { "size-", XF_SIZE_LT }, { "size+=", XF_SIZE_GTEQ }, { "size-=", XF_SIZE_LTEQ }, { "size=", XF_SIZE_EQ }, { "nostr", XF_EXCL_STR }, { "onlystr", XF_ONLY_STR }, { "newer", XF_DATE_NEWER }, { "older", XF_DATE_OLDER }, { NULL, 0 }, }; static void help_text_extfilter(void) { #ifndef NO_HELPTEXT printf("Detailed help for jdupes -X/--ext-filter options\n"); printf("General format: jdupes -X filter[:value][size_suffix]\n\n"); printf("noext:ext1[,ext2,...] \tExclude files with certain extension(s)\n\n"); printf("onlyext:ext1[,ext2,...] \tOnly include files with certain extension(s)\n\n"); printf("size[+-=]:size[suffix] \tOnly Include files matching size criteria\n"); printf(" \tSize specs: + larger, - smaller, = equal to\n"); printf(" \tSpecs can be mixed, i.e. size+=:100k will\n"); printf(" \tonly include files 100KiB or more in size.\n\n"); printf("nostr:text_string \tExclude all paths containing the string\n"); printf("onlystr:text_string \tOnly allow paths containing the string\n"); printf(" \tHINT: you can use these for directories:\n"); printf(" \t-X nostr:/dir_x/ or -X onlystr:/dir_x/\n"); printf("newer:datetime \tOnly include files newer than specified date\n"); printf("older:datetime \tOnly include files older than specified date\n"); printf(" \tDate/time format: \"YYYY-MM-DD HH:MM:SS\"\n"); printf(" \tTime is optional (remember to escape spaces!)\n"); /* printf("\t\n"); */ printf("\nSome filters take no value or multiple values. Filters that can take\n"); printf( "a numeric option generally support the size multipliers K/M/G/T/P/E\n"); printf( "with or without an added iB or B. Multipliers are binary-style unless\n"); printf( "the -B suffix is used, which will use decimal multipliers. For example,\n"); printf( "16k or 16kib = 16384; 16kb = 16000. 
Multipliers are case-insensitive.\n\n"); printf( "Filters have cumulative effects: jdupes -X size+:99 -X size-:101 will\n"); printf( "cause only files of exactly 100 bytes in size to be included.\n\n"); printf( "Extension matching is case-insensitive.\n"); printf( "Path substring matching is case-sensitive.\n"); #else /* NO_HELPTEXT */ version_text(0); #endif /* NO_HELPTEXT */ } /* Does a file have one of these comma-separated extensions? * Returns 1 after any match, 0 if no matches */ static int match_extensions(char *path, const char *extlist) { char *dot; const char *ext; size_t len, extlen; LOUD(fprintf(stderr, "match_extensions('%s', '%s')\n", path, extlist);) if (path == NULL || extlist == NULL) jc_nullptr("match_extensions"); dot = NULL; /* Scan to end of path, save the last dot, reset on path separators */ while (*path != '\0') { if (*path == '.') dot = path; if (*path == '/' || *path == '\\') dot = NULL; path++; } /* No dots in the file name = no extension, so give up now */ if (dot == NULL) return 0; dot++; /* Handle a dot at the end of a file name */ if (*dot == '\0') return 0; /* Get the length of the file's extension for later checking */ extlen = strlen(dot); LOUD(fprintf(stderr, "match_extensions: file has extension '%s' with length %" PRIdMAX "\n", dot, (intmax_t)extlen);) /* dot is now at the location of the last file extension; check the list */ /* Skip any commas at the start of the list */ while (*extlist == ',') extlist++; ext = extlist; len = 0; while (1) { /* Reject upon hitting the end with no more extensions to process */ if (*extlist == '\0' && len == 0) return 0; /* Process extension once a comma or EOL is hit */ if (*extlist == ',' || *extlist == '\0') { /* Skip serial commas */ while (*extlist == ',') extlist++; if (extlist == ext) goto skip_empty; if (jc_strncaseeq(dot, ext, len) == 0 && extlen == len) { LOUD(fprintf(stderr, "match_extensions: matched on extension '%s' (len %" PRIdMAX ")\n", dot, (intmax_t)len);) return 1; } LOUD(fprintf(stderr, "match_extensions: no match: '%s' (%" PRIdMAX "), '%s' (%" PRIdMAX ")\n", dot, (intmax_t)len, ext, (intmax_t)extlen);) skip_empty: ext = extlist; len = 0; continue; } extlist++; len++; /* LOUD(fprintf(stderr, "match_extensions: DEBUG: '%s' : '%s' (%ld), '%s' (%ld)\n", extlist, dot, len, ext, extlen);) */ } return 0; } /* Add a filter to the filter stack */ void add_extfilter(const char *option) { char *opt, *p; time_t tt; struct extfilter *extf = extfilter_head; const struct extfilter_tags *tags = extfilter_tags; const struct jc_size_suffix *ss = jc_size_suffix; if (option == NULL) jc_nullptr("add_extfilter()"); LOUD(fprintf(stderr, "add_extfilter '%s'\n", option);) /* Invoke help text if requested */ if (jc_strcaseeq(option, "help") == 0) { help_text_extfilter(); exit(EXIT_SUCCESS); } opt = malloc(strlen(option) + 1); if (opt == NULL) jc_oom("add_extfilter option"); strcpy(opt, option); p = opt; while (*p != ':' && *p != '\0') p++; /* Split tag string into *opt (tag) and *p (value) */ if (*p == ':') { *p = '\0'; p++; } while (tags->tag != NULL && jc_streq(tags->tag, opt) != 0) tags++; if (tags->tag == NULL) goto error_bad_filter; /* Check for a tag that requires a value */ if (tags->flags & XF_REQ_VALUE && *p == '\0') goto error_value_missing; /* *p is now at the value, NOT the tag string! 
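 * (For a hypothetical '-X size+:100k', opt now holds the tag "size+" and p
 * points at the value "100k"; for a filter given with no colon, p points at
 * an empty string.)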
*/ if (extfilter_head != NULL) { /* Add to end of exclusion stack if head is present */ while (extf->next != NULL) extf = extf->next; extf->next = malloc(sizeof(struct extfilter) + strlen(p) + 1); if (extf->next == NULL) jc_oom("add_extfilter alloc"); extf = extf->next; } else { /* Allocate extfilter_head if no exclusions exist yet */ extfilter_head = malloc(sizeof(struct extfilter) + strlen(p) + 1); if (extfilter_head == NULL) jc_oom("add_extfilter alloc"); extf = extfilter_head; } /* Set tag value from predefined tag array */ extf->flags = tags->flags; /* Initialize the new extfilter element */ extf->next = NULL; if (extf->flags & XF_REQ_NUMBER) { /* Exclude uses a number; handle it with possible suffixes */ *(extf->param) = '\0'; /* Get base size */ if (*p < '0' || *p > '9') goto error_bad_size_suffix; extf->size = strtoll(p, &p, 10); /* Handle suffix, if any */ if (*p != '\0') { while (ss->suffix != NULL && jc_strcaseeq(ss->suffix, p) != 0) ss++; if (ss->suffix == NULL) goto error_bad_size_suffix; extf->size *= ss->multiplier; } } else if (extf->flags & XF_REQ_DATE) { /* Exclude uses a date; convert it to seconds since the epoch */ *(extf->param) = '\0'; tt = jc_strtoepoch(p); LOUD(fprintf(stderr, "extfilter: jody_strtoepoch: '%s' -> %" PRIdMAX "\n", p, (intmax_t)tt);) if (tt == -1) goto error_bad_time; extf->size = tt; } else { /* Exclude uses string data; just copy it */ extf->size = 0; if (*p != '\0') strcpy(extf->param, p); else *(extf->param) = '\0'; } LOUD(fprintf(stderr, "Added extfilter: tag '%s', data '%s', size %lld, flags %d\n", opt, extf->param, (long long)extf->size, extf->flags);) free(opt); return; error_bad_time: fprintf(stderr, "Invalid extfilter date[time] was specified: -X filter:datetime\n"); goto extf_help_and_exit; error_value_missing: fprintf(stderr, "extfilter value missing or invalid: -X filter:value\n"); goto extf_help_and_exit; error_bad_filter: fprintf(stderr, "Invalid extfilter filter name was specified\n"); goto extf_help_and_exit; error_bad_size_suffix: fprintf(stderr, "Invalid extfilter size suffix specified; use B or KMGTPE[i][B]\n"); goto extf_help_and_exit; extf_help_and_exit: help_text_extfilter(); exit(EXIT_FAILURE); } /* Exclude single files based on extended filter stack; return 0 = exclude */ int extfilter_exclude(file_t * const restrict newfile) { for (struct extfilter *extf = extfilter_head; extf != NULL; extf = extf->next) { uint32_t sflag = extf->flags; LOUD(fprintf(stderr, "check_singlefile: extfilter check: %08x %" PRIdMAX " %" PRIdMAX " %s\n", sflag, (intmax_t)newfile->size, (intmax_t)extf->size, newfile->d_name);) if ( /* Any line that passes will result in file exclusion */ ((sflag == XF_SIZE_EQ) && (newfile->size != extf->size)) || ((sflag == XF_SIZE_LTEQ) && (newfile->size > extf->size)) || ((sflag == XF_SIZE_GTEQ) && (newfile->size < extf->size)) || ((sflag == XF_SIZE_GT) && (newfile->size <= extf->size)) || ((sflag == XF_SIZE_LT) && (newfile->size >= extf->size)) || ((sflag == XF_EXCL_EXT) && match_extensions(newfile->d_name, extf->param)) || ((sflag == XF_ONLY_EXT) && !match_extensions(newfile->d_name, extf->param)) || ((sflag == XF_EXCL_STR) && strstr(newfile->d_name, extf->param)) || ((sflag == XF_ONLY_STR) && !strstr(newfile->d_name, extf->param)) #ifndef NO_MTIME || ((sflag == XF_DATE_NEWER) && (newfile->mtime < extf->size)) || ((sflag == XF_DATE_OLDER) && (newfile->mtime >= extf->size)) #endif ) return 1; } return 0; } #endif /* NO_EXTFILTER */ 
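/* Usage sketch (values hypothetical): because extfilter_exclude() above
 * excludes a file as soon as any one filter's exclusion test fires, stacked
 * -X options have AND semantics for inclusion. For example:
 *   jdupes -r -X size+=:100k -X onlyext:jpg,png pics/
 * builds two extfilter nodes, and a file is considered only if it is at
 * least 100 KiB in size AND carries a .jpg or .png extension. */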
jdupes-1.27.3/extfilter.h000066400000000000000000000006121447252140200152550ustar00rootroot00000000000000/* jdupes extended filters * See jdupes.c for license information */ #ifndef JDUPES_EXTFILTER_H #define JDUPES_EXTFILTER_H #ifdef __cplusplus extern "C" { #endif #ifndef NO_EXTFILTER #include "jdupes.h" void add_extfilter(const char *option); int extfilter_exclude(file_t * const restrict newfile); #endif /* NO_EXTFILTER */ #ifdef __cplusplus } #endif #endif /* JDUPES_EXTFILTER_H */ jdupes-1.27.3/filehash.c000066400000000000000000000143241447252140200150320ustar00rootroot00000000000000/* jdupes file hashing function * This file is part of jdupes; see jdupes.c for license information */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include "likely_unlikely.h" #include "filehash.h" #include "interrupt.h" #include "progress.h" #include "jdupes.h" #include "xxhash.h" const char *hash_algo_list[2] = { "xxHash64 v2", "jodyhash v7" }; /* Hash part or all of a file * * READ THIS BEFORE CHANGING THE HASH FUNCTION! * The hash function is only used to do fast exclusion. There is not much * benefit to using bigger or "better" hash functions. Upstream jdupes WILL * NOT accept any pull requests that change the hash function unless there * is an EXTREMELY compelling reason to do so. Do not waste your time with * swapping hash functions. If you want to do it for fun then that's fine. */ uint64_t *get_filehash(const file_t * const restrict checkfile, const size_t max_read, int algo) { off_t fsize; /* This is an array because we return a pointer to it */ static uint64_t hash[1]; static uint64_t *chunk = NULL; FILE *file = NULL; int hashing = 0; #ifndef NO_XXHASH2 XXH64_state_t *xxhstate = NULL; #endif #ifdef __linux__ int filenum; #endif if (unlikely(checkfile == NULL || checkfile->d_name == NULL)) jc_nullptr("get_filehash()"); if (unlikely((algo > HASH_ALGO_COUNT - 1) || (algo < 0))) goto error_bad_hash_algo; LOUD(fprintf(stderr, "get_filehash('%s', %" PRIdMAX ")\n", checkfile->d_name, (intmax_t)max_read);) /* Allocate on first use */ if (unlikely(chunk == NULL)) { chunk = (uint64_t *)malloc(auto_chunk_size); if (unlikely(!chunk)) jc_oom("get_filehash() chunk"); } /* Get the file size. If we can't read it, bail out early */ if (unlikely(checkfile->size == -1)) { LOUD(fprintf(stderr, "get_filehash: not hashing because stat() info is bad\n")); return NULL; } fsize = checkfile->size; /* Do not read more than the requested number of bytes */ if (max_read > 0 && fsize > (off_t)max_read) fsize = (off_t)max_read; /* Initialize the hash and file read parameters (with filehash_partial skipped) * * If we already hashed the first chunk of this file, we don't want to * wastefully read and hash it again, so skip the first chunk and use * the computed hash for that chunk as our starting point. 
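 * Concretely, the code below seeds *hash with filehash_partial, seeks past
 * the first PARTIAL_HASH_SIZE bytes, and hashes only the remainder, so the
 * first chunk of each file is read and hashed at most once.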
*/ *hash = 0; if (ISFLAG(checkfile->flags, FF_HASH_PARTIAL)) { *hash = checkfile->filehash_partial; /* Don't bother going further if max_read is already fulfilled */ if (max_read != 0 && max_read <= PARTIAL_HASH_SIZE) { LOUD(fprintf(stderr, "Partial hash size (%d) >= max_read (%" PRIuMAX "), not hashing anymore\n", PARTIAL_HASH_SIZE, (uintmax_t)max_read);) return hash; } } errno = 0; #ifdef UNICODE if (!M2W(checkfile->d_name, wstr)) file = NULL; else file = _wfopen(wstr, FILE_MODE_RO); #else file = fopen(checkfile->d_name, FILE_MODE_RO); #endif /* UNICODE */ if (file == NULL) { fprintf(stderr, "\n%s error opening file ", strerror(errno)); jc_fwprint(stderr, checkfile->d_name, 1); return NULL; } /* Actually seek past the first chunk if applicable * This is part of the filehash_partial skip optimization */ if (ISFLAG(checkfile->flags, FF_HASH_PARTIAL)) { if (fseeko(file, PARTIAL_HASH_SIZE, SEEK_SET) == -1) { fclose(file); fprintf(stderr, "\nerror seeking in file "); jc_fwprint(stderr, checkfile->d_name, 1); return NULL; } fsize -= PARTIAL_HASH_SIZE; #ifdef __linux__ filenum = fileno(file); posix_fadvise(filenum, PARTIAL_HASH_SIZE, fsize, POSIX_FADV_SEQUENTIAL); posix_fadvise(filenum, PARTIAL_HASH_SIZE, fsize, POSIX_FADV_WILLNEED); #endif /* __linux__ */ } else { #ifdef __linux__ filenum = fileno(file); posix_fadvise(filenum, 0, fsize, POSIX_FADV_SEQUENTIAL); posix_fadvise(filenum, 0, fsize, POSIX_FADV_WILLNEED); #endif /* __linux__ */ } /* WARNING: READ NOTICE ABOVE get_filehash() BEFORE CHANGING HASH FUNCTIONS! */ #ifndef NO_XXHASH2 if (algo == HASH_ALGO_XXHASH2_64) { xxhstate = XXH64_createState(); if (unlikely(xxhstate == NULL)) jc_nullptr("xxhstate"); XXH64_reset(xxhstate, 0); } #endif /* NO_XXHASH2 */ /* Read the file in chunks until we've read it all. */ while (fsize > 0) { size_t bytes_to_read; if (interrupt) return 0; bytes_to_read = (fsize >= (off_t)auto_chunk_size) ? 
auto_chunk_size : (size_t)fsize; if (unlikely(fread((void *)chunk, bytes_to_read, 1, file) != 1)) goto error_reading_file; switch (algo) { #ifndef NO_XXHASH2 case HASH_ALGO_XXHASH2_64: if (unlikely(XXH64_update(xxhstate, chunk, bytes_to_read) != XXH_OK)) goto error_reading_file; break; #endif case HASH_ALGO_JODYHASH64: if (unlikely(jc_block_hash(chunk, hash, bytes_to_read) != 0)) goto error_reading_file; break; default: goto error_bad_hash_algo; } if ((off_t)bytes_to_read > fsize) break; else fsize -= (off_t)bytes_to_read; check_sigusr1(); if (jc_alarm_ring != 0) { jc_alarm_ring = 0; /* Only show "hashing" part if hashing one file updates progress at least twice */ if (hashing == 1) { update_phase2_progress("hashing", (int)(((checkfile->size - fsize) * 100) / checkfile->size)); } else { update_phase2_progress(NULL, -1); hashing = 1; } } continue; } fclose(file); #ifndef NO_XXHASH2 if (algo == HASH_ALGO_XXHASH2_64) { *hash = XXH64_digest(xxhstate); XXH64_freeState(xxhstate); } #endif /* NO_XXHASH2 */ LOUD(fprintf(stderr, "get_filehash: returning hash: 0x%016jx\n", (uintmax_t)*hash)); return hash; error_reading_file: fprintf(stderr, "\nerror reading from file "); jc_fwprint(stderr, checkfile->d_name, 1); fclose(file); return NULL; error_bad_hash_algo: if ((hash_algo > HASH_ALGO_COUNT) || (hash_algo < 0)) fprintf(stderr, "\nerror: requested hash algorithm %d is not available", hash_algo); else fprintf(stderr, "\nerror: requested hash algorithm %s [%d] is not available", hash_algo_list[hash_algo], hash_algo); fclose(file); return NULL; } jdupes-1.27.3/filehash.h000066400000000000000000000010071447252140200150310ustar00rootroot00000000000000/* jdupes file hashing function * This file is part of jdupes; see jdupes.c for license information */ #ifndef JDUPES_FILEHASH_H #define JDUPES_FILEHASH_H #ifdef __cplusplus extern "C" { #endif #define HASH_ALGO_COUNT 2 extern const char *hash_algo_list[HASH_ALGO_COUNT]; #define HASH_ALGO_XXHASH2_64 0 #define HASH_ALGO_JODYHASH64 1 #include "jdupes.h" uint64_t *get_filehash(const file_t * const restrict checkfile, const size_t max_read, int algo); #ifdef __cplusplus } #endif #endif /* JDUPES_FILEHASH_H */ jdupes-1.27.3/filestat.c000066400000000000000000000072361447252140200150660ustar00rootroot00000000000000/* jdupes (C) 2015-2023 Jody Bruchon Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ #include #include #include "jdupes.h" #include "likely_unlikely.h" /* Check file's stat() info to make sure nothing has changed * Returns 1 if changed, 0 if not changed, negative if error */ int file_has_changed(file_t * const restrict file) { /* If -t/--no-change-check specified then completely bypass this code */ if (ISFLAG(flags, F_NOCHANGECHECK)) return 0; if (unlikely(file == NULL || file->d_name == NULL)) jc_nullptr("file_has_changed()"); LOUD(fprintf(stderr, "file_has_changed('%s')\n", file->d_name);) if (!ISFLAG(file->flags, FF_VALID_STAT)) return -66; if (STAT(file->d_name, &s) != 0) return -2; if (file->inode != s.st_ino) return 1; if (file->size != s.st_size) return 1; if (file->device != s.st_dev) return 1; if (file->mode != s.st_mode) return 1; #ifndef NO_MTIME if (file->mtime != s.st_mtime) return 1; #endif #ifndef NO_PERMS if (file->uid != s.st_uid) return 1; if (file->gid != s.st_gid) return 1; #endif #ifndef NO_SYMLINKS if (lstat(file->d_name, &s) != 0) return -3; if ((S_ISLNK(s.st_mode) > 0) ^ ISFLAG(file->flags, FF_IS_SYMLINK)) return 1; #endif return 0; } int getfilestats(file_t * const restrict file) { if (unlikely(file == NULL || file->d_name == NULL)) jc_nullptr("getfilestats()"); LOUD(fprintf(stderr, "getfilestats('%s')\n", file->d_name);) /* Don't stat the same file more than once */ if (ISFLAG(file->flags, FF_VALID_STAT)) return 0; SETFLAG(file->flags, FF_VALID_STAT); if (STAT(file->d_name, &s) != 0) return -1; file->size = s.st_size; file->inode = s.st_ino; file->device = s.st_dev; #ifndef NO_MTIME file->mtime = s.st_mtime; #endif #ifndef NO_ATIME file->atime = s.st_atime; #endif file->mode = s.st_mode; #ifndef NO_HARDLINKS file->nlink = s.st_nlink; #endif #ifndef NO_PERMS file->uid = s.st_uid; file->gid = s.st_gid; #endif #ifndef NO_SYMLINKS if (lstat(file->d_name, &s) != 0) return -1; if (S_ISLNK(s.st_mode) > 0) SETFLAG(file->flags, FF_IS_SYMLINK); #endif return 0; } /* Returns -1 if stat() fails, 0 if it's a directory, 1 if it's not */ int getdirstats(const char * const restrict name, jdupes_ino_t * const restrict inode, dev_t * const restrict dev, jdupes_mode_t * const restrict mode) { if (unlikely(name == NULL || inode == NULL || dev == NULL)) jc_nullptr("getdirstats"); LOUD(fprintf(stderr, "getdirstats('%s', %p, %p)\n", name, (void *)inode, (void *)dev);) if (STAT(name, &s) != 0) return -1; *inode = s.st_ino; *dev = s.st_dev; *mode = s.st_mode; if (!S_ISDIR(s.st_mode)) return 1; return 0; } jdupes-1.27.3/filestat.h000066400000000000000000000011541447252140200150640ustar00rootroot00000000000000/* jdupes file/dir stat()-related functions * This file is part of jdupes; see jdupes.c for license information */ #ifndef JDUPES_FILESTAT_H #define JDUPES_FILESTAT_H #ifdef __cplusplus extern "C" { #endif #include "jdupes.h" int file_has_changed(file_t * const restrict file); int getfilestats(file_t * const restrict file); /* Returns -1 if stat() fails, 0 if it's a directory, 1 if it's not */ int getdirstats(const char * const restrict name, jdupes_ino_t * const restrict inode, dev_t * const restrict dev, jdupes_mode_t * const restrict mode); #ifdef __cplusplus } #endif #endif /* JDUPES_FILESTAT_H */ jdupes-1.27.3/generate_packages.sh000077500000000000000000000103021447252140200170620ustar00rootroot00000000000000#!/bin/bash # Generate package folders with variant builds # Number of parallel make processes if [ -z "$PM" ] then PM=12 [ -d /sys/devices/system/cpu ] && \ PM=$(find /sys/devices/system/cpu -maxdepth 1 -mindepth 1 -type d | grep '/cpu[0-9][0-9]*' | wc -l) 
&& \ PM=$((PM * 2)) fi NAME="jdupes" VER="$(cat version.h | grep '#define VER "' | cut -d\" -f2)" echo "Program version: $VER" [ -z "$TA" ] && TA=__NONE__ [ ! -z "$1" ] && ARCH="$1" [[ "$ARCH" = "linux-x64" || "$ARCH" = "x86_64" || "$ARCH" = "x86-64" ]] && TA=linux && ARCH=x86_64 && CF=-m64 [[ "$ARCH" = "linux-x32" || "$ARCH" = "x32" ]] && TA=linux && ARCH=x32 && CF=-mx32 [[ "$ARCH" = "linux-i686" || "$ARCH" = "linux-i386" || "$ARCH" = "i686" || "$ARCH" = "i386" ]] && TA=linux && ARCH=i386 && CF=-m32 UNAME_S="$(uname -s | tr '[:upper:]' '[:lower:]')" UNAME_P="$(uname -p)" UNAME_M="$(uname -m)" # Detect macOS if [[ "$TA" = "macos" || "$UNAME_S" = "darwin" ]] then PKGTYPE=zip TA=mac32 test "$UNAME_M" = "x86_64" && TA=mac64 fi # Detect Power Macs under macOS if [[ "$TA" = "macppc" || "$UNAME_P" = "Power Macintosh" || "$UNAME_P" = "powerpc" ]] then TA=macppc32 test "$(sysctl hw.cpu64bit_capable)" = "hw.cpu64bit_capable: 1" && TA=macppc64 [ -z "$PKGTYPE" ] && PKGTYPE=zip fi # Detect Linux if [[ "$TA" = "linux" || "$UNAME_S" = "linux" ]] then TA="linux-$UNAME_M" [ ! -z "$ARCH" ] && TA="linux-$ARCH" [ -z "$PKGTYPE" ] && PKGTYPE=xz fi # Fall through - assume Windows if [[ "$TA" = "windows" || "$TA" = "__NONE__" ]] then [ -z "$PKGTYPE" ] && PKGTYPE=zip [ -z "$ARCH" ] && ARCH=$(gcc -v 2>&1 | grep Target | cut -d\ -f2- | cut -d- -f1) [[ "$ARCH" = "i686" || "$ARCH" = "i386" ]] && TA=win32 [ "$ARCH" = "x86_64" ] && TA=win64 [ "$UNAME_S" = "MINGW32_NT-5.1" ] && TA=winxp EXT=".exe" fi echo "Target architecture: $TA" test "$TA" = "__NONE__" && echo "Failed to detect system type" && exit 1 PKGNAME="${NAME}-${VER}-$TA" echo "Generating package for: $PKGNAME" mkdir -p "$PKGNAME" || exit 1 test ! -d "$PKGNAME" && echo "Can't create directory for package" && exit 1 cp CHANGES.txt README.md LICENSE.txt $PKGNAME/ || exit 1 if [ -d "../libjodycode" ] then echo "Rebuilding nearby libjodycode first" WD="$(pwd)" cd ../libjodycode make clean && make -j$PM CFLAGS_EXTRA="$CF" cd "$WD" fi E1=1; E2=1; E3=1; E4=1 make clean && make CFLAGS_EXTRA="$CF" -j$PM ENABLE_DEDUPE=1 static_jc stripped && cp $NAME$EXT $PKGNAME/$NAME$EXT && E1=0 make clean && make CFLAGS_EXTRA="$CF" -j$PM ENABLE_DEDUPE=1 LOUD=1 static_jc stripped && cp $NAME$EXT $PKGNAME/${NAME}-loud$EXT && E2=0 make clean && make CFLAGS_EXTRA="$CF" -j$PM LOW_MEMORY=1 static_jc stripped && cp $NAME$EXT $PKGNAME/${NAME}-lowmem$EXT && E3=0 make clean && make CFLAGS_EXTRA="$CF" -j$PM BARE_BONES=1 static_jc stripped && cp $NAME$EXT $PKGNAME/${NAME}-barebones$EXT && E4=0 strip ${PKGNAME}/${NAME}*$EXT make clean test $((E1 + E2 + E3 + E4)) -gt 0 && echo "Error building packages; aborting." && exit 1 # Make a fat binary on macOS x86_64 if possible if [ "$TA" = "mac64" ] && ld -v 2>&1 | grep -q 'archs:.*i386' then ERR=0 TYPE=-i386; CE=-m32 # On macOS Big Sur (Darwin 20) or higher, try to build a x86_64 + arm64 binary [ $(uname -r | cut -d. -f1) -ge 20 ] && TYPE=-arm64 && CE="-target arm64-apple-macos11" if [ -d "../libjodycode" ] then echo "Rebuilding nearby libjodycode first" WD="$(pwd)" cd ../libjodycode make clean && make -j$PM CFLAGS_EXTRA="$CE" cd "$WD" fi for X in '' '-loud' '-lowmem' '-barebones' do make clean && make -j$PM CFLAGS_EXTRA="$CE" stripped && cp $NAME$EXT $PKGNAME/$NAME$X$EXT$TYPE || ERR=1 [ $ERR -eq 0 ] && lipo -create -output $PKGNAME/jdupes_temp $PKGNAME/$NAME$X$EXT$TYPE $PKGNAME/$NAME$X$EXT && mv $PKGNAME/jdupes_temp $PKGNAME/$NAME$X$EXT done make clean test $ERR -gt 0 && echo "Error building packages; aborting." 
&& exit 1 rm -f $PKGNAME/$NAME$EXT$TYPE $PKGNAME/$NAME-loud$EXT$TYPE $PKGNAME/$NAME-lowmem$EXT$TYPE $PKGNAME/$NAME-barebones$EXT$TYPE fi test "$PKGTYPE" = "zip" && zip -9r $PKGNAME.zip $PKGNAME/ test "$PKGTYPE" = "tar" && tar -c $PKGNAME/ > $PKGNAME.pkg.tar test "$PKGTYPE" = "gz" && tar -c $PKGNAME/ | gzip -9 > $PKGNAME.pkg.tar.gz test "$PKGTYPE" = "xz" && tar -c $PKGNAME/ | xz -e > $PKGNAME.pkg.tar.xz echo "Package generation complete." jdupes-1.27.3/hashdb.c000066400000000000000000000405451447252140200145040ustar00rootroot00000000000000/* File hash database management * This file is part of jdupes; see jdupes.c for license information */ #include #include #include #include #include #include #include #include #include "jdupes.h" #include "libjodycode.h" #include "likely_unlikely.h" #include "hashdb.h" #define HASHDB_VER 2 #define HASHDB_MIN_VER 1 #define HASHDB_MAX_VER 2 #ifndef PH_SHIFT #define PH_SHIFT 12 #endif #define SECS_TO_TIME(a,b) strftime(a, 32, "%F %T", localtime(b)); #ifndef HT_SIZE #define HT_SIZE 131072 #endif #define HT_MASK (HT_SIZE - 1) static hashdb_t *hashdb[HT_SIZE]; static int hashdb_init = 0; static int hashdb_algo = 0; static int hashdb_dirty = 0; /* Pivot direction for rebalance */ enum pivot { PIVOT_LEFT, PIVOT_RIGHT }; static int write_hashdb_entry(FILE *db, hashdb_t *cur, uint64_t *cnt, const int destroy); static int get_path_hash(char *path, uint64_t *path_hash); #if 0 /* Hex dump 16 bytes of memory */ void hd16(char *a) { int i; printf("DUMP Hex: "); for (i = 0; i < 16; i++) printf("%x", a[i]); printf("\nDUMP ASCII: "); for (i = 0; i < 16; i++) printf("%c", a[i]); printf("\n"); return; } #endif static hashdb_t *alloc_hashdb_node(const int pathlen) { int allocsize; allocsize = sizeof(hashdb_t) + pathlen + 1; return (hashdb_t *)calloc(1, EXTEND64(allocsize)); } /* destroy = 1 will free() all nodes while saving */ int save_hash_database(const char * const restrict dbname, const int destroy) { FILE *db = NULL; uint64_t cnt = 0; if (dbname == NULL) goto error_hashdb_null; LOUD(fprintf(stderr, "save_hash_database('%s')\n", dbname);) /* Don't save the hash database if it wasn't changed */ if (hashdb_dirty == 0 && destroy == 0) return 0; if (hashdb_dirty == 1) { errno = 0; db = fopen(dbname, "w+b"); if (db == NULL) goto error_hashdb_open; } if (write_hashdb_entry(db, NULL, &cnt, destroy) != 0) goto error_hashdb_write; if (hashdb_dirty == 1) { fclose(db); LOUD(if (hashdb_dirty == 1) fprintf(stderr, "Wrote %" PRIu64 " items to hash databse '%s'\n", cnt, dbname);) hashdb_dirty = 0; } return cnt; error_hashdb_null: fprintf(stderr, "error: internal failure: NULL pointer for hashdb\n"); return -1; error_hashdb_open: fprintf(stderr, "error: cannot open hashdb '%s' for writing: %s\n", dbname, strerror(errno)); return -2; error_hashdb_write: fprintf(stderr, "error: writing failed to hashdb '%s': %s\n", dbname, strerror(errno)); fclose(db); return -3; } static int write_hashdb_entry(FILE *db, hashdb_t *cur, uint64_t *cnt, const int destroy) { struct timeval tm; int err = 0; static char out[PATH_MAX + 128]; /* Write header and traverse array on first call */ if (unlikely(cur == NULL)) { if (hashdb_dirty == 1) { gettimeofday(&tm, NULL); snprintf(out, PATH_MAX + 127, "jdupes hashdb:%d,%d,%08lx\n", HASHDB_VER, hash_algo, (unsigned long)tm.tv_sec); LOUD(fprintf(stderr, "write hashdb: %s", out);) errno = 0; fputs(out, db); if (errno != 0) return 1; } /* Write out each hash bucket, skipping empty buckets */ for (int i = 0; i < HT_SIZE; i++) { if (hashdb[i] == NULL) continue; err = 
write_hashdb_entry(db, hashdb[i], cnt, destroy); if (err != 0) return err; } if (destroy == 1) { memset(hashdb, 0, sizeof(hashdb_t *) * HT_SIZE); hashdb_init = 0; } return 0; } /* Write out this node if it wasn't invalidated */ if (hashdb_dirty == 1 && cur->hashcount != 0) { snprintf(out, PATH_MAX + 127, "%u,%016" PRIx64 ",%016" PRIx64 ",%016" PRIx64 ",%016" PRIx64 ",%016" PRIx64 ",%s\n", cur->hashcount, cur->partialhash, cur->fullhash, (uint64_t)cur->mtime, (uint64_t)cur->size, (uint64_t)cur->inode, cur->path); (*cnt)++; LOUD(fprintf(stderr, "write hashdb: %s", out);) errno = 0; fputs(out, db); if (errno != 0) return 1; } /* Traverse the tree, propagating errors */ if (cur->left != NULL) err = write_hashdb_entry(db, cur->left, cnt, destroy); if (err == 0 && cur->right != NULL) err = write_hashdb_entry(db, cur->right, cnt, destroy); if (destroy == 1) { free(cur); } return err; } #if 0 void dump_hashdb(hashdb_t *cur) { struct timeval tm; if (cur == NULL) { gettimeofday(&tm, NULL); printf("jdupes hashdb:1,%d,%08lx\n", hash_algo, tm.tv_sec); for (int i = 0; i < HT_SIZE; i++) { if (hashdb[i] == NULL) continue; dump_hashdb(hashdb[i]); } return; } /* db line format: hashcount,partial,full,mtime,path */ #ifdef ON_WINDOWS if (cur->hashcount != 0) printf("%u,%016llx,%016llx,%08llx,%08llx,%016llx,%s\n", cur->hashcount, cur->partialhash, cur->fullhash, cur->mtime, cur->size, cur->inode, cur->path); #else if (cur->hashcount != 0) printf("%u,%016lx,%016lx,%08lx,%08lx,%016lx,%s\n", cur->hashcount, cur->partialhash, cur->fullhash, cur->mtime, cur->size, cur->inode, cur->path); #endif if (cur->left != NULL) dump_hashdb(cur->left); if (cur->right != NULL) dump_hashdb(cur->right); return; } #endif static void pivot_hashdb_tree(hashdb_t **parent, hashdb_t *cur, enum pivot direction) { hashdb_t *temp; if (direction == PIVOT_LEFT) { temp = cur->right; cur->right = cur->right->left; temp->left = cur; *parent = temp; } else { // PIVOT_RIGHT temp = cur->left; cur->left = cur->left->right; temp->right = cur; *parent = temp; } return; } static void rebalance_hashdb_tree(hashdb_t **parent) { const uint64_t center = 0x8000000000000000ULL; hashdb_t *cur = *parent; if (unlikely(cur == NULL || parent == NULL)) return; if (cur->left == NULL && cur->right == NULL) return; if (cur->left != NULL) rebalance_hashdb_tree(&(cur->left)); if (cur->right != NULL) rebalance_hashdb_tree(&(cur->right)); if (cur->path_hash > center) { /* This node might be better off to the right */ if (cur->left != NULL && cur->left->path_hash > center) pivot_hashdb_tree(parent, cur, PIVOT_RIGHT); } else if (cur->path_hash < center) { /* This node might be better off to the left */ if (cur->right != NULL && cur->right->path_hash < center) pivot_hashdb_tree(parent, cur, PIVOT_LEFT); } return; } /* in_pathlen allows use of a precomputed path length to avoid extra strlen() calls */ hashdb_t *add_hashdb_entry(char *in_path, int pathlen, const file_t *check) { unsigned int bucket; hashdb_t *file; hashdb_t *cur; uint64_t path_hash; int exclude; int difference; char *path; /* Allocate hashdb on first use */ if (unlikely(hashdb_init == 0)) { memset(hashdb, 0, sizeof(hashdb_t *) * HT_SIZE); hashdb_init = 1; } if (unlikely((in_path == NULL && check == NULL) || (check != NULL && check->d_name == NULL))) return NULL; /* Get path hash and length from supplied path; use hash to choose the bucket */ if (in_path == NULL) path = check->d_name; else path = in_path; if (pathlen == 0) pathlen = strlen(path); if (get_path_hash(path, &path_hash) != 0) return NULL; bucket 
= path_hash & HT_MASK; if (hashdb[bucket] == NULL) { file = alloc_hashdb_node(pathlen); if (file == NULL) return NULL; file->path_hash = path_hash; hashdb[bucket] = file; } else { cur = hashdb[bucket]; difference = 0; while (1) { /* If path is set then this entry may already exist and we need to check */ if (check != NULL && cur->path != NULL) { if (cur->path_hash == path_hash && strcmp(cur->path, check->d_name) == 0) { /* Should we invalidate this entry? */ exclude = 0; if (cur->mtime != check->mtime) exclude |= 1; if (cur->inode != check->inode) exclude |= 2; if (cur->size != check->size) exclude |= 4; if (exclude == 0) { if (cur->hashcount == 1 && ISFLAG(check->flags, FF_HASH_FULL)) { cur->hashcount = 2; cur->fullhash = check->filehash; hashdb_dirty = 1; } return cur; } else { /* Something changed; invalidate this entry */ cur->hashcount = 0; hashdb_dirty = 1; return NULL; } } } if (cur->path_hash >= path_hash) { if (cur->left == NULL) { file = alloc_hashdb_node(pathlen); if (file == NULL) return NULL; cur->left = file; break; } else { cur = cur->left; difference--; continue; } } else { if (cur->right == NULL) { file = alloc_hashdb_node(pathlen); if (file == NULL) return NULL; cur->right = file; break; } else { cur = cur->right; difference++; continue; } } } if (difference < 0) difference = -difference; if (difference > 64) { rebalance_hashdb_tree(&(hashdb[bucket])); } } /* If a check entry was given then populate it */ if (check != NULL && check->d_name != NULL && ISFLAG(check->flags, FF_HASH_PARTIAL)) { hashdb_dirty = 1; file->path_hash = path_hash; file->path = (char *)((uintptr_t)file + (uintptr_t)sizeof(hashdb_t)); memcpy(file->path, check->d_name, pathlen + 1); *(file->path + pathlen) = '\0'; file->size = check->size; file->inode = check->inode; file->mtime = check->mtime; file->partialhash = check->filehash_partial; file->fullhash = check->filehash; if (ISFLAG(check->flags, FF_HASH_FULL)) file->hashcount = 2; else file->hashcount = 1; } else { /* No check entry? 
Populate from passed parameters */ file->path = (char *)((uintptr_t)file + (uintptr_t)sizeof(hashdb_t)); file->path_hash = path_hash; } return file; } /* db header format: jdupes hashdb:dbversion,hashtype,update_mtime * db line format: hashcount,partial,full,mtime,size,inode,path */ int64_t load_hash_database(char *dbname) { FILE *db; char line[PATH_MAX + 128]; char buf[PATH_MAX + 128]; char *field, *temp; int db_ver; unsigned int fixed_len; int64_t linenum = 1; #ifdef LOUD_DEBUG time_t db_mtime; char date[32]; #endif /* LOUD_DEBUG */ if (dbname == NULL) goto error_hashdb_null; LOUD(fprintf(stderr, "load_hash_database('%s')\n", dbname);) errno = 0; db = fopen(dbname, "rb"); if (db == NULL) goto warn_hashdb_open; /* Read header line */ if ((fgets(buf, PATH_MAX + 127, db) == NULL) || (ferror(db) != 0)) { if (errno == 0) goto warn_hashdb_open; // empty file = make new DB goto error_hashdb_read; } else if (!ISFLAG(flags, F_HIDEPROGRESS)) fprintf(stderr, "Loading hash database..."); field = strtok(buf, ":"); if (strcmp(field, "jdupes hashdb") != 0) goto error_hashdb_header; field = strtok(NULL, ":"); temp = strtok(field, ","); db_ver = (int)strtoul(temp, NULL, 10); temp = strtok(NULL, ","); hashdb_algo = (int)strtoul(temp, NULL, 10); temp = strtok(NULL, ","); /* Database mod time is currently set but not used */ LOUD(db_mtime = (int)strtoul(temp, NULL, 16);) LOUD(SECS_TO_TIME(date, &db_mtime);) LOUD(fprintf(stderr, "hashdb header: ver %u, algo %u, mod %s\n", db_ver, hashdb_algo, date);) if (db_ver < HASHDB_MIN_VER || db_ver > HASHDB_MAX_VER) goto error_hashdb_version; if (hashdb_algo != hash_algo) goto warn_hashdb_algo; /* v1 has 8-byte sizes; v2 has 16-byte (4GiB+) sizes */ fixed_len = 87; if (db_ver == 1) fixed_len = 71; /* Read database entries */ while (1) { int pathlen; unsigned int linelen; int hashcount; uint64_t partialhash, fullhash = 0; time_t mtime; char *path; hashdb_t *entry; off_t size; jdupes_ino_t inode; errno = 0; if ((fgets(line, PATH_MAX + 128, db) == NULL)) { if (ferror(db) != 0) goto error_hashdb_read; break; } LOUD(fprintf(stderr, "read hashdb: %s", line);) strncpy(buf, line, PATH_MAX + 128); linenum++; linelen = (int64_t)strlen(buf); if (linelen < fixed_len + 1) goto error_hashdb_line; /* Split each entry into fields and * hashcount: 1 = partial only, 2 = partial and full */ field = strtok(buf, ","); if (field == NULL) goto error_hashdb_line; hashcount = (int)strtol(field, NULL, 16); if (hashcount < 1 || hashcount > 2) goto error_hashdb_line; field = strtok(NULL, ","); if (field == NULL) goto error_hashdb_line; partialhash = strtoull(field, NULL, 16); field = strtok(NULL, ","); if (field == NULL) goto error_hashdb_line; if (hashcount == 2) fullhash = strtoull(field, NULL, 16); field = strtok(NULL, ","); if (field == NULL) goto error_hashdb_line; mtime = (time_t)strtoul(field, NULL, 16); field = strtok(NULL, ","); if (field == NULL) goto error_hashdb_line; size = strtoll(field, NULL, 16); if (size == 0) goto error_hashdb_line; field = strtok(NULL, ","); if (field == NULL) goto error_hashdb_line; inode = strtoull(field, NULL, 16); path = buf + fixed_len; path = strtok(path, "\n"); if (path == NULL) goto error_hashdb_line; pathlen = linelen - fixed_len + 1; if (pathlen > PATH_MAX) goto error_hashdb_line; *(path + pathlen) = '\0'; /* Allocate and populate a tree entry */ entry = add_hashdb_entry(path, pathlen, NULL); if (entry == NULL) goto error_hashdb_add; memcpy(entry->path, path, pathlen + 1); entry->mtime = mtime; entry->inode = inode; entry->size = size; 
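    /* For reference, a v2 database line parsed here looks like (hypothetical
     * values): 2,00000000deadbeef,00000000feedface,0000000064e8a1b2,0000000000001000,0000000000abcdef,/home/user/file.bin
     * i.e. hashcount, partial hash, full hash, mtime, size, and inode as
     * fixed-width hex fields, followed by the path; the remaining fields are
     * copied into the new tree node just below. */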
entry->partialhash = partialhash; entry->fullhash = fullhash; entry->hashcount = hashcount; } return linenum - 1; warn_hashdb_open: fprintf(stderr, "Creating a new hash database '%s'\n", dbname); return 0; error_hashdb_read: fprintf(stderr, "error reading hash database '%s': %s\n", dbname, strerror(errno)); return -1; error_hashdb_header: fprintf(stderr, "error in header of hash database '%s'\n", dbname); return -2; error_hashdb_version: fprintf(stderr, "error: bad db version %u in hash database '%s'\n", db_ver, dbname); return -3; error_hashdb_line: fprintf(stderr, "\nerror: bad line %" PRId64 " in hash database '%s':\n\n%s\n\n", linenum, dbname, line); return -4; error_hashdb_add: fprintf(stderr, "error: internal failure allocating a hashdb entry\n"); return -5; error_hashdb_null: fprintf(stderr, "error: internal failure: NULL pointer for hashdb\n"); return -6; warn_hashdb_algo: fprintf(stderr, "warning: hashdb uses a different hash algorithm than selected; not loading\n"); return -7; } static int get_path_hash(char *path, uint64_t *path_hash) { uint64_t aligned_path[(PATH_MAX + 8) / sizeof(uint64_t)]; int retval; *path_hash = 0; if ((uintptr_t)path & 0x0f) { strncpy((char *)&aligned_path, path, PATH_MAX); retval = jc_block_hash((uint64_t *)aligned_path, path_hash, strlen((char *)aligned_path)); } else retval = jc_block_hash((uint64_t *)path, path_hash, strlen(path)); return retval; } /* If file hash info is already present in hash database then preload those hashes */ int read_hashdb_entry(file_t *file) { unsigned int bucket; hashdb_t *cur; uint64_t path_hash; int exclude; LOUD(fprintf(stderr, "read_hashdb_entry('%s')\n", file->d_name);) if (file == NULL || file->d_name == NULL) goto error_null; if (get_path_hash(file->d_name, &path_hash) != 0) goto error_path_hash; bucket = path_hash & HT_MASK; if (hashdb[bucket] == NULL) return 0; cur = hashdb[bucket]; while (1) { if (cur->path_hash != path_hash) { if (path_hash < cur->path_hash) cur = cur->left; else cur = cur->right; if (cur == NULL) return 0; continue; } /* Found a matching path hash */ if (strcmp(cur->path, file->d_name) != 0) { cur = cur->left; if (cur == NULL) return 0; continue; } else { /* Found a matching path too but check mtime */ exclude = 0; if (cur->mtime != file->mtime) exclude |= 1; if (cur->inode != file->inode) exclude |= 2; if (cur->size != file->size) exclude |= 4; if (exclude != 0) { /* Invalidate if something has changed */ cur->hashcount = 0; hashdb_dirty = 1; return -1; } file->filehash_partial = cur->partialhash; if (cur->hashcount == 2) { file->filehash = cur->fullhash; SETFLAG(file->flags, (FF_HASH_PARTIAL | FF_HASH_FULL)); } else SETFLAG(file->flags, FF_HASH_PARTIAL); return 1; } } return 0; error_null: fprintf(stderr, "error: internal error: NULL data passed to read_hashdb_entry()\n"); return -255; error_path_hash: fprintf(stderr, "error: internal error hashing a path\n"); return -255; } jdupes-1.27.3/hashdb.h000066400000000000000000000014551447252140200145060ustar00rootroot00000000000000/* File hash database management * This file is part of jdupes; see jdupes.c for license information */ #ifndef JDUPES_HASHDB_H #define JDUPES_HASHDB_H #ifdef __cplusplus extern "C" { #endif #include #include "jdupes.h" typedef struct _hashdb { struct _hashdb *left; struct _hashdb *right; uint64_t path_hash; char *path; uint64_t partialhash; uint64_t fullhash; jdupes_ino_t inode; off_t size; time_t mtime; uint_fast8_t hashcount; } hashdb_t; extern int save_hash_database(const char * const restrict dbname, const int 
destroy); extern hashdb_t *add_hashdb_entry(char *in_path, const int in_pathlen, const file_t *check); extern int64_t load_hash_database(char *dbname); extern int read_hashdb_entry(file_t *file); #ifdef __cplusplus } #endif #endif /* JDUPES_HASHDB_H */ jdupes-1.27.3/helptext.c000066400000000000000000000243021447252140200151010ustar00rootroot00000000000000/* Help text and version information * This file is part of jdupes; see jdupes.c for license information */ #include #include #include #include #include "filehash.h" #include "helptext.h" #include "jdupes.h" #include "version.h" #ifndef NO_HELPTEXT /* Assemble feature flag string from compile-time options */ const char *feature_flags[] = { #ifdef ENABLE_DEDUPE "dedupe", #endif #ifdef DEBUG "debug", #endif #ifdef __FAST_MATH__ "fastmath", #endif #ifdef LOUD_DEBUG "loud", #endif #ifdef LOW_MEMORY "lowmem", #endif #ifdef NO_CHUNKSIZE "nochunk", #endif #ifdef NO_DELETE "nodel", #endif #ifdef NO_ERRORONDUPE "noeod", #endif #ifdef NO_EXTFILTER "noxf", #endif #ifdef NO_HARDLINKS "nohlink", #endif #ifdef NO_HASHDB "nohashdb", #endif #ifdef NO_JODY_SORT "nojsort", #endif #ifdef NO_JSON "nojson", #endif #ifdef NO_GETOPT_LONG "nolongopt", #endif #ifdef NO_MTIME "nomtime", #endif #ifdef NO_PERMS "noperm", #endif #ifdef NO_SYMLINKS "noslink", #endif #ifdef NO_TRAVCHECK "notrav", #endif #ifdef NO_USER_ORDER "nouorder", #endif #ifdef NO_UNICODE "nounicode", #endif #ifdef UNICODE "unicode", #endif #ifdef ON_WINDOWS "windows", #endif NULL }; #endif /* NO_HELPTEXT */ void help_text(void) { #ifndef NO_HELPTEXT printf("Usage: jdupes [options] FILES and/or DIRECTORIES...\n\n"); printf("Duplicate file sets will be printed by default unless a different action\n"); printf("option is specified (delete, summarize, link, dedupe, etc.)\n"); #ifdef NO_GETOPT_LONG printf("\nWARNING: getopt_long disabled in this build! Long options will not work.\n\n"); #endif #ifdef LOUD printf(" -@ --loud \toutput annoying low-level debug info while running\n"); #endif printf(" -0 --print-null \toutput nulls instead of CR/LF (like 'find -print0')\n"); printf(" -1 --one-file-system\tdo not match files on different filesystems/devices\n"); printf(" -A --no-hidden \texclude hidden files from consideration\n"); #ifdef ENABLE_DEDUPE printf(" -B --dedupe \tdo a copy-on-write (reflink/clone) deduplication\n"); #endif #ifndef NO_CHUNKSIZE printf(" -C --chunk-size=#\toverride I/O chunk size in KiB (min %d, max %d)\n", MIN_CHUNK_SIZE / 1024, MAX_CHUNK_SIZE / 1024); #endif /* NO_CHUNKSIZE */ #ifndef NO_DELETE printf(" -d --delete \tprompt user for files to preserve and delete all\n"); printf(" \tothers; important: under particular circumstances,\n"); printf(" \tdata may be lost when using this option together\n"); printf(" \twith -s or --symlinks, or when specifying a\n"); printf(" \tparticular directory more than once; refer to the\n"); printf(" \tdocumentation for additional information\n"); #endif /* NO_DELETE */ #ifdef DEBUG printf(" -D --debug \toutput debug statistics after completion\n"); #endif #ifndef NO_ERRORONDUPE printf(" -e --error-on-dupe\texit on any duplicate found with status code 255\n"); #endif printf(" -f --omit-first \tomit the first file in each set of matches\n"); printf(" -h --help \tdisplay this help message\n"); #ifndef NO_HARDLINKS printf(" -H --hard-links \ttreat any linked files as duplicate files. 
Normally\n"); printf(" \tlinked files are treated as non-duplicates for safety\n"); #endif printf(" -i --reverse \treverse (invert) the match sort order\n"); #ifndef NO_USER_ORDER printf(" -I --isolate \tfiles in the same specified directory won't match\n"); #endif #ifndef NO_JSON printf(" -j --json \tproduce JSON (machine-readable) output\n"); #endif /* NO_JSON */ /* printf(" -K --skip-hash \tskip full file hashing (may be faster; 100%% safe)\n"); printf(" \tWARNING: in development, not fully working yet!\n"); */ #ifndef NO_SYMLINKS printf(" -l --link-soft \tmake relative symlinks for duplicates w/o prompting\n"); #endif #ifndef NO_HARDLINKS printf(" -L --link-hard \thard link all duplicate files without prompting\n"); #ifdef ON_WINDOWS printf(" \tWindows allows a maximum of 1023 hard links per file;\n"); printf(" \tlinking large match sets will result in multiple sets\n"); printf(" \tof hard linked files due to this limit.\n"); #endif /* ON_WINDOWS */ #endif /* NO_HARDLINKS */ printf(" -m --summarize \tsummarize dupe information\n"); printf(" -M --print-summarize\tprint match sets and --summarize at the end\n"); #ifndef NO_DELETE printf(" -N --no-prompt \ttogether with --delete, preserve the first file in\n"); printf(" \teach set of duplicates and delete the rest without\n"); printf(" \tprompting the user\n"); #endif /* NO_DELETE */ #ifndef NO_MTIME printf(" -o --order=BY \tselect sort order for output, linking and deleting; by\n"); printf(" \tmtime (BY=time) or filename (BY=name, the default)\n"); #endif #ifndef NO_USER_ORDER printf(" -O --param-order \tParameter order is more important than selected -o sort\n"); #endif #ifndef NO_PERMS printf(" -p --permissions \tdon't consider files with different owner/group or\n"); printf(" \tpermission bits as duplicates\n"); #endif printf(" -P --print=type \tprint extra info (partial, early, fullhash)\n"); printf(" -q --quiet \thide progress indicator\n"); printf(" -Q --quick \tskip byte-for-byte confirmation for quick matching\n"); printf(" \tWARNING: -Q can result in data loss! Be very careful!\n"); printf(" -r --recurse \tfor every directory, process its subdirectories too\n"); printf(" -R --recurse: \tfor each directory given after this option follow\n"); printf(" \tsubdirectories encountered within (note the ':' at\n"); printf(" \tthe end of the option, manpage for more details)\n"); #ifndef NO_SYMLINKS printf(" -s --symlinks \tfollow symlinks\n"); #endif printf(" -S --size \tshow size of duplicate files\n"); printf(" -t --no-change-check\tdisable security check for file changes (aka TOCTTOU)\n"); printf(" -T --partial-only \tmatch based on partial hashes only. WARNING:\n"); printf(" \tEXTREMELY DANGEROUS paired with destructive actions!\n"); printf(" -u --print-unique\tprint only a list of unique (non-matched) files\n"); printf(" -U --no-trav-check\tdisable double-traversal safety check (BE VERY CAREFUL)\n"); printf(" \tThis fixes a Google Drive File Stream recursion issue\n"); printf(" -v --version \tdisplay jdupes version and license information\n"); #ifndef NO_EXTFILTER printf(" -X --ext-filter=x:y\tfilter files based on specified criteria\n"); printf(" \tUse '-X help' for detailed extfilter help\n"); #endif /* NO_EXTFILTER */ printf(" -y --hash-db=file\tuse a hash database text file to speed up repeat runs\n"); printf(" \tPassing '-y .' will expand to '-y jdupes_hashdb.txt'\n"); printf(" -z --zero-match \tconsider zero-length files to be duplicates\n"); printf(" -Z --soft-abort \tIf the user aborts (i.e. 
CTRL-C) act on matches so far\n"); #ifndef ON_WINDOWS printf(" \tYou can send SIGUSR1 to the program to toggle this\n"); #endif #else /* NO_HELPTEXT */ version_text(0); #endif /* NO_HELPTEXT */ return; } void version_text(int short_version) { printf("jdupes %s (%s) ", VER, VERDATE); #ifndef NO_HELPTEXT /* Indicate bitness information */ if (sizeof(uintptr_t) == 8) { if (sizeof(long) == 4) printf("64-bit i32"); else if (sizeof(long) == 8) printf("64-bit"); } else if (sizeof(uintptr_t) == 4) { if (sizeof(long) == 4) printf("32-bit"); else if (sizeof(long) == 8) printf("32-bit i64"); #if defined(__x86_64__) && SIZE_MAX == 0xffffffff printf(" (x32 ABI)"); #endif } else printf("%u-bit i%u", (unsigned int)(sizeof(uintptr_t) * 8), (unsigned int)(sizeof(long) * 8)); if (!short_version) { printf(", linked to libjodycode %s (%s)\n", jc_version, jc_verdate); printf("Hash algorithms available:"); for (int i = 0; i < HASH_ALGO_COUNT; i++) printf(" %s%c", hash_algo_list[i], i == (HASH_ALGO_COUNT - 1) ? '\n' : ','); } else printf("\n"); printf("Compile-time feature flags:"); if (*feature_flags != NULL) { int c = 0; while (feature_flags[c] != NULL) { printf(" %s", feature_flags[c]); c++; } } else printf(" none"); printf("\n"); if (short_version) return; printf("Copyright (C) 2015-2023 by Jody Bruchon and contributors\n\n"); printf("Permission is hereby granted, free of charge, to any person obtaining a copy of\n"); printf("this software and associated documentation files (the \"Software\"), to deal in\n"); printf("the Software without restriction, including without limitation the rights to\n"); printf("use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\n"); printf("of the Software, and to permit persons to whom the Software is furnished to do\n"); printf("so, subject to the following conditions:\n\n"); printf("The above copyright notice and this permission notice shall be included in all\n"); printf("copies or substantial portions of the Software.\n\n"); printf("THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n"); printf("IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n"); printf("FITNESS FOR A PARTICULAR PURPOSE, AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n"); printf("AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n"); printf("LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n"); printf("OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n"); printf("SOFTWARE.\n"); printf("\nIf you find this software useful, please consider financially supporting\n"); printf("its development through the author's home page: https://www.jodybruchon.com/\n"); printf("Report bugs, get new releases, or learn about jdupes: https://www.jdupes.com/\n"); #else (void)short_version; printf("\nBuilt with no help text. 
You're on your own.\n");
#endif /* NO_HELPTEXT */
  return;
}
jdupes-1.27.3/helptext.h000066400000000000000000000005041447252140200151040ustar00rootroot00000000000000/* Help text and version information
 * This file is part of jdupes; see jdupes.c for license information */

#ifndef JDUPES_HELPTEXT_H
#define JDUPES_HELPTEXT_H

#ifdef __cplusplus
extern "C" {
#endif

void version_text(int short_version);
void help_text(void);

#ifdef __cplusplus
}
#endif

#endif /* JDUPES_HELPTEXT_H */
jdupes-1.27.3/icon/000077500000000000000000000000001447252140200140275ustar00rootroot00000000000000jdupes-1.27.3/icon/icon_jdupes_256.ico000066400000000000000000000264321447252140200174300ustar00rootroot00000000000000
[binary image data omitted: remainder of icon_jdupes_256.ico, plus icon_jdupes_256.xcf, icon_jdupes_all.ico, and jdupes.png]
w1Cm#9lN5~k֫0p@DoƘ p~wPz@".= roڵnr뭷'e5jԐ` ?9pQ"7- ={0 #_>-xQW_}ѣGmOA(Ν;:#p))03 nfW.5|ʠuwJxWѱcǤXn۽{.a^K%`MV [hF{C`HMMu%n܄2 : =+v+"ܹsb裏"XU !زe oD˩z7>:BC1h )`ܜy?~ڿqpB-yꩧXhm)Rbl*T =-[0|p,Y_e_:8vzn '2dWR' [`9"ݻK2l0' ܏r-( \c 66kgϞ8p[laL6ͱ\x< ҹsg(?HOOɓ߿-[0o<֯_̕ /p۴iJ#GJ4P''''Ǐs!ٶm/ 拪hg99J?ݐ#3g7-**|ܝ@_QsqvroJI˅c˴@}n>: j=o?:r x9x0h~ ; V޽{SV->|X;mNNw|yԩc吢"*/ܜ Tlw%aߺ@Vd&Ԇe!_w}nuVn x㍖/s$wq˼ue! ac<nk׮XBbL#y'TeeĒJ*V l~E}#*1ckٳg5kv'Oz}/CoPY[ ڵk絗܌pi9om6d6,Ɔ<PGa#=; *sNդI$W{v2sLe8U슸ET +%~?ҥKvnٸqcZnԩ=tPUg!#joft97G}ԕ,Ǯ6s=ٳ nz22j(ڕ ߿ڵ+={t]_PP9s e:wڵk5j+_6L:5oJ*ox$11nšCJ& !R aX`sΥm۶>g}wWA.֭.^?Ol"ժUcr刍ZjTRjժquQJ޷?k,FEQn]>s-[رcmL4޽{SvmWnt0-ZEȕ+W2e2P]v%>>ӧ9{,TZ xb,-=@8]e%;ve˖kٳg9r], xS'ҧOkAs}Y'r7>2Fre2qDo2Ӂivs7ocɢB4vcPL<2s̰rJm̋wT1Dzexu GmI(:u*}X<ѣGG{nLBƍ'))ɉ@soy:]ٳgӳgO/^Lnnnd1{L+cUEEE`/pDt%" =9H[he|-,&@-/e֙"`NS{n;H|AI_C\m.M9k,z<p/y[t De6'r@gѧ,5me9t’0W" Vd F~F[av CZ>xћs >5ǗBΪ[`oi%x&M01 C] ˜0-V ^_ئMz6՗smL_TZw ǫp ` ic8d`t'pޜtOU]ŀqA}gսVr[((wV32{G{63ϙ `9^(7 ^qlͷ< 7z˼;~'ԗy]3e>*YfP#4Ic0}ZxFo +$ozm~kdΛ/}ozh)^/>uXA_[tDzsʟFg6ռ :rݻe&$T~|/7q%pXyOqV ο._R^W<)s2عH ts;Tjb(:ĉF6؄zF=͞Zlfe/p/vs_Fy[?( O#yw;o$?؝)ۡx OȎ2{34Ayx26LEE4 @,o9QSa FLF`k>I0I] =Rg=f*b ~sx9>_vȪulZVx*;@mMa͹f!Ac>o] .rT|!O" wzԼWei~f86Xz:}BӦ @ wt S_ftEMBoAYTkL1UWGjavc|6| +80,U9vO;m&4 @OQ4R?6m<͚уͷOboI;W`́U>xXAK^CЏؚmړSJSqpv}S[ l=Em}6sނ2e>ӗq̑#e< ўֆZ\u`s_&I괈G-4e:VW@hD?6[nїgo{y))]"8"GY{¶h) ^s~ڥK5j/3%%P.M) -PasZSXZkzq eΞ3s^wѢ@3Gs'{*D{{$Q[ؓ#(~]uedԗ۷ϟF8_(Q/M_vr?ss^>N̚z2֗7 Lwls bH.?>ם|FӦioKס̽{}L @MM 5B?.AoF@o:{PKdR@w׵;zWV[:^[ߡgv6^Z'/h)Ohe^Lt..Z~u4io*Ȁitwkdiݛu3&7b`tK%c3ixov>j/p~;9T 'c)֠~n³C0qN?sj߿G^GZ>Ey 8N׬_wRw@w1(Ç~8 M7 f_O¾O3YY$IwՓ٪>}d.Xh|9g ÿOh:ah?:26;gsAh=Q)|I%PȪ[+`lw l!Z[IƉhJyLVq/3>uX]j@(dJ8ꬖr}8w8'+h;NA` 3IDAT1A;XՕtwN/GyWZR~jGz`Ƭ`} n@||Mdh~Y }P<4KIhPJY{t]:8d ePL_h_R@Gs+GZЈ.3QػW Ces,'N;PL8pK#2QxTj 7k Ծ l$VXwh 2(c =Lfʤk{qq:}d ߊ<X6_ &o9nb2 hh[%;zSAjycf~;U@s` aubY|n^FلZ ljv۠M T`g]f'Cb?iKퟓ'DjͻE%Ww -?- uciWhR?Q.]e!nM)(vBpT/tw L#㤫vo@?/B#.?uxC[p+WҎqp޽?QvG}W9911zrO֗~߹Ye;xGeX%@\$E]AI}r;̄rz^)sȶmJl'ifP^r(xK5l6e@ )d_]4X_V_͛~25^^Zv^tХNvivw`0H.ڥ?Cᆳdܮ˩S]+MeΞm+o/ T z:lpu0<(q aN fEʟF{. `羚U^zH_Q5#C_neNZp%\ee,B 0XR,7svi;x'ݳS t'e>% 9 8+7nPu6f. &:Iy43y9_ͼh 擗.&{oTPQ[?lXr(:zΉT$5mVX}F .+ &`0y.,eٕ*oʸ{/.?g~.%E_rF jۏ[&PnU`T<0:ۗo <*?+jF~;җۻU8/>N bˆv^ 3~lO?WMg՗yℭ"~񑾵+Й$GF,tCug?Zn7oT_Ж-z:tЗ1apN6;~0ӂBo /jajW Q>E#8z7֗^l[M?2U~*80jv xܾ\ 4d~W==]`j?35fxVbB?ؕlcMz o[CsZϳ@vúHoEa`D`hG18F`V@N[=S~}9G{^նX1X 8gُ~p9k/9p'ys<AW)ʿJ&Ӎ8\ȜH GRw"*Sڍ # 2 79)H Bh4 \:QvkIQSBEZP*XWg.NsizX{](DaL2L/й2exlB)6LpT ,p0Lu)VN|%#K|2w!Gʛ` {foRa:%T (KZT̟X)ױumT ڨ Lcp-P ) RԁL.lThèQRL/*AAAAAAAAA"T| IENDB`jdupes-1.27.3/interrupt.c000066400000000000000000000021771447252140200153060ustar00rootroot00000000000000/* Signal handler/interruption functions * This file is part of jdupes; see jdupes.c for license information */ #include #include #include #include #include #include "likely_unlikely.h" #include "jdupes.h" /* CTRL-C */ int interrupt = 0; #ifndef ON_WINDOWS static int usr1_toggle = 0; #endif /* Catch CTRL-C and either notify or terminate */ void catch_interrupt(const int signum) { (void)signum; interrupt = 1; exit_status = EXIT_FAILURE; return; } /* SIGUSR1 for -Z toggle; not available on Windows */ #ifndef ON_WINDOWS void catch_sigusr1(const int signum) { (void)signum; if (!ISFLAG(flags, F_SOFTABORT)) { SETFLAG(flags, F_SOFTABORT); usr1_toggle = 1; } else { CLEARFLAG(flags, F_SOFTABORT); usr1_toggle = 2; } return; } void check_sigusr1(void) { /* Notify of change to soft abort status if SIGUSR1 received */ if (unlikely(usr1_toggle != 0)) { fprintf(stderr, "\njdupes received a USR1 signal; soft abort (-Z) is now %s\n", usr1_toggle == 1 ? 
"ON" : "OFF" ); usr1_toggle = 0; } return; } #else #define check_sigusr1() #endif jdupes-1.27.3/interrupt.h000066400000000000000000000011221447252140200153000ustar00rootroot00000000000000/* Signal handler/interruption functions * This file is part of jdupes; see jdupes.c for license information */ #ifndef JDUPES_INTERRUPT_H #define JDUPES_INTERRUPT_H #ifdef __cplusplus extern "C" { #endif #include "jdupes.h" extern int interrupt; void catch_interrupt(const int signum); void start_progress_alarm(void); void stop_progress_alarm(void); #ifdef ON_WINDOWS #define check_sigusr1() #else void catch_sigusr1(const int signum); void catch_sigalrm(const int signum); void check_sigusr1(void); #endif /* ON_WINDOWS */ #ifdef __cplusplus } #endif #endif /* JDUPES_INTERRUPT_H */ jdupes-1.27.3/jdupes.1000066400000000000000000000347271447252140200144700ustar00rootroot00000000000000.TH JDUPES 1 .\" NAME should be all caps, SECTION should be 1-8, maybe w/ subsection .\" other parms are allowed: see man(7), man(1) .SH NAME jdupes \- finds and performs actions upon duplicate files .SH SYNOPSIS .B jdupes [ .I options ] .I DIRECTORIES \|.\|.\|. .SH "DESCRIPTION" Searches the given path(s) for duplicate files. Such files are found by comparing file sizes, then partial and full file hashes, followed by a byte-by-byte comparison. The default behavior with no other "action options" specified (delete, summarize, link, dedupe, etc.) is to print sets of matching files. .SH OPTIONS .TP .B -@ --loud output annoying low-level debug info while running .TP .B -0 --print-null when printing matches, use null bytes instead of CR/LF bytes, just like 'find -print0' does. This has no effect with any action mode other than the default "print matches" (delete, link, etc. will still print normal line endings in the output.) 
.TP
.B -1 --one-file-system
do not match files that are on different filesystems or devices
.TP
.B -A --no-hidden
exclude hidden files from consideration
.TP
.B -B --dedupe
call same-extents ioctl or clonefile() to trigger a filesystem-level data
deduplication on disk (known as copy-on-write, CoW, cloning, or reflink);
only a few filesystems support this (BTRFS; XFS when mkfs.xfs was used with
-m crc=1,reflink=1; Apple APFS)
.TP
.B -C --chunk-size=\fInumber-of-KiB\fR
set the I/O chunk size manually; larger values may improve performance on
rotating media by reducing the number of head seeks required, but also
increase memory usage and can reduce performance in some cases
.TP
.B -D --debug
if this feature is compiled in, show debugging statistics and info at the
end of program execution
.TP
.B -d --delete
prompt user for files to preserve, deleting all others (see
.B CAVEATS
below)
.TP
.B -e --error-on-dupe
exit on any duplicate found with status code 255
.TP
.B -f --omit-first
omit the first file in each set of matches
.TP
.B -H --hard-links
normally, when two or more files point to the same disk area they are
treated as non-duplicates; this option will change this behavior
.TP
.B -h --help
displays help
.TP
.B -i --reverse
reverse (invert) the sort order of matches
.TP
.B -I --isolate
isolate each command-line parameter from one another; only match if the
files are under different parameter specifications
.TP
.B -j --json
produce JSON (machine-readable) output
.TP
.B -L --link-hard
replace all duplicate files with hardlinks to the first file in each set
of duplicates
.TP
.B -m --summarize
summarize duplicate file information
.TP
.B -M --print-summarize
print matches and summarize the duplicate file information at the end
.TP
.B -N --no-prompt
when used together with \-\-delete, preserve the first file in each set of
duplicates and delete the others without prompting the user
.TP
.B -O --param-order
parameter order preservation is more important than the chosen sort; this
is particularly useful with the \fB\-N\fP option to ensure that automatic
deletion behaves in a controllable way
.TP
.B -o --order\fR=\fIWORD\fR
order files according to WORD:

time - sort by modification time

name - sort by filename (default)
.TP
.B -p --permissions
don't consider files with different owner/group or permission bits as
duplicates
.TP
.B -P --print=type
print extra information to stdout; valid options are:

early - matches that pass early size/permission/link/etc. checks

partial - files whose partial hashes match

fullhash - files whose full hashes match
.TP
.B -Q --quick
.B [WARNING: RISK OF DATA LOSS, SEE CAVEATS]
skip byte-for-byte verification of duplicate pairs (use hashes only)
.TP
.B -q --quiet
hide progress indicator
.TP
.B -R --recurse:
for each directory given after this option follow subdirectories
encountered within (note the ':' at the end of the option; see the Examples
section below for further explanation)
.TP
.B -r --recurse
for every directory given follow subdirectories encountered within
.TP
.B -l --link-soft
replace all duplicate files with symlinks to the first file in each set of
duplicates
.TP
.B -S --size
show size of duplicate files
.TP
.B -s --symlinks
follow symlinked directories
.TP
.B -T --partial-only
.B [WARNING: EXTREME RISK OF DATA LOSS, SEE CAVEATS]
match based on hash of first block of file data, ignoring the rest
.TP
.B -U --no-trav-check
disable double-traversal safety check (BE VERY CAREFUL)
.TP
.B -u --print-unique
print only a list of unique (non-duplicate, unmatched) files
.TP
.B -v --version
display jdupes version and compilation feature flags
.TP
.B -y --hash-db=file
create/use a hash database text file to speed up future runs by caching
file hash data
.TP
.B -X --ext-filter=spec:info
exclude/filter files based on specified criteria; general format:
.B jdupes -X filter[:value][size_suffix]

Some filters take no value or multiple values. Filters that can take a
numeric option generally support the size multipliers K/M/G/T/P/E with or
without an added iB or B. Multipliers are binary-style unless the -B suffix
is used, which will use decimal multipliers. For example, 16k or
16kib = 16384; 16kb = 16000. Multipliers are case-insensitive.

Filters have cumulative effects: jdupes -X size+:99 -X size-:101 will cause
only files of exactly 100 bytes in size to be included.

Extension matching is case-insensitive. Path substring matching is
case-sensitive.

Supported filters are:
.RS
.IP `size[+-=]:number[suffix]'
match only if size is greater (+), less than (-), or equal to (=) the
specified number. The +/- and = specifiers can be combined, i.e. "size+=:4K"
will only consider files with a size greater than or equal to four kilobytes
(4096 bytes).
.IP `noext:ext1[,ext2,...]'
exclude files with certain extension(s), specified as a comma-separated
list. Do not use a leading dot.
.IP `onlyext:ext1[,ext2,...]'
only include files with certain extension(s), specified as a comma-separated
list. Do not use a leading dot.
.IP `nostr:text_string'
exclude all paths containing the substring text_string. This scans the full
file path, so it can be used to match directories: -X nostr:dir_name/
.IP `onlystr:text_string'
require all paths to contain the substring text_string. This scans the full
file path, so it can be used to match directories: -X onlystr:dir_name/
.IP `newer:datetime'
only include files newer than specified date. Date/time format:
"YYYY-MM-DD HH:MM:SS" (time is optional).
.IP `older:datetime'
only include files older than specified date. Date/time format:
"YYYY-MM-DD HH:MM:SS" (time is optional).
.RE
.TP
.B -z --zero-match
consider zero-length files to be duplicates; this replaces the old default
behavior when \fB\-n\fP was not specified
.TP
.B -Z --soft-abort
if the user aborts the program (as with CTRL-C) act on the matches that
were found before the abort was received. For example, if -L and -Z are
specified, all matches found prior to the abort will be hard linked. The
default behavior without -Z is to abort without taking any actions.
.SH NOTES
A set of arrows is used in hard linking to show what action was taken on
each link candidate. These arrows are as follows:
.TP
.B ---->
This file was successfully hard linked to the first file in the duplicate
chain
.TP
.B -@@->
This file was successfully symlinked to the first file in the chain
.TP
.B -##->
This file was successfully cloned from the first file in the chain
.TP
.B -==->
This file was already a hard link to the first file in the chain
.TP
.B -//->
Linking this file failed due to an error during the linking process
.PP
Duplicate files are listed together in groups with each file displayed on a
separate line. The groups are then separated from each other by blank lines.
.SH EXAMPLES
.TP
.B jdupes a --recurse: b
will follow subdirectories under b, but not those under a.
.TP
.B jdupes a --recurse b
will follow subdirectories under both a and b.
.TP
.B jdupes -O dir1 dir3 dir2
will always place 'dir1' results first in any match set (where relevant)
.SH CAVEATS
Using
.B \-1
or
.BR \-\-one\-file\-system
prevents matches that cross filesystems, but a more relaxed form of this
option may be added that allows cross-matching for all filesystems that each
parameter is present on.

When using
.B \-d
or
.BR \-\-delete ,
care should be taken to guard against accidental data loss.

.B \-Z
or
.BR \-\-soft\-abort
used to be --hardabort in jdupes prior to v1.5 and had the opposite
behavior. Defaulting to taking action on abort is probably not what most
users would expect. The decision to invert rather than reassign to a
different option was made because this feature was still fairly new at the
time of the change.

The
.B \-O
or
.BR \-\-param\-order
option allows the user greater control over what appears in the first
position of a match set, specifically for keeping the \fB\-N\fP option from
deleting all but one file in a set in a seemingly random way. All
directories specified on the command line will be used as the sorting order
of result sets first, followed by the sorting algorithm set by the \fB\-o\fP
or \fB\-\-order\fP option. This means that the order of all match pairs for
a single directory specification will retain the old sorting behavior even
if this option is specified.

When used together with options
.B \-s
or
.BR \-\-symlink ,
a user could accidentally preserve a symlink while deleting the file it
points to.

The
.B \-Q
or
.BR \-\-quick
option only reads each file once, hashes it, and performs comparisons based
solely on the hashes. There is a small but significant risk of a hash
collision; the failsafe byte-for-byte comparison exists to catch such
collisions, and this option explicitly bypasses it. Do not use it on ANY
data set for which any amount of data loss is unacceptable. This option is
not included in the help text for the program due to its risky nature.
.B You have been warned!

The
.B \-T
or
.BR \-\-partial\-only
option produces results based on a hash of the first block of file data in
each file, ignoring everything else in the file. Partial hash checks have
always been an important exclusion step in the jdupes algorithm, usually
hashing the first 4096 bytes of data and allowing files that are different
at the start to be rejected early.
In certain scenarios it may be a useful heuristic for a user to see that a set of files has the same size and the same starting data, even if the remaining data does not match; one example of this would be comparing files with data blocks that are damaged or missing such as an incomplete file transfer or checking a data recovery against known-good copies to see what damaged data can be deleted in favor of restoring the known-good copy. This option is meant to be used with informational actions and .B can result in EXTREME DATA LOSS if used with options that delete files, create hard links, or perform other destructive actions on data based on the matching output. Because of the potential for massive data destruction, .B this option MUST BE SPECIFIED TWICE to take effect and will error out if it is only specified once. Using the .B \-C or .BR \-\-chunk\-size option to override I/O chunk size can increase performance on rotating storage media by reducing "head thrashing," reading larger amounts of data sequentially from each file. This tunable size can have bad side effects; the default size maximizes algorithmic performance without regard to the I/O characteristics of any given device and uses a modest amount of memory, but other values may greatly increase memory usage or incur a lot more system call overhead. Try several different values to see how they affect performance for your hardware and data set. This option does not affect match results in any way, so even if it slows down the file matching process it will not hurt anything. The .B \-y or .BR \-\-hash\-db feature creates and maintains a text file with a list of file paths, hashes, and other metadata that enables jdupes to "remember" file data across runs. Specifying a period '.' as the database file name will use a name of "jdupes_hashdb.txt" instead; this alias makes it easy to use the hash database feature without typing a descriptive name each time. THIS FEATURE IS CURRENTLY UNDER DEVELOPMENT AND HAS MANY QUIRKS. USE IT AT YOUR OWN RISK. In particular, one of the biggest problems with this feature is that it stores every path exactly as specified on the command line; if any paths are passed into jdupes on a subsequent run with a different prefix then they will not be recognized and they will be treated as totally different files. For example, running \fBjdupes \-y . foo/\fP is not the same as \fBjdupes \-y . ./foo\fP nor the same as (from a sibling directory) \fBjdupes \-y ../foo\fP. You must run jdupes from the same working directory and with the same path specifications to take advantage of the hash database feature. When used correctly, a fully populated hash database can reduce subsequent runs with hundreds of thousands of files that normally take a very long time to run down to the directory scanning time plus a couple of seconds. If the directory data is already in the OS disk cache, this can make subsequent runs with over 100K files finish in under one second. 
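.P
For example (paths here are placeholders), always invoking the program from
the same working directory with the same relative path specification keeps
the stored paths consistent, so the database can be reused on later runs:
.P
.B cd /data && jdupes -y . -r photos/
.P
Subsequent runs can then reuse the stored hashes for any file whose path,
size, modification time, and inode still match its database entry.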
.SH REPORTING BUGS Send bug reports and feature requests to jody@jodybruchon.com, or for general information and help, visit www.jdupes.com .SH SUPPORTING DEVELOPMENT If you find this software useful, please consider financially supporting its development through the author's home page: https://www.jodybruchon.com/ .SH AUTHOR jdupes is created and maintained by Jody Bruchon and was forked from fdupes 1.51 by Adrian Lopez .SH LICENSE MIT License Copyright (c) 2015-2023 Jody Lee Bruchon Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. jdupes-1.27.3/jdupes.c000066400000000000000000000712151447252140200145430ustar00rootroot00000000000000/* jdupes (C) 2015-2023 Jody Bruchon Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #ifndef NO_GETOPT_LONG #include #endif #include #include #include "libjodycode_check.h" #include "likely_unlikely.h" #include "jdupes.h" #include "args.h" #include "checks.h" #ifdef DEBUG #include "dumpflags.h" #endif #ifndef NO_EXTFILTER #include "extfilter.h" #endif #include "filehash.h" #include "filestat.h" #ifndef NO_HASHDB #include "hashdb.h" #endif #include "helptext.h" #include "loaddir.h" #include "match.h" #include "progress.h" #include "interrupt.h" #include "sort.h" #ifndef NO_TRAVCHECK #include "travcheck.h" #endif #include "version.h" #ifndef USE_JODY_HASH #include "xxhash.h" #endif #ifdef ENABLE_DEDUPE #ifdef __linux__ #include #endif #endif /* Headers for post-scanning actions */ #include "act_deletefiles.h" #ifdef ENABLE_DEDUPE #include "act_dedupefiles.h" #endif #include "act_linkfiles.h" #include "act_printmatches.h" #ifndef NO_JSON #include "act_printjson.h" #endif /* NO_JSON */ #include "act_summarize.h" /* Detect Windows and modify as needed */ #if defined _WIN32 || defined __MINGW32__ #ifdef UNICODE const wchar_t *FILE_MODE_RO = L"rbS"; wpath_t wstr; #else const char *FILE_MODE_RO = "rbS"; #endif /* UNICODE */ #else /* Not Windows */ const char *FILE_MODE_RO = "rb"; #ifdef UNICODE #error Do not define UNICODE on non-Windows platforms. #undef UNICODE #endif #endif /* _WIN32 || __MINGW32__ */ /* Behavior modification flags (a=action, p=-P) */ uint_fast64_t flags = 0, a_flags = 0, p_flags = 0; static const char *program_name; /* Stat and SIGUSR */ #ifdef ON_WINDOWS struct jc_winstat s; #else struct stat s; #endif #ifndef PARTIAL_HASH_SIZE #define PARTIAL_HASH_SIZE 4096 #endif #ifndef NO_CHUNKSIZE size_t auto_chunk_size = CHUNK_SIZE; #else /* If automatic chunk sizing is disabled, just use a fixed value */ #define auto_chunk_size CHUNK_SIZE #endif /* NO_CHUNKSIZE */ /* Required for progress indicator code */ uintmax_t filecount = 0, progress = 0, item_progress = 0, dupecount = 0; /* Performance and behavioral statistics (debug mode) */ #ifdef DEBUG unsigned int small_file = 0, partial_hash = 0, partial_elim = 0; unsigned int full_hash = 0, partial_to_full = 0, hash_fail = 0; uintmax_t comparisons = 0; #ifdef ON_WINDOWS #ifndef NO_HARDLINKS unsigned int hll_exclude = 0; #endif #endif #endif /* DEBUG */ /* File tree head */ static filetree_t *checktree = NULL; /* Hash algorithm (see filehash.h) */ #ifdef USE_JODY_HASH int hash_algo = HASH_ALGO_JODYHASH64; #else int hash_algo = HASH_ALGO_XXHASH2_64; #endif /* Directory/file parameter position counter */ unsigned int user_item_count = 1; /* Sort order reversal */ int sort_direction = 1; /* For path name mangling */ char tempname[PATHBUF_SIZE * 2]; /* Strings used in multiple places */ const char *s_interrupt = "\nStopping file scan due to user abort\n"; const char *s_no_dupes = "No duplicates found.\n"; /* Exit status; use exit() codes for setting this */ int exit_status = EXIT_SUCCESS; /***** End definitions, begin code *****/ /***** Add new functions here *****/ #ifdef UNICODE int wmain(int argc, wchar_t **wargv) #else int main(int argc, char **argv) #endif { static file_t *files = NULL; static file_t *curfile; static char **oldargv; static int firstrecurse; static int opt; static int pm = 1; static int partialonly_spec = 0; #ifndef NO_MTIME /* Remove if new order types are added! 
*/ static ordertype_t ordertype = ORDER_NAME; #endif #ifndef NO_CHUNKSIZE static long manual_chunk_size = 0; #ifdef __linux__ static struct jc_proc_cacheinfo pci; #endif /* __linux__ */ #endif /* NO_CHUNKSIZE */ #ifdef ENABLE_DEDUPE #ifdef __linux__ static struct utsname utsname; #endif /* __linux__ */ #endif #ifndef NO_HASHDB char *hashdb_name = NULL; int hdblen; int64_t hdbsize; uint64_t hdbout; #endif #ifndef NO_GETOPT_LONG static const struct option long_options[] = { { "loud", 0, 0, '@' }, { "print-null", 0, 0, '0' }, { "one-file-system", 0, 0, '1' }, { "", 0, 0, '9' }, { "no-hidden", 0, 0, 'A' }, { "dedupe", 0, 0, 'B' }, { "chunk-size", 1, 0, 'C' }, { "debug", 0, 0, 'D' }, { "delete", 0, 0, 'd' }, { "error-on-dupe", 0, 0, 'e' }, { "ext-option", 0, 0, 'E' }, { "omit-first", 0, 0, 'f' }, { "hard-links", 0, 0, 'H' }, { "help", 0, 0, 'h' }, { "isolate", 0, 0, 'I' }, { "reverse", 0, 0, 'i' }, { "json", 0, 0, 'j' }, /* { "skip-hash", 0, 0, 'K' }, */ { "link-hard", 0, 0, 'L' }, { "link-soft", 0, 0, 'l' }, { "print-summarize", 0, 0, 'M'}, { "summarize", 0, 0, 'm'}, { "no-prompt", 0, 0, 'N' }, { "param-order", 0, 0, 'O' }, { "order", 1, 0, 'o' }, { "print", 1, 0, 'P' }, { "permissions", 0, 0, 'p' }, { "quick", 0, 0, 'Q' }, { "quiet", 0, 0, 'q' }, { "recurse:", 0, 0, 'R' }, { "recurse", 0, 0, 'r' }, { "size", 0, 0, 'S' }, { "symlinks", 0, 0, 's' }, { "partial-only", 0, 0, 'T' }, { "no-change-check", 0, 0, 't' }, { "no-trav-check", 0, 0, 'U' }, { "print-unique", 0, 0, 'u' }, { "version", 0, 0, 'v' }, { "ext-filter", 1, 0, 'X' }, { "hash-db", 1, 0, 'y' }, { "soft-abort", 0, 0, 'Z' }, { "zero-match", 0, 0, 'z' }, { NULL, 0, 0, 0 } }; #define GETOPT getopt_long #else #define GETOPT getopt #endif #define GETOPT_STRING "@019ABC:DdEefHhIijKLlMmNnOo:P:pQqRrSsTtUuVvX:y:Zz" /* Verify libjodycode compatibility before going further */ if (libjodycode_version_check(1, 0) != 0) { version_text(1); exit(EXIT_FAILURE); } /* Windows buffers our stderr output; don't let it do that */ #ifdef ON_WINDOWS if (setvbuf(stderr, NULL, _IONBF, 0) != 0) fprintf(stderr, "warning: setvbuf() failed\n"); #endif #ifdef UNICODE /* Create a UTF-8 **argv from the wide version */ static char **argv; int wa_err; argv = (char **)malloc(sizeof(char *) * (size_t)argc); if (!argv) jc_oom("main() unicode argv"); wa_err = jc_widearg_to_argv(argc, wargv, argv); if (wa_err != 0) { jc_print_error(wa_err); exit(EXIT_FAILURE); } /* fix up __argv so getopt etc. don't crash */ __argv = argv; jc_set_output_modes(0x0c); #endif /* UNICODE */ #ifndef NO_CHUNKSIZE #ifdef __linux__ /* Auto-tune chunk size to be half of L1 data cache if possible */ jc_get_proc_cacheinfo(&pci); if (pci.l1 != 0) auto_chunk_size = (pci.l1 / 2); else if (pci.l1d != 0) auto_chunk_size = (pci.l1d / 2); /* Must be at least 4096 (4 KiB) and cannot exceed CHUNK_SIZE */ if (auto_chunk_size < MIN_CHUNK_SIZE || auto_chunk_size > MAX_CHUNK_SIZE) auto_chunk_size = CHUNK_SIZE; /* Force to a multiple of 4096 if it isn't already */ if ((auto_chunk_size & 0x00000fffUL) != 0) auto_chunk_size = (auto_chunk_size + 0x00000fffUL) & 0x000ff000; #endif /* __linux__ */ #endif /* NO_CHUNKSIZE */ /* Is stderr a terminal? 
If not, we won't write progress to it */ #ifdef ON_WINDOWS if (!_isatty(_fileno(stderr))) SETFLAG(flags, F_HIDEPROGRESS); #else if (!isatty(fileno(stderr))) SETFLAG(flags, F_HIDEPROGRESS); #endif program_name = argv[0]; oldargv = cloneargs(argc, argv); while ((opt = GETOPT(argc, argv, GETOPT_STRING #ifndef NO_GETOPT_LONG , long_options, NULL #endif )) != EOF) { if ((uintptr_t)optarg == 0x20) goto error_optarg; switch (opt) { case '0': SETFLAG(a_flags, FA_PRINTNULL); LOUD(fprintf(stderr, "opt: print null instead of newline (--print-null)\n");) break; case '1': SETFLAG(flags, F_ONEFS); LOUD(fprintf(stderr, "opt: recursion across filesystems disabled (--one-file-system)\n");) break; #ifdef DEBUG case '9': SETFLAG(flags, F_BENCHMARKSTOP); break; #endif case 'A': SETFLAG(flags, F_EXCLUDEHIDDEN); break; #ifdef ENABLE_DEDUPE case 'B': #ifdef __linux__ /* Refuse to dedupe on 2.x kernels; they could damage user data */ if (uname(&utsname)) { fprintf(stderr, "Failed to get kernel version! Aborting.\n"); exit(EXIT_FAILURE); } LOUD(fprintf(stderr, "dedupefiles: uname got release '%s'\n", utsname.release)); if (*(utsname.release) == '2' && *(utsname.release + 1) == '.') { fprintf(stderr, "Refusing to dedupe on a 2.x kernel; data loss could occur. Aborting.\n"); exit(EXIT_FAILURE); } /* Kernel-level dedupe will do the byte-for-byte check itself */ if (!ISFLAG(flags, F_PARTIALONLY)) SETFLAG(flags, F_QUICKCOMPARE); #endif /* __linux__ */ SETFLAG(a_flags, FA_DEDUPEFILES); /* It is completely useless to dedupe zero-length extents */ CLEARFLAG(flags, F_INCLUDEEMPTY); LOUD(fprintf(stderr, "opt: CoW/block-level deduplication enabled (--dedupe)\n");) break; #endif /* ENABLE_DEDUPE */ #ifndef NO_CHUNKSIZE case 'C': manual_chunk_size = (strtol(optarg, NULL, 10) & 0x0ffffffcL) << 10; /* Align to 4K sizes */ if (manual_chunk_size < MIN_CHUNK_SIZE || manual_chunk_size > MAX_CHUNK_SIZE) { fprintf(stderr, "warning: invalid manual chunk size (must be %d - %d KiB); using defaults\n", MIN_CHUNK_SIZE / 1024, MAX_CHUNK_SIZE / 1024); LOUD(fprintf(stderr, "Manual chunk size (failed) was apparently '%s' => %ld KiB\n", optarg, manual_chunk_size / 1024)); manual_chunk_size = 0; } else auto_chunk_size = (size_t)manual_chunk_size; LOUD(fprintf(stderr, "Manual chunk size is %ld\n", manual_chunk_size)); break; #endif /* NO_CHUNKSIZE */ #ifndef NO_DELETE case 'd': SETFLAG(a_flags, FA_DELETEFILES); LOUD(fprintf(stderr, "opt: delete files after matching (--delete)\n");) break; #endif /* NO_DELETE */ case 'D': #ifdef DEBUG SETFLAG(flags, F_DEBUG); LOUD(fprintf(stderr, "opt: debugging information (--debug)\n");) #else fprintf(stderr, "warning: -D debugging is not supported in this build, ignoring\n"); #endif break; #ifndef NO_ERRORONDUPE case 'E': fprintf(stderr, "The -E option has been moved to -e as threatened in 1.26.1!\n"); fprintf(stderr, "Fix whatever used -E and try again. This is not a bug. 
Exiting.\n"); exit(EXIT_FAILURE); break; case 'e': SETFLAG(a_flags, FA_ERRORONDUPE); break; #endif /* NO_ERRORONDUPE */ case 'f': SETFLAG(a_flags, FA_OMITFIRST); LOUD(fprintf(stderr, "opt: omit first match from each match set (--omit-first)\n");) break; case 'h': help_text(); exit(EXIT_SUCCESS); #ifndef NO_HARDLINKS case 'H': SETFLAG(flags, F_CONSIDERHARDLINKS); LOUD(fprintf(stderr, "opt: hard links count as matches (--hard-links)\n");) break; case 'L': SETFLAG(a_flags, FA_HARDLINKFILES); LOUD(fprintf(stderr, "opt: convert duplicates to hard links (--link-hard)\n");) break; #endif case 'i': SETFLAG(flags, F_REVERSESORT); LOUD(fprintf(stderr, "opt: sort order reversal enabled (--reverse)\n");) break; #ifndef NO_USER_ORDER case 'I': SETFLAG(flags, F_ISOLATE); LOUD(fprintf(stderr, "opt: intra-parameter match isolation enabled (--isolate)\n");) break; case 'O': SETFLAG(flags, F_USEPARAMORDER); LOUD(fprintf(stderr, "opt: parameter order takes precedence (--param-order)\n");) break; #else case 'I': case 'O': fprintf(stderr, "warning: -I and -O are disabled and ignored in this build\n"); break; #endif #ifndef NO_JSON case 'j': SETFLAG(a_flags, FA_PRINTJSON); LOUD(fprintf(stderr, "opt: print output in JSON format (--print-json)\n");) break; #endif /* NO_JSON */ case 'K': SETFLAG(flags, F_SKIPHASH); break; case 'm': SETFLAG(a_flags, FA_SUMMARIZEMATCHES); LOUD(fprintf(stderr, "opt: print a summary of match stats (--summarize)\n");) break; case 'M': SETFLAG(a_flags, FA_SUMMARIZEMATCHES); SETFLAG(a_flags, FA_PRINTMATCHES); LOUD(fprintf(stderr, "opt: print matches with a summary (--print-summarize)\n");) break; #ifndef NO_DELETE case 'N': SETFLAG(flags, F_NOPROMPT); LOUD(fprintf(stderr, "opt: delete files without prompting (--noprompt)\n");) break; #endif /* NO_DELETE */ case 'o': #ifndef NO_MTIME /* Remove if new order types are added! 
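 *
 * Usage sketch for the two values accepted by the parser below (the paths
 * are hypothetical; "name" and "time" are the only keywords it accepts):
 *   jdupes -r -o name /some/dir   sorts each match set by file name
 *   jdupes -r -o time /some/dir   sorts each match set by modification time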
*/ if (!jc_strncaseeq("name", optarg, 5)) { ordertype = ORDER_NAME; } else if (!jc_strncaseeq("time", optarg, 5)) { ordertype = ORDER_TIME; } else { fprintf(stderr, "invalid value for --order: '%s'\n", optarg); exit(EXIT_FAILURE); } #endif /* NO_MTIME */ break; case 'p': SETFLAG(flags, F_PERMISSIONS); LOUD(fprintf(stderr, "opt: permissions must also match (--permissions)\n");) break; case 'P': LOUD(fprintf(stderr, "opt: print early: '%s' (--print)\n", optarg);) if (jc_streq(optarg, "partial") == 0) SETFLAG(p_flags, PF_PARTIAL); else if (jc_streq(optarg, "early") == 0) SETFLAG(p_flags, PF_EARLYMATCH); else if (jc_streq(optarg, "fullhash") == 0) SETFLAG(p_flags, PF_FULLHASH); else { fprintf(stderr, "Option '%s' is not valid for -P\n", optarg); exit(EXIT_FAILURE); } break; case 'q': SETFLAG(flags, F_HIDEPROGRESS); break; case 'Q': SETFLAG(flags, F_QUICKCOMPARE); LOUD(fprintf(stderr, "opt: byte-for-byte safety check disabled (--quick)\n");) break; case 'r': SETFLAG(flags, F_RECURSE); LOUD(fprintf(stderr, "opt: global recursion enabled (--recurse)\n");) break; case 'R': SETFLAG(flags, F_RECURSEAFTER); LOUD(fprintf(stderr, "opt: partial recursion enabled (--recurse-after)\n");) break; case 't': SETFLAG(flags, F_NOCHANGECHECK); LOUD(fprintf(stderr, "opt: TOCTTOU safety check disabled (--no-change-check)\n");) break; case 'T': partialonly_spec++; if (partialonly_spec == 1) { } if (partialonly_spec == 2) { SETFLAG(flags, F_PARTIALONLY); CLEARFLAG(flags, F_QUICKCOMPARE); } break; case 'u': SETFLAG(a_flags, FA_PRINTUNIQUE); LOUD(fprintf(stderr, "opt: print only non-matched (unique) files (--print-unique)\n");) break; case 'U': SETFLAG(flags, F_NOTRAVCHECK); LOUD(fprintf(stderr, "opt: double-traversal safety check disabled (--no-trav-check)\n");) break; case 'v': case 'V': version_text(0); exit(EXIT_SUCCESS); #ifndef NO_SYMLINKS case 'l': SETFLAG(a_flags, FA_MAKESYMLINKS); LOUD(fprintf(stderr, "opt: convert duplicates to symbolic links (--link-soft)\n");) break; case 's': SETFLAG(flags, F_FOLLOWLINKS); LOUD(fprintf(stderr, "opt: follow symbolic links enabled (--symlinks)\n");) break; #endif case 'S': SETFLAG(a_flags, FA_SHOWSIZE); LOUD(fprintf(stderr, "opt: show size of files enabled (--size)\n");) break; #ifndef NO_EXTFILTER case 'X': add_extfilter(optarg); break; #endif /* NO_EXTFILTER */ #ifndef NO_HASHDB case 'y': SETFLAG(flags, F_HASHDB); LOUD(fprintf(stderr, "opt: use a hash database (--hash-db)\n");) fprintf(stderr, "\nWARNING: THE HASH DATABASE FEATURE IS UNDER HEAVY DEVELOPMENT! It functions\n"); fprintf(stderr, " but there are LOTS OF QUIRKS. The behavior is not fully documented\n"); fprintf(stderr, " yet and basic 'smarts' have not been implemented. USE THIS FEATURE\n"); fprintf(stderr, " AT YOUR OWN RISK. 
Report hashdb issues to jody@jodybruchon.com\n\n"); hdbsize = 0; hdblen = strlen(optarg) + 1; if (hdblen < 24) hdblen = 24; hashdb_name = (char *)malloc(hdblen); if (hashdb_name == NULL) jc_nullptr("hashdb"); if (strcmp(optarg, ".") == 0) strcpy(hashdb_name, "jdupes_hashdb.txt"); else strcpy(hashdb_name, optarg); break; #endif /* NO_HASHDB */ case 'z': SETFLAG(flags, F_INCLUDEEMPTY); LOUD(fprintf(stderr, "opt: zero-length files count as matches (--zero-match)\n");) break; case 'Z': SETFLAG(flags, F_SOFTABORT); LOUD(fprintf(stderr, "opt: soft-abort mode enabled (--soft-abort)\n");) break; case '@': #ifdef LOUD_DEBUG SETFLAG(flags, F_DEBUG | F_LOUD | F_HIDEPROGRESS); #endif LOUD(fprintf(stderr, "opt: loud debugging enabled, hope you can handle it (--loud)\n");) break; default: if (opt != '?') fprintf(stderr, "Sorry, using '-%c' is not supported in this build.\n", opt); fprintf(stderr, "Try `jdupes --help' for more information.\n"); exit(EXIT_FAILURE); } } if (optind >= argc) { fprintf(stderr, "no files or directories specified (use -h option for help)\n"); exit(EXIT_FAILURE); } /* Make noise if people try to use -T because it's super dangerous */ if (partialonly_spec > 0) { if (partialonly_spec > 2) { fprintf(stderr, "Saying -T three or more times? You're a wizard. No reminders for you.\n"); goto skip_partialonly_noise; } fprintf(stderr, "\nBIG FAT WARNING: -T/--partial-only is EXTREMELY DANGEROUS! Read the manual!\n"); fprintf(stderr, " If used with destructive actions YOU WILL LOSE DATA!\n"); fprintf(stderr, " YOU ARE ON YOUR OWN. Use this power carefully.\n\n"); if (partialonly_spec == 1) { fprintf(stderr, "-T is so dangerous that you must specify it twice to use it. By doing so,\n"); fprintf(stderr, "you agree that you're OK with LOSING ALL OF YOUR DATA BY USING -T.\n\n"); exit(EXIT_FAILURE); } if (partialonly_spec == 2) { fprintf(stderr, "You passed -T twice. I hope you know what you're doing. 
Last chance!\n\n"); fprintf(stderr, " HIT CTRL-C TO ABORT IF YOU AREN'T CERTAIN!\n "); for (int countdown = 10; countdown > 0; countdown--) { fprintf(stderr, "%d, ", countdown); sleep(1); } fprintf(stderr, "bye-bye, data, it was nice knowing you.\n"); fprintf(stderr, "For wizards: three tees is the way to be.\n\n"); } } skip_partialonly_noise: if (ISFLAG(flags, F_RECURSE) && ISFLAG(flags, F_RECURSEAFTER)) { fprintf(stderr, "options --recurse and --recurse: are not compatible\n"); exit(EXIT_FAILURE); } if (ISFLAG(a_flags, FA_SUMMARIZEMATCHES) && ISFLAG(a_flags, FA_DELETEFILES)) { fprintf(stderr, "options --summarize and --delete are not compatible\n"); exit(EXIT_FAILURE); } #if defined ENABLE_DEDUPE && !defined NO_HARDLINKS if (ISFLAG(flags, F_CONSIDERHARDLINKS) && ISFLAG(a_flags, FA_DEDUPEFILES)) fprintf(stderr, "warning: option --dedupe overrides the behavior of --hardlinks\n"); #endif /* Debugging mode: dump all set flags */ DBG(if (ISFLAG(flags, F_DEBUG)) dump_all_flags();) /* If pm == 0, call printmatches() */ pm = !!ISFLAG(a_flags, FA_SUMMARIZEMATCHES) + !!ISFLAG(a_flags, FA_DELETEFILES) + !!ISFLAG(a_flags, FA_HARDLINKFILES) + !!ISFLAG(a_flags, FA_MAKESYMLINKS) + !!ISFLAG(a_flags, FA_PRINTJSON) + !!ISFLAG(a_flags, FA_PRINTUNIQUE) + !!ISFLAG(a_flags, FA_ERRORONDUPE) + !!ISFLAG(a_flags, FA_DEDUPEFILES); if (pm > 1) { fprintf(stderr, "Only one of --summarize, --print-summarize, --delete, --link-hard,\n--link-soft, --json, --error-on-dupe, or --dedupe may be used\n"); exit(EXIT_FAILURE); } if (pm == 0) SETFLAG(a_flags, FA_PRINTMATCHES); #ifndef ON_WINDOWS /* Catch SIGUSR1 and use it to enable -Z */ signal(SIGUSR1, catch_sigusr1); #endif /* Catch CTRL-C */ signal(SIGINT, catch_interrupt); #ifndef NO_HASHDB if (ISFLAG(flags, F_HASHDB)) { hdbsize = load_hash_database(hashdb_name); if (hdbsize < 0) goto error_load_hashdb; if (hdbsize > 0 && !ISFLAG(flags, F_HIDEPROGRESS)) fprintf(stderr, "%" PRId64 " entries loaded.\n", hdbsize); } #endif /* NO_HASHDB */ /* Progress indicator every second */ if (!ISFLAG(flags, F_HIDEPROGRESS)) { jc_start_alarm(1, 1); /* Force an immediate progress update */ jc_alarm_ring = 1; } if (ISFLAG(flags, F_RECURSEAFTER)) { firstrecurse = nonoptafter("--recurse:", argc, oldargv, argv); if (firstrecurse == argc) firstrecurse = nonoptafter("-R", argc, oldargv, argv); if (firstrecurse == argc) { fprintf(stderr, "-R option must be isolated from other options\n"); exit(EXIT_FAILURE); } /* F_RECURSE is not set for directories before --recurse: */ for (int x = optind; x < firstrecurse; x++) { if (unlikely(interrupt)) goto interrupt_exit; jc_slash_convert(argv[x]); loaddir(argv[x], &files, 0); user_item_count++; } /* Set F_RECURSE for directories after --recurse: */ SETFLAG(flags, F_RECURSE); for (int x = firstrecurse; x < argc; x++) { if (unlikely(interrupt)) goto interrupt_exit; jc_slash_convert(argv[x]); loaddir(argv[x], &files, 1); user_item_count++; } } else { for (int x = optind; x < argc; x++) { if (unlikely(interrupt)) goto interrupt_exit; jc_slash_convert(argv[x]); loaddir(argv[x], &files, ISFLAG(flags, F_RECURSE)); user_item_count++; } } /* Abort on CTRL-C (-Z doesn't matter yet) */ if (unlikely(interrupt)) goto interrupt_exit; /* Force a progress update */ if (!ISFLAG(flags, F_HIDEPROGRESS)) update_phase1_progress("items"); /* We don't need the double traversal check tree anymore */ #ifndef NO_TRAVCHECK travcheck_free(NULL); #endif /* NO_TRAVCHECK */ #ifdef DEBUG /* Pass -9 option to exit after traversal/loading code */ if (ISFLAG(flags, F_BENCHMARKSTOP)) { 
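    /* -9 is a DEBUG-only benchmarking aid: this jump skips the matching
     * loop, all post-scan actions, and the hash database save, landing at
     * the skip_all_scan_code label so that traversal/loading speed can be
     * measured in isolation. */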
fprintf(stderr, "\nBenchmarking stop requested; exiting.\n"); goto skip_all_scan_code; } #endif if (ISFLAG(flags, F_REVERSESORT)) sort_direction = -1; if (!ISFLAG(flags, F_HIDEPROGRESS)) fprintf(stderr, "\n"); if (!files) goto skip_file_scan; curfile = files; progress = 0; /* Force an immediate progress update */ if (!ISFLAG(flags, F_HIDEPROGRESS)) jc_alarm_ring = 1; while (curfile) { static file_t **match = NULL; static FILE *file1; static FILE *file2; if (interrupt) { fprintf(stderr, "%s", s_interrupt); if (!ISFLAG(flags, F_SOFTABORT)) exit(EXIT_FAILURE); interrupt = 0; /* reset interrupt for re-use */ goto skip_file_scan; } LOUD(fprintf(stderr, "\nMAIN: current file: %s\n", curfile->d_name)); if (!checktree) registerfile(&checktree, NONE, curfile); else match = checkmatch(checktree, curfile); /* Byte-for-byte check that a matched pair are actually matched */ if (match != NULL) { /* Quick or partial-only compare will never run confirmmatch() * Also skip match confirmation for hard-linked files * (This set of comparisons is ugly, but quite efficient) */ if ( ISFLAG(flags, F_QUICKCOMPARE) || ISFLAG(flags, F_PARTIALONLY) #ifndef NO_HARDLINKS || (ISFLAG(flags, F_CONSIDERHARDLINKS) && (curfile->inode == (*match)->inode) && (curfile->device == (*match)->device)) #endif ) { LOUD(fprintf(stderr, "MAIN: notice: hard linked, quick, or partial-only match (-H/-Q/-T)\n")); #ifndef NO_MTIME registerpair(match, curfile, (ordertype == ORDER_TIME) ? sort_pairs_by_mtime : sort_pairs_by_filename); #else registerpair(match, curfile, sort_pairs_by_filename); #endif dupecount++; goto skip_full_check; } #ifdef UNICODE if (!M2W(curfile->d_name, wstr)) file1 = NULL; else file1 = _wfopen(wstr, FILE_MODE_RO); #else file1 = fopen(curfile->d_name, FILE_MODE_RO); #endif if (!file1) { LOUD(fprintf(stderr, "MAIN: warning: file1 fopen() failed ('%s')\n", curfile->d_name)); curfile = curfile->next; continue; } #ifdef UNICODE if (!M2W((*match)->d_name, wstr)) file2 = NULL; else file2 = _wfopen(wstr, FILE_MODE_RO); #else file2 = fopen((*match)->d_name, FILE_MODE_RO); #endif if (!file2) { fclose(file1); LOUD(fprintf(stderr, "MAIN: warning: file2 fopen() failed ('%s')\n", (*match)->d_name)); curfile = curfile->next; continue; } if (confirmmatch(file1, file2, curfile->size)) { LOUD(fprintf(stderr, "MAIN: registering matched file pair\n")); #ifndef NO_MTIME registerpair(match, curfile, (ordertype == ORDER_TIME) ? 
sort_pairs_by_mtime : sort_pairs_by_filename);
#else
        registerpair(match, curfile, sort_pairs_by_filename);
#endif
        dupecount++;
      }
      DBG(else hash_fail++;)
      fclose(file1);
      fclose(file2);
    }

skip_full_check:
    curfile = curfile->next;

    check_sigusr1();
    if (jc_alarm_ring != 0) {
      jc_alarm_ring = 0;
      update_phase2_progress(NULL, -1);
    }
    progress++;
  }

  if (!ISFLAG(flags, F_HIDEPROGRESS)) fprintf(stderr, "\r%60s\r", " ");

skip_file_scan:
  /* Stop catching CTRL+C and firing alarms */
  signal(SIGINT, SIG_DFL);
  if (!ISFLAG(flags, F_HIDEPROGRESS)) jc_stop_alarm();
  if (files == NULL) {
    printf("%s", s_no_dupes);
    exit(exit_status);
  }

#ifndef NO_DELETE
  if (ISFLAG(a_flags, FA_DELETEFILES)) {
    if (ISFLAG(flags, F_NOPROMPT)) deletefiles(files, 0, 0);
    else deletefiles(files, 1, stdin);
  }
#endif /* NO_DELETE */
#ifndef NO_SYMLINKS
  if (ISFLAG(a_flags, FA_MAKESYMLINKS)) linkfiles(files, 0, 0);
#endif /* NO_SYMLINKS */
#ifndef NO_HARDLINKS
  if (ISFLAG(a_flags, FA_HARDLINKFILES)) linkfiles(files, 1, 0);
#endif /* NO_HARDLINKS */
#ifdef ENABLE_DEDUPE
  if (ISFLAG(a_flags, FA_DEDUPEFILES)) dedupefiles(files);
#endif /* ENABLE_DEDUPE */
  if (ISFLAG(a_flags, FA_PRINTMATCHES)) printmatches(files);
  if (ISFLAG(a_flags, FA_PRINTUNIQUE)) printunique(files);
#ifndef NO_JSON
  if (ISFLAG(a_flags, FA_PRINTJSON)) printjson(files, argc, argv);
#endif /* NO_JSON */
  if (ISFLAG(a_flags, FA_SUMMARIZEMATCHES)) {
    if (ISFLAG(a_flags, FA_PRINTMATCHES)) printf("\n\n");
    summarizematches(files);
  }

#ifndef NO_HASHDB
  if (ISFLAG(flags, F_HASHDB)) {
    hdbout = save_hash_database(hashdb_name, 1);
    if (!ISFLAG(flags, F_HIDEPROGRESS)) {
      if (hdbout > 0) fprintf(stderr, "Wrote %" PRIu64 " entries to the hash database\n", hdbout);
      else fprintf(stderr, "Hash database is OK (no changes)\n");
    }
  }
  if (hashdb_name != NULL) free(hashdb_name);
#endif

#ifdef DEBUG
skip_all_scan_code:
#endif

#ifdef DEBUG
  if (ISFLAG(flags, F_DEBUG)) {
    fprintf(stderr, "\n%d partial(%uKiB) (+%d small) -> %d full hash -> %d full (%d partial elim) (%d hash%u fail)\n",
        partial_hash, PARTIAL_HASH_SIZE >> 10, small_file, full_hash,
        partial_to_full, partial_elim, hash_fail, (unsigned int)sizeof(uint64_t)*8);
    fprintf(stderr, "%" PRIuMAX " total files, %" PRIuMAX " comparisons\n", filecount, comparisons);
#ifndef NO_CHUNKSIZE
    if (manual_chunk_size > 0) fprintf(stderr, "I/O chunk size: %ld KiB (manually set)\n", manual_chunk_size >> 10);
    else {
#ifdef __linux__
      fprintf(stderr, "I/O chunk size: %" PRIuMAX " KiB (%s)\n", (uintmax_t)(auto_chunk_size >> 10),
          (pci.l1 + pci.l1d) != 0 ? "dynamically sized" : "default size");
#else
      fprintf(stderr, "I/O chunk size: %" PRIuMAX " KiB (default size)\n", (uintmax_t)(auto_chunk_size >> 10));
#endif /* __linux__ */
    }
#endif /* NO_CHUNKSIZE */
#ifdef ON_WINDOWS
 #ifndef NO_HARDLINKS
    if (ISFLAG(a_flags, FA_HARDLINKFILES))
      fprintf(stderr, "Exclusions based on Windows hard link limit: %u\n", hll_exclude);
 #endif
#endif
  }
#endif /* DEBUG */

  exit(exit_status);

error_optarg:
  fprintf(stderr, "error: option '%c' requires an argument\n", opt);
  exit(EXIT_FAILURE);
#ifndef NO_HASHDB
error_load_hashdb:
  free(hashdb_name);
  exit(EXIT_FAILURE);
#endif
interrupt_exit:
  fprintf(stderr, "%s", s_interrupt);
  exit(EXIT_FAILURE);
}
jdupes-1.27.3/jdupes.h000066400000000000000000000153441447252140200145510ustar00rootroot00000000000000/* jdupes main program header
 * See jdupes.c for license information */

#ifndef JDUPES_H
#define JDUPES_H

#ifdef __cplusplus
extern "C" {
#endif

/* Detect Windows and modify as needed */
#if defined _WIN32 || defined __MINGW32__
 #ifndef ON_WINDOWS
  #define ON_WINDOWS 1
 #endif
 #define NO_SYMLINKS 1
 #define NO_PERMS 1
 #define NO_SIGACTION 1
 #ifndef WIN32_LEAN_AND_MEAN
  #define WIN32_LEAN_AND_MEAN
 #endif
 #include <windows.h>
 #include <io.h>
#endif /* Win32 */

#include <limits.h>
#include <stdint.h>
#include <sys/types.h>

/* Some types are different on Windows */
#if defined _WIN32 || defined __MINGW32__
 typedef uint64_t jdupes_ino_t;
 typedef uint32_t jdupes_mode_t;
 #ifdef UNICODE
  extern const wchar_t *FILE_MODE_RO;
 #else
  extern const char *FILE_MODE_RO;
 #endif /* UNICODE */
#else /* Not Windows */
 #include <sys/stat.h>
 typedef ino_t jdupes_ino_t;
 typedef mode_t jdupes_mode_t;
 extern const char *FILE_MODE_RO;
 #ifdef UNICODE
  #error Do not define UNICODE on non-Windows platforms.
  #undef UNICODE
 #endif
#endif /* _WIN32 || __MINGW32__ */

/* Windows + Unicode compilation */
#ifdef UNICODE
 #ifndef PATHBUF_SIZE
  #ifndef WPATH_MAX
   #define WPATH_MAX 8192
  #endif
  #define PATHBUF_SIZE WPATH_MAX
 #else
  #ifndef WPATH_MAX
   #define WPATH_MAX PATHBUF_SIZE
  #endif
 #endif /* PATHBUF_SIZE */
 typedef wchar_t wpath_t[WPATH_MAX];
 #define M2W(a,b) MultiByteToWideChar(CP_UTF8, 0, a, -1, (LPWSTR)b, WPATH_MAX)
 #define W2M(a,b) WideCharToMultiByte(CP_UTF8, 0, a, -1, (LPSTR)b, WPATH_MAX, NULL, NULL)
 extern wpath_t wstr;
#endif /* UNICODE */

/* Maximum path buffer size to use; must be large enough for a path plus
 * any work that might be done to the array it's stored in. PATH_MAX is
 * not always true.
Read this article on the false promises of PATH_MAX: * http://insanecoding.blogspot.com/2007/11/pathmax-simply-isnt.html * Windows + Unicode needs a lot more space than UTF-8 in Linux/Mac OS X */ #ifndef PATHBUF_SIZE #define PATHBUF_SIZE 4096 #endif /* Complain if PATHBUF_SIZE is too small */ #if PATHBUF_SIZE < PATH_MAX #if !defined LOW_MEMORY && !defined BARE_BONES #warning "PATHBUF_SIZE is less than PATH_MAX" #endif #endif /* Debugging stats */ #ifdef DEBUG extern unsigned int small_file, partial_hash, partial_elim; extern unsigned int full_hash, partial_to_full, hash_fail; extern uintmax_t comparisons; #ifdef ON_WINDOWS #ifndef NO_HARDLINKS extern unsigned int hll_exclude; #endif #endif #endif /* DEBUG */ #define ISFLAG(a,b) ((a & b) == b) #define SETFLAG(a,b) (a |= b) #define CLEARFLAG(a,b) (a &= (~b)) /* Chunk sizing */ #ifndef CHUNK_SIZE #define CHUNK_SIZE 65536 #endif #ifndef NO_CHUNKSIZE extern size_t auto_chunk_size; /* Larger chunk size makes large files process faster but uses more RAM */ #define MIN_CHUNK_SIZE 4096 #define MAX_CHUNK_SIZE 1048576 * 256 #else /* If automatic chunk sizing is disabled, just use a fixed value */ #define auto_chunk_size CHUNK_SIZE #endif /* NO_CHUNKSIZE */ /* Low memory option overrides */ #ifdef LOW_MEMORY #ifndef NO_PERMS #define NO_PERMS 1 #endif #endif /* Aggressive verbosity for deep debugging */ #ifdef LOUD_DEBUG #ifndef DEBUG #define DEBUG #endif #define LOUD(...) if ISFLAG(flags, F_LOUD) __VA_ARGS__ #else #define LOUD(a) #endif /* Compile out debugging stat counters unless requested */ #ifdef DEBUG #define DBG(a) a #else #define DBG(a) #endif /* Compare two hashes like memcmp() */ #define HASH_COMPARE(a,b) ((a > b) ? 1:((a == b) ? 0:-1)) /* Extend an allocation length to the next 64-bit (8-byte) boundary */ #define EXTEND64(a) ((a & 0x7) > 0 ? 
((a & (~0x7)) + 8) : a)

/* Behavior modification flags */
extern uint_fast64_t flags, a_flags, p_flags;
#define F_RECURSE (1ULL << 0)
#define F_HIDEPROGRESS (1ULL << 1)
#define F_SOFTABORT (1ULL << 2)
#define F_FOLLOWLINKS (1ULL << 3)
#define F_INCLUDEEMPTY (1ULL << 4)
#define F_CONSIDERHARDLINKS (1ULL << 5)
#define F_RECURSEAFTER (1ULL << 6)
#define F_NOPROMPT (1ULL << 7)
#define F_EXCLUDEHIDDEN (1ULL << 8)
#define F_PERMISSIONS (1ULL << 9)
#define F_EXCLUDESIZE (1ULL << 10)
#define F_QUICKCOMPARE (1ULL << 11)
#define F_USEPARAMORDER (1ULL << 12)
#define F_REVERSESORT (1ULL << 13)
#define F_ISOLATE (1ULL << 14)
#define F_ONEFS (1ULL << 15)
#define F_PARTIALONLY (1ULL << 16)
#define F_NOCHANGECHECK (1ULL << 17)
#define F_NOTRAVCHECK (1ULL << 18)
#define F_SKIPHASH (1ULL << 19)
#define F_BENCHMARKSTOP (1ULL << 29)
#define F_HASHDB (1ULL << 30)
#define F_LOUD (1ULL << 62)
#define F_DEBUG (1ULL << 63)

/* Action-related flags */
#define FA_PRINTMATCHES (1U << 0)
#define FA_PRINTUNIQUE (1U << 1)
#define FA_OMITFIRST (1U << 2)
#define FA_SUMMARIZEMATCHES (1U << 3)
#define FA_DELETEFILES (1U << 4)
#define FA_SHOWSIZE (1U << 5)
#define FA_HARDLINKFILES (1U << 6)
#define FA_DEDUPEFILES (1U << 7)
#define FA_MAKESYMLINKS (1U << 8)
#define FA_PRINTNULL (1U << 9)
#define FA_PRINTJSON (1U << 10)
#define FA_ERRORONDUPE (1U << 11)

/* Per-file true/false flags */
#define FF_VALID_STAT (1U << 0)
#define FF_HASH_PARTIAL (1U << 1)
#define FF_HASH_FULL (1U << 2)
#define FF_HAS_DUPES (1U << 3)
#define FF_IS_SYMLINK (1U << 4)
#define FF_NOT_UNIQUE (1U << 5)
#define FF_HASHDB_DIRTY (1U << 6)

/* Extra print flags */
#define PF_PARTIAL (1U << 0)
#define PF_EARLYMATCH (1U << 1)
#define PF_FULLHASH (1U << 2)

typedef enum {
  ORDER_NAME = 0,
  ORDER_TIME
} ordertype_t;

#ifndef PARTIAL_HASH_SIZE
 #define PARTIAL_HASH_SIZE 4096
#endif

/* Per-file information */
typedef struct _file {
  struct _file *duplicates;
  struct _file *next;
  char *d_name;
  uint64_t filehash_partial;
  uint64_t filehash;
  jdupes_ino_t inode;
  off_t size;
#ifndef NO_MTIME
  time_t mtime;
#endif
  dev_t device;
  uint32_t flags;  /* Status flags */
  jdupes_mode_t mode;
#ifndef NO_ATIME
  time_t atime;
#endif
#ifndef NO_USER_ORDER
  unsigned int user_order;  /* Order of the originating command-line parameter */
#endif
#ifndef NO_HARDLINKS
 #ifdef ON_WINDOWS
  uint32_t nlink;  /* link count on Windows is always a DWORD */
 #else
  nlink_t nlink;
 #endif /* ON_WINDOWS */
#endif
#ifndef NO_PERMS
  uid_t uid;
  gid_t gid;
#endif
} file_t;

typedef struct _filetree {
  file_t *file;
  struct _filetree *left;
  struct _filetree *right;
} filetree_t;

/* This gets used in many functions */
#ifdef ON_WINDOWS
extern struct jc_winstat s;
 #define STAT jc_win_stat
#else
extern struct stat s;
 #define STAT stat
#endif

/* Progress indicator variables */
extern uintmax_t filecount, progress, item_progress, dupecount;

extern int hash_algo;
extern unsigned int user_item_count;
extern int sort_direction;
extern char tempname[];
extern const char *feature_flags[];
extern const char *s_no_dupes;
extern int exit_status;

int file_has_changed(file_t * const restrict file);

#ifdef __cplusplus
}
#endif

#endif /* JDUPES_H */
jdupes-1.27.3/libjodycode_check.c000066400000000000000000000114561447252140200166760ustar00rootroot00000000000000/* libjodycode version checks
 *
 * Code to embed the libjodycode version info and check against the currently
 * linked libjodycode to check for and report incompatibilities
 *
 * Copyright (C) 2023 by Jody Bruchon <jody@jodybruchon.com>
 * Licensed under The MIT License */

#include <stdio.h>
#include <stdlib.h>
#include <libjodycode.h>
#include
"libjodycode_check.h" #include "libjodycode_check_defs.h" #ifdef JC_TEST #define JC_TEST_ONLY(a) a #else #define JC_TEST_ONLY(a) #endif const char *jc_build_version = LIBJODYCODE_VER; const int jc_build_api_version = LIBJODYCODE_API_VERSION; const int jc_build_api_featurelevel = LIBJODYCODE_API_FEATURE_LEVEL; const int jc_build_min_featurelevel = MY_FEATURELEVEL_REQ; /* API sub-version info array, terminated with 255 * The user-defined part has moved to libjodycode_check_defs.h * Do not edit this file, edit that one instead! */ /* Build the array data using user definitions */ #if MY_CACHEINFO_REQ == 255 #undef MY_CACHEINFO_REQ #define MY_CACHEINFO_REQ LIBJODYCODE_CACHEINFO_VER #endif #if MY_JODY_HASH_REQ == 255 #undef MY_JODY_HASH_REQ #define MY_JODY_HASH_REQ LIBJODYCODE_JODY_HASH_VER #endif #if MY_OOM_REQ == 255 #undef MY_OOM_REQ #define MY_OOM_REQ LIBJODYCODE_OOM_VER #endif #if MY_PATHS_REQ == 255 #undef MY_PATHS_REQ #define MY_PATHS_REQ LIBJODYCODE_PATHS_VER #endif #if MY_SIZE_SUFFIX_REQ == 255 #undef MY_SIZE_SUFFIX_REQ #define MY_SIZE_SUFFIX_REQ LIBJODYCODE_SIZE_SUFFIX_VER #endif #if MY_SORT_REQ == 255 #undef MY_SORT_REQ #define MY_SORT_REQ LIBJODYCODE_SORT_VER #endif #if MY_STRING_REQ == 255 #undef MY_STRING_REQ #define MY_STRING_REQ LIBJODYCODE_STRING_VER #endif #if MY_STRTOEPOCH_REQ == 255 #undef MY_STRTOEPOCH_REQ #define MY_STRTOEPOCH_REQ LIBJODYCODE_STRTOEPOCH_VER #endif #if MY_WIN_STAT_REQ == 255 #undef MY_WIN_STAT_REQ #define MY_WIN_STAT_REQ LIBJODYCODE_WIN_STAT_VER #endif #if MY_WIN_UNICODE_REQ == 255 #undef MY_WIN_UNICODE_REQ #define MY_WIN_UNICODE_REQ LIBJODYCODE_WIN_UNICODE_VER #endif #if MY_ERROR_REQ == 255 #undef MY_ERROR_REQ #define MY_ERROR_REQ LIBJODYCODE_ERROR_VER #endif #if MY_ALARM_REQ == 255 #undef MY_ALARM_REQ #define MY_ALARM_REQ LIBJODYCODE_ALARM_VER #endif const unsigned char jc_build_api_versiontable[] = { MY_CACHEINFO_REQ, MY_JODY_HASH_REQ, MY_OOM_REQ, MY_PATHS_REQ, MY_SIZE_SUFFIX_REQ, MY_SORT_REQ, MY_STRING_REQ, MY_STRTOEPOCH_REQ, MY_WIN_STAT_REQ, MY_WIN_UNICODE_REQ, MY_ERROR_REQ, MY_ALARM_REQ, 255 }; const char *jc_versiontable_section[] = { "cacheinfo", "jody_hash", "oom", "paths", "size_suffix", "sort", "string", "strtoepoch", "win_stat", "win_unicode", "error", "alarm", NULL }; int libjodycode_version_check(int verbose, int bail) { const unsigned char * const restrict build = jc_build_api_versiontable; const unsigned char * const restrict lib = jc_api_versiontable; int i = 0; JC_TEST_ONLY(if (verbose > 1) fprintf(stderr, "libjodycode version check test code\n\n");) /* Force a version dump if requested */ while (build[i] != 255) { JC_TEST_ONLY(if (verbose > 1) fprintf(stderr, "API %d: %s: builtin ver %u, lib ver %u\n", i, jc_versiontable_section[i], build[i], lib[i]);) if (build[i] != 0 && (lib[i] == 0 || build[i] != lib[i])) goto incompatible_versiontable; i++; } JC_TEST_ONLY(if (verbose > 1) goto incompatible_versiontable;) return 0; incompatible_versiontable: if (verbose) { fprintf(stderr, "\n==============================================================================\n"); fprintf(stderr, "internal error: libjodycode on this system is an incompatible version\n\n"); fprintf(stderr, "Currently using libjodycode v%s, API %d, feature level %d\n", jc_version, jc_api_version, jc_api_featurelevel); fprintf(stderr, " Built against libjodycode v%s, API %d, feature level %d\n\n", jc_build_version, jc_build_api_version, jc_build_api_featurelevel); if (jc_build_min_featurelevel > jc_build_api_featurelevel) fprintf(stderr, "libjodycode feature level %d is 
required but linked library is level %d\n", jc_build_min_featurelevel, jc_build_api_featurelevel);
    if (lib[i] == 0)
      fprintf(stderr, "API sections are missing in libjodycode; it's probably too old.\n");
    else
      fprintf(stderr, "The first incompatible API section found is '%s' (want v%d, got v%d).\n",
          jc_versiontable_section[i], build[i], lib[i]);
    fprintf(stderr, "==============================================================================\n\n");
    fprintf(stderr, "\nUpdate libjodycode on your system and try again. If you continue to get this\n");
    fprintf(stderr, "error, contact the package or distribution maintainer. If all else fails, send\n");
    fprintf(stderr, "an email to jody@jodybruchon.com for help (but only as a last resort, please.)\n\n");
  }
  if (bail) exit(EXIT_FAILURE);
  return 1;
}

#ifdef JC_TEST
int main(void)
{
  libjodycode_version_check(2, 0);
  return 0;
}
#endif
jdupes-1.27.3/libjodycode_check.h000066400000000000000000000010131447252140200166700ustar00rootroot00000000000000/* libjodycode version check header
 * See libjodycode_check.c for license information */

#ifndef LIBJODYCODE_CHECK_H
#define LIBJODYCODE_CHECK_H

#ifdef __cplusplus
extern "C" {
#endif

extern const char *jc_build_version;
extern const int jc_build_api_version;
extern const int jc_build_api_featurelevel;
extern const int jc_build_min_featurelevel;
extern const unsigned char jc_build_api_versiontable[];

extern int libjodycode_version_check(int verbose, int bail);

#ifdef __cplusplus
}
#endif

#endif /* LIBJODYCODE_CHECK_H */
jdupes-1.27.3/libjodycode_check_defs.h000066400000000000000000000030331447252140200176740ustar00rootroot00000000000000/* libjodycode version checks - user-defined requirements
 *
 * Edit this file to match your libjodycode API/feature level requirements
 *
 * Copyright (C) 2023 by Jody Bruchon <jody@jodybruchon.com>
 * Licensed under The MIT License */

/* Minimum libjodycode feature level required
 * You can copy this from the current libjodycode feature level; however,
 * the ideal number is the lowest number that is still compatible. For
 * example: in level 1 the alarm API sets the "ring" to 1 like a flag
 * while the level 2 alarm API increments "ring" when triggered to tell
 * the application how many alarms have been triggered since reset. For
 * applications that don't care about the number of alarms or that work
 * properly without that info, level 1 minimum is fine; those that rely
 * on the newer behavior must specify a minimum level of 2. */
#define MY_FEATURELEVEL_REQ 1

/* API sub-version requirements
 * For any libjodycode API you use, copy its number from libjodycode.h to
 * this list.
 * To indicate you don't use an API, set it to 0.
 * To auto-fill the API numbers you're building against, set it to 255.
 * Any number not matching libjodycode will cause an error exit.
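 *
 * A short worked example (the version numbers here are hypothetical, not
 * copied from any real libjodycode.h): a program that only calls the sort
 * and string APIs might pin those two and opt out of everything else:
 *
 *   #define MY_SORT_REQ 2       (copied from LIBJODYCODE_SORT_VER)
 *   #define MY_STRING_REQ 1     (copied from LIBJODYCODE_STRING_VER)
 *   #define MY_CACHEINFO_REQ 0  (cacheinfo API not used)
 *
 * jdupes itself uses 255 for every entry below, auto-filling the API
 * versions it is built against.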
 */
#define MY_CACHEINFO_REQ 255
#define MY_JODY_HASH_REQ 255
#define MY_OOM_REQ 255
#define MY_PATHS_REQ 255
#define MY_SIZE_SUFFIX_REQ 255
#define MY_SORT_REQ 255
#define MY_STRING_REQ 255
#define MY_STRTOEPOCH_REQ 255
#define MY_WIN_STAT_REQ 255
#define MY_WIN_UNICODE_REQ 255
#define MY_ERROR_REQ 255
#define MY_ALARM_REQ 255
jdupes-1.27.3/likely_unlikely.h000066400000000000000000000011731447252140200164570ustar00rootroot00000000000000/* likely()/unlikely() macros for branch optimization
 * By Jody Bruchon <jody@jodybruchon.com>
 * Released to the public domain */

#ifndef LIKELY_UNLIKELY_H
#define LIKELY_UNLIKELY_H

#ifdef __cplusplus
extern "C" {
#endif

/* Un-define if already defined */
#if !defined NO_LIKELY_UNLIKELY && (defined __GNUC__ || defined __clang__)
 #ifdef likely
  #undef likely
 #endif
 #ifdef unlikely
  #undef unlikely
 #endif
 #define likely(a) __builtin_expect((a), 1)
 #define unlikely(a) __builtin_expect((a), 0)
#else /* no GCC/Clang */
 #define likely(a) a
 #define unlikely(a) a
#endif

#ifdef __cplusplus
}
#endif

#endif /* LIKELY_UNLIKELY_H */
jdupes-1.27.3/linux-dedupe-static.h000066400000000000000000000011401447252140200171360ustar00rootroot00000000000000/* Bare header for Linux dedupe API */

#ifndef JDUPES_DEDUPESTATIC_H
#define JDUPES_DEDUPESTATIC_H

#include <linux/types.h>
#include <linux/ioctl.h>

#define FILE_DEDUPE_RANGE_SAME 0
#define FILE_DEDUPE_RANGE_DIFFERS 1

struct file_dedupe_range_info {
  __s64 dest_fd;
  __u64 dest_offset;
  __u64 bytes_deduped;
  __s32 status;
  __u32 reserved;
};

struct file_dedupe_range {
  __u64 src_offset;
  __u64 src_length;
  __u16 dest_count;
  __u16 reserved1;
  __u32 reserved2;
  struct file_dedupe_range_info info[0];
};

#define FIDEDUPERANGE _IOWR(0x94, 54, struct file_dedupe_range)

#endif /* JDUPES_DEDUPESTATIC_H */
jdupes-1.27.3/loaddir.c000066400000000000000000000220541447252140200146640ustar00rootroot00000000000000/* jdupes directory scanning code
 * This file is part of jdupes; see jdupes.c for license information */

#include <dirent.h>
#include <inttypes.h>
#include <libjodycode.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include "likely_unlikely.h"
#include "jdupes.h"
#include "checks.h"
#include "filestat.h"
#ifndef NO_HASHDB
 #include "hashdb.h"
#endif
#include "progress.h"
#include "interrupt.h"
#ifndef NO_TRAVCHECK
 #include "travcheck.h"
#endif

#ifdef UNICODE
static wpath_t wname;
#endif

/* Detect Windows and modify as needed */
#if defined _WIN32 || defined __MINGW32__
const char dir_sep = '\\';
#else /* Not Windows */
const char dir_sep = '/';
#endif /* _WIN32 || __MINGW32__ */

static file_t *init_newfile(const size_t len, file_t * restrict * const restrict filelistp)
{
  file_t * const restrict newfile = (file_t *)malloc(sizeof(file_t));

  if (unlikely(!newfile)) jc_oom("init_newfile() file structure");
  if (unlikely(!filelistp)) jc_nullptr("init_newfile() filelistp");

  LOUD(fprintf(stderr, "init_newfile(len %" PRIuMAX ", filelistp %p)\n", (uintmax_t)len, filelistp));

  memset(newfile, 0, sizeof(file_t));
  newfile->d_name = (char *)malloc(EXTEND64(len));
  if (!newfile->d_name) jc_oom("init_newfile() filename");

  newfile->next = *filelistp;
#ifndef NO_USER_ORDER
  newfile->user_order = user_item_count;
#endif
  newfile->size = -1;
  newfile->duplicates = NULL;
  return newfile;
}

/* This is disabled until a check is in place to make it safe */
#if 0
/* Add a single file to the file tree */
file_t *grokfile(const char * const restrict name, file_t * restrict * const restrict filelistp)
{
  file_t * restrict newfile;

  if (!name || !filelistp) jc_nullptr("grokfile()");
  LOUD(fprintf(stderr, "grokfile: '%s' %p\n", name, filelistp));

  /* Allocate the file_t and the
d_name entries */ newfile = init_newfile(strlen(name) + 2, filelistp); strcpy(newfile->d_name, name); /* Single-file [l]stat() and exclusion condition check */ if (check_singlefile(newfile) != 0) { LOUD(fprintf(stderr, "grokfile: check_singlefile rejected file\n")); free(newfile->d_name); free(newfile); return NULL; } return newfile; } #endif /* Load a directory's contents into the file tree, recursing as needed */ void loaddir(const char * const restrict dir, file_t * restrict * const restrict filelistp, int recurse) { file_t * restrict newfile; struct dirent *dirinfo; size_t dirlen, dirpos; int i, single = 0; jdupes_ino_t inode; dev_t device, n_device; jdupes_mode_t mode; #ifdef UNICODE WIN32_FIND_DATA ffd; HANDLE hFind = INVALID_HANDLE_VALUE; char *p; #else DIR *cd; #endif static int sf_warning = 0; /* single file warning should only appear once */ if (unlikely(dir == NULL || filelistp == NULL)) jc_nullptr("loaddir()"); LOUD(fprintf(stderr, "loaddir: scanning '%s' (order %d, recurse %d)\n", dir, user_item_count, recurse)); if (interrupt) return; /* Get directory stats (or file stats if it's a file) */ i = getdirstats(dir, &inode, &device, &mode); if (unlikely(i < 0)) goto error_stat_dir; /* if dir is actually a file, just add it to the file tree */ if (i == 1) { /* Single file addition is disabled for now because there is no safeguard * against the file being compared against itself if it's added in both a * recursion and explicitly on the command line. */ #if 0 LOUD(fprintf(stderr, "loaddir -> grokfile '%s'\n", dir)); newfile = grokfile(dir, filelistp); if (newfile == NULL) { LOUD(fprintf(stderr, "grokfile rejected '%s'\n", dir)); return; } single = 1; goto add_single_file; #endif if (sf_warning == 0) { fprintf(stderr, "\nFile specs on command line disabled in this version for safety\n"); fprintf(stderr, "This should be restored (and safe) in a future release\n"); fprintf(stderr, "More info at jdupes.com or email jody@jodybruchon.com\n"); sf_warning = 1; } return; /* Remove when single file is restored */ } /* Double traversal prevention tree */ #ifndef NO_TRAVCHECK if (likely(!ISFLAG(flags, F_NOTRAVCHECK))) { i = traverse_check(device, inode); if (unlikely(i == 1)) return; if (unlikely(i == 2)) goto error_stat_dir; } #endif /* NO_TRAVCHECK */ item_progress++; #ifdef UNICODE /* Windows requires \* at the end of directory names */ strncpy(tempname, dir, PATHBUF_SIZE * 2 - 1); p = tempname + strlen(tempname) - 1; if (*p == '/' || *p == '\\') *p = '\0'; strncat(tempname, "\\*", PATHBUF_SIZE * 2 - 1); if (unlikely(!M2W(tempname, wname))) goto error_cd; LOUD(fprintf(stderr, "FindFirstFile: %s\n", dir)); hFind = FindFirstFileW(wname, &ffd); if (unlikely(hFind == INVALID_HANDLE_VALUE)) { LOUD(fprintf(stderr, "\nfile handle bad\n")); goto error_cd; } dirlen = strlen(dir); LOUD(fprintf(stderr, "Loop start\n")); do { char * restrict tp = tempname; size_t d_name_len; /* Get necessary length and allocate d_name */ dirinfo = (struct dirent *)malloc(sizeof(struct dirent)); if (!W2M(ffd.cFileName, dirinfo->d_name)) continue; #else cd = opendir(dir); if (unlikely(!cd)) goto error_cd; dirlen = strlen(dir); while ((dirinfo = readdir(cd)) != NULL) { char * restrict tp = tempname; size_t d_name_len; #endif /* UNICODE */ LOUD(fprintf(stderr, "loaddir: readdir: '%s'\n", dirinfo->d_name)); if (unlikely(!jc_streq(dirinfo->d_name, ".") || !jc_streq(dirinfo->d_name, ".."))) continue; check_sigusr1(); if (jc_alarm_ring != 0) { jc_alarm_ring = 0; update_phase1_progress("dirs"); } /* Assemble the file's full path 
name, optimized to avoid strcat() */ dirpos = dirlen; d_name_len = strlen(dirinfo->d_name); memcpy(tp, dir, dirpos + 1); if (dirpos != 0 && tp[dirpos - 1] != dir_sep) { tp[dirpos] = dir_sep; dirpos++; } if (unlikely(dirpos + d_name_len + 1 >= (PATHBUF_SIZE * 2))) goto error_overflow; tp += dirpos; memcpy(tp, dirinfo->d_name, d_name_len); tp += d_name_len; *tp = '\0'; d_name_len++; /* Allocate the file_t and the d_name entries */ newfile = init_newfile(dirpos + d_name_len + 2, filelistp); tp = tempname; memcpy(newfile->d_name, tp, dirpos + d_name_len); /*** WARNING: tempname global gets reused by check_singlefile here! ***/ /* Single-file [l]stat() and exclusion condition check */ if (check_singlefile(newfile) != 0) { LOUD(fprintf(stderr, "loaddir: check_singlefile rejected file\n")); free(newfile->d_name); free(newfile); continue; } /* Optionally recurse directories, including symlinked ones if requested */ if (S_ISDIR(newfile->mode)) { if (recurse) { /* --one-file-system - WARNING: this clobbers inode/mode */ if (ISFLAG(flags, F_ONEFS) && (getdirstats(newfile->d_name, &inode, &n_device, &mode) == 0) && (device != n_device)) { LOUD(fprintf(stderr, "loaddir: directory: not recursing (--one-file-system)\n")); free(newfile->d_name); free(newfile); continue; } #ifndef NO_SYMLINKS else if (ISFLAG(flags, F_FOLLOWLINKS) || !ISFLAG(newfile->flags, FF_IS_SYMLINK)) { LOUD(fprintf(stderr, "loaddir: directory(symlink): recursing (-r/-R)\n")); loaddir(newfile->d_name, filelistp, recurse); } #else else { LOUD(fprintf(stderr, "loaddir: directory: recursing (-r/-R)\n")); loaddir(newfile->d_name, filelistp, recurse); } #endif /* NO_SYMLINKS */ } else { LOUD(fprintf(stderr, "loaddir: directory: not recursing\n")); } free(newfile->d_name); free(newfile); if (unlikely(interrupt)) return; continue; } else { //add_single_file: /* Add regular files to list, including symlink targets if requested */ #ifndef NO_SYMLINKS if (!ISFLAG(newfile->flags, FF_IS_SYMLINK) || (ISFLAG(newfile->flags, FF_IS_SYMLINK) && ISFLAG(flags, F_FOLLOWLINKS))) { #else if (S_ISREG(newfile->mode)) { #endif #ifndef NO_HASHDB if (ISFLAG(flags, F_HASHDB)) read_hashdb_entry(newfile); #endif *filelistp = newfile; filecount++; progress++; } else { LOUD(fprintf(stderr, "loaddir: not a regular file: %s\n", newfile->d_name);) free(newfile->d_name); free(newfile); if (single == 1) { single = 0; goto skip_single; } continue; } } /* Skip directory stuff if adding only a single file */ if (single == 1) { single = 0; goto skip_single; } } #ifdef UNICODE while (FindNextFileW(hFind, &ffd) != 0); FindClose(hFind); #else closedir(cd); #endif skip_single: return; error_stat_dir: fprintf(stderr, "\ncould not stat dir "); jc_fwprint(stderr, dir, 1); exit_status = EXIT_FAILURE; return; error_cd: fprintf(stderr, "\ncould not chdir to "); jc_fwprint(stderr, dir, 1); exit_status = EXIT_FAILURE; return; error_overflow: fprintf(stderr, "\nerror: a path overflowed (longer than PATHBUF_SIZE) cannot continue\n"); exit(EXIT_FAILURE); } jdupes-1.27.3/loaddir.h000066400000000000000000000007201447252140200146650ustar00rootroot00000000000000/* jdupes directory scanning code * This file is part of jdupes; see jdupes.c for license information */ #ifndef JDUPES_LOADDIR_H #define JDUPES_LOADDIR_H #ifdef __cplusplus extern "C" { #endif //file_t *grokfile(const char * const restrict name, file_t * restrict * const restrict filelistp); void loaddir(const char * const restrict dir, file_t * restrict * const restrict filelistp, int recurse); #ifdef __cplusplus } #endif #endif /* 
JDUPES_LOADDIR_H */
jdupes-1.27.3/match.c000066400000000000000000000267701447252140200143510ustar00rootroot00000000000000/* jdupes file matching functions
 * This file is part of jdupes; see jdupes.c for license information */

#ifdef __linux__
 #include <fcntl.h>
#endif
#include <libjodycode.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "jdupes.h"
#include "likely_unlikely.h"
#include "checks.h"
#include "filehash.h"
#ifndef NO_HASHDB
 #include "hashdb.h"
#endif
#include "interrupt.h"
#include "match.h"
#include "progress.h"

void registerpair(file_t **matchlist, file_t *newmatch, int (*comparef)(file_t *f1, file_t *f2))
{
  file_t *traverse;
  file_t *back;

  /* NULL pointer sanity checks */
  if (unlikely(matchlist == NULL || newmatch == NULL || comparef == NULL)) jc_nullptr("registerpair()");
  LOUD(fprintf(stderr, "registerpair: '%s', '%s'\n", (*matchlist)->d_name, newmatch->d_name);)

#ifndef NO_ERRORONDUPE
  if (ISFLAG(a_flags, FA_ERRORONDUPE)) {
    if (!ISFLAG(flags, F_HIDEPROGRESS)) fprintf(stderr, "\r");
    fprintf(stderr, "Exiting based on user request (-e); duplicates found:\n");
    printf("%s\n%s\n", (*matchlist)->d_name, newmatch->d_name);
    exit(255);
  }
#endif

  SETFLAG((*matchlist)->flags, FF_HAS_DUPES);
  back = NULL;
  traverse = *matchlist;

  /* FIXME: This needs to be changed! As it currently stands, the compare
   * function only runs on a pair as it is registered and future pairs can
   * mess up the sort order. A separate sorting function should happen before
   * the dupe chain is acted upon rather than while pairs are registered. */
  while (traverse) {
    if (comparef(newmatch, traverse) <= 0) {
      newmatch->duplicates = traverse;
      if (!back) {
        *matchlist = newmatch;  /* update pointer to head of list */
        SETFLAG(newmatch->flags, FF_HAS_DUPES);
        CLEARFLAG(traverse->flags, FF_HAS_DUPES);  /* flag is only for first file in dupe chain */
      } else back->duplicates = newmatch;
      break;
    } else {
      if (traverse->duplicates == 0) {
        traverse->duplicates = newmatch;
        if (!back) SETFLAG(traverse->flags, FF_HAS_DUPES);
        break;
      }
    }
    back = traverse;
    traverse = traverse->duplicates;
  }
  return;
}

void registerfile(filetree_t * restrict * const restrict nodeptr, const enum tree_direction d, file_t * const restrict file)
{
  filetree_t * restrict branch;

  if (unlikely(nodeptr == NULL || file == NULL || (d != NONE && *nodeptr == NULL))) jc_nullptr("registerfile()");
  LOUD(fprintf(stderr, "registerfile(direction %d)\n", d));

  /* Allocate and initialize a new node for the file */
  branch = (filetree_t *)malloc(sizeof(filetree_t));
  if (unlikely(branch == NULL)) jc_oom("registerfile() branch");
  branch->file = file;
  branch->left = NULL;
  branch->right = NULL;

  /* Attach the new node to the requested branch */
  switch (d) {
    case LEFT:
      (*nodeptr)->left = branch;
      break;
    case RIGHT:
      (*nodeptr)->right = branch;
      break;
    case NONE:
      /* For the root of the tree only */
      *nodeptr = branch;
      break;
    default:
      /* This should never ever happen */
      fprintf(stderr, "\ninternal error: invalid direction for registerfile(), report this\n");
      exit(EXIT_FAILURE);
      break;
  }

  return;
}

/* Check two files for a match */
file_t **checkmatch(filetree_t * restrict tree, file_t * const restrict file)
{
  int cmpresult = 0;
  int cantmatch = 0;
  const uint64_t * restrict filehash;

  if (unlikely(tree == NULL || file == NULL || tree->file == NULL || tree->file->d_name == NULL || file->d_name == NULL)) jc_nullptr("checkmatch()");
  LOUD(fprintf(stderr, "checkmatch ('%s', '%s')\n", tree->file->d_name, file->d_name));

  /* If device and inode fields are equal one of the files is a
   * hard link to the other or the files have been listed twice
   *
unintentionally. We don't want to flag these files as * duplicates unless the user specifies otherwise. */ /* Count the total number of comparisons requested */ DBG(comparisons++;) /* If considering hard linked files as duplicates, they are * automatically duplicates without being read further since * they point to the exact same inode. If we aren't considering * hard links as duplicates, we just return NULL. */ cmpresult = check_conditions(tree->file, file); switch (cmpresult) { case 2: return &tree->file; /* linked files + -H switch */ case -2: return NULL; /* linked files, no -H switch */ case -3: /* user order */ case -4: /* one filesystem */ case -5: /* permissions */ cantmatch = 1; cmpresult = 0; break; default: break; } /* If preliminary matching succeeded, do main file data checks */ if (cmpresult == 0) { /* Print pre-check (early) match candidates if requested */ if (ISFLAG(p_flags, PF_EARLYMATCH)) printf("Early match check passed:\n %s\n %s\n\n", file->d_name, tree->file->d_name); LOUD(fprintf(stderr, "checkmatch: starting file data comparisons\n")); /* Attempt to exclude files quickly with partial file hashing */ if (!ISFLAG(tree->file->flags, FF_HASH_PARTIAL)) { filehash = get_filehash(tree->file, PARTIAL_HASH_SIZE, hash_algo); if (filehash == NULL) return NULL; tree->file->filehash_partial = *filehash; SETFLAG(tree->file->flags, FF_HASH_PARTIAL | FF_HASHDB_DIRTY); } if (!ISFLAG(file->flags, FF_HASH_PARTIAL)) { filehash = get_filehash(file, PARTIAL_HASH_SIZE, hash_algo); if (filehash == NULL) return NULL; file->filehash_partial = *filehash; SETFLAG(file->flags, FF_HASH_PARTIAL | FF_HASHDB_DIRTY); } cmpresult = HASH_COMPARE(file->filehash_partial, tree->file->filehash_partial); LOUD(if (!cmpresult) fprintf(stderr, "checkmatch: partial hashes match\n")); LOUD(if (cmpresult) fprintf(stderr, "checkmatch: partial hashes do not match\n")); DBG(partial_hash++;) /* Print partial hash matching pairs if requested */ if (cmpresult == 0 && ISFLAG(p_flags, PF_PARTIAL)) printf("\nPartial hashes match:\n %s\n %s\n\n", file->d_name, tree->file->d_name); if (file->size <= PARTIAL_HASH_SIZE || ISFLAG(flags, F_PARTIALONLY)) { if (ISFLAG(flags, F_PARTIALONLY)) { LOUD(fprintf(stderr, "checkmatch: partial only mode: treating partial hash as full hash\n")); } else { LOUD(fprintf(stderr, "checkmatch: small file: copying partial hash to full hash\n")); } /* filehash_partial = filehash if file is small enough */ if (!ISFLAG(file->flags, FF_HASH_FULL)) { file->filehash = file->filehash_partial; SETFLAG(file->flags, FF_HASH_FULL | FF_HASHDB_DIRTY); DBG(small_file++;) } if (!ISFLAG(tree->file->flags, FF_HASH_FULL)) { tree->file->filehash = tree->file->filehash_partial; SETFLAG(tree->file->flags, FF_HASH_FULL | FF_HASHDB_DIRTY); DBG(small_file++;) } } else if (cmpresult == 0) { // if (ISFLAG(flags, F_SKIPHASH)) { // LOUD(fprintf(stderr, "checkmatch: skipping full file hashes (F_SKIPMATCH)\n")); // } else { /* If partial match was correct, perform a full file hash match */ if (!ISFLAG(tree->file->flags, FF_HASH_FULL)) { filehash = get_filehash(tree->file, 0, hash_algo); if (filehash == NULL) return NULL; tree->file->filehash = *filehash; SETFLAG(tree->file->flags, FF_HASH_FULL | FF_HASHDB_DIRTY); } if (!ISFLAG(file->flags, FF_HASH_FULL)) { filehash = get_filehash(file, 0, hash_algo); if (filehash == NULL) return NULL; file->filehash = *filehash; SETFLAG(file->flags, FF_HASH_FULL | FF_HASHDB_DIRTY); } /* Full file hash comparison */ cmpresult = HASH_COMPARE(file->filehash, tree->file->filehash); LOUD(if 
(!cmpresult) fprintf(stderr, "checkmatch: full hashes match\n")); LOUD(if (cmpresult) fprintf(stderr, "checkmatch: full hashes do not match\n")); DBG(full_hash++); // } } else { DBG(partial_elim++); } } /* if (cmpresult == 0) */ /* Add to hash database */ #ifndef NO_HASHDB if (ISFLAG(flags, F_HASHDB)) { if (ISFLAG(file->flags, FF_HASHDB_DIRTY)) { CLEARFLAG(file->flags, FF_HASHDB_DIRTY); add_hashdb_entry(NULL, 0, file); } if (ISFLAG(tree->file->flags, FF_HASHDB_DIRTY)) { CLEARFLAG(tree->file->flags, FF_HASHDB_DIRTY); add_hashdb_entry(NULL, 0, tree->file); } } #endif if ((cantmatch != 0) && (cmpresult == 0)) { LOUD(fprintf(stderr, "checkmatch: rejecting because match not allowed (cantmatch = 1)\n")); cmpresult = -1; } /* How the file tree works * * The tree is sorted by size as files arrive. If the files are the same * size, they are possible duplicates and are checked for duplication. * If they are not a match, the hashes are used to decide whether to * continue with the file to the left or the right in the file tree. * If the direction decision points to a leaf node, the duplicate scan * continues down that path; if it points to an empty node, the current * file is attached to the file tree at that point. * * This allows for quickly finding files of the same size by avoiding * tree branches with differing size groups. */ if (cmpresult < 0) { if (tree->left != NULL) { LOUD(fprintf(stderr, "checkmatch: recursing tree: left\n")); return checkmatch(tree->left, file); } else { LOUD(fprintf(stderr, "checkmatch: registering file: left\n")); registerfile(&tree, LEFT, file); return NULL; } } else if (cmpresult > 0) { if (tree->right != NULL) { LOUD(fprintf(stderr, "checkmatch: recursing tree: right\n")); return checkmatch(tree->right, file); } else { LOUD(fprintf(stderr, "checkmatch: registering file: right\n")); registerfile(&tree, RIGHT, file); return NULL; } } else { /* All compares matched */ DBG(partial_to_full++;) LOUD(fprintf(stderr, "checkmatch: files appear to match based on hashes\n")); if (ISFLAG(p_flags, PF_FULLHASH)) printf("Full hashes match:\n %s\n %s\n\n", file->d_name, tree->file->d_name); return &tree->file; } /* Fall through - should never be reached */ return NULL; } /* Do a byte-by-byte comparison in case two different files produce the same signature. Unlikely, but better safe than sorry. 
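 *
 * Call-site sketch (this mirrors the main loop in jdupes.c; FILE_MODE_RO
 * is the read-only fopen() mode string defined near the top of that file):
 *
 *   FILE *f1 = fopen(curfile->d_name, FILE_MODE_RO);
 *   FILE *f2 = fopen((*match)->d_name, FILE_MODE_RO);
 *   if (f1 && f2 && confirmmatch(f1, f2, curfile->size))
 *     registerpair(match, curfile, sort_pairs_by_filename);
 *
 * Returns 1 when every byte matches, 0 on any difference or user interrupt.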
 */
int confirmmatch(FILE * const restrict file1, FILE * const restrict file2, const off_t size)
{
  static char *c1 = NULL, *c2 = NULL;
  size_t r1, r2;
  off_t bytes = 0;

  if (unlikely(file1 == NULL || file2 == NULL)) jc_nullptr("confirmmatch()");
  LOUD(fprintf(stderr, "confirmmatch running\n"));

  /* Allocate on first use; OOM if either is ever NULLed */
  if (!c1) {
    c1 = (char *)malloc(auto_chunk_size);
    c2 = (char *)malloc(auto_chunk_size);
  }
  if (unlikely(!c1 || !c2)) jc_oom("confirmmatch() c1/c2");

  fseek(file1, 0, SEEK_SET);
  fseek(file2, 0, SEEK_SET);

#ifdef __linux__
  posix_fadvise(fileno(file1), 0, size, POSIX_FADV_SEQUENTIAL);
  posix_fadvise(fileno(file1), 0, size, POSIX_FADV_WILLNEED);
  posix_fadvise(fileno(file2), 0, size, POSIX_FADV_SEQUENTIAL);
  posix_fadvise(fileno(file2), 0, size, POSIX_FADV_WILLNEED);
#endif /* __linux__ */

  do {
    if (interrupt) return 0;
    r1 = fread(c1, sizeof(char), auto_chunk_size, file1);
    r2 = fread(c2, sizeof(char), auto_chunk_size, file2);

    if (r1 != r2) return 0;  /* file lengths are different */
    if (memcmp(c1, c2, r1)) return 0;  /* file contents are different */

    bytes += (off_t)r1;
    if (jc_alarm_ring != 0) {
      jc_alarm_ring = 0;
      update_phase2_progress("confirm", (int)((bytes * 100) / size));
    }
  } while (r2);

  return 1;
}
jdupes-1.27.3/match.h000066400000000000000000000014101447252140200143400ustar00rootroot00000000000000/* jdupes file matching functions
 * This file is part of jdupes; see jdupes.c for license information */

#ifndef JDUPES_MATCH_H
#define JDUPES_MATCH_H

#ifdef __cplusplus
extern "C" {
#endif

#include <stdio.h>
#include "jdupes.h"

/* registerfile() direction options */
enum tree_direction { NONE, LEFT, RIGHT };

void registerpair(file_t **matchlist, file_t *newmatch, int (*comparef)(file_t *f1, file_t *f2));
void registerfile(filetree_t * restrict * const restrict nodeptr, const enum tree_direction d, file_t * const restrict file);
file_t **checkmatch(filetree_t * restrict tree, file_t * const restrict file);
int confirmmatch(FILE * const restrict file1, FILE * const restrict file2, const off_t size);

#ifdef __cplusplus
}
#endif

#endif /* JDUPES_MATCH_H */
jdupes-1.27.3/progress.c000066400000000000000000000017311447252140200151110ustar00rootroot00000000000000/* jdupes progress indicator
   see jdupes.c for licensing information */

#include <inttypes.h>
#include <stdio.h>
#include "jdupes.h"
#include "likely_unlikely.h"

void update_phase1_progress(const char * const restrict type)
{
  fprintf(stderr, "\rScanning: %" PRIuMAX " files, %" PRIuMAX " %s (in %u specified)", progress, item_progress, type, user_item_count);
//  fflush(stderr);
}

/* Update progress indicator if requested */
void update_phase2_progress(const char * const restrict msg, const int file_percent)
{
  static int did_fpct = 0;

  fprintf(stderr, "\rProgress [%" PRIuMAX "/%" PRIuMAX ", %" PRIuMAX " pairs matched] %" PRIuMAX "%%", progress, filecount, dupecount, (progress * 100) / filecount);
  if (file_percent > -1 && msg != NULL) {
    fprintf(stderr, " (%s: %d%%) ", msg, file_percent);
    did_fpct = 1;
  } else if (did_fpct != 0) {
    fprintf(stderr, " ");
    did_fpct = 0;
  }
//  fflush(stderr);
  return;
}
jdupes-1.27.3/progress.h000066400000000000000000000006251447252140200151170ustar00rootroot00000000000000/* jdupes progress indicator functions
 * This file is part of jdupes; see jdupes.c for license information */

#ifndef JDUPES_PROGRESS_H
#define JDUPES_PROGRESS_H

#ifdef __cplusplus
extern "C" {
#endif

void update_phase1_progress(const char * const restrict type);
void update_phase2_progress(const char * const restrict msg, const int file_percent);

#ifdef __cplusplus
}
#endif

#endif /* JDUPES_PROGRESS_H */
jdupes-1.27.3/sort.c000066400000000000000000000032311447252140200142310ustar00rootroot00000000000000/* File order sorting functions
 * This file is part of jdupes; see jdupes.c for license information */

#include <libjodycode.h>
#include <stdio.h>
#include <string.h>
#include "likely_unlikely.h"
#include "jdupes.h"

#ifndef NO_USER_ORDER
static int sort_pairs_by_param_order(file_t *f1, file_t *f2)
{
  if (!ISFLAG(flags, F_USEPARAMORDER)) return 0;
  if (unlikely(f1 == NULL || f2 == NULL)) jc_nullptr("sort_pairs_by_param_order()");
  if (f1->user_order < f2->user_order) return -sort_direction;
  if (f1->user_order > f2->user_order) return sort_direction;
  return 0;
}
#endif

#ifndef NO_MTIME
int sort_pairs_by_mtime(file_t *f1, file_t *f2)
{
  if (unlikely(f1 == NULL || f2 == NULL)) jc_nullptr("sort_pairs_by_mtime()");

#ifndef NO_USER_ORDER
  int po = sort_pairs_by_param_order(f1, f2);
  if (po != 0) return po;
#endif /* NO_USER_ORDER */

  if (f1->mtime < f2->mtime) return -sort_direction;
  else if (f1->mtime > f2->mtime) return sort_direction;

#ifndef NO_JODY_SORT
  /* If the mtimes match, use the names to break the tie */
  return jc_numeric_sort(f1->d_name, f2->d_name, sort_direction);
#else
  return strcmp(f1->d_name, f2->d_name) ? -sort_direction : sort_direction;
#endif /* NO_JODY_SORT */
}
#endif

int sort_pairs_by_filename(file_t *f1, file_t *f2)
{
  if (unlikely(f1 == NULL || f2 == NULL)) jc_nullptr("sort_pairs_by_filename()");

#ifndef NO_USER_ORDER
  int po = sort_pairs_by_param_order(f1, f2);
  if (po != 0) return po;
#endif /* NO_USER_ORDER */

#ifndef NO_JODY_SORT
  return jc_numeric_sort(f1->d_name, f2->d_name, sort_direction);
#else
  return strcmp(f1->d_name, f2->d_name) ? -sort_direction : sort_direction;
#endif /* NO_JODY_SORT */
}
jdupes-1.27.3/sort.h000066400000000000000000000006111447252140200142350ustar00rootroot00000000000000/* File order sorting functions
 * This file is part of jdupes; see jdupes.c for license information */

#ifndef JDUPES_SORT_H
#define JDUPES_SORT_H

#ifdef __cplusplus
extern "C" {
#endif

#include "jdupes.h"

#ifndef NO_MTIME
int sort_pairs_by_mtime(file_t *f1, file_t *f2);
#endif
int sort_pairs_by_filename(file_t *f1, file_t *f2);

#ifdef __cplusplus
}
#endif

#endif /* JDUPES_SORT_H */
jdupes-1.27.3/stupid_dupes.sh000077500000000000000000000277161447252140200161550ustar00rootroot00000000000000#!/bin/bash

# stupid_dupes: find duplicates like jdupes but more slowly with a shell script
# Copyright (C) 2020-2023 by Jody Bruchon <jody@jodybruchon.com>
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

#########################################
#              HOW IT WORKS             #
#########################################
#
# This script loads each file into an array of file paths, then compares every
# file against every other file, using various tricks to discard candidates as
# quickly as possible without reading and comparing entire files. These include
# skipping pairs with mismatched file sizes, and hashing only the first 4K
# block of each file and comparing the partial hashes.
#
# Every file is referred to by its index number. Since GNU Bash can use arrays
# but doesn't have anything remotely like a C structure to conveniently pack
# a bunch of related variables together, C structures are simulated with
# the array index number used as a "pointer." For example, a doubly-linked list
# in C is pretty easy to declare:
#
# struct match { struct match *left; struct match *right; }
#
# And then an array of these matches: struct match matchlist[MAX_MATCHES];
#
# Using arrays, we simulate this (e.g. with file index 15, match 2):
#
# MLEFT[2]  = index number of "left" file in match chain
# MRIGHT[2] = index number of "right" file in match chain
#
# FILES[15] = file path for file #15, referenced by one of the above items
# SIZES[15] = file size for file #15
# PHASH[15] = the 4K partial file hash for file #15
# FHASH[15] = the full file hash for file #15
#
# The basic algorithm is: verify size match, verify partial hash match, verify
# full hash match, then verify the files match byte-for-byte to be sure.
#
# There is some extra code to check for match pairing that is doubled up, and
# a "processed" flag to prevent double processing of files.
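#########################################
#          ILLUSTRATIVE EXAMPLE         #
#########################################
#
# A minimal sketch of the convention described above. This helper is NOT
# called anywhere in this script; it only demonstrates how one match pair's
# simulated "struct" is read back by using the stored index numbers as
# "pointers" into the parallel per-file arrays.
demo_show_pair () {
  # $1: match pair number to display
  local L=${MLEFT[$1]} R=${MRIGHT[$1]}
  echo "pair $1: '${FILES[$L]}' == '${FILES[$R]}' (${SIZES[$L]} bytes)" >&2
}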
PROGNAME=stupid_dupes.sh
VER=1.1
VERDATE=2021-01-21

V=1        # Verbosity
AC=0       # Argument count
PHS=4096   # Partial hash size
FQUICK=0   # Quick (no final compare) mode
FICNT=0    # File index counter
MSCNT=0    # Match set counter
STATUS=0   # Exit status

# A hash command that outputs a plain file hash (no file names)
test -z "$HASHCMD" && HASHCMD=jodyhash

# 'find' defaults to no-recurse
FRECURSE="-maxdepth 1"

# sort option (cat = none)
test -z "$SORTCMD" && SORTCMD="cat"


### Function definitions

# $1: file path to add
add_file () {
  ((V > 1)) && echo "add_file: '$1'" >&2
  SZ="$(stat -c '%s' "$1" || echo FAIL)"
  if [ "$SZ" = "FAIL" ]
  then
    echo "error: add_file: can't stat '$1'" >&2
    STATUS=1
    return
  fi
  ((FICNT += 1))
  FILES[FICNT]="$1"
  SIZES[FICNT]="$SZ"
  PHASH[FICNT]="NULL"
  FHASH[FICNT]="NULL"
  ((V > 1)) && echo "add_file: added as file number $FICNT" >&2
}

# $1: hash to get (partial/full); $2: file # to hash
get_filehash () {
  ((V > 1)) && echo "get_filehash: $1:$2 '${FILES[$2]}'" >&2
  test -z "${FILES[$2]}" && \
    echo "internal error: get_filehash: bad file number passed" >&2 && exit 1
  case "$1" in
    partial)
      PHASH[$2]="$(dd if="${FILES[$2]}" bs=$PHS count=1 2>/dev/null | $HASHCMD || echo "FAIL")"
      test "${PHASH[$2]}" = "FAIL" && \
        echo "get_filehash: hashing failed: '${FILES[$2]}'" >&2 && STATUS=1
      ;;
    full)
      FHASH[$2]="$($HASHCMD "${FILES[$2]}" || echo "FAIL")"
      test "${FHASH[$2]}" = "FAIL" && \
        echo "get_filehash: hashing failed: '${FILES[$2]}'" >&2 && STATUS=1
      ;;
    *)
      echo "internal error: get_filehash: invalid hash type '$1'" >&2
      exit 1
      ;;
  esac
  ((V > 1)) && echo "get_filehash: PHASH=${PHASH[$2]}" >&2
  return 0
}

# $1/$2: file numbers to check for a match
check_match () {
  ((V > 1)) && echo "check_match: checking: $1:'${FILES[$1]}', $2:'${FILES[$2]}'" >&2

  # Sizes must match
  if [ ${SIZES[$1]} != ${SIZES[$2]} ]
  then
    ((V > 1)) && \
      echo "check_match: sizes differ: ${SIZES[$1]} != ${SIZES[$2]}" >&2
    return 1
  fi

  # Check partial hashes
  test "${PHASH[$1]}" = "NULL" && get_filehash partial "$1"
  test "${PHASH[$1]}" = "FAIL" && STATUS=1 && return 1
  test "${PHASH[$2]}" = "NULL" && get_filehash partial "$2"
  test "${PHASH[$2]}" = "FAIL" && STATUS=1 && return 1
  if [ "${PHASH[$1]}" != "${PHASH[$2]}" ]
  then
    ((V > 1)) && echo "check_match: partial hashes don't match" >&2
    return 1
  else
    ((V > 1)) && echo "check_match: partial hashes match" >&2
  fi

  # Check full hashes
  test "${FHASH[$1]}" = "NULL" && get_filehash full "$1"
  test "${FHASH[$1]}" = "FAIL" && STATUS=1 && return 1
  test "${FHASH[$2]}" = "NULL" && get_filehash full "$2"
  test "${FHASH[$2]}" = "FAIL" && STATUS=1 && return 1
  if [ "${FHASH[$1]}" != "${FHASH[$2]}" ]
  then
    ((V > 1)) && echo "check_match: full hashes don't match" >&2
    return 1
  else
    ((V > 1)) && echo "check_match: full hashes match" >&2
  fi

  # Byte-for-byte compare the files unless quick mode is on
  if ((FQUICK == 1)) || cmp -s "${FILES[$1]}" "${FILES[$2]}"
  then
    ((V > 1)) && echo "check_match: files are identical" >&2
    return 0
  else
    ((V > 1)) && echo "check_match: files are not identical" >&2
    return 1
  fi
  return 1  # should never be reached
}

# Link a pair of matched file numbers
add_to_matches () {
  ((V > 1)) && echo "add_to_matches: adding: '${FILES[$1]}','${FILES[$2]}'" >&2
  MSCNT=$((MSCNT + 1))
  MLEFT[$MSCNT]=$1
  MRIGHT[$MSCNT]=$2
  MPROC[$MSCNT]=0  # Flips to 1 during final processing
  ((V > 1)) && echo "add_to_matches: set $MSCNT = $1:$2" >&2
  return 0
}
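# Illustrative one-liner (editorial sketch; assumes $HASHCMD hashes stdin
# when given no file name, as jodyhash does): this is the same "partial
# hash" that get_filehash computes above, i.e. a hash of only the first
# $PHS (4096) bytes, which lets most mismatched pairs be rejected after
# reading a single block from each file:
#
#   dd if=somefile bs=$PHS count=1 2>/dev/null | $HASHCMD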
# Print all matched files
print_matches () {
  ((V > 1)) && echo "print_matches: running" >&2
  FIRST=1
  CURFILE=0

  # Outer loop: find a match pair to start with
  for ((PRINTCNT = 1; PRINTCNT <= MSCNT; PRINTCNT++))
  do
    ((V > 1)) && echo "  outer loop: print count $PRINTCNT, match count $MSCNT" >&2

    # Don't reprint already-printed match pairings
    if (( MPROC[PRINTCNT] != 0 ))
    then
      ((V > 1)) && echo "  skipping processed pair $PRINTCNT" >&2
      continue
    fi

    CURFILE=${MLEFT[PRINTCNT]}
    # Print a newline before each new set EXCEPT the first set
    if ((FIRST == 1)); then FIRST=0; else echo; fi
    echo "${FILES[CURFILE]}"

    # Inner loop: find match pairs to print
    CURCNT=$PRINTCNT; PREVCNT=1; unset PREV; PREV[1]=$CURFILE
    for ((; CURCNT <= MSCNT; CURCNT++))
    do
      ((V > 1)) && echo "   inner loop: CC $CURCNT" >&2
      ((V > 1)) && echo "   files: ${MLEFT[CURCNT]}:'${FILES[${MLEFT[CURCNT]}]}', ${MRIGHT[CURCNT]}:'${FILES[${MRIGHT[CURCNT]}]}'" >&2
      if (( MPROC[CURCNT] != 0 ))
      then
        ((V > 1)) && echo "   skipping processed pair $CURCNT" >&2
        continue
      fi
      CURMATCH_L=0; CURMATCH_R=0; PCCNT=0
      # For each pair, check both sides for any known match number
      while ((PCCNT < PREVCNT))
      do
        PCCNT=$((PCCNT + 1))
        ((V > 1)) && echo -n "    deep loop: $PCCNT <= $PREVCNT" >&2
        (( MLEFT[CURCNT] == PREV[PCCNT] )) && CURMATCH_L=${MRIGHT[CURCNT]}
        (( MRIGHT[CURCNT] == PREV[PCCNT] )) && CURMATCH_R=${MLEFT[CURCNT]}
        ((V > 1)) && echo ", curmatch: $CURMATCH = ${MLEFT[CURCNT]} < ${PREV[PCCNT]} > ${MRIGHT[CURCNT]}" >&2
        # If both sides of this pair have been previously seen,
        # just flag the pair and print nothing.
        if (( CURMATCH_L != 0 && CURMATCH_R != 0 ))
        then
          MPROC[$CURCNT]=1
          ((V > 1)) && echo "   Flagging: pair $CURCNT (${MLEFT[CURCNT]}:${MRIGHT[CURCNT]}) (R)" >&2
          break
        fi
      done
      # If L or R match exists, we have a printable match
      CURMATCH=0
      (( CURMATCH_L != 0 && CURMATCH_R == 0 )) && CURMATCH=$CURMATCH_L
      (( CURMATCH_R != 0 && CURMATCH_L == 0 )) && CURMATCH=$CURMATCH_R
      if ((CURMATCH != 0))
      then
        echo "${FILES[CURMATCH]}"
        MPROC[$CURCNT]=1
        ((V > 1)) && echo "   Flagging: pair $CURCNT (${MLEFT[CURCNT]}:${MRIGHT[CURCNT]})" >&2
        PREVCNT=$((PREVCNT + 1))
        PREV[$PREVCNT]=$CURMATCH
      fi
    done
  done
  ((V > 1)) && echo "print_matches: complete" >&2
  return 0
}

show_help () {
  COPYTEXT="Copyright (C) 2020-2023 by Jody Bruchon and contributors\n"
  echo "$PROGNAME $VER ($VERDATE)"
  if [ "$2" = "full" ]
  then
    echo -e "$COPYTEXT"
    echo -e "\nUsage: $PROGNAME [options] file_or_dir1 [more_files ...]\n"
    echo -e "Options:\n"
    echo "-r|--recurse     Recurse into any subdirectories"
    echo "-q|--quiet       Only show final output and errors"
    echo "-Q|--quick       Skip the full file byte-for-byte comparison"
    echo "-D|--debug       Show lots of extra debugging text"
    echo "-v|-V|--version  Display program version and exit"
    echo "-h|--help        Show this help text and exit"
    echo "--license        Show the full program license text"
    echo -e "\njdupes is better than me. Get it at jdupes.com\n"
  fi
  if [ "$2" = "license" ]
  then
    echo -e "$COPYTEXT"
    echo -e "\nThe MIT License (MIT)\n"
    echo "Permission is hereby granted, free of charge, to any person obtaining a copy of"
    echo "this software and associated documentation files (the \"Software\"), to deal in"
    echo "the Software without restriction, including without limitation the rights to"
    echo "use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of"
    echo "the Software, and to permit persons to whom the Software is furnished to do so,"
    echo -e "subject to the following conditions:\n"
    echo "The above copyright notice and this permission notice shall be included in all"
    echo -e "copies or substantial portions of the Software.\n"
    echo "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR"
    echo "IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS"
    echo "FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR"
    echo "COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER"
    echo "IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN"
    echo "CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE."
  fi
  exit $1
}

### End function definitions
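# Example invocation (illustrative; the directory name is made up):
#
#   ./stupid_dupes.sh -r ~/Documents
#
# Output is one group of identical files per match set, with sets separated
# by blank lines, much like the default jdupes output format.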
### Begin main program

# Process arguments
(($# == 0)) && show_help 1 full
for X in "$@"
do
  case "$X" in
    -q|--quiet) V=0 ;;
    -D|--debug) V=2 ;;
    -r|--recurse) FRECURSE="" ;;
    -Q|--quick) FQUICK=1 ;;
    -v|-V|--version) show_help 0 version ;;
    -h|--help) show_help 0 full ;;
    --license) show_help 0 license ;;
    *) AC=$((AC + 1)); ARGS[AC]="$X" ;;
  esac
done

((V > 1)) && echo "Command line: $(printf %q "$0" "$@")" >&2

# Main loop
for ((ARGNUM=1; ARGNUM <= AC; ARGNUM++))
do
  ((V > 1)) && echo -e "Processing argument $ARGNUM: '${ARGS[ARGNUM]}'" >&2
  if [[ ! -f "${ARGS[ARGNUM]}" && ! -d "${ARGS[ARGNUM]}" || -h "${ARGS[ARGNUM]}" ]]
  then
    echo "warning: not a regular file or directory: '${ARGS[ARGNUM]}'" >&2
    STATUS=1
    continue
  fi
  # Add files/dirs to the list, recursing as needed
  while IFS= read -r X
  do
    add_file "$X"
  done < <(find "${ARGS[ARGNUM]}" $FRECURSE -type f -size +0 | $SORTCMD)
done

# If there are not enough files, just exit with no matches
((FICNT < 2)) && echo "No matches found." && exit $STATUS

# Check every file pair for matches
for ((CNT=1; CNT < FICNT; CNT++))
do
  for ((SCAN=CNT; SCAN < FICNT;))
  do
    ((SCAN++))
    check_match $CNT $SCAN && add_to_matches $CNT $SCAN
  done
done

print_matches

exit $STATUS
jdupes-1.27.3/test.sh000077500000000000000000000001321447252140200144110ustar00rootroot00000000000000#!/bin/sh

# This is a dummy test script meant for automated builds to succeed.
echo "OK" jdupes-1.27.3/testdir/000077500000000000000000000000001447252140200145555ustar00rootroot00000000000000jdupes-1.27.3/testdir/.hidden_dir/000077500000000000000000000000001447252140200167245ustar00rootroot00000000000000jdupes-1.27.3/testdir/.hidden_dir/hiddendir_two000066400000000000000000000000041447252140200214640ustar00rootroot00000000000000two jdupes-1.27.3/testdir/.hidden_two000066400000000000000000000000041447252140200166740ustar00rootroot00000000000000two jdupes-1.27.3/testdir/Stilltinydupe1000066400000000000000000000000011447252140200174210ustar00rootroot00000000000000 jdupes-1.27.3/testdir/Tinydupe3000066400000000000000000000000011447252140200163530ustar00rootroot00000000000000 jdupes-1.27.3/testdir/Zero_C000066400000000000000000000000001447252140200156470ustar00rootroot00000000000000jdupes-1.27.3/testdir/atinydupe0000066400000000000000000000000011447252140200165510ustar00rootroot00000000000000 jdupes-1.27.3/testdir/block_size_tests/000077500000000000000000000000001447252140200201235ustar00rootroot00000000000000jdupes-1.27.3/testdir/block_size_tests/4095b_file1000066400000000000000000000077771447252140200217130ustar00rootroot00000000000000This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! 
:-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that wojdupes-1.27.3/testdir/block_size_tests/4095b_file2000066400000000000000000000077771447252140200217140ustar00rootroot00000000000000This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. 
Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that wojdupes-1.27.3/testdir/block_size_tests/4096b_file1000066400000000000000000000100001447252140200216610ustar00rootroot00000000000000This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. 
Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that worjdupes-1.27.3/testdir/block_size_tests/4096b_file2000066400000000000000000000100001447252140200216620ustar00rootroot00000000000000This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. 
If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. 
Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that worjdupes-1.27.3/testdir/block_size_tests/4097b_file1000066400000000000000000000100011447252140200216630ustar00rootroot00000000000000This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. 
If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that workjdupes-1.27.3/testdir/block_size_tests/4097b_file2000066400000000000000000000100011447252140200216640ustar00rootroot00000000000000This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. 
Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that workjdupes-1.27.3/testdir/extensions/000077500000000000000000000000001447252140200167545ustar00rootroot00000000000000jdupes-1.27.3/testdir/extensions/fake_doc_001.doc000066400000000000000000000000161447252140200215530ustar00rootroot00000000000000fake mp3 file jdupes-1.27.3/testdir/extensions/fake_doc_002.doc000066400000000000000000000000161447252140200215540ustar00rootroot00000000000000fake mp3 file jdupes-1.27.3/testdir/extensions/fake_mp3_001.mp3000066400000000000000000000000161447252140200214370ustar00rootroot00000000000000fake mp3 file jdupes-1.27.3/testdir/extensions/fake_mp3_002.mp3000066400000000000000000000000161447252140200214400ustar00rootroot00000000000000fake mp3 file jdupes-1.27.3/testdir/extensions/fake_mp4_001.mp4000066400000000000000000000000161447252140200214410ustar00rootroot00000000000000fake mp3 file jdupes-1.27.3/testdir/extensions/fake_mp4_002.mp4000066400000000000000000000000161447252140200214420ustar00rootroot00000000000000fake mp3 file jdupes-1.27.3/testdir/isolate/000077500000000000000000000000001447252140200162155ustar00rootroot00000000000000jdupes-1.27.3/testdir/isolate/1/000077500000000000000000000000001447252140200163555ustar00rootroot00000000000000jdupes-1.27.3/testdir/isolate/1/1.txt000066400000000000000000000000101447252140200172450ustar00rootroot00000000000000isolate jdupes-1.27.3/testdir/isolate/1/2.txt000066400000000000000000000000101447252140200172460ustar00rootroot00000000000000isolate jdupes-1.27.3/testdir/isolate/2/000077500000000000000000000000001447252140200163565ustar00rootroot00000000000000jdupes-1.27.3/testdir/isolate/2/3.txt000066400000000000000000000000101447252140200172500ustar00rootroot00000000000000isolate jdupes-1.27.3/testdir/isolate/2/4.txt000066400000000000000000000000101447252140200172510ustar00rootroot00000000000000isolate 
jdupes-1.27.3/testdir/isolate/3/000077500000000000000000000000001447252140200163575ustar00rootroot00000000000000jdupes-1.27.3/testdir/isolate/3/5.txt000066400000000000000000000000101447252140200172530ustar00rootroot00000000000000isolate jdupes-1.27.3/testdir/isolate/3/6.txt000066400000000000000000000000101447252140200172540ustar00rootroot00000000000000isolate jdupes-1.27.3/testdir/isolate/3/7.txt000066400000000000000000000000101447252140200172550ustar00rootroot00000000000000isolate jdupes-1.27.3/testdir/isolate/4/000077500000000000000000000000001447252140200163605ustar00rootroot00000000000000jdupes-1.27.3/testdir/isolate/4/8.txt000066400000000000000000000000101447252140200172570ustar00rootroot00000000000000isolate jdupes-1.27.3/testdir/larger_file_1000066400000000000000000002023601447252140200171760ustar00rootroot00000000000000This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. 
If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. 
Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. 
Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! 
This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun!
jdupes-1.27.3/testdir/larger_file_2000066400000000000000000002023601447252140200171770ustar00rootroot00000000000000This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size.
Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size.
Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. 
Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! 
This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! 
:-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. 
If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. 
Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. 
Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! 
This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! 
:-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. 
If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. 
Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. 
Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! 
This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! 
jdupes-1.27.3/testdir/larger_file_3000066400000000000000000002023601447252140200172000ustar00rootroot00000000000000Unlike the other large files, this one is intended to fail matching early. This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum.
If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. 
Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. 
Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! 
This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! 
:-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. 
If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. 
Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. 
Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! 
This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! 
:-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. 
If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. 
Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. 
Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! 
This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner progr jdupes-1.27.3/testdir/larger_file_4000066400000000000000000002023601447252140200172010ustar00rootroot00000000000000This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program!
:-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner program! :-P If you'll excuse me, I have to copy-paste like crazy now. Have fun! This file is a larger file than the other testdir files. Its purpose is to trigger code that works with files larger than the quick hash block size. Since I did not feel like typing out thousands of lines of text, this long line will be duplicated ad infinitum. If you don't like that, write your own duplicate scanner prog Unlike the other large files, this one is designed to fail matching later. jdupes-1.27.3/testdir/nine_upsidedown000066400000000000000000000000041447252140200176640ustar00rootroot00000000000000six jdupes-1.27.3/testdir/notsotinydupe1000066400000000000000000000001021447252140200174760ustar00rootroot00000000000000This is not quite such a small duplicate as the other duplicates. jdupes-1.27.3/testdir/notsotinydupe2000066400000000000000000000001021447252140200174770ustar00rootroot00000000000000This is not quite such a small duplicate as the other duplicates. jdupes-1.27.3/testdir/numeric_sort_copysuffixes/000077500000000000000000000000001447252140200220755ustar00rootroot00000000000000jdupes-1.27.3/testdir/numeric_sort_copysuffixes/file1-0 (1).jpg000066400000000000000000000000041447252140200241700ustar00rootroot00000000000000bar jdupes-1.27.3/testdir/numeric_sort_copysuffixes/file1-0#1.jpg000066400000000000000000000000041447252140200240520ustar00rootroot00000000000000bar jdupes-1.27.3/testdir/numeric_sort_copysuffixes/file1-0.jpg000066400000000000000000000000041447252140200237260ustar00rootroot00000000000000bar jdupes-1.27.3/testdir/numeric_sort_copysuffixes/file1-1 (Copy) (2) (2).jpg000066400000000000000000000000041447252140200254510ustar00rootroot00000000000000bar jdupes-1.27.3/testdir/numeric_sort_copysuffixes/file1-1 (Copy) (2).jpg000066400000000000000000000000041447252140200252060ustar00rootroot00000000000000bar jdupes-1.27.3/testdir/numeric_sort_copysuffixes/file1-1 (Copy).jpg000066400000000000000000000000041447252140200247430ustar00rootroot00000000000000bar jdupes-1.27.3/testdir/numeric_sort_copysuffixes/file1-1.jpg000066400000000000000000000000041447252140200237270ustar00rootroot00000000000000bar jdupes-1.27.3/testdir/numeric_sort_copysuffixes/file1-10.jpg000066400000000000000000000000041447252140200240070ustar00rootroot00000000000000bar jdupes-1.27.3/testdir/numeric_sort_copysuffixes/file1-2.jpg000066400000000000000000000000041447252140200237300ustar00rootroot00000000000000bar jdupes-1.27.3/testdir/numeric_sort_digitsafter/000077500000000000000000000000001447252140200216535ustar00rootroot00000000000000jdupes-1.27.3/testdir/numeric_sort_digitsafter/file001000066400000000000000000000000041447252140200227300ustar00rootroot00000000000000foo jdupes-1.27.3/testdir/numeric_sort_digitsafter/file001a000066400000000000000000000000041447252140200230710ustar00rootroot00000000000000foo jdupes-1.27.3/testdir/numeric_sort_digitsafter/file002000066400000000000000000000000041447252140200227310ustar00rootroot00000000000000foo jdupes-1.27.3/testdir/numeric_sort_digitsafter/file020000066400000000000000000000000041447252140200227310ustar00rootroot00000000000000foo 
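
The repeated-text files above exist to push file sizes past the quick hash block size and exercise the staged comparison path; the last one, which ends with "designed to fail matching later", matches the others on its first block but diverges near the end. A minimal sketch of that staged idea (illustrative only, not jdupes' actual code: the 4096-byte block size, the zero XXH64 seed, and the helper name are assumptions):

    #include <stdio.h>
    #include "xxhash.h"

    #define QUICK_HASH_BLOCK 4096  /* assumed quick-hash block size */

    /* Hash only the first block of a file: a cheap pre-filter so full-file
     * hashing or byte-for-byte comparison runs only on likely matches. */
    static int quick_hash_file(const char *path, unsigned long long *hash_out)
    {
      char buf[QUICK_HASH_BLOCK];
      size_t got;
      FILE *fp = fopen(path, "rb");
      if (fp == NULL) return -1;
      got = fread(buf, 1, sizeof(buf), fp);  /* read the first block only */
      fclose(fp);
      *hash_out = XXH64(buf, got, 0);
      return 0;
    }

Files larger than the block can collide on this quick hash yet still differ later, which is exactly the case the "fail matching later" file tests: every quick-hash match must be confirmed by a full-file hash or byte compare.
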
jdupes-1.27.3/testdir/numeric_sort_digitsafter/file021000066400000000000000000000000041447252140200227320ustar00rootroot00000000000000foo jdupes-1.27.3/testdir/numeric_sort_digitsafter/file030000066400000000000000000000000041447252140200227320ustar00rootroot00000000000000foo jdupes-1.27.3/testdir/numeric_sort_digitsafter/file1000066400000000000000000000000041447252140200225700ustar00rootroot00000000000000foo jdupes-1.27.3/testdir/numeric_sort_digitsafter/file10000066400000000000000000000000041447252140200226500ustar00rootroot00000000000000foo jdupes-1.27.3/testdir/numeric_sort_digitsafter/file100000066400000000000000000000000041447252140200227300ustar00rootroot00000000000000foo jdupes-1.27.3/testdir/numeric_sort_digitsafter/file10a000066400000000000000000000000041447252140200230110ustar00rootroot00000000000000foo jdupes-1.27.3/testdir/numeric_sort_digitsafter/file1a2000066400000000000000000000000041447252140200230130ustar00rootroot00000000000000foo jdupes-1.27.3/testdir/numeric_sort_digitsafter/file2000066400000000000000000000000041447252140200225710ustar00rootroot00000000000000foo jdupes-1.27.3/testdir/numeric_sort_digitsafter/file3000066400000000000000000000000041447252140200225720ustar00rootroot00000000000000foo jdupes-1.27.3/testdir/numeric_sort_startwithzero/000077500000000000000000000000001447252140200222775ustar00rootroot00000000000000jdupes-1.27.3/testdir/numeric_sort_startwithzero/00file4000066400000000000000000000000061447252140200233610ustar00rootroot00000000000000pair4 jdupes-1.27.3/testdir/numeric_sort_startwithzero/00file5000066400000000000000000000000061447252140200233620ustar00rootroot00000000000000pair5 jdupes-1.27.3/testdir/numeric_sort_startwithzero/00file5a000066400000000000000000000000061447252140200235230ustar00rootroot00000000000000pair5 jdupes-1.27.3/testdir/numeric_sort_startwithzero/01file4000066400000000000000000000000061447252140200233620ustar00rootroot00000000000000pair4 jdupes-1.27.3/testdir/numeric_sort_startwithzero/0file1000066400000000000000000000000061447252140200232760ustar00rootroot00000000000000pair1 jdupes-1.27.3/testdir/numeric_sort_startwithzero/0file2000066400000000000000000000000061447252140200232770ustar00rootroot00000000000000pair1 jdupes-1.27.3/testdir/numeric_sort_startwithzero/0file3000066400000000000000000000000061447252140200233000ustar00rootroot00000000000000pair3 jdupes-1.27.3/testdir/numeric_sort_startwithzero/1file1000066400000000000000000000000061447252140200232770ustar00rootroot00000000000000pair2 jdupes-1.27.3/testdir/numeric_sort_startwithzero/1file2000066400000000000000000000000061447252140200233000ustar00rootroot00000000000000pair2 jdupes-1.27.3/testdir/numeric_sort_startwithzero/1file3000066400000000000000000000000061447252140200233010ustar00rootroot00000000000000pair3 jdupes-1.27.3/testdir/recursed_a/000077500000000000000000000000001447252140200166715ustar00rootroot00000000000000jdupes-1.27.3/testdir/recursed_a/five000066400000000000000000000000051447252140200175400ustar00rootroot00000000000000five jdupes-1.27.3/testdir/recursed_a/five_2000066400000000000000000000000051447252140200177610ustar00rootroot00000000000000five jdupes-1.27.3/testdir/recursed_a/one000066400000000000000000000000041447252140200173670ustar00rootroot00000000000000one jdupes-1.27.3/testdir/recursed_a/one_2000066400000000000000000000000041447252140200176100ustar00rootroot00000000000000one 
jdupes-1.27.3/testdir/recursed_a/symlink_infinite_loop000077700000000000000000000000001447252140200254652../recursed_austar00rootroot00000000000000jdupes-1.27.3/testdir/recursed_a/two000066400000000000000000000000041447252140200174170ustar00rootroot00000000000000two jdupes-1.27.3/testdir/recursed_a/two_2000066400000000000000000000000041447252140200176400ustar00rootroot00000000000000two jdupes-1.27.3/testdir/recursed_b/000077500000000000000000000000001447252140200166725ustar00rootroot00000000000000jdupes-1.27.3/testdir/recursed_b/four000066400000000000000000000000051447252140200175630ustar00rootroot00000000000000four jdupes-1.27.3/testdir/recursed_b/one000066400000000000000000000000041447252140200173700ustar00rootroot00000000000000one jdupes-1.27.3/testdir/recursed_b/three000066400000000000000000000000061447252140200177200ustar00rootroot00000000000000three jdupes-1.27.3/testdir/recursed_b/two_plus_one000066400000000000000000000000061447252140200213260ustar00rootroot00000000000000three jdupes-1.27.3/testdir/recursed_c/000077500000000000000000000000001447252140200166735ustar00rootroot00000000000000jdupes-1.27.3/testdir/recursed_c/five000066400000000000000000000000051447252140200175420ustar00rootroot00000000000000five jdupes-1.27.3/testdir/recursed_c/level2/000077500000000000000000000000001447252140200200645ustar00rootroot00000000000000jdupes-1.27.3/testdir/recursed_c/level2/five000066400000000000000000000000051447252140200207330ustar00rootroot00000000000000five jdupes-1.27.3/testdir/recursed_c/level2/one000066400000000000000000000000041447252140200205620ustar00rootroot00000000000000one jdupes-1.27.3/testdir/recursed_c/level2/two000066400000000000000000000000041447252140200206120ustar00rootroot00000000000000two jdupes-1.27.3/testdir/recursed_c/one000066400000000000000000000000041447252140200173710ustar00rootroot00000000000000one jdupes-1.27.3/testdir/recursed_c/two000066400000000000000000000000041447252140200174210ustar00rootroot00000000000000two jdupes-1.27.3/testdir/symlink_dir000077700000000000000000000000001447252140200210562recursed_austar00rootroot00000000000000jdupes-1.27.3/testdir/symlink_test/000077500000000000000000000000001447252140200173025ustar00rootroot00000000000000jdupes-1.27.3/testdir/symlink_test/regular_file000066400000000000000000000000221447252140200216570ustar00rootroot00000000000000symlink test file jdupes-1.27.3/testdir/symlink_test/symlinked_file000077700000000000000000000000001447252140200246012regular_fileustar00rootroot00000000000000jdupes-1.27.3/testdir/symlink_twice_one000077700000000000000000000000001447252140200207512twoustar00rootroot00000000000000jdupes-1.27.3/testdir/symlink_two000077700000000000000000000000001447252140200176062twoustar00rootroot00000000000000jdupes-1.27.3/testdir/tinydupe2000066400000000000000000000000011447252140200164120ustar00rootroot00000000000000 jdupes-1.27.3/testdir/tinydupe4000066400000000000000000000000011447252140200164140ustar00rootroot00000000000000 jdupes-1.27.3/testdir/twice_one000066400000000000000000000000041447252140200164460ustar00rootroot00000000000000two jdupes-1.27.3/testdir/two000066400000000000000000000000041447252140200153030ustar00rootroot00000000000000two jdupes-1.27.3/testdir/unicode_dirnames/000077500000000000000000000000001447252140200200655ustar00rootroot00000000000000jdupes-1.27.3/testdir/unicode_dirnames/Ελληνιά/000077500000000000000000000000001447252140200253525ustar00rootroot00000000000000jdupes-1.27.3/testdir/unicode_dirnames/Ελληνιά/Unicode 
testfile.txt000066400000000000000000000000301447252140200312720ustar00rootroot00000000000000до свиданияjdupes-1.27.3/testdir/unicode_dirnames/до свидания/000077500000000000000000000000001447252140200275535ustar00rootroot00000000000000jdupes-1.27.3/testdir/unicode_dirnames/до свидания/Unicode testfile.txt000066400000000000000000000000301447252140200334730ustar00rootroot00000000000000до свиданияjdupes-1.27.3/testdir/unicode_dirnames/दसविदानिया/000077500000000000000000000000001447252140200332265ustar00rootroot00000000000000jdupes-1.27.3/testdir/unicode_dirnames/दसविदानिया/Unicode testfile.txt000066400000000000000000000000301447252140200371460ustar00rootroot00000000000000до свиданияjdupes-1.27.3/testdir/unicode_dirnames/怖い/000077500000000000000000000000001447252140200221105ustar00rootroot00000000000000jdupes-1.27.3/testdir/unicode_dirnames/怖い/Unicode testfile.txt000066400000000000000000000000301447252140200260300ustar00rootroot00000000000000до свиданияjdupes-1.27.3/testdir/unicode_dirnames/행운을 빈다/000077500000000000000000000000001447252140200254355ustar00rootroot00000000000000jdupes-1.27.3/testdir/unicode_dirnames/행운을 빈다/Unicode testfile.txt000066400000000000000000000000301447252140200313550ustar00rootroot00000000000000до свиданияjdupes-1.27.3/testdir/unicode_filenames/000077500000000000000000000000001447252140200202265ustar00rootroot00000000000000jdupes-1.27.3/testdir/unicode_filenames/cassé000066400000000000000000000001061447252140200216530ustar00rootroot00000000000000oh hi, this file has a Japanese name for testing this program against!jdupes-1.27.3/testdir/unicode_filenames/Ελληνιά000066400000000000000000000001061447252140200254330ustar00rootroot00000000000000oh hi, this file has a Japanese name for testing this program against!jdupes-1.27.3/testdir/unicode_filenames/до свидания000066400000000000000000000001061447252140200276340ustar00rootroot00000000000000oh hi, this file has a Japanese name for testing this program against!jdupes-1.27.3/testdir/unicode_filenames/दसविदानिया000066400000000000000000000001061447252140200333070ustar00rootroot00000000000000oh hi, this file has a Japanese name for testing this program against!jdupes-1.27.3/testdir/unicode_filenames/怖い000066400000000000000000000001061447252140200221710ustar00rootroot00000000000000oh hi, this file has a Japanese name for testing this program against!jdupes-1.27.3/testdir/unicode_filenames/행운을 빈다000066400000000000000000000001061447252140200255160ustar00rootroot00000000000000oh hi, this file has a Japanese name for testing this program against!jdupes-1.27.3/testdir/with spaces a000066400000000000000000000000141447252140200191060ustar00rootroot00000000000000with spaces jdupes-1.27.3/testdir/with spaces b000066400000000000000000000000141447252140200191070ustar00rootroot00000000000000with spaces jdupes-1.27.3/testdir/zero_a000066400000000000000000000000001447252140200157450ustar00rootroot00000000000000jdupes-1.27.3/testdir/zero_b000066400000000000000000000000001447252140200157460ustar00rootroot00000000000000jdupes-1.27.3/travcheck.c000066400000000000000000000070631447252140200152230ustar00rootroot00000000000000/* jdupes double-traversal prevention tree * See jdupes.c for license information */ #ifndef NO_TRAVCHECK #include <stdio.h> #include <stdlib.h> #include <libjodycode.h> #include "jdupes.h" #include "travcheck.h" /* Simple traversal balancing hash - scrambles inode number */ #define TRAVHASH(device,inode) (((inode << 55 | (inode >> 9)) + (device << 13))) static struct travcheck *travcheck_head = NULL; /* Create a new traversal check object and initialize its values
*/ static struct travcheck *travcheck_alloc(const dev_t device, const jdupes_ino_t inode, uintmax_t hash) { struct travcheck *trav; LOUD(fprintf(stderr, "travcheck_alloc(dev %" PRIdMAX ", ino %" PRIdMAX ", hash %" PRIuMAX ")\n", (intmax_t)device, (intmax_t)inode, hash);) trav = (struct travcheck *)malloc(sizeof(struct travcheck)); if (trav == NULL) { LOUD(fprintf(stderr, "travcheck_alloc: malloc failed\n");) return NULL; } trav->left = NULL; trav->right = NULL; trav->hash = hash; trav->device = device; trav->inode = inode; LOUD(fprintf(stderr, "travcheck_alloc returned %p\n", (void *)trav);) return trav; } /* De-allocate the travcheck tree */ void travcheck_free(struct travcheck *cur) { LOUD(fprintf(stderr, "travcheck_free(%p)\n", cur);) if (cur == NULL) { if (travcheck_head == NULL) return; cur = travcheck_head; travcheck_head = NULL; } if (cur->left == cur) goto error_travcheck_ptr; if (cur->right == cur) goto error_travcheck_ptr; if (cur->left != NULL) travcheck_free(cur->left); if (cur->right != NULL) travcheck_free(cur->right); if (cur != NULL) free(cur); return; error_travcheck_ptr: fprintf(stderr, "internal error: invalid pointer in travcheck_free(), report this\n"); exit(EXIT_FAILURE); } /* Check to see if device:inode pair has already been traversed */ int traverse_check(const dev_t device, const jdupes_ino_t inode) { struct travcheck *traverse = travcheck_head; uintmax_t travhash; LOUD(fprintf(stderr, "traverse_check(dev %" PRIuMAX ", ino %" PRIuMAX "\n", (uintmax_t)device, (uintmax_t)inode);) travhash = TRAVHASH(device, inode); if (travcheck_head == NULL) { travcheck_head = travcheck_alloc(device, inode, TRAVHASH(device, inode)); if (travcheck_head == NULL) return 2; } else { traverse = travcheck_head; while (1) { if (traverse == NULL) jc_nullptr("traverse_check()"); /* Don't re-traverse directories we've already seen */ if (inode == traverse->inode && device == traverse->device) { LOUD(fprintf(stderr, "traverse_check: already seen: %" PRIuMAX ":%" PRIuMAX "\n", (uintmax_t)device, (uintmax_t)inode);) return 1; } else { if (travhash > traverse->hash) { /* Traverse right */ if (traverse->right == NULL) { LOUD(fprintf(stderr, "traverse_check add right: %" PRIuMAX ", %" PRIuMAX"\n", (uintmax_t)device, (uintmax_t)inode);) traverse->right = travcheck_alloc(device, inode, travhash); if (traverse->right == NULL) return 2; break; } traverse = traverse->right; continue; } else { /* Traverse left */ if (traverse->left == NULL) { LOUD(fprintf(stderr, "traverse_check add left: %" PRIuMAX ", %" PRIuMAX "\n", (uintmax_t)device, (uintmax_t)inode);) traverse->left = travcheck_alloc(device, inode, travhash); if (traverse->left == NULL) return 2; break; } traverse = traverse->left; continue; } } } } return 0; } #endif /* NO_TRAVCHECK */ jdupes-1.27.3/travcheck.h000066400000000000000000000011511447252140200152200ustar00rootroot00000000000000/* jdupes double-traversal prevention tree * See jdupes.c for license information */ #ifndef JDUPES_TRAVCHECK_H #define JDUPES_TRAVCHECK_H #ifdef __cplusplus extern "C" { #endif #ifndef NO_TRAVCHECK /* Tree to track each directory traversed */ struct travcheck { struct travcheck *left; struct travcheck *right; uintmax_t hash; jdupes_ino_t inode; dev_t device; }; /* De-allocate the travcheck tree */ void travcheck_free(struct travcheck *cur); int traverse_check(const dev_t device, const jdupes_ino_t inode); #endif /* NO_TRAVCHECK */ #ifdef __cplusplus } #endif #endif /* JDUPES_TRAVCHECK_H */ 
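
As travcheck.c above shows, traverse_check() returns 0 on a first visit, 1 when the device:inode pair has already been traversed, and 2 on allocation failure, with TRAVHASH() scrambling the inode number so the binary tree stays reasonably balanced even for sequential inodes. A minimal usage sketch (not jdupes code; the stat() handling and the cast to jdupes_ino_t are illustrative assumptions):

    #include <stdio.h>
    #include <sys/stat.h>
    #include "jdupes.h"     /* jdupes_ino_t */
    #include "travcheck.h"

    static void scan_directory_sketch(const char *path)
    {
      struct stat s;

      if (stat(path, &s) != 0) return;  /* unreadable; skip */
      switch (traverse_check(s.st_dev, (jdupes_ino_t)s.st_ino)) {
        case 1: return;  /* already traversed (e.g. a symlink loop); skip */
        case 2: fprintf(stderr, "out of memory\n"); return;
        default: break;  /* 0: first visit, safe to descend */
      }
      /* ... read directory entries and recurse here ... */
    }

This is why the symlink loops in the testdir fixtures earlier in the archive (symlink_infinite_loop pointing back at recursed_a) cannot hang a scanner built on this tree: a second visit to the same device:inode pair returns 1 and the recursion stops.
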
jdupes-1.27.3/tune_winres.sh000077500000000000000000000020571447252140200160040ustar00rootroot00000000000000#!/bin/sh WINRES="winres.rc" WINRES_XP="winres_xp.rc" WINRES_MAN="winres.manifest.xml" # Get version number components VER="$(grep '^#define VER "' version.h | cut -d\" -f2)" V1="$(echo "$VER" | cut -d. -f1)"; test -z "$V1" && V1=0 V2="$(echo "$VER" | cut -d. -f2)"; test -z "$V2" && V2=0 V3="$(echo "$VER" | cut -d. -f3)"; test -z "$V3" && V3=0 V4="$(echo "$VER" | cut -d. -f4)"; test -z "$V4" && V4=0 # Build VS_VERSION_INFO product version string with commas PRODVER="$V1,$V2,$V3,$V4" # Extend version to include four discrete numbers XVER="$V1.$V2.$V3.$V4" echo "$VER = $PRODVER ($XVER)" # Actually change the manifest version information sed -i 's/\([A-Z]*\)VERSION [0-9],.*/\1VERSION '"$PRODVER/"';s/"\([A-Za-z]*\)Version", "[0-9],.*"/"\1Version", '"\"$PRODVER\"/" "$WINRES" sed -i 's/\([A-Z]*\)VERSION [0-9],.*/\1VERSION '"$PRODVER/"';s/"\([A-Za-z]*\)Version", "[0-9],.*"/"\1Version", '"\"$PRODVER\"/" "$WINRES_XP" sed -i 's/assemblyIdentity type="win32" name="jdupes" version="[^"]*/assemblyIdentity type="win32" name="jdupes" version="'$XVER/ "$WINRES_MAN" jdupes-1.27.3/version.h000066400000000000000000000004001447252140200147300ustar00rootroot00000000000000/* VERSION determines the program's version number * This file is part of jdupes; see jdupes.c for license information */ #ifndef JDUPES_VERSION_H #define JDUPES_VERSION_H #define VER "1.27.3" #define VERDATE "2023-08-26" #endif /* JDUPES_VERSION_H */ jdupes-1.27.3/winres.manifest.xml000066400000000000000000000006751447252140200167430ustar00rootroot00000000000000<?xml version="1.0" encoding="UTF-8" standalone="yes"?> <assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0"> <assemblyIdentity type="win32" name="jdupes" version="1.27.3.0"/> <application xmlns="urn:schemas-microsoft-com:asm.v3"> <windowsSettings xmlns:ws2="http://schemas.microsoft.com/SMI/2005/WindowsSettings"> <ws2:dpiAware>true</ws2:dpiAware> </windowsSettings> </application> </assembly> jdupes-1.27.3/winres.rc000066400000000000000000000016071447252140200147400ustar00rootroot00000000000000#include "winver.h" 1 24 winres.manifest.xml 2 ICON icon/icon_jdupes_all.ico VS_VERSION_INFO VERSIONINFO FILEVERSION 1,27,3,0 PRODUCTVERSION 1,27,3,0 FILEFLAGSMASK 0x3fL FILEFLAGS 0x0L FILEOS 0x40004L FILETYPE 0x1L FILESUBTYPE 0x0L BEGIN BLOCK "StringFileInfo" BEGIN BLOCK "040904b0" BEGIN VALUE "Comments", "(C) 2015-2023 Jody Bruchon <jody@jodybruchon.com>, published under The MIT License" VALUE "CompanyName", "Jody Bruchon" VALUE "FileDescription", "jdupes Duplicate File Finder Tool" VALUE "FileVersion", "1,27,3,0" VALUE "InternalName", "jdupes" VALUE "LegalCopyright", "(C) 2015-2023 Jody Bruchon <jody@jodybruchon.com>" VALUE "OriginalFilename", "jdupes.exe" VALUE "ProductName", "jdupes" VALUE "ProductVersion", "1,27,3,0" END END BLOCK "VarFileInfo" BEGIN VALUE "Translation", 0x409, 1200 END END jdupes-1.27.3/winres_xp.rc000066400000000000000000000015561447252140200154500ustar00rootroot00000000000000#include "winver.h" 2 ICON icon/icon_jdupes_all.ico VS_VERSION_INFO VERSIONINFO FILEVERSION 1,27,3,0 PRODUCTVERSION 1,27,3,0 FILEFLAGSMASK 0x3fL FILEFLAGS 0x0L FILEOS 0x40004L FILETYPE 0x1L FILESUBTYPE 0x0L BEGIN BLOCK "StringFileInfo" BEGIN BLOCK "040904b0" BEGIN VALUE "Comments", "(C) 2015-2023 Jody Bruchon <jody@jodybruchon.com>, published under The MIT License" VALUE "CompanyName", "Jody Bruchon" VALUE "FileDescription", "jdupes Duplicate File Finder Tool" VALUE "FileVersion", "1,27,3,0" VALUE "InternalName", "jdupes" VALUE "LegalCopyright", "(C) 2015-2023 Jody Bruchon <jody@jodybruchon.com>" VALUE "OriginalFilename", "jdupes.exe" VALUE "ProductName", "jdupes" VALUE "ProductVersion", "1,27,3,0" END END BLOCK "VarFileInfo" BEGIN VALUE "Translation", 0x409, 1200 END END jdupes-1.27.3/xxhash.c000066400000000000000000000516111447252140200145520ustar00rootroot00000000000000/* * xxHash - Fast Hash algorithm * Copyright (C) 2012-2016, Yann Collet * * BSD 2-Clause
License (http://www.opensource.org/licenses/bsd-license.php) * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You can contact the author at : * - xxHash homepage: http://www.xxhash.com * - xxHash source repository : https://github.com/Cyan4973/xxHash */ #ifndef USE_JODY_HASH /* ************************************* * Tuning parameters ***************************************/ /*!XXH_FORCE_MEMORY_ACCESS : * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. * The switch below allows selecting a different access method for improved performance. * Method 0 (default) : use `memcpy()`. Safe and portable. * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. * Method 2 : direct access. This method doesn't depend on the compiler but violates the C standard. * It can generate buggy code on targets which do not support unaligned memory accesses. * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) * See http://stackoverflow.com/a/32095106/646947 for details. * Prefer these methods in priority order (0 > 1 > 2) */ #ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ # if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \ || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \ || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) # define XXH_FORCE_MEMORY_ACCESS 2 # elif defined(__INTEL_COMPILER) || \ (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \ || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \ || defined(__ARM_ARCH_7S__) )) # define XXH_FORCE_MEMORY_ACCESS 1 # endif #endif /*!XXH_ACCEPT_NULL_INPUT_POINTER : * If input pointer is NULL, xxHash default behavior is to dereference it, triggering a segfault. * When this macro is enabled, xxHash actively checks input for null pointer. * If it is, the result for null input pointers is the same as for a null-length input.
 */ #ifndef XXH_ACCEPT_NULL_INPUT_POINTER /* can be defined externally */ # define XXH_ACCEPT_NULL_INPUT_POINTER 0 #endif /*!XXH_FORCE_NATIVE_FORMAT : * By default, xxHash library provides endian-independent Hash values, based on little-endian convention. * Results are therefore identical for little-endian and big-endian CPU. * This comes at a performance cost for big-endian CPU, since some swapping is required to emulate little-endian format. * Should endian-independence be of no importance for your application, you may set the #define below to 1, * to improve speed for Big-endian CPU. * This option has no impact on Little_Endian CPU. */ #ifndef XXH_FORCE_NATIVE_FORMAT /* can be defined externally */ # define XXH_FORCE_NATIVE_FORMAT 0 #endif /*!XXH_FORCE_ALIGN_CHECK : * This is a minor performance trick, only useful with lots of very small keys. * It means : check for aligned/unaligned input. * The check costs one initial branch per hash; * set it to 0 when the input is guaranteed to be aligned, * or when alignment doesn't matter for performance. */ #ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */ # if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64) # define XXH_FORCE_ALIGN_CHECK 0 # else # define XXH_FORCE_ALIGN_CHECK 1 # endif #endif /* ************************************* * Includes & Memory related functions ***************************************/ /*! Modify the local functions below should you wish to use some other memory routines * for malloc(), free() */ #include <stdlib.h> static void* XXH_malloc(size_t s) { return malloc(s); } static void XXH_free (void* p) { free(p); } /*! and for memcpy() */ #include <string.h> static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); } #define XXH_STATIC_LINKING_ONLY #include "xxhash.h" /* ************************************* * Compiler Specific Options ***************************************/ #ifdef _MSC_VER /* Visual Studio */ # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ # define FORCE_INLINE static __forceinline #else # if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ # ifdef __GNUC__ # define FORCE_INLINE static inline __attribute__((always_inline)) # else # define FORCE_INLINE static inline # endif # else # define FORCE_INLINE static # endif /* __STDC_VERSION__ */ #endif /* ************************************* * Basic Types ***************************************/ #ifndef MEM_MODULE # if !defined (__VMS) \ && (defined (__cplusplus) \ || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) # include <stdint.h> typedef uint8_t BYTE; typedef uint16_t U16; typedef uint32_t U32; # else typedef unsigned char BYTE; typedef unsigned short U16; typedef unsigned int U32; # endif #endif #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) /* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */ static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; } #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) /* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ /* currently only defined for gcc and icc */ typedef union { U32 u32; } __attribute__((packed)) unalign; static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } #else /* portable and safe solution. Generally efficient.
 * see : http://stackoverflow.com/a/32095106/646947 */ static U32 XXH_read32(const void* memPtr) { U32 val; memcpy(&val, memPtr, sizeof(val)); return val; } #endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ /* **************************************** * Compiler-specific Functions and Macros ******************************************/ #define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) /* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */ #if defined(_MSC_VER) # define XXH_rotl32(x,r) _rotl(x,r) # define XXH_rotl64(x,r) _rotl64(x,r) #else # define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r))) # define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r))) #endif #if defined(_MSC_VER) /* Visual Studio */ # define XXH_swap32 _byteswap_ulong #elif XXH_GCC_VERSION >= 403 # define XXH_swap32 __builtin_bswap32 #else static U32 XXH_swap32 (U32 x) { return ((x << 24) & 0xff000000 ) | ((x << 8) & 0x00ff0000 ) | ((x >> 8) & 0x0000ff00 ) | ((x >> 24) & 0x000000ff ); } #endif /* ************************************* * Architecture Macros ***************************************/ typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess; /* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */ #ifndef XXH_CPU_LITTLE_ENDIAN static const int g_one = 1; # define XXH_CPU_LITTLE_ENDIAN (*(const char*)(&g_one)) #endif /* *************************** * Memory reads *****************************/ typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment; FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align) { if (align==XXH_unaligned) return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr)); else return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr); } FORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianess endian) { return XXH_readLE32_align(ptr, endian, XXH_unaligned); } /* ************************************* * Macros ***************************************/ #define XXH_STATIC_ASSERT(c) { enum { XXH_sa = 1/(int)(!!(c)) }; } /* use after variable declarations */ #define XXH_get32bits(p) XXH_readLE32_align(p, endian, align) #ifndef XXH_NO_LONG_LONG /* ******************************************************************* * 64-bits hash functions *********************************************************************/ /*====== Memory access ======*/ #ifndef MEM_MODULE # define MEM_MODULE # if !defined (__VMS) \ && (defined (__cplusplus) \ || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) # include <stdint.h> typedef uint64_t U64; # else /* if compiler doesn't support unsigned long long, replace by another 64-bit type */ typedef unsigned long long U64; # endif #endif #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) /* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */ static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; } #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) /* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ /* currently only defined for gcc and icc */ typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign64; static U64 XXH_read64(const void* ptr) { return ((const unalign64*)ptr)->u64; } #else /* portable and safe solution. Generally efficient.
 * see : http://stackoverflow.com/a/32095106/646947 */ static U64 XXH_read64(const void* memPtr) { U64 val; memcpy(&val, memPtr, sizeof(val)); return val; } #endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ #if defined(_MSC_VER) /* Visual Studio */ # define XXH_swap64 _byteswap_uint64 #elif XXH_GCC_VERSION >= 403 # define XXH_swap64 __builtin_bswap64 #else static U64 XXH_swap64 (U64 x) { return ((x << 56) & 0xff00000000000000ULL) | ((x << 40) & 0x00ff000000000000ULL) | ((x << 24) & 0x0000ff0000000000ULL) | ((x << 8) & 0x000000ff00000000ULL) | ((x >> 8) & 0x00000000ff000000ULL) | ((x >> 24) & 0x0000000000ff0000ULL) | ((x >> 40) & 0x000000000000ff00ULL) | ((x >> 56) & 0x00000000000000ffULL); } #endif FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align) { if (align==XXH_unaligned) return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr)); else return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr); } FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianess endian) { return XXH_readLE64_align(ptr, endian, XXH_unaligned); } static U64 XXH_readBE64(const void* ptr) { return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr); } /*====== xxh64 ======*/ static const U64 PRIME64_1 = 11400714785074694791ULL; static const U64 PRIME64_2 = 14029467366897019727ULL; static const U64 PRIME64_3 = 1609587929392839161ULL; static const U64 PRIME64_4 = 9650029242287828579ULL; static const U64 PRIME64_5 = 2870177450012600261ULL; static U64 XXH64_round(U64 acc, U64 input) { acc += input * PRIME64_2; acc = XXH_rotl64(acc, 31); acc *= PRIME64_1; return acc; } static U64 XXH64_mergeRound(U64 acc, U64 val) { val = XXH64_round(0, val); acc ^= val; acc = acc * PRIME64_1 + PRIME64_4; return acc; } FORCE_INLINE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align) { const BYTE* p = (const BYTE*)input; const BYTE* bEnd = p + len; U64 h64; #define XXH_get64bits(p) XXH_readLE64_align(p, endian, align) #if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) if (p==NULL) { len=0; bEnd=p=(const BYTE*)(size_t)32; } #endif if (len>=32) { const BYTE* const limit = bEnd - 32; U64 v1 = seed + PRIME64_1 + PRIME64_2; U64 v2 = seed + PRIME64_2; U64 v3 = seed + 0; U64 v4 = seed - PRIME64_1; do { v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8; v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8; v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8; v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8; } while (p<=limit); h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); h64 = XXH64_mergeRound(h64, v1); h64 = XXH64_mergeRound(h64, v2); h64 = XXH64_mergeRound(h64, v3); h64 = XXH64_mergeRound(h64, v4); } else { h64 = seed + PRIME64_5; } h64 += (U64) len; while (p+8<=bEnd) { U64 const k1 = XXH64_round(0, XXH_get64bits(p)); h64 ^= k1; h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; p+=8; } if (p+4<=bEnd) { h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1; h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; p+=4; } while (p<bEnd) { h64 ^= (*p) * PRIME64_5; h64 = XXH_rotl64(h64, 11) * PRIME64_1; p++; } h64 ^= h64 >> 33; h64 *= PRIME64_2; h64 ^= h64 >> 29; h64 *= PRIME64_3; h64 ^= h64 >> 32; return h64; } XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed) { #if 0 /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ XXH64_state_t state; XXH64_reset(&state, seed); XXH64_update(&state, input, len); return XXH64_digest(&state); #else XXH_endianess endian_detected =
(XXH_endianess)XXH_CPU_LITTLE_ENDIAN; if (XXH_FORCE_ALIGN_CHECK) { if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */ if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned); else return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned); } } if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned); else return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned); #endif } /*====== Hash Streaming ======*/ XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void) { return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t)); } XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr) { XXH_free(statePtr); return XXH_OK; } XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t* srcState) { memcpy(dstState, srcState, sizeof(*dstState)); } XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed) { XXH64_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */ memset(&state, 0, sizeof(state)); state.v1 = seed + PRIME64_1 + PRIME64_2; state.v2 = seed + PRIME64_2; state.v3 = seed + 0; state.v4 = seed - PRIME64_1; /* do not write into reserved, planned to be removed in a future version */ memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved)); return XXH_OK; } FORCE_INLINE XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian) { const BYTE* p = (const BYTE*)input; const BYTE* const bEnd = p + len; if (input==NULL) #if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) return XXH_OK; #else return XXH_ERROR; #endif state->total_len += len; if (state->memsize + len < 32) { /* fill in tmp buffer */ XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len); state->memsize += (U32)len; return XXH_OK; } if (state->memsize) { /* tmp buffer is full */ XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize); state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian)); state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian)); state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian)); state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian)); p += 32-state->memsize; state->memsize = 0; } if (p+32 <= bEnd) { const BYTE* const limit = bEnd - 32; U64 v1 = state->v1; U64 v2 = state->v2; U64 v3 = state->v3; U64 v4 = state->v4; do { v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8; v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8; v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8; v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8; } while (p<=limit); state->v1 = v1; state->v2 = v2; state->v3 = v3; state->v4 = v4; } if (p < bEnd) { XXH_memcpy(state->mem64, p, (size_t)(bEnd-p)); state->memsize = (unsigned)(bEnd-p); } return XXH_OK; } XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len) { XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) return XXH64_update_endian(state_in, input, len, XXH_littleEndian); else return XXH64_update_endian(state_in, input, len, XXH_bigEndian); } FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian) { 
const BYTE * p = (const BYTE*)state->mem64; const BYTE* const bEnd = (const BYTE*)state->mem64 + state->memsize; U64 h64; if (state->total_len >= 32) { U64 const v1 = state->v1; U64 const v2 = state->v2; U64 const v3 = state->v3; U64 const v4 = state->v4; h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); h64 = XXH64_mergeRound(h64, v1); h64 = XXH64_mergeRound(h64, v2); h64 = XXH64_mergeRound(h64, v3); h64 = XXH64_mergeRound(h64, v4); } else { h64 = state->v3 + PRIME64_5; } h64 += (U64) state->total_len; while (p+8<=bEnd) { U64 const k1 = XXH64_round(0, XXH_readLE64(p, endian)); h64 ^= k1; h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; p+=8; } if (p+4<=bEnd) { h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1; h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; p+=4; } while (p<bEnd) { h64 ^= (*p) * PRIME64_5; h64 = XXH_rotl64(h64, 11) * PRIME64_1; p++; } h64 ^= h64 >> 33; h64 *= PRIME64_2; h64 ^= h64 >> 29; h64 *= PRIME64_3; h64 ^= h64 >> 32; return h64; } XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in) { XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) return XXH64_digest_endian(state_in, XXH_littleEndian); else return XXH64_digest_endian(state_in, XXH_bigEndian); } /*====== Canonical representation ======*/ XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash) { XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t)); if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash); memcpy(dst, &hash, sizeof(*dst)); } XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src) { return XXH_readBE64(src); } #endif /* XXH_NO_LONG_LONG */ #endif /* USE_JODY_HASH */ jdupes-1.27.3/xxhash.h000066400000000000000000000207231447252140200145570ustar00rootroot00000000000000/* xxHash - Extremely Fast Hash algorithm Header File Copyright (C) 2012-2016, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - xxHash source repository : https://github.com/Cyan4973/xxHash */ /* Notice extracted from xxHash homepage : xxHash is an extremely fast Hash algorithm, running at RAM speed limits. It also successfully passes all tests from the SMHasher suite.
Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz) Name Speed Q.Score Author xxHash 5.4 GB/s 10 CrapWow 3.2 GB/s 2 Andrew MurmurHash 3a 2.7 GB/s 10 Austin Appleby SpookyHash 2.0 GB/s 10 Bob Jenkins SBox 1.4 GB/s 9 Bret Mulvey Lookup3 1.2 GB/s 9 Bob Jenkins SuperFastHash 1.2 GB/s 1 Paul Hsieh CityHash64 1.05 GB/s 10 Pike & Alakuijala FNV 0.55 GB/s 5 Fowler, Noll, Vo CRC32 0.43 GB/s 9 MD5-32 0.33 GB/s 10 Ronald L. Rivest SHA1-32 0.28 GB/s 10 Q.Score is a measure of quality of the hash function. It depends on successfully passing SMHasher test set. 10 is a perfect score. A 64-bits version, named XXH64, is available since r35. It offers much better speed, but for 64-bits applications only. Name Speed on 64 bits Speed on 32 bits XXH64 13.8 GB/s 1.9 GB/s XXH32 6.8 GB/s 6.0 GB/s */ #ifndef XXHASH_H_5627135585666179 #define XXHASH_H_5627135585666179 1 #if defined (__cplusplus) extern "C" { #endif /* **************************** * Definitions ******************************/ #include <stddef.h> /* size_t */ typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode; /* **************************** * API modifier ******************************/ /** XXH_PRIVATE_API * This is useful to include xxhash functions in `static` mode * in order to inline them, and remove their symbol from the public list. * Methodology : * #define XXH_PRIVATE_API * #include "xxhash.h" * `xxhash.c` is automatically included. * It's not useful to compile and link it as a separate module. */ #ifdef XXH_PRIVATE_API # ifndef XXH_STATIC_LINKING_ONLY # define XXH_STATIC_LINKING_ONLY # endif # if defined(__GNUC__) # define XXH_PUBLIC_API static __inline __attribute__((unused)) # elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) # define XXH_PUBLIC_API static inline # elif defined(_MSC_VER) # define XXH_PUBLIC_API static __inline # else /* this version may generate warnings for unused static functions */ # define XXH_PUBLIC_API static # endif #else # define XXH_PUBLIC_API /* do nothing */ #endif /* XXH_PRIVATE_API */ /*!XXH_NAMESPACE, aka Namespace Emulation : If you want to include _and expose_ xxHash functions from within your own library, but also want to avoid symbol collisions with other libraries which may also include xxHash, you can use XXH_NAMESPACE, to automatically prefix any public symbol from xxhash library with the value of XXH_NAMESPACE (therefore, avoid NULL and numeric values). Note that no change is required within the calling program as long as it includes `xxhash.h` : regular symbol name will be automatically translated by this header.
*/ #ifdef XXH_NAMESPACE # define XXH_CAT(A,B) A##B # define XXH_NAME2(A,B) XXH_CAT(A,B) # define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64) # define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState) # define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState) # define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset) # define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update) # define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest) # define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState) # define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash) # define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical) #endif /* ************************************* * Version ***************************************/ #define XXH_VERSION_MAJOR 0 #define XXH_VERSION_MINOR 6 #define XXH_VERSION_RELEASE 3 #define XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE) XXH_PUBLIC_API unsigned XXH_versionNumber (void); #ifndef XXH_NO_LONG_LONG /*-********************************************************************** * 64-bits hash ************************************************************************/ typedef unsigned long long XXH64_hash_t; /*! XXH64() : Calculate the 64-bits hash of sequence of length "len" stored at memory address "input". "seed" can be used to alter the result predictably. This function runs faster on 64-bits systems, but slower on 32-bits systems (see benchmark). */ XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, unsigned long long seed); /*====== Streaming ======*/ typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */ XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void); XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr); XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dst_state, const XXH64_state_t* src_state); XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH64_state_t* statePtr, unsigned long long seed); XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length); XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* statePtr); /*====== Canonical representation ======*/ typedef struct { unsigned char digest[8]; } XXH64_canonical_t; XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash); XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src); #endif /* XXH_NO_LONG_LONG */ #ifdef XXH_STATIC_LINKING_ONLY /* ================================================================================================ This section contains declarations which are not guaranteed to remain stable. They may change in future versions, becoming incompatible with a different version of the library. These declarations should only be used with static linking. Never use them in association with dynamic linking ! =================================================================================================== */ /* These definitions are only meant to make possible static allocation of XXH state, on stack or in a struct for example. Never use members directly. 
*/ #ifndef XXH_NO_LONG_LONG /* remove 64-bits support */ struct XXH64_state_s { unsigned long long total_len; unsigned long long v1; unsigned long long v2; unsigned long long v3; unsigned long long v4; unsigned long long mem64[4]; /* buffer defined as U64 for alignment */ unsigned memsize; unsigned reserved[2]; /* never read nor write, will be removed in a future version */ }; /* typedef'd to XXH64_state_t */ #endif #ifdef XXH_PRIVATE_API # include "xxhash.c" /* include xxhash function bodies as `static`, for inlining */ #endif #endif /* XXH_STATIC_LINKING_ONLY */ #if defined (__cplusplus) } #endif #endif /* XXHASH_H_5627135585666179 */
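
A minimal usage sketch for the XXH64 API declared above (not part of the original sources; the buffer contents and zero seed are arbitrary illustration data). For the same input and seed, the streaming interface must produce the same digest as the one-shot XXH64(), which is how a file can be hashed block by block:

    #include <assert.h>
    #include "xxhash.h"

    int xxh64_streaming_sketch(void)
    {
      const char data[] = "jdupes hashes file contents in blocks";
      size_t len = sizeof(data) - 1;
      size_t half = len / 2;
      unsigned long long one_shot, streamed;
      XXH64_state_t *state;

      one_shot = XXH64(data, len, 0);                /* whole buffer at once */

      state = XXH64_createState();
      if (state == NULL) return 1;
      XXH64_reset(state, 0);                         /* same seed as one-shot */
      XXH64_update(state, data, half);               /* first chunk */
      XXH64_update(state, data + half, len - half);  /* remaining chunk */
      streamed = XXH64_digest(state);
      XXH64_freeState(state);

      assert(streamed == one_shot);                  /* streaming matches one-shot */
      return 0;
    }
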