Skip site navigation (1)Skip section navigation (2)
Date:      Mon, 22 Oct 2018 19:45:18 +0000 (UTC)
From:      Conrad Meyer <cem@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-vendor@freebsd.org
Subject:   svn commit: r339610 - in vendor/zstd/dist: . contrib/adaptive-compression contrib/long_distance_matching contrib/meson doc doc/images lib lib/common lib/compress lib/decompress lib/deprecated lib/d...
Message-ID:  <201810221945.w9MJjIMU075839@repo.freebsd.org>

next in thread | raw e-mail | index | archive | help
Author: cem
Date: Mon Oct 22 19:45:18 2018
New Revision: 339610
URL: https://svnweb.freebsd.org/changeset/base/339610

Log:
  import zstd 1.3.3

Added:
  vendor/zstd/dist/contrib/adaptive-compression/
  vendor/zstd/dist/contrib/adaptive-compression/Makefile   (contents, props changed)
  vendor/zstd/dist/contrib/adaptive-compression/README.md
  vendor/zstd/dist/contrib/adaptive-compression/adapt.c   (contents, props changed)
  vendor/zstd/dist/contrib/adaptive-compression/datagencli.c   (contents, props changed)
  vendor/zstd/dist/contrib/adaptive-compression/test-correctness.sh   (contents, props changed)
  vendor/zstd/dist/contrib/adaptive-compression/test-performance.sh   (contents, props changed)
  vendor/zstd/dist/contrib/long_distance_matching/
  vendor/zstd/dist/contrib/long_distance_matching/Makefile   (contents, props changed)
  vendor/zstd/dist/contrib/long_distance_matching/README.md
  vendor/zstd/dist/contrib/long_distance_matching/ldm.c   (contents, props changed)
  vendor/zstd/dist/contrib/long_distance_matching/ldm.h   (contents, props changed)
  vendor/zstd/dist/contrib/long_distance_matching/ldm_common.c   (contents, props changed)
  vendor/zstd/dist/contrib/long_distance_matching/ldm_params.h   (contents, props changed)
  vendor/zstd/dist/contrib/long_distance_matching/main.c   (contents, props changed)
  vendor/zstd/dist/doc/images/ldmCspeed.png   (contents, props changed)
  vendor/zstd/dist/doc/images/ldmDspeed.png   (contents, props changed)
  vendor/zstd/dist/doc/images/linux-4.7-12-compress.png   (contents, props changed)
  vendor/zstd/dist/doc/images/linux-4.7-12-decompress.png   (contents, props changed)
  vendor/zstd/dist/doc/images/linux-git-compress.png   (contents, props changed)
  vendor/zstd/dist/doc/images/linux-git-decompress.png   (contents, props changed)
  vendor/zstd/dist/doc/images/zstd_logo86.png   (contents, props changed)
  vendor/zstd/dist/lib/compress/zstd_compress_internal.h   (contents, props changed)
  vendor/zstd/dist/tests/seqgen.c   (contents, props changed)
  vendor/zstd/dist/tests/seqgen.h   (contents, props changed)
Deleted:
  vendor/zstd/dist/lib/compress/zstd_compress.h
Modified:
  vendor/zstd/dist/Makefile
  vendor/zstd/dist/NEWS
  vendor/zstd/dist/README.md
  vendor/zstd/dist/circle.yml
  vendor/zstd/dist/contrib/meson/meson.build
  vendor/zstd/dist/doc/zstd_compression_format.md
  vendor/zstd/dist/doc/zstd_manual.html
  vendor/zstd/dist/lib/BUCK
  vendor/zstd/dist/lib/common/bitstream.h
  vendor/zstd/dist/lib/common/mem.h
  vendor/zstd/dist/lib/common/pool.c
  vendor/zstd/dist/lib/common/zstd_common.c
  vendor/zstd/dist/lib/common/zstd_internal.h
  vendor/zstd/dist/lib/compress/zstd_compress.c
  vendor/zstd/dist/lib/compress/zstd_double_fast.c
  vendor/zstd/dist/lib/compress/zstd_double_fast.h
  vendor/zstd/dist/lib/compress/zstd_fast.c
  vendor/zstd/dist/lib/compress/zstd_fast.h
  vendor/zstd/dist/lib/compress/zstd_lazy.c
  vendor/zstd/dist/lib/compress/zstd_lazy.h
  vendor/zstd/dist/lib/compress/zstd_ldm.h
  vendor/zstd/dist/lib/compress/zstd_opt.c
  vendor/zstd/dist/lib/compress/zstd_opt.h
  vendor/zstd/dist/lib/compress/zstdmt_compress.c
  vendor/zstd/dist/lib/compress/zstdmt_compress.h
  vendor/zstd/dist/lib/decompress/zstd_decompress.c
  vendor/zstd/dist/lib/deprecated/zbuff_compress.c
  vendor/zstd/dist/lib/dictBuilder/zdict.c
  vendor/zstd/dist/lib/legacy/zstd_v01.c
  vendor/zstd/dist/lib/legacy/zstd_v02.c
  vendor/zstd/dist/lib/legacy/zstd_v03.c
  vendor/zstd/dist/lib/legacy/zstd_v04.c
  vendor/zstd/dist/lib/legacy/zstd_v05.c
  vendor/zstd/dist/lib/legacy/zstd_v06.c
  vendor/zstd/dist/lib/legacy/zstd_v07.c
  vendor/zstd/dist/lib/zstd.h
  vendor/zstd/dist/programs/BUCK
  vendor/zstd/dist/programs/Makefile
  vendor/zstd/dist/programs/bench.c
  vendor/zstd/dist/programs/bench.h
  vendor/zstd/dist/programs/dibio.c
  vendor/zstd/dist/programs/fileio.c
  vendor/zstd/dist/programs/fileio.h
  vendor/zstd/dist/programs/platform.h
  vendor/zstd/dist/programs/util.h
  vendor/zstd/dist/programs/zstd.1
  vendor/zstd/dist/programs/zstd.1.md
  vendor/zstd/dist/programs/zstdcli.c
  vendor/zstd/dist/tests/Makefile
  vendor/zstd/dist/tests/decodecorpus.c
  vendor/zstd/dist/tests/fullbench.c
  vendor/zstd/dist/tests/fuzzer.c
  vendor/zstd/dist/tests/paramgrill.c
  vendor/zstd/dist/tests/playTests.sh
  vendor/zstd/dist/tests/zbufftest.c
  vendor/zstd/dist/tests/zstreamtest.c
  vendor/zstd/dist/zlibWrapper/BUCK
  vendor/zstd/dist/zlibWrapper/examples/zwrapbench.c
  vendor/zstd/dist/zlibWrapper/zstd_zlibwrapper.c

Modified: vendor/zstd/dist/Makefile
==============================================================================
--- vendor/zstd/dist/Makefile	Mon Oct 22 19:39:20 2018	(r339609)
+++ vendor/zstd/dist/Makefile	Mon Oct 22 19:45:18 2018	(r339610)
@@ -72,9 +72,12 @@ zstdmt:
 zlibwrapper:
 	$(MAKE) -C $(ZWRAPDIR) test
 
+.PHONY: check
+check: shortest
+
 .PHONY: test shortest
 test shortest:
-	$(MAKE) -C $(PRGDIR) allVariants
+	$(MAKE) -C $(PRGDIR) allVariants MOREFLAGS="-g -DZSTD_DEBUG=1"
 	$(MAKE) -C $(TESTDIR) $@
 
 .PHONY: examples
@@ -127,11 +130,6 @@ uninstall:
 travis-install:
 	$(MAKE) install PREFIX=~/install_test_dir
 
-.PHONY: gppbuild
-gppbuild: clean
-	g++ -v
-	CC=g++ $(MAKE) -C programs all CFLAGS="-O3 -Wall -Wextra -Wundef -Wshadow -Wcast-align -Werror"
-
 .PHONY: gcc5build
 gcc5build: clean
 	gcc-5 -v
@@ -163,7 +161,7 @@ aarch64build: clean
 	CC=aarch64-linux-gnu-gcc CFLAGS="-Werror" $(MAKE) allzstd
 
 ppcbuild: clean
-	CC=powerpc-linux-gnu-gcc CLAGS="-m32 -Wno-attributes -Werror" $(MAKE) allzstd
+	CC=powerpc-linux-gnu-gcc CFLAGS="-m32 -Wno-attributes -Werror" $(MAKE) allzstd
 
 ppc64build: clean
 	CC=powerpc-linux-gnu-gcc CFLAGS="-m64 -Werror" $(MAKE) allzstd

Modified: vendor/zstd/dist/NEWS
==============================================================================
--- vendor/zstd/dist/NEWS	Mon Oct 22 19:39:20 2018	(r339609)
+++ vendor/zstd/dist/NEWS	Mon Oct 22 19:45:18 2018	(r339610)
@@ -1,3 +1,15 @@
+v1.3.3
+perf: faster zstd_opt strategy (levels 17-19)
+fix : bug #944 : multithreading with shared dictionary and large data, reported by @gsliepen
+cli : fix : content size written in header by default
+cli : fix : improved LZ4 format support, by @felixhandte
+cli : new : hidden command `-S`, to benchmark multiple files while generating one result per file
+api : fix : support large skippable frames, by @terrelln
+api : fix : streaming interface was adding a useless 3-bytes null block to small frames
+api : change : when setting `pledgedSrcSize`, use `ZSTD_CONTENTSIZE_UNKNOWN` macro value to mean "unknown"
+build: fix : compilation under rhel6 and centos6, reported by @pixelb
+build: added `check` target
+
 v1.3.2
 new : long range mode, using --long command, by Stella Lau (@stellamplau)
 new : ability to generate and decode magicless frames (#591)

Modified: vendor/zstd/dist/README.md
==============================================================================
--- vendor/zstd/dist/README.md	Mon Oct 22 19:39:20 2018	(r339609)
+++ vendor/zstd/dist/README.md	Mon Oct 22 19:45:18 2018	(r339610)
@@ -1,15 +1,16 @@
- __Zstandard__, or `zstd` as short version, is a fast lossless compression algorithm,
- targeting real-time compression scenarios at zlib-level and better compression ratios.
+<p align="center"><img src="https://raw.githubusercontent.com/facebook/zstd/readme/doc/images/zstd_logo86.png" alt="Zstandard"></p>
 
-It is provided as an open-source BSD-licensed **C** library,
-and a command line utility producing and decoding `.zst` and `.gz` files.
-For other programming languages,
-you can consult a list of known ports on [Zstandard homepage](http://www.zstd.net/#other-languages).
+__Zstandard__, or `zstd` as short version, is a fast lossless compression algorithm,
+targeting real-time compression scenarios at zlib-level and better compression ratios.
+It's backed by a very fast entropy stage, provided by [Huff0 and FSE library](https://github.com/Cyan4973/FiniteStateEntropy).
 
-| dev branch status |
-|-------------------|
-| [![Build Status][travisDevBadge]][travisLink]   [![Build status][AppveyorDevBadge]][AppveyorLink]   [![Build status][CircleDevBadge]][CircleLink]
+The project is provided as an open-source BSD-licensed **C** library,
+and a command line utility producing and decoding `.zst`, `.gz`, `.xz` and `.lz4` files.
+Should your project require another programming language,
+a list of known ports and bindings is provided on [Zstandard homepage](http://www.zstd.net/#other-languages).
 
+Development branch status : [![Build Status][travisDevBadge]][travisLink]   [![Build status][AppveyorDevBadge]][AppveyorLink]   [![Build status][CircleDevBadge]][CircleLink]
+
 [travisDevBadge]: https://travis-ci.org/facebook/zstd.svg?branch=dev "Continuous Integration test suite"
 [travisLink]: https://travis-ci.org/facebook/zstd
 [AppveyorDevBadge]: https://ci.appveyor.com/api/projects/status/xt38wbdxjk5mrbem/branch/dev?svg=true "Windows test suite"
@@ -17,8 +18,9 @@ you can consult a list of known ports on [Zstandard ho
 [CircleDevBadge]: https://circleci.com/gh/facebook/zstd/tree/dev.svg?style=shield "Short test suite"
 [CircleLink]: https://circleci.com/gh/facebook/zstd
 
+### Benchmarks
 
-As a reference, several fast compression algorithms were tested and compared
+For reference, several fast compression algorithms were tested and compared
 on a server running Linux Debian (`Linux version 4.8.0-1-amd64`),
 with a Core i7-6700K CPU @ 4.0GHz,
 using [lzbench], an open-source in-memory benchmark by @inikep
@@ -43,7 +45,9 @@ on the [Silesia compression corpus].
 [LZ4]: http://www.lz4.org/
 
 Zstd can also offer stronger compression ratios at the cost of compression speed.
-Speed vs Compression trade-off is configurable by small increments. Decompression speed is preserved and remains roughly the same at all settings, a property shared by most LZ compression algorithms, such as [zlib] or lzma.
+Speed vs Compression trade-off is configurable by small increments.
+Decompression speed is preserved and remains roughly the same at all settings,
+a property shared by most LZ compression algorithms, such as [zlib] or lzma.
 
 The following tests were run
 on a server running Linux Debian (`Linux version 4.8.0-1-amd64`)
@@ -56,8 +60,8 @@ Compression Speed vs Ratio | Decompression Speed
 ---------------------------|--------------------
 ![Compression Speed vs Ratio](doc/images/Cspeed4.png "Compression Speed vs Ratio") | ![Decompression Speed](doc/images/Dspeed4.png "Decompression Speed")
 
-Several algorithms can produce higher compression ratios, but at slower speeds, falling outside of the graph.
-For a larger picture including very slow modes, [click on this link](doc/images/DCspeed5.png) .
+A few other algorithms can produce higher compression ratios at slower speeds, falling outside of the graph.
+For a larger picture including slow modes, [click on this link](doc/images/DCspeed5.png).
 
 
 ### The case for Small Data compression
@@ -84,7 +88,7 @@ Training works if there is some correlation in a famil
 Hence, deploying one dictionary per type of data will provide the greatest benefits.
 Dictionary gains are mostly effective in the first few KB. Then, the compression algorithm will gradually use previously decoded content to better compress the rest of the file.
 
-#### Dictionary compression How To :
+#### Dictionary compression How To:
 
 1) Create the dictionary
 
@@ -99,19 +103,16 @@ Dictionary gains are mostly effective in the first few
 `zstd -D dictionaryName --decompress FILE.zst`
 
 
-### Build
+### Build instructions
 
-Once you have the repository cloned, there are multiple ways provided to build Zstandard.
-
 #### Makefile
 
-If your system is compatible with a standard `make` (or `gmake`) binary generator,
-you can simply run it at the root directory.
-It will generate `zstd` within root directory.
+If your system is compatible with standard `make` (or `gmake`),
+invoking `make` in root directory will generate `zstd` cli in root directory.
 
-Other available options include :
-- `make install` : create and install zstd binary, library and man page
-- `make test` : create and run `zstd` and test tools on local platform
+Other available options include:
+- `make install` : create and install zstd cli, library and man pages
+- `make check` : create and run `zstd`, tests its behavior on local platform
 
 #### cmake
 
@@ -125,9 +126,9 @@ A Meson project is provided within `contrib/meson`.
 
 #### Visual Studio (Windows)
 
-Going into `build` directory, you will find additional possibilities :
-- Projects for Visual Studio 2005, 2008 and 2010
-  + VS2010 project is compatible with VS2012, VS2013 and VS2015
+Going into `build` directory, you will find additional possibilities:
+- Projects for Visual Studio 2005, 2008 and 2010.
+  + VS2010 project is compatible with VS2012, VS2013 and VS2015.
 - Automated build scripts for Visual compiler by @KrzysFR , in `build/VS_scripts`,
   which will build `zstd` cli and `libzstd` library without any need to open Visual Studio solution.
 
@@ -143,11 +144,7 @@ Zstandard is dual-licensed under [BSD](LICENSE) and [G
 
 ### Contributing
 
-The "dev" branch is the one where all contributions will be merged before reaching "master".
-If you plan to propose a patch, please commit into the "dev" branch or its own feature branch.
+The "dev" branch is the one where all contributions are merged before reaching "master".
+If you plan to propose a patch, please commit into the "dev" branch, or its own feature branch.
 Direct commit to "master" are not permitted.
 For more information, please read [CONTRIBUTING](CONTRIBUTING.md).
-
-### Miscellaneous
-
-Zstd entropy stage is provided by [Huff0 and FSE, from Finite State Entropy library](https://github.com/Cyan4973/FiniteStateEntropy).

Modified: vendor/zstd/dist/circle.yml
==============================================================================
--- vendor/zstd/dist/circle.yml	Mon Oct 22 19:39:20 2018	(r339609)
+++ vendor/zstd/dist/circle.yml	Mon Oct 22 19:45:18 2018	(r339610)
@@ -3,13 +3,11 @@ dependencies:
     - sudo dpkg --add-architecture i386
     - sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test; sudo apt-get -y -qq update
     - sudo apt-get -y install gcc-powerpc-linux-gnu gcc-arm-linux-gnueabi libc6-dev-armel-cross gcc-aarch64-linux-gnu libc6-dev-arm64-cross
-    - sudo apt-get -y install libstdc++-7-dev clang gcc g++ gcc-5 gcc-6 gcc-7 zlib1g-dev liblzma-dev
-    - sudo apt-get -y install linux-libc-dev:i386 libc6-dev-i386
 
 test:
   override:
     - ? |
-        if [[ "$CIRCLE_NODE_INDEX" == "0" ]]                                    ; then cc -v; make all   && make clean && make -C lib libzstd-nomt && make clean; fi &&
+        if [[ "$CIRCLE_NODE_INDEX" == "0" ]]                                    ; then cc -v; CFLAGS="-O0 -Werror" make all && make clean; fi &&
         if [[ "$CIRCLE_NODE_TOTAL" < "2" ]] || [[ "$CIRCLE_NODE_INDEX" == "1" ]]; then make gnu90build   && make clean; fi
       :
         parallel: true
@@ -20,32 +18,17 @@ test:
         parallel: true
     - ? |
         if [[ "$CIRCLE_NODE_INDEX" == "0" ]]                                    ; then make c11build     && make clean; fi &&
-        if [[ "$CIRCLE_NODE_TOTAL" < "2" ]] || [[ "$CIRCLE_NODE_INDEX" == "1" ]]; then make cmakebuild   && make clean; fi
+        if [[ "$CIRCLE_NODE_TOTAL" < "2" ]] || [[ "$CIRCLE_NODE_INDEX" == "1" ]]; then make ppc64build   && make clean; fi
       :
         parallel: true
     - ? |
-        if [[ "$CIRCLE_NODE_INDEX" == "0" ]]                                    ; then make gppbuild     && make clean; fi &&
-        if [[ "$CIRCLE_NODE_TOTAL" < "2" ]] || [[ "$CIRCLE_NODE_INDEX" == "1" ]]; then make gcc5build    && make clean; fi
-      :
-        parallel: true
-    - ? |
-        if [[ "$CIRCLE_NODE_INDEX" == "0" ]]                                    ; then make gcc6build    && make clean; fi &&
-        if [[ "$CIRCLE_NODE_TOTAL" < "2" ]] || [[ "$CIRCLE_NODE_INDEX" == "1" ]]; then make clangbuild   && make clean; fi
-      :
-        parallel: true
-    - ? |
-        if [[ "$CIRCLE_NODE_INDEX" == "0" ]]                                    ; then make m32build     && make clean; fi &&
-        if [[ "$CIRCLE_NODE_TOTAL" < "2" ]] || [[ "$CIRCLE_NODE_INDEX" == "1" ]]; then make armbuild     && make clean; fi
-      :
-        parallel: true
-    - ? |
         if [[ "$CIRCLE_NODE_INDEX" == "0" ]]                                    ; then make aarch64build && make clean; fi &&
         if [[ "$CIRCLE_NODE_TOTAL" < "2" ]] || [[ "$CIRCLE_NODE_INDEX" == "1" ]]; then make ppcbuild     && make clean; fi
       :
         parallel: true
     - ? |
-        if [[ "$CIRCLE_NODE_INDEX" == "0" ]]                                    ; then make ppc64build   && make clean; fi &&
-        if [[ "$CIRCLE_NODE_TOTAL" < "2" ]] || [[ "$CIRCLE_NODE_INDEX" == "1" ]]; then make gcc7build    && make clean; fi
+        if [[ "$CIRCLE_NODE_INDEX" == "0" ]]                                    ; then make -j regressiontest && make clean; fi &&
+        if [[ "$CIRCLE_NODE_TOTAL" < "2" ]] || [[ "$CIRCLE_NODE_INDEX" == "1" ]]; then make armbuild     && make clean; fi
       :
         parallel: true
     - ? |
@@ -54,8 +37,8 @@ test:
       :
         parallel: true
     - ? |
-        if [[ "$CIRCLE_NODE_INDEX" == "0" ]]                                    ; then make -j regressiontest && make clean; fi &&
-        if [[ "$CIRCLE_NODE_TOTAL" < "2" ]] || [[ "$CIRCLE_NODE_INDEX" == "1" ]]; then true; fi # Could add another test here
+        if [[ "$CIRCLE_NODE_INDEX" == "0" ]]                                    ; then make cxxtest      && make clean; fi &&
+        if [[ "$CIRCLE_NODE_TOTAL" < "2" ]] || [[ "$CIRCLE_NODE_INDEX" == "1" ]]; then make -C lib libzstd-nomt && make clean; fi
       :
         parallel: true
 

Added: vendor/zstd/dist/contrib/adaptive-compression/Makefile
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ vendor/zstd/dist/contrib/adaptive-compression/Makefile	Mon Oct 22 19:45:18 2018	(r339610)
@@ -0,0 +1,76 @@
+
+ZSTDDIR = ../../lib
+PRGDIR  = ../../programs
+ZSTDCOMMON_FILES := $(ZSTDDIR)/common/*.c
+ZSTDCOMP_FILES   := $(ZSTDDIR)/compress/*.c
+ZSTDDECOMP_FILES := $(ZSTDDIR)/decompress/*.c
+ZSTD_FILES  := $(ZSTDDECOMP_FILES) $(ZSTDCOMMON_FILES) $(ZSTDCOMP_FILES)
+
+MULTITHREAD_LDFLAGS = -pthread
+DEBUGFLAGS= -g -DZSTD_DEBUG=1
+CPPFLAGS += -I$(ZSTDDIR) -I$(ZSTDDIR)/common -I$(ZSTDDIR)/compress \
+            -I$(ZSTDDIR)/dictBuilder -I$(ZSTDDIR)/deprecated -I$(PRGDIR)
+CFLAGS   ?= -O3
+CFLAGS   += -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow                 \
+            -Wstrict-aliasing=1 -Wswitch-enum -Wdeclaration-after-statement \
+            -Wstrict-prototypes -Wundef -Wformat-security                   \
+            -Wvla -Wformat=2 -Winit-self -Wfloat-equal -Wwrite-strings      \
+            -Wredundant-decls
+CFLAGS   += $(DEBUGFLAGS)
+CFLAGS   += $(MOREFLAGS)
+FLAGS     = $(CPPFLAGS) $(CFLAGS) $(LDFLAGS) $(MULTITHREAD_LDFLAGS)
+
+all: adapt datagen
+
+adapt: $(ZSTD_FILES) adapt.c
+	$(CC) $(FLAGS) $^ -o $@
+
+adapt-debug: $(ZSTD_FILES) adapt.c
+	$(CC) $(FLAGS) -DDEBUG_MODE=2 $^ -o adapt
+
+datagen : $(PRGDIR)/datagen.c datagencli.c
+	$(CC)      $(FLAGS) $^ -o $@
+
+test-adapt-correctness: datagen adapt
+	@./test-correctness.sh
+	@echo "test correctness complete"
+
+test-adapt-performance: datagen adapt
+	@./test-performance.sh
+	@echo "test performance complete"
+
+clean:
+	@$(RM) -f adapt datagen
+	@$(RM) -rf *.dSYM
+	@$(RM) -f tmp*
+	@$(RM) -f tests/*.zst
+	@$(RM) -f tests/tmp*
+	@echo "finished cleaning"
+
+#-----------------------------------------------------------------------------
+# make install is validated only for Linux, OSX, BSD, Hurd and Solaris targets
+#-----------------------------------------------------------------------------
+ifneq (,$(filter $(shell uname),Linux Darwin GNU/kFreeBSD GNU OpenBSD FreeBSD NetBSD DragonFly SunOS))
+
+ifneq (,$(filter $(shell uname),SunOS))
+INSTALL ?= ginstall
+else
+INSTALL ?= install
+endif
+
+PREFIX  ?= /usr/local
+DESTDIR ?=
+BINDIR  ?= $(PREFIX)/bin
+
+INSTALL_PROGRAM ?= $(INSTALL) -m 755
+
+install: adapt
+	@echo Installing binaries
+	@$(INSTALL) -d -m 755 $(DESTDIR)$(BINDIR)/
+	@$(INSTALL_PROGRAM) adapt $(DESTDIR)$(BINDIR)/zstd-adaptive
+	@echo zstd-adaptive installation completed
+
+uninstall:
+	@$(RM) $(DESTDIR)$(BINDIR)/zstd-adaptive
+	@echo zstd-adaptive programs successfully uninstalled
+endif

Added: vendor/zstd/dist/contrib/adaptive-compression/README.md
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ vendor/zstd/dist/contrib/adaptive-compression/README.md	Mon Oct 22 19:45:18 2018	(r339610)
@@ -0,0 +1,91 @@
+### Summary
+
+`adapt` is a new compression tool targeted at optimizing performance across network connections and pipelines. The tool is aimed at sensing network speeds and adapting compression level based on network or pipe speeds.
+In situations where the compression level does not appropriately match the network/pipe speed, compression may be bottlenecking the entire pipeline or the files may not be compressed as much as they potentially could be, therefore losing efficiency. It also becomes quite impractical to manually measure and set an optimal compression level (which could potentially change over time). 
+
+### Using `adapt`
+
+In order to build and use the tool, you can simply run `make adapt` in the `adaptive-compression` directory under `contrib`. This will generate an executable available for use. Another possible method of installation is running `make install`, which will create and install the binary as the command `zstd-adaptive`.
+
+Similar to many other compression utilities, `zstd-adaptive` can be invoked by using the following format:
+
+`zstd-adaptive [options] [file(s)]`
+
+Supported options for the above format are described below. 
+
+`zstd-adaptive` also supports reading from `stdin` and writing to `stdout`, which is potentially more useful. By default, if no files are given, `zstd-adaptive` reads from and writes to standard I/O. Therefore, you can simply insert it within a pipeline like so:
+
+`cat FILE | zstd-adaptive | ssh "cat - > tmp.zst"`
+
+If a file is provided, it is also possible to force writing to stdout using the `-c` flag like so:
+
+`zstd-adaptive -c FILE | ssh "cat - > tmp.zst"`
+
+Several options described below can be used to control the behavior of `zstd-adaptive`. More specifically, using the `-l#` and `-u#` flags will set upper and lower bounds so that the compression level will always be within that range. The `-i#` flag can also be used to change the initial compression level. If an initial compression level is not provided, the initial compression level will be chosen such that it is within the appropriate range (it becomes equal to the lower bound). 
+
+### Options
+`-oFILE` : write output to `FILE`
+
+`-i#`    : provide initial compression level (must be within the appropriate bounds)
+
+`-h`     : display help/information
+
+`-f`     : force the compression level to stay constant
+
+`-c`     : force write to `stdout`
+
+`-p`     : hide progress bar
+
+`-q`     : quiet mode -- do not show progress bar or other information
+
+`-l#`    : set a lower bound on the compression level (default is 1)
+
+`-u#`    : set an upper bound on the compression level (default is 22)
+### Benchmarking / Test results
+#### Artificial Tests
+These artificial tests were run by using the `pv` command line utility in order to limit pipe speeds (25 MB/s read and 5 MB/s write limits were chosen to mimic severe throughput constraints). A 40 GB backup file was sent through a pipeline, compressed, and written out to a file. Compression time, size, and ratio were computed. Data for `zstd -15` was excluded from these tests because the test runs quite long.
+
+<table>
+<tr><th> 25 MB/s read limit </th></tr>
+<tr><td>
+
+| Compressor Name | Ratio | Compressed Size | Compression Time |
+|:----------------|------:|----------------:|-----------------:| 
+| zstd -3         | 2.108 |       20.718 GB |      29m 48.530s |
+| zstd-adaptive   | 2.230 |       19.581 GB |      29m 48.798s |
+
+</td><tr>
+</table>
+
+<table>
+<tr><th> 5 MB/s write limit </th></tr>
+<tr><td>
+
+| Compressor Name | Ratio | Compressed Size | Compression Time |
+|:----------------|------:|----------------:|-----------------:| 
+| zstd -3         | 2.108 |       20.718 GB |   1h 10m 43.076s |
+| zstd-adaptive   | 2.249 |       19.412 GB |   1h 06m 15.577s |
+
+</td></tr>
+</table>
+
+The commands used for this test generally followed the form:
+
+`cat FILE | pv -L 25m -q | COMPRESSION | pv -q > tmp.zst # impose 25 MB/s read limit`
+
+`cat FILE | pv -q | COMPRESSION | pv -L 5m -q > tmp.zst  # impose 5 MB/s write limit`
+
+#### SSH Tests
+
+The following tests were performed by piping a relatively large backup file (approximately 80 GB) through compression and over SSH to be stored on a server. The test data includes statistics for time and compressed size on `zstd` at several compression levels, as well as `zstd-adaptive`. The data highlights the potential advantages that `zstd-adaptive` has over using a low static compression level and the negative impacts that using an excessively high static compression level can have on
+pipe throughput.
+
+| Compressor Name | Ratio | Compressed Size | Compression Time |
+|:----------------|------:|----------------:|-----------------:|
+| zstd -3         | 2.212 |       32.426 GB |   1h 17m 59.756s |
+| zstd -15        | 2.374 |       30.213 GB |   2h 56m 59.441s |
+| zstd-adaptive   | 2.315 |       30.993 GB |   1h 18m 52.860s |
+
+The commands used for this test generally followed the form: 
+
+`cat FILE | COMPRESSION | ssh dev "cat - > tmp.zst"`

Added: vendor/zstd/dist/contrib/adaptive-compression/adapt.c
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ vendor/zstd/dist/contrib/adaptive-compression/adapt.c	Mon Oct 22 19:45:18 2018	(r339610)
@@ -0,0 +1,1137 @@
+/*
+ * Copyright (c) 2017-present, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ */
+
+#include <stdio.h>      /* fprintf */
+#include <stdlib.h>     /* malloc, free */
+#include <pthread.h>    /* pthread functions */
+#include <string.h>     /* memset */
+#include "zstd_internal.h"
+#include "util.h"
+
+#define DISPLAY(...) fprintf(stderr, __VA_ARGS__)
+#define PRINT(...) fprintf(stdout, __VA_ARGS__)
+#define DEBUG(l, ...) { if (g_displayLevel>=l) { DISPLAY(__VA_ARGS__); } }
+#define FILE_CHUNK_SIZE 4 << 20
+#define MAX_NUM_JOBS 2
+#define stdinmark  "/*stdin*\\"
+#define stdoutmark "/*stdout*\\"
+#define MAX_PATH 256
+#define DEFAULT_DISPLAY_LEVEL 1
+#define DEFAULT_COMPRESSION_LEVEL 6
+#define MAX_COMPRESSION_LEVEL_CHANGE 2
+#define CONVERGENCE_LOWER_BOUND 5
+#define CLEVEL_DECREASE_COOLDOWN 5
+#define CHANGE_BY_TWO_THRESHOLD 0.1
+#define CHANGE_BY_ONE_THRESHOLD 0.65
+
+#ifndef DEBUG_MODE
+static int g_displayLevel = DEFAULT_DISPLAY_LEVEL;
+#else
+static int g_displayLevel = DEBUG_MODE;
+#endif
+
+static unsigned g_compressionLevel = DEFAULT_COMPRESSION_LEVEL;
+static UTIL_time_t g_startTime;
+static size_t g_streamedSize = 0;
+static unsigned g_useProgressBar = 1;
+static UTIL_freq_t g_ticksPerSecond;
+static unsigned g_forceCompressionLevel = 0;
+static unsigned g_minCLevel = 1;
+static unsigned g_maxCLevel;
+
+typedef struct {
+    void* start;
+    size_t size;
+    size_t capacity;
+} buffer_t;
+
+typedef struct {
+    size_t filled;
+    buffer_t buffer;
+} inBuff_t;
+
+typedef struct {
+    buffer_t src;
+    buffer_t dst;
+    unsigned jobID;
+    unsigned lastJobPlusOne;
+    size_t compressedSize;
+    size_t dictSize;
+} jobDescription;
+
+typedef struct {
+    pthread_mutex_t pMutex;
+    int noError;
+} mutex_t;
+
+typedef struct {
+    pthread_cond_t pCond;
+    int noError;
+} cond_t;
+
+typedef struct {
+    unsigned compressionLevel;
+    unsigned numJobs;
+    unsigned nextJobID;
+    unsigned threadError;
+
+    /*
+     * JobIDs for the next jobs to be created, compressed, and written
+     */
+    unsigned jobReadyID;
+    unsigned jobCompressedID;
+    unsigned jobWriteID;
+    unsigned allJobsCompleted;
+
+    /*
+     * counter for how many jobs in a row the compression level has not changed
+     * if the counter becomes >= CONVERGENCE_LOWER_BOUND, the next time the
+     * compression level tries to change (by non-zero amount) resets the counter
+     * to 1 and does not apply the change
+     */
+    unsigned convergenceCounter;
+
+    /*
+     * cooldown counter in order to prevent rapid successive decreases in compression level
+     * whenever compression level is decreased, cooldown is set to CLEVEL_DECREASE_COOLDOWN
+     * whenever adaptCompressionLevel() is called and cooldown != 0, it is decremented
+     * as long as cooldown != 0, the compression level cannot be decreased
+     */
+    unsigned cooldown;
+
+    /*
+     * XWaitYCompletion
+     * Range from 0.0 to 1.0
+     * if the value is not 1.0, then this implies that thread X waited on thread Y to finish
+     * and thread Y was XWaitYCompletion finished at the time of the wait (i.e. compressWaitWriteCompletion=0.5
+     * implies that the compression thread waited on the write thread and it was only 50% finished writing a job)
+     */
+    double createWaitCompressionCompletion;
+    double compressWaitCreateCompletion;
+    double compressWaitWriteCompletion;
+    double writeWaitCompressionCompletion;
+
+    /*
+     * Completion values
+     * Range from 0.0 to 1.0
+     * Jobs are divided into mini-chunks in order to measure completion
+     * these values are updated each time a thread finishes its operation on the
+     * mini-chunk (i.e. finishes writing out, compressing, etc. this mini-chunk).
+     */
+    double compressionCompletion;
+    double writeCompletion;
+    double createCompletion;
+
+    mutex_t jobCompressed_mutex;
+    cond_t jobCompressed_cond;
+    mutex_t jobReady_mutex;
+    cond_t jobReady_cond;
+    mutex_t allJobsCompleted_mutex;
+    cond_t allJobsCompleted_cond;
+    mutex_t jobWrite_mutex;
+    cond_t jobWrite_cond;
+    mutex_t compressionCompletion_mutex;
+    mutex_t createCompletion_mutex;
+    mutex_t writeCompletion_mutex;
+    mutex_t compressionLevel_mutex;
+    size_t lastDictSize;
+    inBuff_t input;
+    jobDescription* jobs;
+    ZSTD_CCtx* cctx;
+} adaptCCtx;
+
+typedef struct {
+    adaptCCtx* ctx;
+    FILE* dstFile;
+} outputThreadArg;
+
+typedef struct {
+    FILE* srcFile;
+    adaptCCtx* ctx;
+    outputThreadArg* otArg;
+} fcResources;
+
+static void freeCompressionJobs(adaptCCtx* ctx)
+{
+    unsigned u;
+    for (u=0; u<ctx->numJobs; u++) {
+        jobDescription job = ctx->jobs[u];
+        free(job.dst.start);
+        free(job.src.start);
+    }
+}
+
+/* Destroy a wrapped mutex, but only if its initialization succeeded
+ * (noError set).  Returns the pthread error code, or 0 if skipped. */
+static int destroyMutex(mutex_t* mutex)
+{
+    if (!mutex->noError) return 0;
+    return pthread_mutex_destroy(&mutex->pMutex);
+}
+
+/* Destroy a wrapped condition variable, but only if its initialization
+ * succeeded (noError set).  Returns the pthread error code, or 0 if skipped. */
+static int destroyCond(cond_t* cond)
+{
+    if (!cond->noError) return 0;
+    return pthread_cond_destroy(&cond->pCond);
+}
+
+/*
+ * Release everything owned by an adaptCCtx: the pthread mutexes and
+ * condition variables, the ZSTD compression context, the shared input
+ * buffer, the per-job buffers and job array, and finally the context
+ * itself.  Returns nonzero if any destruction step reported an error.
+ * A NULL ctx is a successful no-op, so this is safe to call on a
+ * partially constructed context (see createCCtx's error path).
+ */
+static int freeCCtx(adaptCCtx* ctx)
+{
+    if (!ctx) return 0;
+    {
+        int error = 0;
+        error |= destroyMutex(&ctx->jobCompressed_mutex);
+        error |= destroyCond(&ctx->jobCompressed_cond);
+        error |= destroyMutex(&ctx->jobReady_mutex);
+        error |= destroyCond(&ctx->jobReady_cond);
+        error |= destroyMutex(&ctx->allJobsCompleted_mutex);
+        error |= destroyCond(&ctx->allJobsCompleted_cond);
+        error |= destroyMutex(&ctx->jobWrite_mutex);
+        error |= destroyCond(&ctx->jobWrite_cond);
+        error |= destroyMutex(&ctx->compressionCompletion_mutex);
+        error |= destroyMutex(&ctx->createCompletion_mutex);
+        error |= destroyMutex(&ctx->writeCompletion_mutex);
+        error |= destroyMutex(&ctx->compressionLevel_mutex);
+        error |= ZSTD_isError(ZSTD_freeCCtx(ctx->cctx));
+        free(ctx->input.buffer.start);
+        /* jobs may be NULL if initCCtx failed before allocating them */
+        if (ctx->jobs){
+            freeCompressionJobs(ctx);
+            free(ctx->jobs);
+        }
+        free(ctx);
+        return error;
+    }
+}
+
+/* Initialize a wrapped mutex; noError records whether init succeeded so
+ * destroyMutex can skip mutexes that were never created. */
+static int initMutex(mutex_t* mutex)
+{
+    int const initResult = pthread_mutex_init(&mutex->pMutex, NULL);
+    mutex->noError = (initResult == 0);
+    return initResult;
+}
+
+/* Initialize a wrapped condition variable; noError records whether init
+ * succeeded so destroyCond can skip condvars that were never created. */
+static int initCond(cond_t* cond)
+{
+    int const initResult = pthread_cond_init(&cond->pCond, NULL);
+    cond->noError = (initResult == 0);
+    return initResult;
+}
+
+/*
+ * Initialize an already-allocated adaptCCtx for numJobs job slots:
+ * synchronization objects, adaptation state, per-job src/dst buffers,
+ * the ZSTD compression context, and the shared input buffer.
+ * Returns 0 on success, nonzero on failure; on failure the caller is
+ * expected to release the partially initialized context with freeCCtx().
+ */
+static int initCCtx(adaptCCtx* ctx, unsigned numJobs)
+{
+    ctx->compressionLevel = g_compressionLevel;
+    {
+        int pthreadError = 0;
+        pthreadError |= initMutex(&ctx->jobCompressed_mutex);
+        pthreadError |= initCond(&ctx->jobCompressed_cond);
+        pthreadError |= initMutex(&ctx->jobReady_mutex);
+        pthreadError |= initCond(&ctx->jobReady_cond);
+        pthreadError |= initMutex(&ctx->allJobsCompleted_mutex);
+        pthreadError |= initCond(&ctx->allJobsCompleted_cond);
+        pthreadError |= initMutex(&ctx->jobWrite_mutex);
+        pthreadError |= initCond(&ctx->jobWrite_cond);
+        pthreadError |= initMutex(&ctx->compressionCompletion_mutex);
+        pthreadError |= initMutex(&ctx->createCompletion_mutex);
+        pthreadError |= initMutex(&ctx->writeCompletion_mutex);
+        pthreadError |= initMutex(&ctx->compressionLevel_mutex);
+        if (pthreadError) return pthreadError;
+    }
+    ctx->numJobs = numJobs;
+    ctx->jobReadyID = 0;
+    ctx->jobCompressedID = 0;
+    ctx->jobWriteID = 0;
+    ctx->lastDictSize = 0;
+
+    /* all wait/completion measurements start at 1.0 ("fully complete",
+     * i.e. nobody has waited on anybody yet) */
+    ctx->createWaitCompressionCompletion = 1;
+    ctx->compressWaitCreateCompletion = 1;
+    ctx->compressWaitWriteCompletion = 1;
+    ctx->writeWaitCompressionCompletion = 1;
+    ctx->createCompletion = 1;
+    ctx->writeCompletion = 1;
+    ctx->compressionCompletion = 1;
+    ctx->convergenceCounter = 0;
+    ctx->cooldown = 0;
+
+    /* calloc(numJobs, size) lets calloc itself check the count*size
+     * multiplication for overflow, unlike calloc(1, numJobs*size) where
+     * the multiplication can silently wrap before calloc sees it */
+    ctx->jobs = calloc(numJobs, sizeof(jobDescription));
+    if (!ctx->jobs) {
+        DISPLAY("Error: could not allocate space for jobs during context creation\n");
+        return 1;
+    }
+
+    /* initializing jobs */
+    {
+        unsigned jobNum;
+        for (jobNum=0; jobNum<numJobs; jobNum++) {
+            jobDescription* job = &ctx->jobs[jobNum];
+            /* src buffer is twice the chunk size — presumably leaving room
+             * for the previous job's dictionary tail ahead of the new chunk
+             * (cf. lastDictSize); capacity tracks the per-chunk payload only
+             * — TODO confirm against the job-creation thread */
+            job->src.start = malloc(2 * FILE_CHUNK_SIZE);
+            job->dst.start = malloc(ZSTD_compressBound(FILE_CHUNK_SIZE));
+            job->lastJobPlusOne = 0;
+            if (!job->src.start || !job->dst.start) {
+                /* buffers allocated so far are reclaimed by freeCCtx() */
+                DISPLAY("Could not allocate buffers for jobs\n");
+                return 1;
+            }
+            job->src.capacity = FILE_CHUNK_SIZE;
+            job->dst.capacity = ZSTD_compressBound(FILE_CHUNK_SIZE);
+        }
+    }
+
+    ctx->nextJobID = 0;
+    ctx->threadError = 0;
+    ctx->allJobsCompleted = 0;
+
+    ctx->cctx = ZSTD_createCCtx();
+    if (!ctx->cctx) {
+        DISPLAY("Error: could not allocate ZSTD_CCtx\n");
+        return 1;
+    }
+
+    ctx->input.filled = 0;
+    ctx->input.buffer.capacity = 2 * FILE_CHUNK_SIZE;
+
+    ctx->input.buffer.start = malloc(ctx->input.buffer.capacity);
+    if (!ctx->input.buffer.start) {
+        DISPLAY("Error: could not allocate input buffer\n");
+        return 1;
+    }
+    return 0;
+}
+
+/* Allocate and initialize an adaptive-compression context with numJobs
+ * job slots.  Returns NULL on any allocation or initialization failure;
+ * partially constructed state is released via freeCCtx(). */
+static adaptCCtx* createCCtx(unsigned numJobs)
+{
+    adaptCCtx* const ctx = calloc(1, sizeof(adaptCCtx));
+    if (!ctx) {
+        DISPLAY("Error: could not allocate space for context\n");
+        return NULL;
+    }
+    if (initCCtx(ctx, numJobs)) {
+        freeCCtx(ctx);
+        return NULL;
+    }
+    return ctx;
+}
+
+/*
+ * Flag a fatal thread error and wake every thread that may be blocked on
+ * one of the context's condition variables, so each wait loop can observe
+ * ctx->threadError and exit.
+ */
+static void signalErrorToThreads(adaptCCtx* ctx)
+{
+    ctx->threadError = 1;
+    pthread_mutex_lock(&ctx->jobReady_mutex.pMutex);
+    pthread_cond_signal(&ctx->jobReady_cond.pCond);
+    pthread_mutex_unlock(&ctx->jobReady_mutex.pMutex);
+
+    pthread_mutex_lock(&ctx->jobCompressed_mutex.pMutex);
+    pthread_cond_broadcast(&ctx->jobCompressed_cond.pCond);
+    /* BUGFIX: unlock the mutex locked above.  The original unlocked
+     * jobReady_mutex here, which both unlocks a mutex this thread does
+     * not hold (undefined behavior per POSIX) and leaves
+     * jobCompressed_mutex held forever, deadlocking later lockers. */
+    pthread_mutex_unlock(&ctx->jobCompressed_mutex.pMutex);
+
+    pthread_mutex_lock(&ctx->jobWrite_mutex.pMutex);
+    pthread_cond_signal(&ctx->jobWrite_cond.pCond);
+    pthread_mutex_unlock(&ctx->jobWrite_mutex.pMutex);
+
+    pthread_mutex_lock(&ctx->allJobsCompleted_mutex.pMutex);
+    pthread_cond_signal(&ctx->allJobsCompleted_cond.pCond);
+    pthread_mutex_unlock(&ctx->allJobsCompleted_mutex.pMutex);
+}
+
+/* Block the calling thread until every job has completed, or until a
+ * thread error has been signaled.  A NULL ctx is a no-op. */
+static void waitUntilAllJobsCompleted(adaptCCtx* ctx)
+{
+    if (ctx == NULL) return;
+    pthread_mutex_lock(&ctx->allJobsCompleted_mutex.pMutex);
+    for (;;) {
+        if (ctx->allJobsCompleted != 0 || ctx->threadError) break;
+        pthread_cond_wait(&ctx->allJobsCompleted_cond.pCond, &ctx->allJobsCompleted_mutex.pMutex);
+    }
+    pthread_mutex_unlock(&ctx->allJobsCompleted_mutex.pMutex);
+}
+
+/* Map a completion fraction to a compression-level step size: the less
+ * finished the lagging thread was, the bigger the adjustment (2, 1, or 0). */
+static unsigned convertCompletionToChange(double completion)
+{
+    if (completion < CHANGE_BY_TWO_THRESHOLD) return 2;
+    if (completion < CHANGE_BY_ONE_THRESHOLD) return 1;
+    return 0;
+}
+
+/*
+ * Compression level is changed depending on which part of the compression process is lagging
+ * Currently, three threads exist for job creation, compression, and file writing respectively.
+ * adaptCompressionLevel() increments or decrements compression level based on which of the threads is lagging
+ * job creation or file writing lag => increased compression level
+ * compression thread lag           => decreased compression level
+ * detecting which thread is lagging is done by keeping track of how many calls each thread makes to pthread_cond_wait
+ */
+static void adaptCompressionLevel(adaptCCtx* ctx)
+{
+    double createWaitCompressionCompletion;
+    double compressWaitCreateCompletion;
+    double compressWaitWriteCompletion;
+    double writeWaitCompressionCompletion;
+    /* completions within this distance of 1.0 count as "did not wait" */
+    double const threshold = 0.00001;
+    unsigned prevCompressionLevel;
+
+    /* snapshot the current level under its lock */
+    pthread_mutex_lock(&ctx->compressionLevel_mutex.pMutex);
+    prevCompressionLevel = ctx->compressionLevel;
+    pthread_mutex_unlock(&ctx->compressionLevel_mutex.pMutex);
+
+
+    /* -i<level> on the command line pins the level: no adaptation */
+    if (g_forceCompressionLevel) {
+        pthread_mutex_lock(&ctx->compressionLevel_mutex.pMutex);
+        ctx->compressionLevel = g_compressionLevel;
+        pthread_mutex_unlock(&ctx->compressionLevel_mutex.pMutex);
+        return;
+    }
+
+
+    DEBUG(2, "adapting compression level %u\n", prevCompressionLevel);
+
+    /* read and reset completion measurements */
+    pthread_mutex_lock(&ctx->compressionCompletion_mutex.pMutex);
+    DEBUG(2, "createWaitCompressionCompletion %f\n", ctx->createWaitCompressionCompletion);
+    DEBUG(2, "writeWaitCompressionCompletion %f\n", ctx->writeWaitCompressionCompletion);
+    createWaitCompressionCompletion = ctx->createWaitCompressionCompletion;
+    writeWaitCompressionCompletion = ctx->writeWaitCompressionCompletion;
+    pthread_mutex_unlock(&ctx->compressionCompletion_mutex.pMutex);
+
+    pthread_mutex_lock(&ctx->writeCompletion_mutex.pMutex);
+    DEBUG(2, "compressWaitWriteCompletion %f\n", ctx->compressWaitWriteCompletion);
+    compressWaitWriteCompletion = ctx->compressWaitWriteCompletion;
+    pthread_mutex_unlock(&ctx->writeCompletion_mutex.pMutex);
+
+    pthread_mutex_lock(&ctx->createCompletion_mutex.pMutex);
+    DEBUG(2, "compressWaitCreateCompletion %f\n", ctx->compressWaitCreateCompletion);
+    compressWaitCreateCompletion = ctx->compressWaitCreateCompletion;
+    pthread_mutex_unlock(&ctx->createCompletion_mutex.pMutex);
+    DEBUG(2, "convergence counter: %u\n", ctx->convergenceCounter);
+
+    assert(g_minCLevel <= prevCompressionLevel && g_maxCLevel >= prevCompressionLevel);
+
+    /* adaptation logic */
+    if (ctx->cooldown) ctx->cooldown--;
+
+    if ((1-createWaitCompressionCompletion > threshold || 1-writeWaitCompressionCompletion > threshold) && ctx->cooldown == 0) {
+        /* create or write waiting on compression */
+        /* use whichever one waited less because it was slower */
+        double const completion = MAX(createWaitCompressionCompletion, writeWaitCompressionCompletion);
+        unsigned const change = convertCompletionToChange(completion);
+        /* never step below the configured minimum level */
+        unsigned const boundChange = MIN(change, prevCompressionLevel - g_minCLevel);
+        if (ctx->convergenceCounter >= CONVERGENCE_LOWER_BOUND && boundChange != 0) {
+            /* reset convergence counter, might have been a spike */
+            ctx->convergenceCounter = 0;
+            DEBUG(2, "convergence counter reset, no change applied\n");
+        }
+        else if (boundChange != 0) {
+            pthread_mutex_lock(&ctx->compressionLevel_mutex.pMutex);
+            ctx->compressionLevel -= boundChange;
+            pthread_mutex_unlock(&ctx->compressionLevel_mutex.pMutex);
+            /* cooldown applies only after decreases, to let the slower
+             * level's effect show before decreasing again */
+            ctx->cooldown = CLEVEL_DECREASE_COOLDOWN;
+            ctx->convergenceCounter = 1;
+
+            DEBUG(2, "create or write threads waiting on compression, tried to decrease compression level by %u\n\n", boundChange);
+        }
+    }
+    else if (1-compressWaitWriteCompletion > threshold || 1-compressWaitCreateCompletion > threshold) {
+        /* compress waiting on write */
+        double const completion = MIN(compressWaitWriteCompletion, compressWaitCreateCompletion);
+        unsigned const change = convertCompletionToChange(completion);
+        /* never step above the configured maximum level */
+        unsigned const boundChange = MIN(change, g_maxCLevel - prevCompressionLevel);
+        if (ctx->convergenceCounter >= CONVERGENCE_LOWER_BOUND && boundChange != 0) {
+            /* reset convergence counter, might have been a spike */
+            ctx->convergenceCounter = 0;
+            DEBUG(2, "convergence counter reset, no change applied\n");
+        }
+        else if (boundChange != 0) {
+            pthread_mutex_lock(&ctx->compressionLevel_mutex.pMutex);
+            ctx->compressionLevel += boundChange;
+            pthread_mutex_unlock(&ctx->compressionLevel_mutex.pMutex);
+            ctx->cooldown = 0;
+            ctx->convergenceCounter = 1;
+
+            DEBUG(2, "compress waiting on write or create, tried to increase compression level by %u\n\n", boundChange);
+        }
+
+    }
+
+    /* count consecutive rounds with no level change toward convergence */
+    pthread_mutex_lock(&ctx->compressionLevel_mutex.pMutex);
+    if (ctx->compressionLevel == prevCompressionLevel) {
+        ctx->convergenceCounter++;
+    }
+    pthread_mutex_unlock(&ctx->compressionLevel_mutex.pMutex);
+}
+
+/* Return the dictionary (overlap) size to use for compressionLevel:
+ * a fraction of that level's window size — the full window at the
+ * maximum level, one eighth of it otherwise. */
+static size_t getUseableDictSize(unsigned compressionLevel)
+{
+    ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, 0);
+    /* overlapLog is a right-shift applied to windowLog: 0 => 1x window, 3 => 1/8 window */
+    unsigned const overlapLog = compressionLevel >= (unsigned)ZSTD_maxCLevel() ? 0 : 3;
+    size_t const overlapSize = 1 << (params.cParams.windowLog - overlapLog);
+    return overlapSize;
+}
+
+static void* compressionThread(void* arg)
+{
+    adaptCCtx* const ctx = (adaptCCtx*)arg;
+    unsigned currJob = 0;
+    for ( ; ; ) {
+        unsigned const currJobIndex = currJob % ctx->numJobs;
+        jobDescription* const job = &ctx->jobs[currJobIndex];
+        DEBUG(2, "starting compression for job %u\n", currJob);
+
+        {
+            /* check if compression thread will have to wait */
+            unsigned willWaitForCreate = 0;
+            unsigned willWaitForWrite = 0;
+
+            pthread_mutex_lock(&ctx->jobReady_mutex.pMutex);
+            if (currJob + 1 > ctx->jobReadyID) willWaitForCreate = 1;
+            pthread_mutex_unlock(&ctx->jobReady_mutex.pMutex);
+
+            pthread_mutex_lock(&ctx->jobWrite_mutex.pMutex);
+            if (currJob - ctx->jobWriteID >= ctx->numJobs) willWaitForWrite = 1;
+            pthread_mutex_unlock(&ctx->jobWrite_mutex.pMutex);
+
+
+            pthread_mutex_lock(&ctx->createCompletion_mutex.pMutex);
+            if (willWaitForCreate) {
+                DEBUG(2, "compression will wait for create on job %u\n", currJob);
+                ctx->compressWaitCreateCompletion = ctx->createCompletion;
+                DEBUG(2, "create completion %f\n", ctx->compressWaitCreateCompletion);
+
+            }
+            else {
+                ctx->compressWaitCreateCompletion = 1;
+            }
+            pthread_mutex_unlock(&ctx->createCompletion_mutex.pMutex);
+
+            pthread_mutex_lock(&ctx->writeCompletion_mutex.pMutex);
+            if (willWaitForWrite) {
+                DEBUG(2, "compression will wait for write on job %u\n", currJob);
+                ctx->compressWaitWriteCompletion = ctx->writeCompletion;
+                DEBUG(2, "write completion %f\n", ctx->compressWaitWriteCompletion);
+            }
+            else {
+                ctx->compressWaitWriteCompletion = 1;
+            }
+            pthread_mutex_unlock(&ctx->writeCompletion_mutex.pMutex);
+
+        }
+
+        /* wait until job is ready */
+        pthread_mutex_lock(&ctx->jobReady_mutex.pMutex);
+        while (currJob + 1 > ctx->jobReadyID && !ctx->threadError) {
+            pthread_cond_wait(&ctx->jobReady_cond.pCond, &ctx->jobReady_mutex.pMutex);
+        }
+        pthread_mutex_unlock(&ctx->jobReady_mutex.pMutex);
+
+        /* wait until job previously in this space is written */
+        pthread_mutex_lock(&ctx->jobWrite_mutex.pMutex);
+        while (currJob - ctx->jobWriteID >= ctx->numJobs && !ctx->threadError) {
+            pthread_cond_wait(&ctx->jobWrite_cond.pCond, &ctx->jobWrite_mutex.pMutex);
+        }
+        pthread_mutex_unlock(&ctx->jobWrite_mutex.pMutex);
+        /* reset compression completion */
+        pthread_mutex_lock(&ctx->compressionCompletion_mutex.pMutex);
+        ctx->compressionCompletion = 0;
+        pthread_mutex_unlock(&ctx->compressionCompletion_mutex.pMutex);
+
+        /* adapt compression level */
+        if (currJob) adaptCompressionLevel(ctx);
+
+        pthread_mutex_lock(&ctx->compressionLevel_mutex.pMutex);
+        DEBUG(2, "job %u compressed with level %u\n", currJob, ctx->compressionLevel);
+        pthread_mutex_unlock(&ctx->compressionLevel_mutex.pMutex);
+
+        /* compress the data */
+        {
+            size_t const compressionBlockSize = ZSTD_BLOCKSIZE_MAX; /* 128 KB */
+            unsigned cLevel;
+            unsigned blockNum = 0;
+            size_t remaining = job->src.size;
+            size_t srcPos = 0;
+            size_t dstPos = 0;
+
+            pthread_mutex_lock(&ctx->compressionLevel_mutex.pMutex);
+            cLevel = ctx->compressionLevel;
+            pthread_mutex_unlock(&ctx->compressionLevel_mutex.pMutex);
+
+            /* reset compressed size */
+            job->compressedSize = 0;
+            DEBUG(2, "calling ZSTD_compressBegin()\n");
+            /* begin compression */
+            {
+                size_t const useDictSize = MIN(getUseableDictSize(cLevel), job->dictSize);
+                size_t const dictModeError = ZSTD_setCCtxParameter(ctx->cctx, ZSTD_p_forceRawDict, 1);
+                ZSTD_parameters params = ZSTD_getParams(cLevel, 0, useDictSize);
+                params.cParams.windowLog = 23;
+                {

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***



Want to link to this message? Use this URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?201810221945.w9MJjIMU075839>