Date:      Tue, 2 May 2017 18:30:55 +0000 (UTC)
From:      Dimitry Andric <dim@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-vendor@freebsd.org
Subject:   svn commit: r317687 - in vendor/compiler-rt/dist: cmake/Modules include/sanitizer include/xray lib/asan lib/sanitizer_common lib/scudo lib/tsan/go lib/tsan/rtl lib/tsan/tests/rtl lib/ubsan lib/xray...
Message-ID:  <201705021830.v42IUtnS000965@repo.freebsd.org>

Author: dim
Date: Tue May  2 18:30:55 2017
New Revision: 317687
URL: https://svnweb.freebsd.org/changeset/base/317687

Log:
  Vendor import of compiler-rt trunk r301939:
  https://llvm.org/svn/llvm-project/compiler-rt/trunk@301939

Added:
  vendor/compiler-rt/dist/lib/scudo/scudo_tls.h   (contents, props changed)
  vendor/compiler-rt/dist/lib/scudo/scudo_tls_linux.cpp   (contents, props changed)
  vendor/compiler-rt/dist/lib/scudo/scudo_tls_linux.h   (contents, props changed)
  vendor/compiler-rt/dist/test/asan/TestCases/Linux/global-overflow-bfd.cc   (contents, props changed)
  vendor/compiler-rt/dist/test/asan/TestCases/Linux/global-overflow-lld.cc   (contents, props changed)
  vendor/compiler-rt/dist/test/asan/TestCases/Linux/globals-gc-sections-lld.cc   (contents, props changed)
  vendor/compiler-rt/dist/test/asan/TestCases/small_memcpy_test.cc   (contents, props changed)
  vendor/compiler-rt/dist/test/sanitizer_common/ios_commands/
  vendor/compiler-rt/dist/test/sanitizer_common/ios_commands/iossim_compile.py   (contents, props changed)
  vendor/compiler-rt/dist/test/sanitizer_common/ios_commands/iossim_env.py   (contents, props changed)
  vendor/compiler-rt/dist/test/sanitizer_common/ios_commands/iossim_run.py   (contents, props changed)
Deleted:
  vendor/compiler-rt/dist/test/asan/TestCases/Linux/globals-gc-sections.cc
  vendor/compiler-rt/dist/test/asan/TestCases/Posix/coverage-direct-activation.cc
  vendor/compiler-rt/dist/test/asan/TestCases/Posix/coverage-direct-large.cc
  vendor/compiler-rt/dist/test/asan/TestCases/Posix/coverage-direct.cc
  vendor/compiler-rt/dist/test/asan/TestCases/Posix/coverage-fork-direct.cc
  vendor/compiler-rt/dist/test/asan/TestCases/coverage-levels.cc
Modified:
  vendor/compiler-rt/dist/cmake/Modules/AddCompilerRT.cmake
  vendor/compiler-rt/dist/include/sanitizer/tsan_interface.h
  vendor/compiler-rt/dist/include/xray/xray_log_interface.h
  vendor/compiler-rt/dist/lib/asan/asan_globals.cc
  vendor/compiler-rt/dist/lib/asan/asan_interceptors.cc
  vendor/compiler-rt/dist/lib/asan/asan_interface.inc
  vendor/compiler-rt/dist/lib/asan/asan_interface_internal.h
  vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_coverage_libcdep.cc
  vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cc
  vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_flags.inc
  vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_linux.h
  vendor/compiler-rt/dist/lib/scudo/CMakeLists.txt
  vendor/compiler-rt/dist/lib/scudo/scudo_allocator.cpp
  vendor/compiler-rt/dist/lib/scudo/scudo_allocator.h
  vendor/compiler-rt/dist/lib/scudo/scudo_utils.cpp
  vendor/compiler-rt/dist/lib/scudo/scudo_utils.h
  vendor/compiler-rt/dist/lib/tsan/go/buildgo.sh
  vendor/compiler-rt/dist/lib/tsan/rtl/tsan_external.cc
  vendor/compiler-rt/dist/lib/tsan/rtl/tsan_interface_ann.cc
  vendor/compiler-rt/dist/lib/tsan/rtl/tsan_platform_linux.cc
  vendor/compiler-rt/dist/lib/tsan/rtl/tsan_report.cc
  vendor/compiler-rt/dist/lib/tsan/rtl/tsan_rtl.cc
  vendor/compiler-rt/dist/lib/tsan/rtl/tsan_rtl.h
  vendor/compiler-rt/dist/lib/tsan/rtl/tsan_rtl_mutex.cc
  vendor/compiler-rt/dist/lib/tsan/rtl/tsan_rtl_report.cc
  vendor/compiler-rt/dist/lib/tsan/tests/rtl/tsan_posix.cc
  vendor/compiler-rt/dist/lib/ubsan/ubsan_diag.cc
  vendor/compiler-rt/dist/lib/ubsan/ubsan_handlers.cc
  vendor/compiler-rt/dist/lib/xray/xray_log_interface.cc
  vendor/compiler-rt/dist/test/asan/CMakeLists.txt
  vendor/compiler-rt/dist/test/asan/TestCases/Darwin/dead-strip.c
  vendor/compiler-rt/dist/test/asan/TestCases/Darwin/dump_registers.cc
  vendor/compiler-rt/dist/test/asan/TestCases/Darwin/reexec-insert-libraries-env.cc
  vendor/compiler-rt/dist/test/asan/TestCases/Darwin/scribble.cc
  vendor/compiler-rt/dist/test/asan/TestCases/Darwin/unset-insert-libraries-on-exec.cc
  vendor/compiler-rt/dist/test/asan/TestCases/Posix/asan-sigbus.cpp
  vendor/compiler-rt/dist/test/asan/TestCases/Posix/current_allocated_bytes.cc
  vendor/compiler-rt/dist/test/asan/TestCases/Posix/fread_fwrite.cc
  vendor/compiler-rt/dist/test/asan/TestCases/coverage-disabled.cc
  vendor/compiler-rt/dist/test/asan/TestCases/initialization-bug.cc
  vendor/compiler-rt/dist/test/asan/TestCases/strtok.c
  vendor/compiler-rt/dist/test/asan/lit.cfg
  vendor/compiler-rt/dist/test/asan/lit.site.cfg.in
  vendor/compiler-rt/dist/test/lit.common.cfg
  vendor/compiler-rt/dist/test/lit.common.configured.in
  vendor/compiler-rt/dist/test/sanitizer_common/TestCases/sanitizer_coverage_symbolize.cc
  vendor/compiler-rt/dist/test/tsan/Darwin/xpc-cancel.mm
  vendor/compiler-rt/dist/test/tsan/Darwin/xpc-race.mm
  vendor/compiler-rt/dist/test/tsan/Darwin/xpc.mm
  vendor/compiler-rt/dist/test/tsan/ignore_lib1.cc
  vendor/compiler-rt/dist/test/tsan/ignore_lib5.cc
  vendor/compiler-rt/dist/test/ubsan/TestCases/Float/cast-overflow.cpp
  vendor/compiler-rt/dist/test/ubsan/TestCases/Misc/log-path_test.cc
  vendor/compiler-rt/dist/test/ubsan/TestCases/Misc/missing_return.cpp
  vendor/compiler-rt/dist/test/ubsan/TestCases/TypeCheck/misaligned.cpp

Modified: vendor/compiler-rt/dist/cmake/Modules/AddCompilerRT.cmake
==============================================================================
--- vendor/compiler-rt/dist/cmake/Modules/AddCompilerRT.cmake	Tue May  2 18:30:52 2017	(r317686)
+++ vendor/compiler-rt/dist/cmake/Modules/AddCompilerRT.cmake	Tue May  2 18:30:55 2017	(r317687)
@@ -210,6 +210,14 @@ function(add_compiler_rt_runtime name ty
         set_target_properties(${libname} PROPERTIES IMPORT_PREFIX "")
         set_target_properties(${libname} PROPERTIES IMPORT_SUFFIX ".lib")
       endif()
+      if(APPLE)
+        # Ad-hoc sign the dylibs
+        add_custom_command(TARGET ${libname}
+          POST_BUILD  
+          COMMAND codesign --sign - $<TARGET_FILE:${libname}>
+          WORKING_DIRECTORY ${COMPILER_RT_LIBRARY_OUTPUT_DIR}
+        )
+      endif()
     endif()
     install(TARGETS ${libname}
       ARCHIVE DESTINATION ${COMPILER_RT_LIBRARY_INSTALL_DIR}

Modified: vendor/compiler-rt/dist/include/sanitizer/tsan_interface.h
==============================================================================
--- vendor/compiler-rt/dist/include/sanitizer/tsan_interface.h	Tue May  2 18:30:52 2017	(r317686)
+++ vendor/compiler-rt/dist/include/sanitizer/tsan_interface.h	Tue May  2 18:30:55 2017	(r317687)
@@ -68,7 +68,8 @@ const unsigned __tsan_mutex_recursive_un
 void __tsan_mutex_create(void *addr, unsigned flags);
 
 // Annotate destruction of a mutex.
-// Supported flags: none.
+// Supported flags:
+//   - __tsan_mutex_linker_init
 void __tsan_mutex_destroy(void *addr, unsigned flags);
 
 // Annotate start of lock operation.

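For context on the tsan_interface.h change above: a linker-initialized mutex never gets a matching __tsan_mutex_create() annotation, so its destroy annotation now accepts the __tsan_mutex_linker_init flag. A minimal sketch of the intended usage (the surrounding pthread code is illustrative, not part of this import):

    #include <pthread.h>
    #include <sanitizer/tsan_interface.h>

    // Statically initialized, so __tsan_mutex_create() was never called.
    static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

    void shutdown_mutex() {
      // Tell TSan this mutex was linker-initialized before destroying it.
      __tsan_mutex_destroy(&m, __tsan_mutex_linker_init);
      pthread_mutex_destroy(&m);
    }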
Modified: vendor/compiler-rt/dist/include/xray/xray_log_interface.h
==============================================================================
--- vendor/compiler-rt/dist/include/xray/xray_log_interface.h	Tue May  2 18:30:52 2017	(r317686)
+++ vendor/compiler-rt/dist/include/xray/xray_log_interface.h	Tue May  2 18:30:55 2017	(r317687)
@@ -10,7 +10,73 @@
 // This file is a part of XRay, a function call tracing system.
 //
 // APIs for installing a new logging implementation.
+//
 //===----------------------------------------------------------------------===//
+///
+/// XRay allows users to implement their own logging handlers and install them
+/// to replace the default runtime-controllable implementation that comes with
+/// compiler-rt/xray. The "flight data recorder" (FDR) mode implementation uses
+/// this API to install itself in an XRay-enabled binary. See
+/// compiler-rt/lib/xray_fdr_logging.{h,cc} for details of that implementation.
+///
+/// The high-level usage pattern for these APIs looks like the following:
+///
+///   // Before we try initializing the log implementation, we must set it as
+///   // the log implementation. We provide the function pointers that define
+///   // the various initialization, finalization, and other pluggable hooks
+///   // that we need.
+///   __xray_set_log_impl({...});
+///
+///   // Once that's done, we can now initialize the implementation. Each
+///   // implementation has a chance to let users customize the implementation
+///   // with a struct that their implementation supports. Roughly this might
+///   // look like:
+///   MyImplementationOptions opts;
+///   opts.enable_feature = true;
+///   ...
+///   auto init_status = __xray_log_init(
+///       BufferSize, MaxBuffers, &opts, sizeof opts);
+///   if (init_status != XRayLogInitStatus::XRAY_LOG_INITIALIZED) {
+///     // deal with the error here, if there is one.
+///   }
+///
+///   // When the log implementation has had the chance to initialize, we can
+///   // now patch the sleds.
+///   auto patch_status = __xray_patch();
+///   if (patch_status != XRayPatchingStatus::SUCCESS) {
+///     // deal with the error here, if it is an error.
+///   }
+///
+///   // If we want to stop the implementation, we can then finalize it (before
+///   // optionally flushing the log).
+///   auto fin_status = __xray_log_finalize();
+///   if (fin_status != XRayLogInitStatus::XRAY_LOG_FINALIZED) {
+///     // deal with the error here, if it is an error.
+///   }
+///
+///   // We can optionally wait before flushing the log to give other threads a
+///   // chance to see that the implementation is already finalized. Also, at
+///   // this point we can optionally unpatch the sleds to reduce overheads at
+///   // runtime.
+///   auto unpatch_status = __xray_unpatch();
+///   if (unpatch_status != XRayPatchingStatus::SUCCESS) {
+///     // deal with the error here, if it is an error.
+///   }
+///
+///   // If there are logs or data to be flushed somewhere, we can do so only
+///   // after we've finalized the log. Some implementations may not actually
+///   // have anything to log (it might keep the data in memory, or periodically
+///   // be logging the data anyway).
+///   auto flush_status = __xray_log_flushLog();
+///   if (flush_status != XRayLogFlushStatus::XRAY_LOG_FLUSHED) {
+///     // deal with the error here, if it is an error.
+///   }
+///
+///
+/// NOTE: Before calling __xray_patch() again, consider re-initializing the
+/// implementation first. Some implementations might stay in an "off" state when
+/// they are finalized, while some might be in an invalid/unknown state.
+///
 #ifndef XRAY_XRAY_LOG_INTERFACE_H
 #define XRAY_XRAY_LOG_INTERFACE_H
 
@@ -19,36 +85,141 @@
 
 extern "C" {
 
+/// This enum defines the valid states the logging implementation can be in.
 enum XRayLogInitStatus {
+  /// The default state is uninitialized, and in case there were errors in the
+  /// initialization, the implementation MUST return XRAY_LOG_UNINITIALIZED.
   XRAY_LOG_UNINITIALIZED = 0,
+
+  /// Some implementations support multi-stage init (or asynchronous init), and
+  /// may return XRAY_LOG_INITIALIZING to signal callers of the API that
+  /// there's an ongoing initialization routine running. This allows
+  /// implementations to support concurrent threads attempting to initialize,
+  /// while only signalling success in one.
   XRAY_LOG_INITIALIZING = 1,
+
+  /// When an implementation is done initializing, it MUST return
+  /// XRAY_LOG_INITIALIZED. When users call `__xray_patch()`, they are
+  /// guaranteed that the implementation installed with
+  /// `__xray_set_log_impl(...)` has been initialized.
   XRAY_LOG_INITIALIZED = 2,
+
+  /// Some implementations might support multi-stage finalization (or
+  /// asynchronous finalization), and may return XRAY_LOG_FINALIZING to signal
+  /// callers of the API that there's an ongoing finalization routine running.
+  /// This allows implementations to support concurrent threads attempting to
+  /// finalize, while only signalling success/completion in one.
   XRAY_LOG_FINALIZING = 3,
+
+  /// When an implementation is done finalizing, it MUST return
+  /// XRAY_LOG_FINALIZED. It is up to the implementation to determine what the
+  /// semantics of a finalized implementation is. Some implementations might
+  /// allow re-initialization once the log is finalized, while some might always
+  /// be on (and that finalization is a no-op).
   XRAY_LOG_FINALIZED = 4,
 };
 
+/// This enum allows an implementation to signal the state of a log flush
+/// operation performed via `__xray_log_flushLog()`.
 enum XRayLogFlushStatus {
   XRAY_LOG_NOT_FLUSHING = 0,
   XRAY_LOG_FLUSHING = 1,
   XRAY_LOG_FLUSHED = 2,
 };
 
+/// A valid XRay logging implementation MUST provide all of the function
+/// pointers in XRayLogImpl when being installed through `__xray_set_log_impl`.
+/// To be precise, ALL of the function pointers MUST be non-null.
 struct XRayLogImpl {
+  /// The log initialization routine provided by the implementation, always
+  /// provided with the following parameters:
+  ///
+  ///   - buffer size
+  ///   - maximum number of buffers
+  ///   - a pointer to an argument struct that the implementation MUST handle
+  ///   - the size of the argument struct
+  ///
+  /// See XRayLogInitStatus for details on what the implementation MUST return
+  /// when called.
+  ///
+  /// If the implementation needs to install handlers aside from the 0-argument
+  /// function call handler, it MUST do so in this initialization handler.
+  ///
+  /// See xray_interface.h for available handler installation routines.
   XRayLogInitStatus (*log_init)(size_t, size_t, void *, size_t);
+
+  /// The log finalization routine provided by the implementation.
+  ///
+  /// See XRayLogInitStatus for details on what the implementation MUST return
+  /// when called.
   XRayLogInitStatus (*log_finalize)();
+
+  /// The 0-argument function call handler. XRay logging implementations MUST
+  /// always have a handler for function entry and exit events. In case the
+  /// implementation wants to support arg1 (or other future extensions to XRay
+/// logging) those MUST be installed by the 'log_init' handler.
   void (*handle_arg0)(int32_t, XRayEntryType);
+
+  /// The routine provided by the implementation that is invoked when
+  /// __xray_log_flushLog() is called.
+  ///
+  /// See XRayLogFlushStatus for details on what the implementation MUST return
+  /// when called.
   XRayLogFlushStatus (*flush_log)();
 };
 
+/// This function installs a new logging implementation that XRay will use. In
+/// case there are any nullptr members in Impl, XRay will *uninstall any
+/// existing implementations*. It does NOT patch the instrumentation sleds.
+///
+/// NOTE: This function does NOT attempt to finalize the currently installed
+/// implementation. Use with caution.
+///
+/// It is guaranteed safe to call this function in the following states:
+///
+///   - When the implementation is UNINITIALIZED.
+///   - When the implementation is FINALIZED.
+///   - When there is no current implementation installed.
+///
+/// What happens when this function is called in any other state is defined
+/// by the logging implementation.
 void __xray_set_log_impl(XRayLogImpl Impl);
+
+/// This function removes the currently installed implementation. It will also
+/// uninstall any handlers that have been previously installed. It does NOT
+/// unpatch the instrumentation sleds.
+///
+/// NOTE: This function does NOT attempt to finalize the currently installed
+/// implementation. Use with caution.
+///
+/// It is guaranteed safe to call this function in the following states:
+///
+///   - When the implementation is UNINITIALIZED.
+///   - When the implementation is FINALIZED.
+///   - When there is no current implementation installed.
+///
+/// What happens when this function is called in any other state is defined
+/// by the logging implementation.
+void __xray_remove_log_impl();
+
+/// Invokes the installed implementation initialization routine. See
+/// XRayLogInitStatus for what the return values mean.
 XRayLogInitStatus __xray_log_init(size_t BufferSize, size_t MaxBuffers,
                                   void *Args, size_t ArgsSize);
+
+/// Invokes the installed implementation finalization routine. See
+/// XRayLogInitStatus for what the return values mean.
 XRayLogInitStatus __xray_log_finalize();
+
+/// Invokes the installed implementation's log flushing routine. See
+/// XRayLogFlushStatus for what the return values mean.
 XRayLogFlushStatus __xray_log_flushLog();
 
 } // extern "C"
 
 namespace __xray {
+
 // Options used by the LLVM XRay FDR implementation.
 struct FDRLoggingOptions {
   bool ReportErrors = false;

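To make the new interface concrete, here is a minimal sketch of installing a no-op implementation through the entry points documented above (the buffer sizes and function names here are assumptions; a real implementation records events in its handle_arg0 hook):

    #include "xray/xray_log_interface.h"

    static XRayLogInitStatus noopInit(size_t, size_t, void *, size_t) {
      return XRayLogInitStatus::XRAY_LOG_INITIALIZED;
    }
    static XRayLogInitStatus noopFinalize() {
      return XRayLogInitStatus::XRAY_LOG_FINALIZED;
    }
    static void noopHandleArg0(int32_t FuncId, XRayEntryType Entry) {
      // A real implementation would record the entry/exit event for FuncId.
    }
    static XRayLogFlushStatus noopFlush() {
      return XRayLogFlushStatus::XRAY_LOG_FLUSHED;
    }

    void installNoopImpl() {
      // Member order follows the struct: log_init, log_finalize, handle_arg0,
      // flush_log. None of the pointers may be null.
      XRayLogImpl Impl = {noopInit, noopFinalize, noopHandleArg0, noopFlush};
      __xray_set_log_impl(Impl);              // install first...
      __xray_log_init(4096, 16, nullptr, 0);  // ...then initialize.
    }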
Modified: vendor/compiler-rt/dist/lib/asan/asan_globals.cc
==============================================================================
--- vendor/compiler-rt/dist/lib/asan/asan_globals.cc	Tue May  2 18:30:52 2017	(r317686)
+++ vendor/compiler-rt/dist/lib/asan/asan_globals.cc	Tue May  2 18:30:55 2017	(r317687)
@@ -332,6 +332,26 @@ void __asan_unregister_image_globals(upt
   *flag = 0;
 }
 
+void __asan_register_elf_globals(uptr *flag, void *start, void *stop) {
+  if (*flag) return;
+  if (!start) return;
+  CHECK_EQ(0, ((uptr)stop - (uptr)start) % sizeof(__asan_global));
+  __asan_global *globals_start = (__asan_global*)start;
+  __asan_global *globals_stop = (__asan_global*)stop;
+  __asan_register_globals(globals_start, globals_stop - globals_start);
+  *flag = 1;
+}
+
+void __asan_unregister_elf_globals(uptr *flag, void *start, void *stop) {
+  if (!*flag) return;
+  if (!start) return;
+  CHECK_EQ(0, ((uptr)stop - (uptr)start) % sizeof(__asan_global));
+  __asan_global *globals_start = (__asan_global*)start;
+  __asan_global *globals_stop = (__asan_global*)stop;
+  __asan_unregister_globals(globals_start, globals_stop - globals_start);
+  *flag = 0;
+}
+
 // Register an array of globals.
 void __asan_register_globals(__asan_global *globals, uptr n) {
   if (!flags()->report_globals) return;

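The intended callers of the new entry points are compiler-emitted module constructors and destructors. A hedged sketch of the call pattern, where the __start_/__stop_ symbols are hypothetical linker-defined bounds of the globals metadata section (the exact section name is an assumption):

    // Hypothetical linker-defined bounds of the per-module section holding
    // the __asan_global descriptors.
    extern "C" __asan_global __start_asan_globals[];
    extern "C" __asan_global __stop_asan_globals[];

    static uptr registered;  // one guard flag per module

    static void module_ctor() {
      __asan_register_elf_globals(&registered, __start_asan_globals,
                                  __stop_asan_globals);
    }

    static void module_dtor() {
      __asan_unregister_elf_globals(&registered, __start_asan_globals,
                                    __stop_asan_globals);
    }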
Modified: vendor/compiler-rt/dist/lib/asan/asan_interceptors.cc
==============================================================================
--- vendor/compiler-rt/dist/lib/asan/asan_interceptors.cc	Tue May  2 18:30:52 2017	(r317686)
+++ vendor/compiler-rt/dist/lib/asan/asan_interceptors.cc	Tue May  2 18:30:55 2017	(r317687)
@@ -37,12 +37,19 @@
 namespace __asan {
 
 // Return true if we can quickly decide that the region is unpoisoned.
+// We assume that a redzone is at least 16 bytes.
 static inline bool QuickCheckForUnpoisonedRegion(uptr beg, uptr size) {
   if (size == 0) return true;
   if (size <= 32)
     return !AddressIsPoisoned(beg) &&
            !AddressIsPoisoned(beg + size - 1) &&
            !AddressIsPoisoned(beg + size / 2);
+  if (size <= 64)
+    return !AddressIsPoisoned(beg) &&
+           !AddressIsPoisoned(beg + size / 4) &&
+           !AddressIsPoisoned(beg + size - 1) &&
+           !AddressIsPoisoned(beg + 3 * size / 4) &&
+           !AddressIsPoisoned(beg + size / 2);
   return false;
 }
 

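The new 33-to-64-byte branch leans on the 16-byte redzone assumption: with probes at beg, size/4, size/2, 3*size/4, and size-1, consecutive probes are never more than 16 bytes apart, so any poisoned run of at least 16 bytes inside the region hits a probe. A standalone check of that spacing, illustrative only:

    #include <cassert>
    #include <cstddef>

    int main() {
      for (size_t size = 33; size <= 64; ++size) {
        // Probe offsets from the check above, in ascending order.
        size_t probes[] = {0, size / 4, size / 2, 3 * size / 4, size - 1};
        for (int i = 1; i < 5; ++i)
          assert(probes[i] - probes[i - 1] <= 16);  // gap <= minimum redzone
      }
      return 0;
    }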
Modified: vendor/compiler-rt/dist/lib/asan/asan_interface.inc
==============================================================================
--- vendor/compiler-rt/dist/lib/asan/asan_interface.inc	Tue May  2 18:30:52 2017	(r317686)
+++ vendor/compiler-rt/dist/lib/asan/asan_interface.inc	Tue May  2 18:30:55 2017	(r317687)
@@ -64,6 +64,7 @@ INTERFACE_FUNCTION(__asan_poison_stack_m
 INTERFACE_FUNCTION(__asan_print_accumulated_stats)
 INTERFACE_FUNCTION(__asan_region_is_poisoned)
 INTERFACE_FUNCTION(__asan_register_globals)
+INTERFACE_FUNCTION(__asan_register_elf_globals)
 INTERFACE_FUNCTION(__asan_register_image_globals)
 INTERFACE_FUNCTION(__asan_report_error)
 INTERFACE_FUNCTION(__asan_report_exp_load1)
@@ -149,6 +150,7 @@ INTERFACE_FUNCTION(__asan_unpoison_intra
 INTERFACE_FUNCTION(__asan_unpoison_memory_region)
 INTERFACE_FUNCTION(__asan_unpoison_stack_memory)
 INTERFACE_FUNCTION(__asan_unregister_globals)
+INTERFACE_FUNCTION(__asan_unregister_elf_globals)
 INTERFACE_FUNCTION(__asan_unregister_image_globals)
 INTERFACE_FUNCTION(__asan_version_mismatch_check_v8)
 INTERFACE_FUNCTION(__sanitizer_finish_switch_fiber)

Modified: vendor/compiler-rt/dist/lib/asan/asan_interface_internal.h
==============================================================================
--- vendor/compiler-rt/dist/lib/asan/asan_interface_internal.h	Tue May  2 18:30:52 2017	(r317686)
+++ vendor/compiler-rt/dist/lib/asan/asan_interface_internal.h	Tue May  2 18:30:55 2017	(r317687)
@@ -67,6 +67,11 @@ extern "C" {
   SANITIZER_INTERFACE_ATTRIBUTE
   void __asan_unregister_image_globals(uptr *flag);
 
+  SANITIZER_INTERFACE_ATTRIBUTE
+  void __asan_register_elf_globals(uptr *flag, void *start, void *stop);
+  SANITIZER_INTERFACE_ATTRIBUTE
+  void __asan_unregister_elf_globals(uptr *flag, void *start, void *stop);
+
   // These two functions should be called by the instrumented code.
   // 'globals' is an array of structures describing 'n' globals.
   SANITIZER_INTERFACE_ATTRIBUTE

Modified: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_coverage_libcdep.cc
==============================================================================
--- vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_coverage_libcdep.cc	Tue May  2 18:30:52 2017	(r317686)
+++ vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_coverage_libcdep.cc	Tue May  2 18:30:55 2017	(r317687)
@@ -84,7 +84,6 @@ class CoverageData {
   void AfterFork(int child_pid);
   void Extend(uptr npcs);
   void Add(uptr pc, u32 *guard);
-  void DumpAsBitSet();
   void DumpOffsets();
   void DumpAll();
 
@@ -156,6 +155,13 @@ void CoverageData::DirectOpen() {
 
 void CoverageData::Init() {
   pc_fd = kInvalidFd;
+
+  if (!common_flags()->coverage) return;
+  Printf("**\n***\n***\n");
+  Printf("**WARNING: this implementation of SanitizerCoverage is deprecated\n");
+  Printf("**WARNING: and will be removed in future versions\n");
+  Printf("**WARNING: See https://clang.llvm.org/docs/SanitizerCoverage.html\n");
+  Printf("**\n***\n***\n");
 }
 
 void CoverageData::Enable() {
@@ -165,6 +171,8 @@ void CoverageData::Enable() {
       MmapNoReserveOrDie(sizeof(uptr) * kPcArrayMaxSize, "CovInit"));
   atomic_store(&pc_array_index, 0, memory_order_relaxed);
   if (common_flags()->coverage_direct) {
+    Report("coverage_direct=1 is deprecated, don't use it.\n");
+    Die();
     atomic_store(&pc_array_size, 0, memory_order_relaxed);
   } else {
     atomic_store(&pc_array_size, kPcArrayMaxSize, memory_order_relaxed);
@@ -419,35 +427,6 @@ static fd_t CovOpenFile(InternalScopedSt
   return fd;
 }
 
-void CoverageData::DumpAsBitSet() {
-  if (!common_flags()->coverage_bitset) return;
-  if (!size()) return;
-  InternalScopedBuffer<char> out(size());
-  InternalScopedString path(kMaxPathLength);
-  for (uptr m = 0; m < module_name_vec.size(); m++) {
-    uptr n_set_bits = 0;
-    auto r = module_name_vec[m];
-    CHECK(r.copied_module_name);
-    CHECK_LE(r.beg, r.end);
-    CHECK_LE(r.end, size());
-    for (uptr i = r.beg; i < r.end; i++) {
-      uptr pc = UnbundlePc(pc_array[i]);
-      out[i] = pc ? '1' : '0';
-      if (pc)
-        n_set_bits++;
-    }
-    const char *base_name = StripModuleName(r.copied_module_name);
-    fd_t fd = CovOpenFile(&path, /* packed */false, base_name, "bitset-sancov");
-    if (fd == kInvalidFd) return;
-    WriteToFile(fd, out.data() + r.beg, r.end - r.beg);
-    CloseFile(fd);
-    VReport(1,
-            " CovDump: bitset of %zd bits written for '%s', %zd bits are set\n",
-            r.end - r.beg, base_name, n_set_bits);
-  }
-}
-
-
 void CoverageData::GetRangeOffsets(const NamedPcRange& r, Symbolizer* sym,
     InternalMmapVector<uptr>* offsets) const {
   offsets->clear();
@@ -565,7 +544,6 @@ void CoverageData::DumpAll() {
   if (!coverage_enabled || common_flags()->coverage_direct) return;
   if (atomic_fetch_add(&dump_once_guard, 1, memory_order_relaxed))
     return;
-  DumpAsBitSet();
   DumpOffsets();
 }
 

Modified: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cc
==============================================================================
--- vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cc	Tue May  2 18:30:52 2017	(r317686)
+++ vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cc	Tue May  2 18:30:55 2017	(r317687)
@@ -98,10 +98,6 @@ static void SanitizerDumpCoverage(const 
   InternalFree(file_path);
   InternalFree(module_name);
   InternalFree(pcs);
-
-  if (sancov_flags()->symbolize) {
-    Printf("TODO(aizatsky): call sancov to symbolize\n");
-  }
 }
 
 // Collects trace-pc guard coverage.

Modified: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_flags.inc
==============================================================================
--- vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_flags.inc	Tue May  2 18:30:52 2017	(r317686)
+++ vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_flags.inc	Tue May  2 18:30:55 2017	(r317687)
@@ -142,12 +142,6 @@ COMMON_FLAG(bool, coverage_pcs, true,
 COMMON_FLAG(bool, coverage_order_pcs, false,
              "If true, the PCs will be dumped in the order they've"
              " appeared during the execution.")
-COMMON_FLAG(bool, coverage_bitset, false,
-            "If set (and if 'coverage' is set too), the coverage information "
-            "will also be dumped as a bitset to a separate file.")
-COMMON_FLAG(bool, coverage_counters, false,
-            "If set (and if 'coverage' is set too), the bitmap that corresponds"
-            " to coverage counters will be dumped.")
 COMMON_FLAG(bool, coverage_direct, SANITIZER_ANDROID,
             "If set, coverage information will be dumped directly to a memory "
             "mapped file. This way data is not lost even if the process is "

Modified: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_linux.h
==============================================================================
--- vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_linux.h	Tue May  2 18:30:52 2017	(r317686)
+++ vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_linux.h	Tue May  2 18:30:55 2017	(r317687)
@@ -88,6 +88,46 @@ bool LibraryNameIs(const char *full_name
 
 // Call cb for each region mapped by map.
 void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr));
+
+#if SANITIZER_ANDROID
+
+#if defined(__aarch64__)
+# define __get_tls() \
+    ({ void** __v; __asm__("mrs %0, tpidr_el0" : "=r"(__v)); __v; })
+#elif defined(__arm__)
+# define __get_tls() \
+    ({ void** __v; __asm__("mrc p15, 0, %0, c13, c0, 3" : "=r"(__v)); __v; })
+#elif defined(__mips__)
+// On mips32r1, this goes via a kernel illegal instruction trap that's
+// optimized for v1.
+# define __get_tls() \
+    ({ register void** __v asm("v1"); \
+       __asm__(".set    push\n" \
+               ".set    mips32r2\n" \
+               "rdhwr   %0,$29\n" \
+               ".set    pop\n" : "=r"(__v)); \
+       __v; })
+#elif defined(__i386__)
+# define __get_tls() \
+    ({ void** __v; __asm__("movl %%gs:0, %0" : "=r"(__v)); __v; })
+#elif defined(__x86_64__)
+# define __get_tls() \
+    ({ void** __v; __asm__("mov %%fs:0, %0" : "=r"(__v)); __v; })
+#else
+#error "Unsupported architecture."
+#endif
+
+// The Android Bionic team has allocated a TLS slot for TSan starting with
+// Android N, given that Android doesn't currently support ELF TLS. It is used
+// to store the sanitizers' thread-specific data.
+static const int TLS_SLOT_TSAN = 8;
+
+ALWAYS_INLINE uptr *get_android_tls_ptr() {
+  return reinterpret_cast<uptr *>(&__get_tls()[TLS_SLOT_TSAN]);
+}
+
+#endif  // SANITIZER_ANDROID
+
 }  // namespace __sanitizer
 
 #endif  // SANITIZER_FREEBSD || SANITIZER_LINUX

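A short sketch of how a runtime can use the reserved Bionic slot through the helper above (ThreadState and the accessor names are illustrative, not part of this change):

    struct ThreadState;  // hypothetical per-thread sanitizer state

    static void StoreThreadState(ThreadState *thr) {
      *get_android_tls_ptr() = reinterpret_cast<uptr>(thr);
    }

    static ThreadState *LoadThreadState() {
      return reinterpret_cast<ThreadState *>(*get_android_tls_ptr());
    }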
Modified: vendor/compiler-rt/dist/lib/scudo/CMakeLists.txt
==============================================================================
--- vendor/compiler-rt/dist/lib/scudo/CMakeLists.txt	Tue May  2 18:30:52 2017	(r317686)
+++ vendor/compiler-rt/dist/lib/scudo/CMakeLists.txt	Tue May  2 18:30:55 2017	(r317687)
@@ -14,6 +14,7 @@ set(SCUDO_SOURCES
   scudo_interceptors.cpp
   scudo_new_delete.cpp
   scudo_termination.cpp
+  scudo_tls_linux.cpp
   scudo_utils.cpp)
 
 # Enable the SSE 4.2 instruction set for scudo_crc32.cpp, if available.

Modified: vendor/compiler-rt/dist/lib/scudo/scudo_allocator.cpp
==============================================================================
--- vendor/compiler-rt/dist/lib/scudo/scudo_allocator.cpp	Tue May  2 18:30:52 2017	(r317686)
+++ vendor/compiler-rt/dist/lib/scudo/scudo_allocator.cpp	Tue May  2 18:30:55 2017	(r317687)
@@ -15,6 +15,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "scudo_allocator.h"
+#include "scudo_tls.h"
 #include "scudo_utils.h"
 
 #include "sanitizer_common/sanitizer_allocator_interface.h"
@@ -26,44 +27,6 @@
 
 namespace __scudo {
 
-#if SANITIZER_CAN_USE_ALLOCATOR64
-const uptr AllocatorSpace = ~0ULL;
-const uptr AllocatorSize = 0x40000000000ULL;
-typedef DefaultSizeClassMap SizeClassMap;
-struct AP {
-  static const uptr kSpaceBeg = AllocatorSpace;
-  static const uptr kSpaceSize = AllocatorSize;
-  static const uptr kMetadataSize = 0;
-  typedef __scudo::SizeClassMap SizeClassMap;
-  typedef NoOpMapUnmapCallback MapUnmapCallback;
-  static const uptr kFlags =
-      SizeClassAllocator64FlagMasks::kRandomShuffleChunks;
-};
-typedef SizeClassAllocator64<AP> PrimaryAllocator;
-#else
-// Currently, the 32-bit Sanitizer allocator has not yet benefited from all the
-// security improvements brought to the 64-bit one. This makes the 32-bit
-// version of Scudo slightly less toughened.
-static const uptr RegionSizeLog = 20;
-static const uptr NumRegions = SANITIZER_MMAP_RANGE_SIZE >> RegionSizeLog;
-# if SANITIZER_WORDSIZE == 32
-typedef FlatByteMap<NumRegions> ByteMap;
-# elif SANITIZER_WORDSIZE == 64
-typedef TwoLevelByteMap<(NumRegions >> 12), 1 << 12> ByteMap;
-# endif  // SANITIZER_WORDSIZE
-typedef DefaultSizeClassMap SizeClassMap;
-typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE, 0, SizeClassMap,
-    RegionSizeLog, ByteMap> PrimaryAllocator;
-#endif  // SANITIZER_CAN_USE_ALLOCATOR64
-
-typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
-typedef ScudoLargeMmapAllocator SecondaryAllocator;
-typedef CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
-  ScudoBackendAllocator;
-
-static ScudoBackendAllocator &getBackendAllocator();
-
-static thread_local Xorshift128Plus Prng;
 // Global static cookie, initialized at start-up.
 static uptr Cookie;
 
@@ -88,6 +51,8 @@ INLINE u32 computeCRC32(u32 Crc, uptr Da
 #endif  // defined(__SSE4_2__)
 }
 
+static ScudoBackendAllocator &getBackendAllocator();
+
 struct ScudoChunk : UnpackedHeader {
   // We can't use the offset member of the chunk itself, as we would double
   // fetch it without any warranty that it wouldn't have been tampered. To
@@ -188,32 +153,44 @@ ScudoChunk *getScudoChunk(uptr UserBeg) 
   return reinterpret_cast<ScudoChunk *>(UserBeg - AlignedChunkHeaderSize);
 }
 
-static bool ScudoInitIsRunning = false;
+struct AllocatorOptions {
+  u32 QuarantineSizeMb;
+  u32 ThreadLocalQuarantineSizeKb;
+  bool MayReturnNull;
+  s32 ReleaseToOSIntervalMs;
+  bool DeallocationTypeMismatch;
+  bool DeleteSizeMismatch;
+  bool ZeroContents;
 
-static pthread_once_t GlobalInited = PTHREAD_ONCE_INIT;
-static pthread_key_t PThreadKey;
+  void setFrom(const Flags *f, const CommonFlags *cf);
+  void copyTo(Flags *f, CommonFlags *cf) const;
+};
+
+void AllocatorOptions::setFrom(const Flags *f, const CommonFlags *cf) {
+  MayReturnNull = cf->allocator_may_return_null;
+  ReleaseToOSIntervalMs = cf->allocator_release_to_os_interval_ms;
+  QuarantineSizeMb = f->QuarantineSizeMb;
+  ThreadLocalQuarantineSizeKb = f->ThreadLocalQuarantineSizeKb;
+  DeallocationTypeMismatch = f->DeallocationTypeMismatch;
+  DeleteSizeMismatch = f->DeleteSizeMismatch;
+  ZeroContents = f->ZeroContents;
+}
 
-static thread_local bool ThreadInited = false;
-static thread_local bool ThreadTornDown = false;
-static thread_local AllocatorCache Cache;
-
-static void teardownThread(void *p) {
-  uptr v = reinterpret_cast<uptr>(p);
-  // The glibc POSIX thread-local-storage deallocation routine calls user
-  // provided destructors in a loop of PTHREAD_DESTRUCTOR_ITERATIONS.
-  // We want to be called last since other destructors might call free and the
-  // like, so we wait until PTHREAD_DESTRUCTOR_ITERATIONS before draining the
-  // quarantine and swallowing the cache.
-  if (v < PTHREAD_DESTRUCTOR_ITERATIONS) {
-    pthread_setspecific(PThreadKey, reinterpret_cast<void *>(v + 1));
-    return;
-  }
-  drainQuarantine();
-  getBackendAllocator().DestroyCache(&Cache);
-  ThreadTornDown = true;
+void AllocatorOptions::copyTo(Flags *f, CommonFlags *cf) const {
+  cf->allocator_may_return_null = MayReturnNull;
+  cf->allocator_release_to_os_interval_ms = ReleaseToOSIntervalMs;
+  f->QuarantineSizeMb = QuarantineSizeMb;
+  f->ThreadLocalQuarantineSizeKb = ThreadLocalQuarantineSizeKb;
+  f->DeallocationTypeMismatch = DeallocationTypeMismatch;
+  f->DeleteSizeMismatch = DeleteSizeMismatch;
+  f->ZeroContents = ZeroContents;
 }
 
-static void initInternal() {
+static void initScudoInternal(const AllocatorOptions &Options);
+
+static bool ScudoInitIsRunning = false;
+
+void initScudo() {
   SanitizerToolName = "Scudo";
   CHECK(!ScudoInitIsRunning && "Scudo init calls itself!");
   ScudoInitIsRunning = true;
@@ -227,25 +204,13 @@ static void initInternal() {
 
   AllocatorOptions Options;
   Options.setFrom(getFlags(), common_flags());
-  initAllocator(Options);
+  initScudoInternal(Options);
 
-  MaybeStartBackgroudThread();
+  // TODO(kostyak): determine if MaybeStartBackgroudThread could be of some use.
 
   ScudoInitIsRunning = false;
 }
 
-static void initGlobal() {
-  pthread_key_create(&PThreadKey, teardownThread);
-  initInternal();
-}
-
-static void NOINLINE initThread() {
-  pthread_once(&GlobalInited, initGlobal);
-  pthread_setspecific(PThreadKey, reinterpret_cast<void *>(1));
-  getBackendAllocator().InitCache(&Cache);
-  ThreadInited = true;
-}
-
 struct QuarantineCallback {
   explicit QuarantineCallback(AllocatorCache *Cache)
     : Cache_(Cache) {}
@@ -278,26 +243,20 @@ struct QuarantineCallback {
 
 typedef Quarantine<QuarantineCallback, ScudoChunk> ScudoQuarantine;
 typedef ScudoQuarantine::Cache ScudoQuarantineCache;
-static thread_local ScudoQuarantineCache ThreadQuarantineCache;
+COMPILER_CHECK(sizeof(ScudoQuarantineCache) <=
+               sizeof(ScudoThreadContext::QuarantineCachePlaceHolder));
 
-void AllocatorOptions::setFrom(const Flags *f, const CommonFlags *cf) {
-  MayReturnNull = cf->allocator_may_return_null;
-  ReleaseToOSIntervalMs = cf->allocator_release_to_os_interval_ms;
-  QuarantineSizeMb = f->QuarantineSizeMb;
-  ThreadLocalQuarantineSizeKb = f->ThreadLocalQuarantineSizeKb;
-  DeallocationTypeMismatch = f->DeallocationTypeMismatch;
-  DeleteSizeMismatch = f->DeleteSizeMismatch;
-  ZeroContents = f->ZeroContents;
+AllocatorCache *getAllocatorCache(ScudoThreadContext *ThreadContext) {
+  return &ThreadContext->Cache;
 }
 
-void AllocatorOptions::copyTo(Flags *f, CommonFlags *cf) const {
-  cf->allocator_may_return_null = MayReturnNull;
-  cf->allocator_release_to_os_interval_ms = ReleaseToOSIntervalMs;
-  f->QuarantineSizeMb = QuarantineSizeMb;
-  f->ThreadLocalQuarantineSizeKb = ThreadLocalQuarantineSizeKb;
-  f->DeallocationTypeMismatch = DeallocationTypeMismatch;
-  f->DeleteSizeMismatch = DeleteSizeMismatch;
-  f->ZeroContents = ZeroContents;
+ScudoQuarantineCache *getQuarantineCache(ScudoThreadContext *ThreadContext) {
+  return reinterpret_cast<
+      ScudoQuarantineCache *>(ThreadContext->QuarantineCachePlaceHolder);
+}
+
+Xorshift128Plus *getPrng(ScudoThreadContext *ThreadContext) {
+  return &ThreadContext->Prng;
 }
 
 struct ScudoAllocator {
@@ -313,6 +272,7 @@ struct ScudoAllocator {
   StaticSpinMutex FallbackMutex;
   AllocatorCache FallbackAllocatorCache;
   ScudoQuarantineCache FallbackQuarantineCache;
+  Xorshift128Plus FallbackPrng;
 
   bool DeallocationTypeMismatch;
   bool ZeroContents;
@@ -361,13 +321,13 @@ struct ScudoAllocator {
         static_cast<uptr>(Options.QuarantineSizeMb) << 20,
         static_cast<uptr>(Options.ThreadLocalQuarantineSizeKb) << 10);
     BackendAllocator.InitCache(&FallbackAllocatorCache);
-    Cookie = Prng.getNext();
+    FallbackPrng.initFromURandom();
+    Cookie = FallbackPrng.getNext();
   }
 
   // Helper function that checks for a valid Scudo chunk. nullptr isn't.
   bool isValidPointer(const void *UserPtr) {
-    if (UNLIKELY(!ThreadInited))
-      initThread();
+    initThreadMaybe();
     if (!UserPtr)
       return false;
     uptr UserBeg = reinterpret_cast<uptr>(UserPtr);
@@ -379,8 +339,7 @@ struct ScudoAllocator {
   // Allocates a chunk.
   void *allocate(uptr Size, uptr Alignment, AllocType Type,
                  bool ForceZeroContents = false) {
-    if (UNLIKELY(!ThreadInited))
-      initThread();
+    initThreadMaybe();
     if (UNLIKELY(!IsPowerOfTwo(Alignment))) {
       dieWithMessage("ERROR: alignment is not a power of 2\n");
     }
@@ -407,11 +366,16 @@ struct ScudoAllocator {
     bool FromPrimary = PrimaryAllocator::CanAllocate(NeededSize, MinAlignment);
 
     void *Ptr;
+    uptr Salt;
     uptr AllocationAlignment = FromPrimary ? MinAlignment : Alignment;
-    if (LIKELY(!ThreadTornDown)) {
-      Ptr = BackendAllocator.Allocate(&Cache, NeededSize, AllocationAlignment);
+    ScudoThreadContext *ThreadContext = getThreadContext();
+    if (LIKELY(ThreadContext)) {
+      Salt = getPrng(ThreadContext)->getNext();
+      Ptr = BackendAllocator.Allocate(getAllocatorCache(ThreadContext),
+                                      NeededSize, AllocationAlignment);
     } else {
       SpinMutexLock l(&FallbackMutex);
+      Salt = FallbackPrng.getNext();
       Ptr = BackendAllocator.Allocate(&FallbackAllocatorCache, NeededSize,
                                       AllocationAlignment);
     }
@@ -453,7 +417,7 @@ struct ScudoAllocator {
       if (TrailingBytes)
         Header.SizeOrUnusedBytes = PageSize - TrailingBytes;
     }
-    Header.Salt = static_cast<u8>(Prng.getNext());
+    Header.Salt = static_cast<u8>(Salt);
     getScudoChunk(UserBeg)->storeHeader(&Header);
     void *UserPtr = reinterpret_cast<void *>(UserBeg);
     // if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(UserPtr, Size);
@@ -462,16 +426,17 @@ struct ScudoAllocator {
 
   // Place a chunk in the quarantine. In the event of a zero-sized quarantine,
   // we directly deallocate the chunk, otherwise the flow would lead to the
-  // chunk being checksummed twice, once before Put and once in Recycle, with
-  // no additional security value.
+  // chunk being loaded (and checked) twice, and stored (and checksummed) once,
+  // with no additional security value.
   void quarantineOrDeallocateChunk(ScudoChunk *Chunk, UnpackedHeader *Header,
                                    uptr Size) {
     bool BypassQuarantine = (AllocatorQuarantine.GetCacheSize() == 0);
     if (BypassQuarantine) {
       Chunk->eraseHeader();
       void *Ptr = Chunk->getAllocBeg(Header);
-      if (LIKELY(!ThreadTornDown)) {
-        getBackendAllocator().Deallocate(&Cache, Ptr);
+      ScudoThreadContext *ThreadContext = getThreadContext();
+      if (LIKELY(ThreadContext)) {
+        getBackendAllocator().Deallocate(getAllocatorCache(ThreadContext), Ptr);
       } else {
         SpinMutexLock Lock(&FallbackMutex);
         getBackendAllocator().Deallocate(&FallbackAllocatorCache, Ptr);
@@ -480,9 +445,12 @@ struct ScudoAllocator {
       UnpackedHeader NewHeader = *Header;
       NewHeader.State = ChunkQuarantine;
       Chunk->compareExchangeHeader(&NewHeader, Header);
-      if (LIKELY(!ThreadTornDown)) {
-        AllocatorQuarantine.Put(&ThreadQuarantineCache,
-                                QuarantineCallback(&Cache), Chunk, Size);
+      ScudoThreadContext *ThreadContext = getThreadContext();
+      if (LIKELY(ThreadContext)) {
+        AllocatorQuarantine.Put(getQuarantineCache(ThreadContext),
+                                QuarantineCallback(
+                                    getAllocatorCache(ThreadContext)),
+                                Chunk, Size);
       } else {
         SpinMutexLock l(&FallbackMutex);
         AllocatorQuarantine.Put(&FallbackQuarantineCache,
@@ -495,8 +463,7 @@ struct ScudoAllocator {
   // Deallocates a Chunk, which means adding it to the delayed free list (or
   // Quarantine).
   void deallocate(void *UserPtr, uptr DeleteSize, AllocType Type) {
-    if (UNLIKELY(!ThreadInited))
-      initThread();
+    initThreadMaybe();
     // if (&__sanitizer_free_hook) __sanitizer_free_hook(UserPtr);
     if (!UserPtr)
       return;
@@ -542,8 +509,7 @@ struct ScudoAllocator {
   // Reallocates a chunk. We can save on a new allocation if the new requested
   // size still fits in the chunk.
   void *reallocate(void *OldPtr, uptr NewSize) {
-    if (UNLIKELY(!ThreadInited))
-      initThread();
+    initThreadMaybe();
     uptr UserBeg = reinterpret_cast<uptr>(OldPtr);
     if (UNLIKELY(!IsAligned(UserBeg, MinAlignment))) {
       dieWithMessage("ERROR: attempted to reallocate a chunk not properly "
@@ -585,8 +551,7 @@ struct ScudoAllocator {
 
   // Helper function that returns the actual usable size of a chunk.
   uptr getUsableSize(const void *Ptr) {
-    if (UNLIKELY(!ThreadInited))
-      initThread();
+    initThreadMaybe();
     if (!Ptr)
       return 0;
     uptr UserBeg = reinterpret_cast<uptr>(Ptr);
@@ -602,22 +567,22 @@ struct ScudoAllocator {
   }
 
   void *calloc(uptr NMemB, uptr Size) {
-    if (UNLIKELY(!ThreadInited))
-      initThread();
+    initThreadMaybe();
     uptr Total = NMemB * Size;
     if (Size != 0 && Total / Size != NMemB)  // Overflow check
       return BackendAllocator.ReturnNullOrDieOnBadRequest();
     return allocate(Total, MinAlignment, FromMalloc, true);
   }
 
-  void drainQuarantine() {
-    AllocatorQuarantine.Drain(&ThreadQuarantineCache,
-                              QuarantineCallback(&Cache));
+  void commitBack(ScudoThreadContext *ThreadContext) {
+    AllocatorCache *Cache = getAllocatorCache(ThreadContext);
+    AllocatorQuarantine.Drain(getQuarantineCache(ThreadContext),
+                              QuarantineCallback(Cache));
+    BackendAllocator.DestroyCache(Cache);
   }
 
   uptr getStats(AllocatorStat StatType) {
-    if (UNLIKELY(!ThreadInited))
-      initThread();
+    initThreadMaybe();
     uptr stats[AllocatorStatCount];
     BackendAllocator.GetStats(stats);
     return stats[StatType];
@@ -630,12 +595,18 @@ static ScudoBackendAllocator &getBackend
   return Instance.BackendAllocator;
 }
 
-void initAllocator(const AllocatorOptions &Options) {
+static void initScudoInternal(const AllocatorOptions &Options) {
   Instance.init(Options);
 }
 
-void drainQuarantine() {
-  Instance.drainQuarantine();
+void ScudoThreadContext::init() {
+  getBackendAllocator().InitCache(&Cache);
+  Prng.initFromURandom();
+  memset(QuarantineCachePlaceHolder, 0, sizeof(QuarantineCachePlaceHolder));
+}
+
+void ScudoThreadContext::commitBack() {
+  Instance.commitBack(this);
 }
 
 void *scudoMalloc(uptr Size, AllocType Type) {

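The initThreadMaybe()/getThreadContext() fast paths used throughout the allocator live in the new scudo_tls_linux.h, which falls past the truncation point of this diff. A hedged sketch of the shape they plausibly take on a platform with ELF TLS (the enum and variable names are assumptions inferred from the fallback paths above):

    // Assumed per-thread state tracking.
    enum ThreadState : u8 {
      ThreadNotInitialized, ThreadInitialized, ThreadTornDown
    };
    extern thread_local ThreadState ScudoThreadState;
    extern thread_local ScudoThreadContext ThreadLocalContext;

    ALWAYS_INLINE void initThreadMaybe() {
      if (LIKELY(ScudoThreadState != ThreadNotInitialized))
        return;
      initThread();  // slow path: sets up the cache, PRNG and quarantine
    }

    // Returns null once the thread's context has been torn down, which is
    // why callers keep the locked fallback cache/quarantine path.
    ALWAYS_INLINE ScudoThreadContext *getThreadContext() {
      if (UNLIKELY(ScudoThreadState == ThreadTornDown))
        return nullptr;
      return &ThreadLocalContext;
    }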
Modified: vendor/compiler-rt/dist/lib/scudo/scudo_allocator.h
==============================================================================
--- vendor/compiler-rt/dist/lib/scudo/scudo_allocator.h	Tue May  2 18:30:52 2017	(r317686)
+++ vendor/compiler-rt/dist/lib/scudo/scudo_allocator.h	Tue May  2 18:30:55 2017	(r317687)
@@ -53,7 +53,7 @@ struct UnpackedHeader {
   u64 Offset            : 16; // Offset from the beginning of the backend
                               // allocation to the beginning of the chunk
                               // itself, in multiples of MinAlignment. See
-                              /// comment about its maximum value and in init().
+                              // comment about its maximum value and in init().
   u64 Salt              : 8;
 };
 
@@ -62,7 +62,7 @@ COMPILER_CHECK(sizeof(UnpackedHeader) ==
 
 // Minimum alignment of 8 bytes for 32-bit, 16 for 64-bit
 const uptr MinAlignmentLog = FIRST_32_SECOND_64(3, 4);
-const uptr MaxAlignmentLog = 24; // 16 MB
+const uptr MaxAlignmentLog = 24;  // 16 MB
 const uptr MinAlignment = 1 << MinAlignmentLog;
 const uptr MaxAlignment = 1 << MaxAlignmentLog;
 
@@ -70,21 +70,44 @@ const uptr ChunkHeaderSize = sizeof(Pack
 const uptr AlignedChunkHeaderSize =
     (ChunkHeaderSize + MinAlignment - 1) & ~(MinAlignment - 1);
 
-struct AllocatorOptions {
-  u32 QuarantineSizeMb;
-  u32 ThreadLocalQuarantineSizeKb;
-  bool MayReturnNull;
-  s32 ReleaseToOSIntervalMs;
-  bool DeallocationTypeMismatch;
-  bool DeleteSizeMismatch;
-  bool ZeroContents;
-
-  void setFrom(const Flags *f, const CommonFlags *cf);
-  void copyTo(Flags *f, CommonFlags *cf) const;
+#if SANITIZER_CAN_USE_ALLOCATOR64
+const uptr AllocatorSpace = ~0ULL;
+const uptr AllocatorSize = 0x40000000000ULL;  // 4TB.
+typedef DefaultSizeClassMap SizeClassMap;
+struct AP {
+  static const uptr kSpaceBeg = AllocatorSpace;
+  static const uptr kSpaceSize = AllocatorSize;
+  static const uptr kMetadataSize = 0;
+  typedef __scudo::SizeClassMap SizeClassMap;
+  typedef NoOpMapUnmapCallback MapUnmapCallback;
+  static const uptr kFlags =
+      SizeClassAllocator64FlagMasks::kRandomShuffleChunks;
 };
+typedef SizeClassAllocator64<AP> PrimaryAllocator;
+#else
+// Currently, the 32-bit Sanitizer allocator has not yet benefited from all the
+// security improvements brought to the 64-bit one. This makes the 32-bit
+// version of Scudo slightly less toughened.
+static const uptr RegionSizeLog = 20;
+static const uptr NumRegions = SANITIZER_MMAP_RANGE_SIZE >> RegionSizeLog;
+# if SANITIZER_WORDSIZE == 32
+typedef FlatByteMap<NumRegions> ByteMap;
+# elif SANITIZER_WORDSIZE == 64
+typedef TwoLevelByteMap<(NumRegions >> 12), 1 << 12> ByteMap;
+# endif  // SANITIZER_WORDSIZE
+typedef DefaultSizeClassMap SizeClassMap;
+typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE, 0, SizeClassMap,
+    RegionSizeLog, ByteMap> PrimaryAllocator;
+#endif  // SANITIZER_CAN_USE_ALLOCATOR64
+
+#include "scudo_allocator_secondary.h"
 
-void initAllocator(const AllocatorOptions &options);
-void drainQuarantine();
+typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
+typedef ScudoLargeMmapAllocator SecondaryAllocator;
+typedef CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
+    ScudoBackendAllocator;
+
+void initScudo();
 
 void *scudoMalloc(uptr Size, AllocType Type);
 void scudoFree(void *Ptr, AllocType Type);
@@ -98,8 +121,6 @@ int scudoPosixMemalign(void **MemPtr, up
 void *scudoAlignedAlloc(uptr Alignment, uptr Size);
 uptr scudoMallocUsableSize(void *Ptr);
 
-#include "scudo_allocator_secondary.h"
-
 }  // namespace __scudo
 
 #endif  // SCUDO_ALLOCATOR_H_

Added: vendor/compiler-rt/dist/lib/scudo/scudo_tls.h
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ vendor/compiler-rt/dist/lib/scudo/scudo_tls.h	Tue May  2 18:30:55 2017	(r317687)
@@ -0,0 +1,40 @@
+//===-- scudo_tls.h ---------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// Scudo thread local structure definition.
+/// Implementation will differ based on the thread local storage primitives
+/// offered by the underlying platform.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_TLS_H_
+#define SCUDO_TLS_H_
+
+#include "scudo_allocator.h"
+#include "scudo_utils.h"
+
+namespace __scudo {
+
+struct ALIGNED(64) ScudoThreadContext {
+ public:
+  AllocatorCache Cache;
+  Xorshift128Plus Prng;
+  uptr QuarantineCachePlaceHolder[4];
+  void init();
+  void commitBack();
+};
+
+void initThread();
+
+// Fastpath functions are defined in the following platform specific headers.
+#include "scudo_tls_linux.h"

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***


